def probs_metric(inverse=False):
    """Sample a random point on the table, generate a sentence for it, and
    measure how well the golden posteriors recover the sampled meaning.

    Relies on enclosing-scope state: table, scene, speaker, landmarks,
    meanings, trajector, epsilon, printing, logger.  # NOTE(review): closure
    # variables -- confirm against the enclosing function.

    Returns:
        (lmk_prior, rel_prior, lmk_post, rel_post, prob, entropy, rank,
         min_edit_distance, relation_type).  On generation failure the
        priors/posteriors/prob/entropy are 0, rank is worst
        (len(meanings)-1), and both the edit distance and relation type
        are None.
    """
    rand_p = Vec2(random() * table.width + table.min_point.x,
                  random() * table.height + table.min_point.y)
    try:
        bestmeaning, bestsentence = generate_sentence(
            rand_p, False, scene, speaker, usebest=True,
            golden=inverse, printing=printing)
        sampled_landmark, sampled_relation = bestmeaning.args[0], bestmeaning.args[3]
        golden_posteriors = get_all_sentence_posteriors(
            bestsentence, meanings, golden=(not inverse), printing=printing)

        # Batch landmark probabilities once instead of per-landmark calls.
        # lmk_prior = speaker.get_landmark_probability(sampled_landmark, landmarks, PointRepresentation(rand_p))[0]
        all_lmk_probs = speaker.all_landmark_probs(
            landmarks, Landmark(None, PointRepresentation(rand_p), None))
        all_lmk_probs = dict(zip(landmarks, all_lmk_probs))

        lmk_prior = all_lmk_probs[sampled_landmark]
        head_on = speaker.get_head_on_viewpoint(sampled_landmark)
        rel_prior = speaker.get_probabilities_points(
            np.array([rand_p]), sampled_relation, head_on, sampled_landmark)
        lmk_post = golden_posteriors[sampled_landmark]
        rel_post = golden_posteriors[sampled_relation]

        # Joint score for every candidate meaning, reweighted by the
        # landmark/relation probabilities at the sampled point.
        ps = np.array([golden_posteriors[lmk] * golden_posteriors[rel]
                       for lmk, rel in meanings])
        idx = None  # position of the sampled meaning inside ps
        for i, (lmk, rel) in enumerate(meanings):
            # logger( '%f, %s' % (ps[i], m2s(lmk,rel)))
            head_on = speaker.get_head_on_viewpoint(lmk)
            # ps[i] *= speaker.get_landmark_probability(lmk, landmarks, PointRepresentation(rand_p))[0]
            ps[i] *= all_lmk_probs[lmk]
            ps[i] *= speaker.get_probabilities_points(
                np.array([rand_p]), rel, head_on, lmk)
            if lmk == sampled_landmark and rel == sampled_relation:
                idx = i

        ps += epsilon
        ps = ps / ps.sum()
        prob = ps[idx]
        rank = sorted(ps, reverse=True).index(prob)
        entropy = entropy_of_probs(ps)
    except (ParseError, RuntimeError) as e:
        logger(e)
        # BUG FIX: the original fell through after the except branch and
        # dereferenced sampled_landmark/bestsentence, which are unbound when
        # generate_sentence raised (NameError); its distances=[[None]]
        # fallback was also dead code, clobbered by the unconditional
        # distances=[] below.  Return the intended failure defaults directly:
        # zeros, worst rank, no edit distance, no relation type.
        return 0, 0, 0, 0, 0, 0, len(meanings) - 1, None, None

    head_on = speaker.get_head_on_viewpoint(sampled_landmark)
    all_descs = speaker.get_all_meaning_descriptions(
        trajector, scene, sampled_landmark, sampled_relation, head_on, 1)
    distances = []
    for desc in all_descs:
        distances.append([edit_distance(bestsentence, desc), desc])
    distances.sort()
    return lmk_prior, rel_prior, lmk_post, rel_post, \
        prob, entropy, rank, distances[0][0], type(sampled_relation)
def probs_metric(): meaning, sentence = generate_sentence(rand_p, consistent, scene, speaker, usebest=True, printing=printing) sampled_landmark,sampled_relation = meaning.args[0],meaning.args[3] print meaning.args[0],meaning.args[3], len(sentence) if sentence == "": prob = 0 entropy = 0 else: logger( 'Generated sentence: %s' % sentence) try: golden_posteriors = get_all_sentence_posteriors(sentence, meanings, golden=True, printing=printing) epsilon = 1e-15 ps = np.array([golden_posteriors[lmk]*golden_posteriors[rel] for lmk, rel in meanings]) temp = None for i,p in enumerate(ps): lmk,rel = meanings[i] # logger( '%f, %s' % (p, m2s(lmk,rel))) head_on = speaker.get_head_on_viewpoint(lmk) ps[i] *= speaker.get_landmark_probability(lmk, landmarks, PointRepresentation(rand_p))[0] ps[i] *= speaker.get_probabilities_points( np.array([rand_p]), rel, head_on, lmk) if lmk == meaning.args[0] and rel == meaning.args[3]: temp = i ps += epsilon ps = ps/ps.sum() prob = ps[temp] rank = sorted(ps, reverse=True).index(prob) entropy = entropy_of_probs(ps) except ParseError as e: logger( e ) prob = 0 rank = len(meanings)-1 entropy = 0 head_on = speaker.get_head_on_viewpoint(sampled_landmark) all_descs = speaker.get_all_meaning_descriptions(trajector, scene, sampled_landmark, sampled_relation, head_on, 1) distances = [] for desc in all_descs: distances.append([edit_distance( sentence, desc ), desc]) distances.sort() return prob,entropy,rank,distances[0][0]