Example #1
def sample_landmark(class_, landmarks, trajector):
    # Weight each landmark by the inverse of its distance to the trajector
    # (power 1.5, smoothed by class_.epsilon) and sample one index.
    distances = array([lmk.distance_to(trajector.representation) for lmk in landmarks])
    scores = 1.0 / (distances**1.5 + class_.epsilon)
    scores[distances == 0] = 0  # exclude landmarks at zero distance from the trajector
    lm_probabilities = scores / sum(scores)
    index = lm_probabilities.cumsum().searchsorted(random.sample(1))[0]
    return index
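All of these examples draw an index from a discrete distribution with the same inverse-CDF trick: normalize the scores into probabilities, take their cumulative sum, and search it with a single uniform draw (array and random in these snippets are numpy's; random.sample is numpy's legacy alias of random_sample). Examples #8 and #12 below reference a categorical_sample helper whose implementation is not shown; presumably it wraps the same idea, roughly like this minimal sketch:

    import numpy as np

    def categorical_sample(probabilities):
        # Cumulative sum turns the probabilities into a CDF; searchsorted
        # against one uniform draw in [0, 1) picks each index with the
        # corresponding probability.
        cdf = np.asarray(probabilities, dtype=float).cumsum()
        return int(cdf.searchsorted(np.random.sample(1))[0])

    # Usage: index 2 is returned roughly half of the time.
    index = categorical_sample([0.2, 0.3, 0.5])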
Example #2
    def sample_landmark(self, landmarks, trajector):
        """ Weight by inverse of distance to landmark center and choose probabilistically  """
        epsilon = 0.02
        distances = array(
            [
                trajector.distance_to(lmk.representation)
                if not (
                    isinstance(lmk.representation, RectangleRepresentation)
                    and lmk.representation.contains(trajector.representation)
                )
                else 9 * epsilon  # landmark contains the trajector: use a fixed small distance instead
                for lmk in landmarks
            ]
        )
        # distances = array([trajector.distance_to( lmk )
        #     if not (isinstance(lmk.representation,RectangleRepresentation) and lmk.representation.contains(trajector))
        #     else min(poly_to_vec_distance(lmk.representation.get_geometry().to_polygon(), trajector.representation.location),lmk.representation.middle.distance_to(trajector.representation.location))
        #     for lmk in landmarks])
        # scores = 1.0/(distances + epsilon)**0.5
        std = 0.1
        scores = exp(-(distances / std) ** 2)
        lm_probabilities = scores / sum(scores)
        index = lm_probabilities.cumsum().searchsorted(random.sample(1))[0]

        sampled_landmark = landmarks[index]
        head_on = self.get_head_on_viewpoint(sampled_landmark)
        self.set_orientations(sampled_landmark, head_on)

        return sampled_landmark, lm_probabilities[index], self.get_entropy(lm_probabilities), head_on
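Example #2 swaps the inverse-power weighting for a Gaussian falloff with scale std = 0.1, so nearby landmarks dominate much more sharply, and it additionally returns the chosen landmark, the entropy of the distribution, and a head-on viewpoint. A small worked sketch of the weighting step on made-up distances:

    from numpy import array, exp

    distances = array([0.05, 0.20, 0.60])    # made-up distances to three landmarks
    std = 0.1
    scores = exp(-(distances / std) ** 2)    # ~ [0.779, 0.018, 0.000]
    lm_probabilities = scores / sum(scores)  # ~ [0.977, 0.023, 0.000]: the nearest dominates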
Example #3
def sample_landmark(class_, landmarks, trajector):
    distances = array(
        [lmk.distance_to(trajector.representation) for lmk in landmarks])
    scores = 1.0 / (array(distances)**1.5 + class_.epsilon)
    scores[distances == 0] = 0
    lm_probabilities = scores / sum(scores)
    index = lm_probabilities.cumsum().searchsorted(random.sample(1))[0]
    return index
Example #4
def sample_point_trajector(self, bounding_box, relation, perspective, landmark, step=0.02):
    """
    Sample a point of interest given a relation and landmark.
    """
    probs, points = self.get_probabilities_box(bounding_box, relation, perspective, landmark)
    probs /= probs.sum()
    index = probs.cumsum().searchsorted(random.sample(1))[0]
    return Landmark('point', Vec2(*points[index]), None, Landmark.POINT)
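get_probabilities_box is not shown in these examples; it presumably scores a grid of candidate points (spaced by step) inside the bounding box against the given relation. Whatever produces the two arrays, the sampling step is again the CDF trick, here used to pick a coordinate pair (the arrays below are made-up stand-ins):

    from numpy import array, random

    points = [(0.00, 0.00), (0.00, 0.02), (0.02, 0.00), (0.02, 0.02)]  # candidate grid points
    probs = array([0.1, 0.4, 0.2, 0.3])                                # made-up applicability scores
    probs /= probs.sum()
    index = probs.cumsum().searchsorted(random.sample(1))[0]
    x, y = points[index]   # coordinates of the sampled point of interest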
Example #5
    def sample_relation(class_, perspective, sampled_landmark, trajector):
        # Instantiate every candidate relation, score it by its applicability
        # to the scene, and sample one instance in proportion to that score.
        rel_scores = []
        rel_instances = []

        for relation in class_.relations:
            rel_instances.append(relation(perspective, sampled_landmark, trajector))
            rel_scores.append(rel_instances[-1].is_applicable())

        rel_scores = array(rel_scores)
        rel_probabilities = rel_scores / sum(rel_scores)
        index = rel_probabilities.cumsum().searchsorted(random.sample(1))[0]
        return rel_instances[index]
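Examples #5 and #6 (the same function, reformatted) apply the sampling trick to relation applicability scores instead of distances. With made-up applicabilities, the normalization step looks like this:

    from numpy import array

    rel_scores = array([0.9, 0.3, 0.0])               # is_applicable() results (made-up values)
    rel_probabilities = rel_scores / sum(rel_scores)  # -> [0.75, 0.25, 0.0]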
Example #6
    def sample_relation(class_, perspective, sampled_landmark, trajector):
        rel_scores = []
        rel_instances = []

        for relation in class_.relations:
            rel_instances.append(
                relation(perspective, sampled_landmark, trajector))
            rel_scores.append(rel_instances[-1].is_applicable())

        rel_scores = array(rel_scores)
        rel_probabilities = rel_scores / sum(rel_scores)
        index = rel_probabilities.cumsum().searchsorted(random.sample(1))[0]
        return rel_instances[index]
Example #7
def sample_point_trajector(self,
                           bounding_box,
                           relation,
                           perspective,
                           landmark,
                           step=0.02):
    """
    Sample a point of interest given a relation and landmark.
    """
    probs, points = self.get_probabilities_box(bounding_box, relation,
                                               perspective, landmark)
    probs /= probs.sum()
    index = probs.cumsum().searchsorted(random.sample(1))[0]
    return Landmark('point', Vec2(*points[index]), None, Landmark.POINT)
Example #8
    def sample_relation(self, trajector, bounding_box, perspective, landmark, step=0.02, usebest=False):
        """
        Sample a relation given a trajector and landmark.
        Evaluate each relation and probabilistically choose the one that is likely to
        generate the trajector given a landmark.
        """
        rel_probabilities, rel_classes = self.all_relation_probs(trajector, bounding_box, perspective, landmark, step)
        if usebest:
            index = index_max(rel_probabilities)
        else:
            index = categorical_sample(rel_probabilities)

        return rel_classes[index], rel_probabilities[index], self.get_entropy(rel_probabilities)
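get_entropy is also left undefined in these examples. It presumably returns the Shannon entropy of the probability vector, giving the caller a measure of how peaked the distribution was when the sample was drawn; a hedged sketch under that assumption:

    import numpy as np

    def get_entropy(probabilities):
        # Shannon entropy (natural log); assumed behaviour of the project's
        # get_entropy helper, whose real implementation is not shown here.
        p = np.asarray(probabilities, dtype=float)
        p = p[p > 0]                       # ignore zero-probability entries
        return float(-(p * np.log(p)).sum())

    get_entropy([0.98, 0.01, 0.01])           # ~0.11  (peaked: low entropy)
    get_entropy([1 / 3.0, 1 / 3.0, 1 / 3.0])  # ~1.10  (uniform: maximal entropy, ln 3)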
Example #9
    def evaluate_all(self):
        # Score every (distance class, degree) pair by its applicability to
        # the measured distance, then sample one pair in proportion to it.
        epsilon = 1e-6
        probs = []

        for dist in self.distance_classes:
            for degree in self.degree_classes:
                p = Measurement.get_applicability(self.distance, dist, degree) + epsilon
                probs.append([p, degree, dist])

        ps, degrees, dists = zip(*probs)
        ps = array(ps)
        ps /= sum(ps)

        index = ps.cumsum().searchsorted( random.sample(1) )[0]
        return probs[index]
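The epsilon added to every applicability in Example #9 keeps combinations that score exactly zero reachable (with a vanishingly small probability) and avoids a division by zero when all combinations score zero. For instance:

    from numpy import array

    epsilon = 1e-6
    ps = array([0.0, 0.5]) + epsilon   # raw applicabilities plus smoothing
    ps /= sum(ps)                      # ~ [2e-06, 0.999998]; nothing is exactly impossible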
Example #10
    def evaluate_all(self):
        epsilon = 1e-6
        probs = []

        for dist in self.distance_classes:
            for degree in self.degree_classes:
                p = Measurement.get_applicability(self.distance, dist,
                                                  degree) + epsilon
                probs.append([p, degree, dist])

        ps, degrees, dists = zip(*probs)
        ps = array(ps)
        ps /= sum(ps)

        index = ps.cumsum().searchsorted(random.sample(1))[0]
        return probs[index]
Example #11
    def sample_relation(self, trajector, bounding_box, perspective, landmark, step=0.02):
        """
        Sample a relation given a trajector and landmark.
        Evaluate each relation and probabilistically choose the one that is likely to
        generate the trajector given a landmark.
        """
        rel_scores = []
        rel_classes = []

        for s in [DistanceRelationSet, ContainmentRelationSet]:
            for rel in s.relations:
                rel_scores.append(
                    self.evaluate_trajector_likelihood(trajector, bounding_box, rel, perspective, landmark, step)
                )
                rel_classes.append(rel)

        # Orientation relations are scored separately; only those with a
        # positive likelihood are kept.
        ori_rel_scores = []
        for rel in OrientationRelationSet.relations:
            p = self.evaluate_trajector_likelihood(trajector, bounding_box, rel, perspective, landmark, step)
            if p > 0:
                ori_rel_scores.append((p, rel))

        if len(ori_rel_scores) > 1:
            # The code assumes no more than two orientation relations apply at once.
            assert len(ori_rel_scores) == 2

            # Sort the pair by measured distance and scale the nearer one's
            # score by the ratio of the two distances.
            dists = []
            for p, rel in ori_rel_scores:
                dists.append([rel(perspective, landmark, trajector).measurement.distance, p, rel])
            dists = sorted(dists)
            dists[0][1] *= dists[0][0] / dists[1][0]

            rel_scores.append(dists[0][1])
            rel_scores.append(dists[1][1])
            rel_classes.append(dists[0][2])
            rel_classes.append(dists[1][2])

        rel_scores = array(rel_scores)
        rel_probabilities = rel_scores / sum(rel_scores)
        index = rel_probabilities.cumsum().searchsorted(random.sample(1))[0]

        return rel_classes[index], rel_probabilities[index], self.get_entropy(rel_probabilities)
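As a worked instance of that orientation step: if both applicable orientation relations score 0.6 and 0.4 with measured distances 0.2 and 0.5, the nearer relation's score becomes 0.6 * 0.2 / 0.5 = 0.24 before both scores and classes are appended to the candidate lists.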
Example #12
    def sample_relation(self,
                        trajector,
                        bounding_box,
                        perspective,
                        landmark,
                        step=0.02,
                        usebest=False):
        """
        Sample a relation given a trajector and landmark.
        Evaluate each relation and probabilistically choose the one that is likely to
        generate the trajector given a landmark.
        """
        rel_probabilities, rel_classes = self.all_relation_probs(
            trajector, bounding_box, perspective, landmark, step)
        if usebest:
            index = index_max(rel_probabilities)
        else:
            index = categorical_sample(rel_probabilities)

        return rel_classes[index], rel_probabilities[index], self.get_entropy(
            rel_probabilities)