def clear_probability_from_objects(self, target, obj):
    """Remove probability mass inside ``obj`` from the target filter(s).

    Runs a Variational Bayes update with the measurement 'Not Inside'
    against ``obj``'s 'Inside' binary softmax model, replacing each
    affected filter's probability with the resulting Gaussian mixture.

    Parameters
    ----------
    target : str or None
        Name of a single filter to update. If None, every filter except
        the 'combined' filter is updated.
    obj : MapObject
        Map object whose interior is being cleared; its spatial relation
        models are defined lazily here if missing.
    """
    logging.info('Clearing probability from {}'.format(obj.name))
    fusion_engine = self.robot.fusion_engine
    vb = VariationalBayes()

    # Relation models are built lazily; make sure they exist before use.
    if not hasattr(obj, 'relations'):
        obj.define_relations()

    # Select which filters to update, then apply one shared update loop
    # (the original duplicated the VB update in both branches).
    if target is not None:
        filters_to_update = [fusion_engine.filters[target]]
    else:
        # Update all but the combined filter.
        filters_to_update = [filter_ for name, filter_
                             in fusion_engine.filters.iteritems()
                             if name != 'combined']

    likelihood = obj.relations.binary_models['Inside']
    for filter_ in filters_to_update:
        mu, sigma, beta = vb.update(measurement='Not Inside',
                                    likelihood=likelihood,
                                    prior=filter_.probability,
                                    use_LWIS=False,
                                    )
        filter_.probability = GaussianMixture(beta, mu, sigma)
def __init__(self, robot_pose=(0, 0, 0), visible=True,
             default_color=cnames['yellow'], element_dict=None,
             min_view_dist=0.3, max_view_dist=1.0):
    """Create a camera sensor with a nominal 90-degree viewcone.

    Parameters
    ----------
    robot_pose : tuple of float
        Initial (x, y, theta) pose of the robot carrying the camera.
    visible : bool
        Kept for interface compatibility; not read in this constructor.
    default_color :
        Kept for interface compatibility; not read in this constructor.
    element_dict : dict, optional
        Shared map-element dictionary. Defaults to a fresh empty dict.
    min_view_dist : float
        Near edge of the viewcone [m].
    max_view_dist : float
        Far edge of the viewcone [m].
    """
    # Use a None sentinel instead of a literal `{}` default: a mutable
    # default would be shared across every Camera instance.
    self.element_dict = {} if element_dict is None else element_dict

    # Define nominal viewcone (a closed triangle fan from the origin).
    self.min_view_dist = min_view_dist  # [m]
    self.max_view_dist = max_view_dist  # [m]
    self.view_angle = math.pi / 2  # [rad]
    half_angle = self.view_angle / 2  # hoist the repeated subexpression
    viewcone_pts = [
        (0, 0),
        (self.max_view_dist * math.cos(half_angle),
         self.max_view_dist * math.sin(half_angle)),
        (self.max_view_dist * math.cos(-half_angle),
         self.max_view_dist * math.sin(-half_angle)),
        (0, 0),
    ]

    # Create SoftMax model for camera detection.
    self.detection_model = camera_model_2D(self.min_view_dist,
                                           self.max_view_dist)

    # Instantiate Sensor superclass object.
    update_rate = 1  # [hz]
    has_physical_dimensions = True
    super(Camera, self).__init__(update_rate, has_physical_dimensions)

    # Ideal viewcone: hidden reference geometry, never drawn.
    self.ideal_viewcone = MapObject('Ideal viewcone',
                                    viewcone_pts,
                                    visible=False,
                                    blocks_camera=False,
                                    color_str='pink',
                                    pose=robot_pose,
                                    has_relations=False,
                                    centroid_at_origin=False,
                                    )
    # Actual viewcone: the translucent cone rendered on the map.
    self.viewcone = MapObject('Viewcone',
                              viewcone_pts,
                              alpha=0.2,
                              visible=True,
                              blocks_camera=False,
                              color_str='lightyellow',
                              pose=robot_pose,
                              has_relations=False,
                              centroid_at_origin=False,
                              )
    self.view_pose = (0, 0, 0)

    # <>TODO: Add in and test an offset of (-0.1,-0.1)
    self.offset = (0, 0, 0)  # [m] offset (x,y,theta) from center of robot
    self._move_viewcone(robot_pose)

    # Set up the VB fusion parameters.
    self.vb = VariationalBayes()
def __init__(self, map_=None, web_interface_topic='python', false_alarm_prob=0.2): self.update_rate = None self.has_physical_dimensions = False self.speed_model = speed_model() self.false_alarm_prob = false_alarm_prob super(Human, self).__init__(self.update_rate, self.has_physical_dimensions) # self.certainties = ['think', 'know'] self.certainties = ['know'] self.positivities = ['is not', 'is'] # <>TODO: oh god wtf why does order matter self.relations = { 'object': [ 'behind', 'in front of', 'left of', 'right of', 'near', ], 'area': ['inside', 'near', 'outside'] } self.movement_types = ['moving', 'stopped'] self.movement_qualities = ['slowly', 'moderately', 'quickly'] self.groundings = {} self.groundings['area'] = map_.areas self.groundings['object'] = {} for cop_name, cop in map_.cops.iteritems(): # if cop.has_relations: self.groundings['object'][cop_name] = cop for object_name, obj in map_.objects.iteritems(): if obj.has_relations: self.groundings['object'][object_name] = obj self.target_names = ['nothing', 'a robot'] + map_.robbers.keys() self.utterance = '' self.new_update = False # Set up the VB fusion parameters self.vb = VariationalBayes() if web_interface_topic != 'python': # Subscribe to web interface print web_interface_topic import rospy from std_msgs.msg import String rospy.Subscriber(web_interface_topic, String, self.callback)