def start(self):
    """Detect gripper events in the no-camera bag, then replay each one.

    First pass: read every message from ``self._nocam_path``, feed the
    modeled messages into a GripperEventProcessor, and collect its events.
    Second pass: for each event, replay the Kinect image topic from
    ``self._cam_path`` over the event's time window, then block until an
    object name has been written for it (``self._num_written`` is
    presumably incremented by a subscriber callback elsewhere in this
    class -- NOTE(review): confirm which callback bumps it).
    """
    bag = open_bag(self._nocam_path)
    bag_start_time = self._get_start_time(self._cam_path)
    gripper_event_processor = GripperEventProcessor(bag_start_time)
    try:
        for topic, message, time in bag.read_messages():
            model = message_factory.model(message)
            if model is None:
                # Message type the factory doesn't know how to model.
                continue
            gripper_event_processor.update(topic, model, time)
    finally:
        # Fix: close the bag even if message processing raises.
        bag.close()

    events = gripper_event_processor.events()
    for i, event in enumerate(events):
        self._events.append(event)
        rospy.logwarn('Event {} of {}'.format(i + 1, len(events)))
        rospy.logwarn('Start time: {}'.format(event.start_time))
        rospy.logwarn('Duration: {}'.format(event.duration))
        # Replay only the Kinect image stream for this event's window.
        # List-form argv (shell=False) is already injection-safe.
        subprocess.call([
            'rosbag', 'play', self._cam_path,
            '--topics', topics.KINECT_IMAGE,
            '--start', str(event.start_time),
            '--duration', str(event.duration),
            '--rate=8',
        ])
        has_prompted = False
        # Wait until the user has published a name for this event.
        while self._num_written < i + 1:
            if not has_prompted:
                rospy.logwarn(
                    'Publish the object name to gripper_playback/objects.')
                has_prompted = True
            # Fix: the original loop busy-waited at 100% CPU; sleep so the
            # subscriber callback gets a chance to update _num_written.
            rospy.sleep(0.1)
def process_code(path_and_timeline):
    """Extract features from a webcam coding.

    Args:
        path_and_timeline: ``(path, object_timeline)`` tuple. Kept as a
            single argument (explicitly unpacked instead of the removed
            Python 2 tuple-parameter syntax, PEP 3113) so map-style
            callers such as ``pool.map`` keep working unchanged.

    Returns:
        ``(object_stats, timeline)`` from the Code processor, or ``None``
        if the bag at ``path`` could not be opened.
    """
    path, object_timeline = path_and_timeline
    print('Processing', path)
    bag = open_bag(path)
    if bag is None:
        return None
    code_processor = processors.Code(object_timeline)
    try:
        for topic, message, time in bag.read_messages():
            model = message_factory.model(message)
            if model is None:
                # Unmodelable message type; skip it.
                continue
            code_processor.update(topic, model, time)
    finally:
        # Fix: close the bag even if message processing raises.
        bag.close()
    return code_processor.object_stats(), code_processor.timeline()
def process_experiment(path_and_timeline):
    """Extract features of the bag file.

    Runs four processors (time taken, camera movement, marker movement,
    grasp count) over the experiment topics of one bag, merges their
    per-object statistics, and derives ``other_time`` as the time not
    spent moving the camera or the markers.

    Args:
        path_and_timeline: ``(path, object_timeline)`` tuple. Kept as a
            single argument (explicitly unpacked instead of the removed
            Python 2 tuple-parameter syntax, PEP 3113) so map-style
            callers such as ``pool.map`` keep working unchanged.

    Returns:
        ``(object_stats, timeline)``, or ``None`` if the bag at ``path``
        could not be opened.
    """
    path, object_timeline = path_and_timeline
    print('Processing', path)
    bag = open_bag(path)
    if bag is None:
        return None
    object_stats = ObjectStats()
    time_taken_processor = processors.TimeTaken(object_timeline)
    camera_movement_processor = processors.CameraMovementTime(object_timeline)
    marker_movement_processor = processors.MarkerMovementTime(object_timeline)
    grasp_count_processor = processors.GraspCount(object_timeline)
    try:
        for topic, message, time in bag.read_messages(topics=EXPERIMENT_TOPICS):
            model = message_factory.model(message)
            if model is None:
                # Unmodelable message type; skip it.
                continue
            time_taken_processor.update(topic, model, time)
            camera_movement_processor.update(topic, model, time)
            marker_movement_processor.update(topic, model, time)
            grasp_count_processor.update(topic, model, time)
    finally:
        # Fix: close the bag even if message processing raises.
        bag.close()
    # Flush the final open interval before reading the stats.
    time_taken_processor.update_last()
    object_stats.update(time_taken_processor.object_stats())
    object_stats.update(camera_movement_processor.object_stats())
    object_stats.update(marker_movement_processor.object_stats())
    object_stats.update(grasp_count_processor.object_stats())
    for obj, stats in object_stats.items():
        # Time neither spent moving the camera nor moving markers.
        stats['other_time'] = (
            stats['time_taken']
            - stats['camera_movement_time']
            - stats['marker_movement_time']
        )
    timeline = build_timeline(
        marker_movement_processor.timeline(),
        camera_movement_processor.timeline())
    return object_stats, timeline