def process_image(self, image: Image, timestamp: float) -> None:
    start_time = time.time()
    logging.getLogger(__name__).debug(
        "Processing image at time {0} ...".format(timestamp))

    # The estimated motion is the pose of the previous frame relative to the next one
    tracking, estimated_motion = self.handle_process_image(self._viso, image, timestamp)
    logging.getLogger(__name__).debug("    got estimated motion ...")
    end_time = time.time()

    frame_result = FrameResult(
        timestamp=timestamp,
        image=image.pk,
        processing_time=end_time - start_time,
        pose=image.camera_pose,
        tracking_state=TrackingState.OK if tracking
        else TrackingState.LOST if self._has_chosen_origin
        else TrackingState.NOT_INITIALIZED,
        estimated_motion=estimated_motion,
        num_matches=self._viso.getNumberOfMatches()
    )
    if tracking and not self._has_chosen_origin:
        # Set the initial pose estimate to the identity, so we can infer the later ones from the motions
        self._has_chosen_origin = True
        frame_result.estimated_pose = tf.Transform()
        frame_result.estimated_motion = None  # This will always be the identity on the first valid frame
    self._frame_results.append(frame_result)
    logging.getLogger(__name__).debug("    Processing done.")
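# A rough sketch (not part of the original code) of how the later pose estimates could be
# inferred from the chain of estimated motions once the origin frame has been pinned to
# tf.Transform(). It assumes the convention used in make_trials below: each estimated_motion
# is the current frame's pose relative to the previous frame, so that
# previous_pose.find_independent(motion) yields the current absolute pose.
def infer_poses_from_motions(frame_results):
    previous_pose = None
    for frame_result in frame_results:
        if frame_result.estimated_pose is not None:
            # The chosen origin frame: start chaining from its (identity) pose
            previous_pose = frame_result.estimated_pose
        elif previous_pose is not None and frame_result.estimated_motion is not None:
            # Compose the relative motion onto the previous absolute pose
            frame_result.estimated_pose = previous_pose.find_independent(frame_result.estimated_motion)
            previous_pose = frame_result.estimated_pose
    return frame_results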
def test_profile_constructor_infer_motions(self, ):
    import cProfile as profile

    repeats = 3
    stats_file = "create_slam_result_motions.prof"
    random = np.random.RandomState(13)

    frame_results = [
        FrameResult(
            timestamp=idx + random.normal(0, 0.01),
            image=image,
            processing_time=random.uniform(0.01, 1),
            pose=image.camera_pose,
            estimated_motion=Transform(
                (14 + random.normal(0, 1), 0.9 + random.normal(0, 0.05), 0.1 + random.normal(0, 0.05)),
                tf3d.quaternions.axangle2quat(
                    (1, 2, 4), 5 * np.pi / (2 * self.num_images) + random.normal(0, np.pi / 64)),
                w_first=True
            ) if idx > (self.num_images // 10) else None,
            tracking_state=TrackingState.OK,
            num_features=random.randint(10, 1000),
            num_matches=random.randint(10, 1000)
        )
        for idx, image in enumerate(self.images)
    ]
    # Set an initial estimated pose so that the poses will be inferred
    frame_results[self.num_images // 10].estimated_pose = Transform()

    profile.runctx(
        "make_trial_result(self.system, self.image_source, frame_results)",
        locals=locals(), globals=globals(), filename=stats_file
    )
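# The .prof file written by profile.runctx above can be inspected afterwards with the
# standard-library pstats module. A minimal sketch, assuming the file name matches the
# stats_file used above:
import pstats

stats = pstats.Stats("create_slam_result_motions.prof")
stats.sort_stats("cumulative").print_stats(20)  # print the 20 most expensive calls by cumulative time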
def process_image(self, image: Image, timestamp: float) -> None: """ Process an image as part of the current run. Should automatically start a new trial if none is currently started. :param image: The image object for this frame :param timestamp: A timestamp or index associated with this image. Sometimes None. :return: void """ if self._input_queue is not None: # Wait here, to throttle the input rate to the queue, and prevent it from growing too large # delay_time = 0 # while self._input_queue.qsize() > 30 and delay_time < 10: # time.sleep(1) # delay_time += 1 logging.getLogger(__name__).debug("Sending frame {0}...".format(len(self._partial_frame_results))) # Add the camera pose to the ground-truth trajectory self._partial_frame_results[timestamp] = FrameResult( timestamp=timestamp, image=image.pk, pose=image.camera_pose ) # Send different input based on the running mode if self.mode == SensorMode.MONOCULAR: self._input_queue.put((image_utils.convert_to_grey(image.pixels), None, timestamp)) elif self.mode == SensorMode.STEREO: self._input_queue.put((image_utils.convert_to_grey(image.left_pixels), image_utils.convert_to_grey(image.right_pixels), timestamp)) elif self.mode == SensorMode.RGBD: self._input_queue.put((image_utils.convert_to_grey(image.pixels), image.depth.astype(np.float32), timestamp))
def make_trials(system: VisionSystem, image_collection: ImageCollection, repeats: int,
                random: np.random.RandomState):
    # Get the true motions, for making trials
    true_motions = [
        image_collection.images[frame_idx - 1].camera_pose.find_relative(
            image_collection.images[frame_idx].camera_pose)
        if frame_idx > 0 else None
        for frame_idx in range(len(image_collection))
    ]

    # Make some plausible trial results
    trial_results = []
    for repeat in range(repeats):
        start_idx = random.randint(0, len(image_collection) - 2)
        frame_results = [
            FrameResult(
                timestamp=timestamp,
                image=image,
                pose=image.camera_pose,
                processing_time=random.uniform(0.001, 1.0),
                estimated_motion=true_motions[frame_idx].find_independent(Transform(
                    location=random.normal(0, 1, 3),
                    rotation=t3.quaternions.axangle2quat(
                        random.uniform(-1, 1, 3), random.normal(0, np.pi / 2)),
                    w_first=True
                )) if frame_idx > start_idx else None,
                tracking_state=TrackingState.OK if frame_idx > start_idx
                else TrackingState.NOT_INITIALIZED,
                num_matches=random.randint(10, 100)
            )
            for frame_idx, (timestamp, image) in enumerate(image_collection)
        ]
        frame_results[start_idx].estimated_pose = Transform()
        trial_settings = {'random': random.randint(0, 10), 'repeat': repeat}
        trial_result = SLAMTrialResult(
            system=system,
            image_source=image_collection,
            success=True,
            results=frame_results,
            has_scale=False,
            settings=trial_settings
        )
        trial_result.save()
        trial_results.append(trial_result)
    return trial_results
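# A minimal usage sketch for make_trials (assumptions: `system` and `image_collection` are
# existing, saved VisionSystem and ImageCollection instances provided by the surrounding test
# fixtures; FrameErrorMetric is the metric exercised in test_profile below):
trial_results = make_trials(system, image_collection, repeats=3, random=np.random.RandomState(42))
metric = FrameErrorMetric()
metric_result = metric.measure_results(trial_results)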
def test_profile(self, _):
    import cProfile as profile

    repeats = 3
    random = np.random.RandomState(13)

    # Make some number of trial results to measure
    trial_results = []
    for repeat in range(repeats):
        frame_results = [
            FrameResult(
                timestamp=idx + random.normal(0, 0.01),
                image=image,
                processing_time=random.uniform(0.01, 1),
                pose=image.camera_pose,
                estimated_motion=Transform(
                    (14 + random.normal(0, 1), 0.9 + random.normal(0, 0.05), 0.1 + random.normal(0, 0.05)),
                    tf3d.quaternions.axangle2quat(
                        (1, 2, 4), 5 * np.pi / (2 * self.num_images) + random.normal(0, np.pi / 64)),
                    w_first=True
                ) if idx > 0 else None,
                tracking_state=TrackingState.OK,
                num_features=random.randint(10, 1000),
                num_matches=random.randint(10, 1000)
            )
            for idx, image in enumerate(self.images)
        ]
        trial_result = SLAMTrialResult(
            system=self.system,
            image_source=self.image_source,
            success=True,
            results=frame_results,
            has_scale=False
        )
        trial_results.append(trial_result)

    metric = FrameErrorMetric()

    stats_file = "measure_trials.prof"
    profile.runctx(
        "metric.measure_results(trial_results)",
        locals=locals(), globals=globals(), filename=stats_file
    )
def process_image(self, image: Image, timestamp: float) -> None: """ Process an image as part of the current run. Should automatically start a new trial if none is currently started. :param image: The image object for this frame :param timestamp: A timestamp or index associated with this image. Sometimes None. :return: void """ if self._undistorter is None: raise RuntimeError("Cannot process image, trial has not started yet. Call 'start_trial'") image_data = image_utils.to_uint_image(image_utils.convert_to_grey(image.pixels)) dso_img = self._undistorter.undistort_greyscale(image_data, 0, timestamp, 1.0) self._processing_start_times[timestamp] = time.time() self._system.addActiveFrame(dso_img, self._image_index) self._image_index += 1 self._frame_results[timestamp] = FrameResult( timestamp=timestamp, image=image.pk, pose=image.camera_pose, tracking_state=TrackingState.NOT_INITIALIZED, processing_time=np.nan )