def get_trajectory(td_summary,
                   image_path,
                   start_waypoint_idx,
                   end_waypoint_idx=None):
    """Plan a path through a slice of the module-level trajectory waypoints.

    Each waypoint names two semantic trunks; the planner is asked to pass
    through the midpoint between them.

    NOTE(review): reads the module-level globals `trajectory_waypoints` and
    `execution_dir` -- confirm both are defined before calling.

    Returns:
        (trajectory, waypoints_coordinates) from a single run of
        PathPlanningExperiment.
    """
    trunks = td_summary['results']['1']['semantic_trunks']
    map_image = cv2.imread(image_path)
    upper_left, lower_right = cv_utils.get_bounding_box(
        map_image,
        trunks.values(),
        expand_ratio=config.bounding_box_expand_ratio)
    selected_waypoints = trajectory_waypoints[start_waypoint_idx:end_waypoint_idx]
    # Midpoint between the two trunks that define each waypoint.
    waypoints_coordinates = [
        ((trunks[wp[0]][0] + trunks[wp[1]][0]) / 2,
         (trunks[wp[0]][1] + trunks[wp[1]][1]) / 2)
        for wp in selected_waypoints
    ]
    experiment = PathPlanningExperiment(name='path_planning',
                                        data_sources={
                                            'map_image_path': image_path,
                                            'map_upper_left': upper_left,
                                            'map_lower_right': lower_right,
                                            'waypoints': waypoints_coordinates
                                        },
                                        working_dir=execution_dir,
                                        metadata=td_summary['metadata'])
    experiment.run(repetitions=1)
    return experiment.results[1]['trajectory'], waypoints_coordinates
def plot_trajectory(td_summary, image_path, trajectory, waypoints_coordinates,
                    start_waypoint_idx, output_file_path):
    """Render the planned trajectory and lettered waypoints on the cropped
    orchard image and write the result to `output_file_path`."""
    trunks = td_summary['results']['1']['semantic_trunks']
    image = cv2.imread(image_path)
    upper_left, lower_right = cv_utils.get_bounding_box(
        image,
        trunks.values(),
        expand_ratio=config.bounding_box_expand_ratio)
    cropped_image = image[upper_left[1]:lower_right[1],
                          upper_left[0]:lower_right[0]]
    # Trajectory points are assumed to already be in cropped-image
    # coordinates (they come from a planner run on the cropped map).
    canvas = cv_utils.draw_points_on_image(cropped_image,
                                           trajectory,
                                           color=(0, 255, 255),
                                           radius=5)
    # Waypoints are in full-image coordinates; shift them into the crop.
    shifted_waypoints = [
        tuple(np.array(coordinates) - np.array(upper_left))
        for coordinates in waypoints_coordinates
    ]
    canvas = cv_utils.draw_points_on_image(canvas,
                                           shifted_waypoints,
                                           color=(0, 255, 255),
                                           radius=30)
    # Label waypoints 'A', 'B', ... offset by start_waypoint_idx.
    for offset_idx, coordinates in enumerate(waypoints_coordinates):
        canvas = cv_utils.put_shaded_text_on_image(
            canvas,
            label=chr(start_waypoint_idx + offset_idx + 65),
            location=tuple(np.array(coordinates) - np.array(upper_left)),
            color=(0, 255, 255),
            offset=(-40, -70))
    cv2.imwrite(output_file_path, canvas)
# Example #3
# 0
    def task(self, **kwargs):
        """Run a canopies-based localization session against a recorded scan bag.

        Builds a canopies map from the source image, starts a ROS master
        (optionally with RVIZ), launches the static TF / map server / AMCL /
        ICP stack under the 'canopies' namespace, plays the input bag for its
        full duration, then stops bag recording and tears down RVIZ.

        Keyword Args:
            launch_rviz: when True, also launch and later kill an RVIZ
                instance with the amcl.rviz config.
        """

        launch_rviz = kwargs.get('launch_rviz', False)

        origin_map_image_path = self.data_sources['map_image_path']
        map_semantic_trunks = self.data_sources['map_semantic_trunks']
        scan_bag_path = self.data_sources['scan_bag_path']
        bounding_box_expand_ratio = self.params['bounding_box_expand_ratio']

        # Generate canopies and trunk map images
        map_image = cv2.imread(origin_map_image_path)
        canopies_map_image = maps_generation.generate_canopies_map(map_image)
        upper_left, lower_right = cv_utils.get_bounding_box(
            canopies_map_image,
            map_semantic_trunks.values(),
            expand_ratio=bounding_box_expand_ratio)
        # Crop to the trunk bounding box; note the [y, x] slice order.
        canopies_map_image = canopies_map_image[upper_left[1]:lower_right[1],
                                                upper_left[0]:lower_right[0]]

        # NOTE(review): uses params['resolution'] here while sibling methods
        # use params['map_resolution'] -- confirm this key is intentional.
        canopies_map_yaml_path, _ = ros_utils.save_image_to_map(
            canopies_map_image,
            resolution=self.params['resolution'],
            map_name='canopies_map',
            dir_name=self.repetition_dir)
        cv2.imwrite(os.path.join(self.repetition_dir, 'map_image.jpg'),
                    map_image)

        # Start ROS and RVIZ
        ros_utils.start_master()
        if launch_rviz:
            ros_utils.launch_rviz(
                os.path.join(config.root_dir_path,
                             'src/experiments_framework/framework/amcl.rviz'))

        # Launch localization stack for canopies and trunks
        self._launch_base_to_scan_static_tf(namespace='canopies')
        self._launch_map_server(canopies_map_yaml_path, namespace='canopies')
        self._launch_amcl(namespace='canopies')
        self._launch_icp(namespace='canopies')

        # Start input bag and wait (sleeps for the bag's full duration)
        _, bag_duration = ros_utils.play_bag(
            scan_bag_path)  # TODO: need to read from data_sources

        time.sleep(bag_duration)

        # Stop recording output bag
        # NOTE(review): no recording is started in this method -- presumably
        # started by a launched node or a sibling method; verify.
        ros_utils.stop_recording_bags()

        # Kill RVIZ
        if launch_rviz:
            ros_utils.kill_rviz()
    def prologue(self):
        """Prepare maps, localization images, trajectory and synthetic scans.

        Pipeline: (1) generate canopies/trunks map images cropped to the
        trunk bounding box and save them as ROS maps; (2) generate the
        matching localization images, warping the localization image onto the
        map frame when it comes from a different source image; (3) resolve
        the trajectory waypoints and run (or load) a path-planning
        experiment; (4) convert the trajectory into a timestamped bag and
        generate canopies/trunks scan pickles; (5) record valid-scan rates.

        NOTE(review): `path_planning_experiment_summary_path`,
        `canopies_scans_pickle_path` and `trunks_scans_pickle_path` are read
        below but never defined in this method -- they must be module-level
        globals; confirm.
        """
        origin_map_image_path = self.data_sources['map_image_path']
        map_semantic_trunks = self.data_sources['map_semantic_trunks']
        map_external_trunks = self.data_sources['map_external_trunks']
        localization_semantic_trunks = self.data_sources[
            'localization_semantic_trunks']
        localization_external_trunks = self.data_sources[
            'localization_external_trunks']
        origin_localization_image_path = self.data_sources[
            'localization_image_path']
        trajectory_waypoints = self.data_sources['trajectory_waypoints']
        bounding_box_expand_ratio = self.params['bounding_box_expand_ratio']
        mean_trunk_radius_for_map = self.params['mean_trunk_radius_for_map']
        mean_trunk_radius_for_localization = self.params[
            'mean_trunk_radius_for_localization']
        std_trunk_radius_for_map = self.params['std_trunk_radius_for_map']
        std_trunk_radius_for_localization = self.params[
            'std_trunk_radius_for_localization']

        # Generate canopies and trunks map images
        map_image = cv2.imread(origin_map_image_path)
        cv2.imwrite(os.path.join(self.experiment_dir, 'image_for_map.jpg'),
                    map_image)
        canopies_map_image = maps_generation.generate_canopies_map(map_image)
        # Python-2 style list concatenation of dict.values() with a list.
        trunks_map_image = maps_generation.generate_trunks_map(
            map_image,
            map_semantic_trunks.values() + map_external_trunks,
            mean_trunk_radius_for_map,
            std_trunk_radius_for_map,
            np_random_state=self.np_random_state)
        upper_left, lower_right = cv_utils.get_bounding_box(
            canopies_map_image,
            map_semantic_trunks.values(),
            expand_ratio=bounding_box_expand_ratio)
        # Crop both maps to the same trunk bounding box ([y, x] slice order).
        canopies_map_image = canopies_map_image[upper_left[1]:lower_right[1],
                                                upper_left[0]:lower_right[0]]
        trunks_map_image = trunks_map_image[upper_left[1]:lower_right[1],
                                            upper_left[0]:lower_right[0]]
        self.canopies_map_yaml_path, _ = ros_utils.save_image_to_map(
            canopies_map_image,
            resolution=self.params['map_resolution'],
            map_name='canopies_map',
            dir_name=self.experiment_dir)
        self.trunks_map_yaml_path, _ = ros_utils.save_image_to_map(
            trunks_map_image,
            resolution=self.params['map_resolution'],
            map_name='trunks_map',
            dir_name=self.experiment_dir)

        # Generate canopies and trunks localization images
        localization_image = cv2.imread(origin_localization_image_path)
        if origin_localization_image_path != origin_map_image_path:
            # Different source image: align it to the map frame with an
            # affine warp estimated from corresponding semantic trunks.
            localization_image, affine_transform = cv_utils.warp_image(
                localization_image,
                localization_semantic_trunks.values(),
                map_semantic_trunks.values(),
                transformation_type='affine')
            # Promote the 2x3 affine matrix to 3x3 for perspectiveTransform.
            affine_transform = np.insert(affine_transform, [2], [0, 0, 1],
                                         axis=0)
            localization_semantic_trunks_np = np.float32(
                localization_semantic_trunks.values()).reshape(-1, 1, 2)
            localization_semantic_trunks_np = cv2.perspectiveTransform(
                localization_semantic_trunks_np, affine_transform)
            # Re-key the warped trunk points with the map labels (assumes
            # both dicts iterate in corresponding order -- TODO confirm).
            localization_semantic_trunks = {
                key: (int(np.round(value[0])), int(np.round(value[1])))
                for key, value in zip(
                    map_semantic_trunks.keys(),
                    localization_semantic_trunks_np[:, 0, :].tolist())
            }
            localization_external_trunks_np = np.float32(
                localization_external_trunks).reshape(-1, 1, 2)
            localization_external_trunks_np = cv2.perspectiveTransform(
                localization_external_trunks_np, affine_transform)
            localization_external_trunks = localization_external_trunks_np[:,
                                                                           0, :].tolist(
                                                                           )
            image_for_trajectory_path = os.path.join(
                self.experiment_dir, 'aligned_image_for_localization.jpg')
            cv2.imwrite(image_for_trajectory_path, localization_image)
        else:
            image_for_trajectory_path = origin_localization_image_path
        canopies_localization_image = maps_generation.generate_canopies_map(
            localization_image)
        trunks_localization_image = maps_generation.generate_trunks_map(
            localization_image,
            localization_semantic_trunks.values() +
            localization_external_trunks,
            mean_trunk_radius_for_localization,
            std_trunk_radius_for_localization,
            np_random_state=self.np_random_state)
        # Crop localization images with the bounding box computed on the map.
        canopies_localization_image = canopies_localization_image[
            upper_left[1]:lower_right[1], upper_left[0]:lower_right[0]]
        trunks_localization_image = trunks_localization_image[
            upper_left[1]:lower_right[1], upper_left[0]:lower_right[0]]
        self.canopies_localization_image_path = os.path.join(
            self.experiment_dir, 'canopies_localization.jpg')
        cv2.imwrite(self.canopies_localization_image_path,
                    canopies_localization_image)
        self.trunks_localization_image_path = os.path.join(
            self.experiment_dir, 'trunks_localization.jpg')
        cv2.imwrite(self.trunks_localization_image_path,
                    trunks_localization_image)
        self.canopies_localization_image_height = canopies_localization_image.shape[
            0]
        self.trunks_localization_image_height = trunks_localization_image.shape[
            0]
        viz_image = cv2.imread(image_for_trajectory_path)[
            upper_left[1]:lower_right[1], upper_left[0]:lower_right[0]]
        self.viz_image_path = os.path.join(self.experiment_dir,
                                           'image_for_visualization.jpg')
        cv2.imwrite(self.viz_image_path, viz_image)

        # Get trajectory.  A waypoint is either a 2-tuple of trunk labels
        # (midpoint), a 6-tuple of two labels plus per-trunk (dx, dy)
        # offsets, or a single trunk label.
        waypoints_coordinates = []
        for waypoint in trajectory_waypoints:
            if type(waypoint) is tuple and len(waypoint) == 2:
                point1 = localization_semantic_trunks[waypoint[0]]
                point2 = localization_semantic_trunks[waypoint[1]]
                waypoints_coordinates.append(
                    ((point1[0] + point2[0]) / 2, (point1[1] + point2[1]) / 2))
            elif type(waypoint) is tuple and len(waypoint) == 6:
                point1 = np.array(
                    localization_semantic_trunks[waypoint[0]]) + np.array(
                        [waypoint[2], waypoint[3]])
                point2 = np.array(
                    localization_semantic_trunks[waypoint[1]]) + np.array(
                        [waypoint[4], waypoint[5]])
                waypoints_coordinates.append(
                    ((point1[0] + point2[0]) / 2, (point1[1] + point2[1]) / 2))
            else:
                waypoints_coordinates.append(
                    localization_semantic_trunks[waypoint])
        # Reuse a previous path-planning result if a summary path was given.
        if path_planning_experiment_summary_path is not None:
            with open(path_planning_experiment_summary_path) as f:
                path_planning_experiment = json.load(f)
            self.results['trajectory'] = path_planning_experiment['results'][
                '1']['trajectory']
        else:
            path_planning_experiment = PathPlanningExperiment(
                name='path_planning',
                data_sources={
                    'map_image_path': image_for_trajectory_path,
                    'map_upper_left': upper_left,
                    'map_lower_right': lower_right,
                    'waypoints': waypoints_coordinates
                },
                working_dir=self.experiment_dir,
                metadata=self.metadata)
            path_planning_experiment.run(repetitions=1)
            self.results['trajectory'] = path_planning_experiment.results[1][
                'trajectory']
        trajectory = self.results['trajectory']
        freq = self.params['target_frequency']
        # Small positive epsilon so the first timestamp is non-zero.
        epsilon_t = 1e-3
        timestamps = np.linspace(start=epsilon_t,
                                 stop=epsilon_t +
                                 (1.0 / freq) * len(trajectory),
                                 num=len(trajectory))
        pose_time_tuples_list = [(x, y, t)
                                 for (x, y), t in zip(trajectory, timestamps)]
        self.trajectory_bag_path = os.path.join(self.experiment_dir,
                                                'trajectory.bag')
        ros_utils.trajectory_to_bag(pose_time_tuples_list,
                                    self.trajectory_bag_path)

        # Generate scan offline (reuse pre-computed pickles when provided)
        if canopies_scans_pickle_path is not None:
            self.canopies_scans_pickle_path = canopies_scans_pickle_path
        else:
            self.canopies_scans_pickle_path = os.path.join(
                self.experiment_dir, 'canopies_scan.pkl')
            offline_synthetic_scan_generator.generate_scans_pickle(
                self.trajectory_bag_path,
                canopies_localization_image,
                self.params['min_angle'],
                self.params['max_angle'],
                self.params['samples_num'],
                self.params['min_distance'],
                self.params['max_distance'],
                self.params['localization_resolution'],
                self.params['r_primary_search_samples'],
                self.params['r_secondary_search_step'],
                output_pickle_path=self.canopies_scans_pickle_path)
        if trunks_scans_pickle_path is not None:
            self.trunks_scans_pickle_path = trunks_scans_pickle_path
        else:
            self.trunks_scans_pickle_path = os.path.join(
                self.experiment_dir, 'trunks_scan.pkl')
            offline_synthetic_scan_generator.generate_scans_pickle(
                self.trajectory_bag_path,
                trunks_localization_image,
                self.params['min_angle'],
                self.params['max_angle'],
                self.params['samples_num'],
                self.params['min_distance'],
                self.params['max_distance'],
                self.params['localization_resolution'],
                self.params['r_primary_search_samples'],
                self.params['r_secondary_search_step'],
                output_pickle_path=self.trunks_scans_pickle_path)

        # Calculate valid scans rate (a scan of all-NaN samples is invalid)
        with open(self.canopies_scans_pickle_path) as f:
            canopies_scans = pickle.load(f)
        with open(self.trunks_scans_pickle_path) as f:
            trunks_scans = pickle.load(f)
        canopies_invalid_scans = np.sum(
            [np.all(np.isnan(scan)) for scan in canopies_scans.values()])
        canopies_total_timestamps = len(canopies_scans.values())
        self.results['canopies_valid_scans_rate'] = float(
            canopies_total_timestamps -
            canopies_invalid_scans) / canopies_total_timestamps
        trunks_invalid_scans = np.sum(
            [np.all(np.isnan(scan)) for scan in trunks_scans.values()])
        trunks_total_timestamps = len(trunks_scans.values())
        self.results['trunks_valid_scans_rate'] = float(
            trunks_total_timestamps -
            trunks_invalid_scans) / trunks_total_timestamps
        # NOTE(review): everything below appears to be a foreign snippet
        # spliced into this method: `group_idx`, `markers_locations_json_path`,
        # `snapshots_group`, `relevant_keys` and `fork_shaped` are undefined
        # here, `group_idx += 1` would raise UnboundLocalError if reached,
        # and the final `downsample_bag(` call is truncated mid-arguments.
        # This block needs to be removed or restored from its original file.
        execution_dir = utils.create_new_execution_folder(
            'trajectory_tagging_%d' % group_idx)
        group_idx += 1
        with open(markers_locations_json_path) as f:
            markers_locations = json.load(f)
        for key, data_descriptor in snapshots_group.items():
            if relevant_keys is not None:
                if key not in relevant_keys:
                    continue
            points = markers_locations[key]
            image = cv2.imread(data_descriptor.path)
            map_image = maps_generation.generate_canopies_map(image)
            (upper_left_x,
             upper_left_y), (lower_right_x,
                             lower_right_y) = cv_utils.get_bounding_box(
                                 map_image,
                                 points,
                                 expand_ratio=config.bounding_box_expand_ratio)
            map_image = map_image[upper_left_y:lower_right_y,
                                  upper_left_x:lower_right_x]

            if fork_shaped:
                _logger.info('Mark fork-shaped trajectory')
                pose_time_tuples_list = cv_utils.mark_trajectory_on_image(
                    map_image)
                ros_utils.trajectory_to_bag(
                    pose_time_tuples_list,
                    bag_path=os.path.join(execution_dir,
                                          '%s_fork_trajectory.bag' % key))
                ros_utils.downsample_bag(
                    input_bag_path=os.path.join(
                        execution_dir, '%s_fork_trajectory.bag' % key),
    def prologue(self):
        """Prepare localization images, a planned trajectory, and scan pickles.

        Simplified variant of the full prologue: no separate map image and no
        warping -- the localization image itself defines the frame.  Builds
        canopies and trunks localization images cropped to the trunk bounding
        box, resolves the trajectory waypoints, runs a path-planning
        experiment, writes the trajectory as a timestamped bag, and generates
        the canopies/trunks synthetic scan pickles.
        """
        localization_semantic_trunks = self.data_sources[
            'localization_semantic_trunks']
        origin_localization_image_path = self.data_sources[
            'localization_image_path']
        trajectory_waypoints = self.data_sources['trajectory_waypoints']
        bounding_box_expand_ratio = self.params['bounding_box_expand_ratio']
        mean_trunk_radius_for_localization = self.params[
            'mean_trunk_radius_for_localization']
        std_trunk_radius_for_localization = self.params[
            'std_trunk_radius_for_localization']
        localization_external_trunks = self.data_sources[
            'localization_external_trunks']

        # Generate canopies and trunks localization images
        localization_image = cv2.imread(origin_localization_image_path)

        image_for_trajectory_path = origin_localization_image_path

        canopies_localization_image = maps_generation.generate_canopies_map(
            localization_image)

        upper_left, lower_right = cv_utils.get_bounding_box(
            canopies_localization_image,
            localization_semantic_trunks.values(),
            expand_ratio=bounding_box_expand_ratio)

        # Crop to the trunk bounding box ([y, x] slice order).
        canopies_localization_image = canopies_localization_image[
            upper_left[1]:lower_right[1], upper_left[0]:lower_right[0]]
        self.canopies_localization_image_path = os.path.join(
            self.experiment_dir, 'canopies_localization.jpg')
        cv2.imwrite(self.canopies_localization_image_path,
                    canopies_localization_image)
        self.canopies_localization_image_height = canopies_localization_image.shape[
            0]
        # Python-2 style list concatenation of dict.values() with a list.
        trunks_localization_image = maps_generation.generate_trunks_map(
            localization_image,
            localization_semantic_trunks.values() +
            localization_external_trunks,
            mean_trunk_radius_for_localization,
            std_trunk_radius_for_localization,
            np_random_state=self.np_random_state)
        trunks_localization_image = trunks_localization_image[
            upper_left[1]:lower_right[1], upper_left[0]:lower_right[0]]
        self.trunks_localization_image_path = os.path.join(
            self.experiment_dir, 'trunks_localization.jpg')
        cv2.imwrite(self.trunks_localization_image_path,
                    trunks_localization_image)
        self.trunks_localization_image_height = trunks_localization_image.shape[
            0]

        # Get trajectory.  A waypoint is either a 2-tuple of trunk labels
        # (midpoint), a 6-tuple of two labels plus per-trunk (dx, dy)
        # offsets, or a single trunk label.
        waypoints_coordinates = []
        for waypoint in trajectory_waypoints:
            if type(waypoint) is tuple and len(waypoint) == 2:
                point1 = localization_semantic_trunks[waypoint[0]]
                point2 = localization_semantic_trunks[waypoint[1]]
                waypoints_coordinates.append(
                    ((point1[0] + point2[0]) / 2, (point1[1] + point2[1]) / 2))
            elif type(waypoint) is tuple and len(waypoint) == 6:
                point1 = np.array(
                    localization_semantic_trunks[waypoint[0]]) + np.array(
                        [waypoint[2], waypoint[3]])
                point2 = np.array(
                    localization_semantic_trunks[waypoint[1]]) + np.array(
                        [waypoint[4], waypoint[5]])
                waypoints_coordinates.append(
                    ((point1[0] + point2[0]) / 2, (point1[1] + point2[1]) / 2))
            else:
                waypoints_coordinates.append(
                    localization_semantic_trunks[waypoint])
        path_planning_experiment = PathPlanningExperiment(
            name='path_planning',
            data_sources={
                'map_image_path': image_for_trajectory_path,
                'map_upper_left': upper_left,
                'map_lower_right': lower_right,
                'waypoints': waypoints_coordinates
            },
            working_dir=self.experiment_dir,
            metadata=self.metadata)
        path_planning_experiment.run(repetitions=1)
        self.results['trajectory'] = path_planning_experiment.results[1][
            'trajectory']
        trajectory = self.results['trajectory']
        freq = self.params['target_frequency']
        # Small positive epsilon so the first timestamp is non-zero.
        epsilon_t = 1e-3
        timestamps = np.linspace(start=epsilon_t,
                                 stop=epsilon_t +
                                 (1.0 / freq) * len(trajectory),
                                 num=len(trajectory))
        pose_time_tuples_list = [(x, y, t)
                                 for (x, y), t in zip(trajectory, timestamps)]
        self.trajectory_bag_path = os.path.join(self.experiment_dir,
                                                'trajectory.bag')
        ros_utils.trajectory_to_bag(pose_time_tuples_list,
                                    self.trajectory_bag_path)

        # Generate scan offline
        self.canopies_scans_pickle_path = os.path.join(self.experiment_dir,
                                                       'canopies_scan.pkl')
        offline_synthetic_scan_generator.generate_scans_pickle(
            self.trajectory_bag_path,
            canopies_localization_image,
            self.params['min_angle'],
            self.params['max_angle'],
            self.params['samples_num'],
            self.params['min_distance'],
            self.params['max_distance'],
            self.params['localization_resolution'],
            self.params['r_primary_search_samples'],
            self.params['r_secondary_search_step'],
            output_pickle_path=self.canopies_scans_pickle_path)
        self.trunks_scans_pickle_path = os.path.join(self.experiment_dir,
                                                     'trunks_scan.pkl')
        offline_synthetic_scan_generator.generate_scans_pickle(
            self.trajectory_bag_path,
            trunks_localization_image,
            self.params['min_angle'],
            self.params['max_angle'],
            self.params['samples_num'],
            self.params['min_distance'],
            self.params['max_distance'],
            self.params['localization_resolution'],
            self.params['r_primary_search_samples'],
            self.params['r_secondary_search_step'],
            output_pickle_path=self.trunks_scans_pickle_path)
# Script entry point: load a trunks-detection summary and randomly pick
# trajectory start rows between adjacent tree rows.
# NOTE(review): `description`, `selected_td_experiments`, `td_results_dir`
# and `num_of_trajectories` are undefined in the visible source -- they must
# be module-level globals; confirm.  The while-loop body is truncated here:
# only the retry path is visible and `i` is never incremented in view.
if __name__ == '__main__':
    execution_dir = utils.create_new_execution_folder(description)
    td_experiment_name = selected_td_experiments[0]
    with open(
            os.path.join(td_results_dir, td_experiment_name,
                         'experiment_summary.json')) as f:
        trunks_detection_summary = json.load(f)
    image_path = trunks_detection_summary['data_sources']
    image_key = trunks_detection_summary['metadata']['image_key']
    semantic_trunks = trunks_detection_summary['results']['1'][
        'semantic_trunks']
    trunk_points_list = semantic_trunks.values()
    image = cv2.imread(image_path)
    upper_left, lower_right = cv_utils.get_bounding_box(
        image,
        trunk_points_list,
        expand_ratio=config.bounding_box_expand_ratio)
    # Trunk labels look like 'row/tree'; collect the distinct row and tree
    # components.
    row_labels = np.unique(
        [label.split('/')[0] for label in semantic_trunks.keys()])
    tree_labels = np.unique(
        [label.split('/')[1] for label in semantic_trunks.keys()])
    i = 0
    while i < num_of_trajectories:
        # Python-2 style: dict.keys() indexed as a list.
        start_label_1 = semantic_trunks.keys()[np.random.randint(
            low=0, high=len(semantic_trunks.keys()))]
        # The paired start label is the same tree in the next row.
        start_label_2 = '/'.join([
            str(int(start_label_1.split('/')[0]) + 1),
            start_label_1.split('/')[1]
        ])
        if start_label_2 not in semantic_trunks.keys():
            continue
    def task(self, **kwargs):
        """Evaluate template-matching localization along a trajectory.

        Both the map and localization images are cropped to the trunk
        bounding box so they share one coordinate frame.  For every
        (downsampled) UGV pose, an ROI around the pose is cropped from the
        localization image and matched against the map image with each of
        the configured cv2.matchTemplate methods; the Euclidean distance
        between the pose and the match center, scaled by the localization
        resolution, is recorded per method and written to errors.csv.

        Keyword Args:
            verbose_mode: when truthy, annotated match images are written to
                the repetition directory for visual inspection.
        """

        verbose_mode = kwargs.get('verbose_mode')

        # Read params and data sources
        map_image_path = self.data_sources['map_image_path']
        localization_image_path = self.data_sources['localization_image_path']
        trajectory = self.data_sources['trajectory']
        map_semantic_trunks = self.data_sources['map_semantic_trunks']
        bounding_box_expand_ratio = self.params['bounding_box_expand_ratio']
        roi_size = self.params['roi_size']
        methods = self.params['methods']
        downsample_rate = self.params['downsample_rate']
        localization_resolution = self.params['localization_resolution']
        use_canopies_masks = self.params['use_canopies_masks']

        # Read images and crop both to the trunk bounding box
        map_image = cv2.imread(map_image_path)
        localization_image = cv2.imread(localization_image_path)
        upper_left, lower_right = cv_utils.get_bounding_box(
            map_image,
            map_semantic_trunks.values(),
            expand_ratio=bounding_box_expand_ratio)
        map_image = map_image[upper_left[1]:lower_right[1],
                              upper_left[0]:lower_right[0]]
        localization_image = localization_image[upper_left[1]:lower_right[1],
                                                upper_left[0]:lower_right[0]]
        if use_canopies_masks:
            # Match on canopy masks instead of raw imagery.
            _, map_image = segmentation.extract_canopy_contours(map_image)
            _, localization_image = segmentation.extract_canopy_contours(
                localization_image)
        cv2.imwrite(os.path.join(self.experiment_dir, 'map_image.jpg'),
                    map_image)
        cv2.imwrite(
            os.path.join(self.experiment_dir, 'localization_image.jpg'),
            localization_image)

        # Initialize errors dataframe: one row per trajectory point keyed
        # 'x_y', one column per matching method.
        errors = pd.DataFrame(
            index=['%s_%s' % (point[0], point[1]) for point in trajectory],
            columns=methods)

        # Loop over points in trajectory
        for ugv_pose_idx, ugv_pose in enumerate(trajectory):
            if ugv_pose_idx % downsample_rate != 0:
                continue
            if ugv_pose_idx % MESSAGING_FREQUENCY == 0:
                _logger.info('At point #%d' % ugv_pose_idx)
            roi_image, _, _ = cv_utils.crop_region(localization_image,
                                                   ugv_pose[0], ugv_pose[1],
                                                   roi_size, roi_size)
            if verbose_mode:
                # Draw the ground-truth pose (red dot) and its ROI rectangle.
                matches_image = map_image.copy()
                cv2.circle(matches_image,
                           tuple(ugv_pose),
                           radius=15,
                           color=(0, 0, 255),
                           thickness=-1)
                cv2.rectangle(
                    matches_image,
                    (ugv_pose[0] - roi_size / 2, ugv_pose[1] - roi_size / 2),
                    (ugv_pose[0] + roi_size / 2, ugv_pose[1] + roi_size / 2),
                    (0, 0, 255),
                    thickness=2)
            for method in methods:
                # Resolve the method name (e.g. 'TM_CCOEFF') to the cv2
                # constant; getattr replaces the previous eval() on a
                # config-supplied string.
                matching_result = cv2.matchTemplate(map_image,
                                                    roi_image,
                                                    method=getattr(
                                                        cv2, method))
                min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(
                    matching_result)
                # SQDIFF variants score best at the minimum; all other
                # methods at the maximum.
                if method in ['TM_SQDIFF', 'TM_SQDIFF_NORMED']:
                    match_top_left = min_loc
                else:
                    match_top_left = max_loc
                match_bottom_right = (match_top_left[0] + roi_image.shape[1],
                                      match_top_left[1] + roi_image.shape[0])
                match_center = (match_top_left[0] + roi_image.shape[1] / 2,
                                match_top_left[1] + roi_image.shape[0] / 2)
                # Pixel distance scaled to meters by the map resolution.
                error = np.sqrt((ugv_pose[0] - match_center[0])**2 +
                                (ugv_pose[1] -
                                 match_center[1])**2) * localization_resolution
                errors.loc['%s_%s' % (ugv_pose[0], ugv_pose[1]),
                           method] = error
                if verbose_mode:
                    # Draw the match (blue rectangle and center dot).
                    cv2.rectangle(matches_image,
                                  match_top_left,
                                  match_bottom_right, (255, 0, 0),
                                  thickness=2)
                    cv2.circle(matches_image,
                               match_center,
                               radius=15,
                               color=(255, 0, 0),
                               thickness=-1)
                    cv2.imwrite(
                        os.path.join(
                            self.repetition_dir,
                            'matches_%s_%s.jpg' % (ugv_pose[0], ugv_pose[1])),
                        matches_image)

        # Save results
        errors.to_csv(os.path.join(self.experiment_dir, 'errors.csv'))
# Example #9
# 0
# Path to a previously saved trunks-detection experiment metadata file.
metadata_path = r'/home/omer/Downloads/experiment_metadata_baseline.json'

# Script entry point: rebuild the Gaussians grid image from a saved
# baseline experiment.
# NOTE(review): `dji`, `image_key`, `viz_mode`, `viz_utils` and
# `trunks_detection_old_cv` are undefined in the visible source -- presumably
# module-level imports/globals; confirm.  The final call is truncated at the
# end of this view.
if __name__ == '__main__':

    with open(metadata_path) as f:
        metadata = json.load(f)
    trunks = metadata['results']['1']['trunk_points_list']
    optimized_sigma = metadata['results']['1']['optimized_sigma']

    image = cv2.imread(dji.snapshots_80_meters[image_key].path)
    if viz_mode:
        viz_utils.show_image('image', image)

    # Coerce trunk points to integer pixel coordinates.
    trunks = [(int(elem[0]), int(elem[1])) for elem in trunks]
    upper_left, lower_right = cv_utils.get_bounding_box(image,
                                                        trunks,
                                                        expand_ratio=0.1)
    cropped_image = image[upper_left[1]:lower_right[1],
                          upper_left[0]:lower_right[0]]
    # Shift trunk coordinates into the cropped frame.
    trunks = np.array(trunks) - np.array(upper_left)

    if viz_mode:
        viz_utils.show_image('cropped image', cropped_image)

    gaussians = trunks_detection_old_cv.get_gaussians_grid_image(
        trunks,
        optimized_sigma,
        cropped_image.shape[1],
        cropped_image.shape[0],
        scale_factor=0.7,
        square_size_to_sigma_ratio=3,