Example #1
def parallel_save_laser_scan_msgs(save_laser_scan_msgs_args):
    bag_file_path, scans_file_path, topic_name = save_laser_scan_msgs_args

    if shared_terminate.value:
        return

    print_info("start : save_laser_scan_msgs {}".format(bag_file_path))
    # noinspection PyBroadException
    try:
        save_laser_scan_msgs(bag_file_path, scans_file_path, topic_name)
    except KeyboardInterrupt:
        print_info(
            "parallel_save_laser_scan_msgs: metrics computation interrupted (bag {})"
            .format(bag_file_path))
        shared_terminate.value = True
    except:
        print_error(
            "parallel_save_laser_scan_msgs: failed metrics computation for bag {}"
            .format(bag_file_path))
        print_error(traceback.format_exc())

    shared_progress.value += 1
    print_info("finish: save_laser_scan_msgs {:3d}% {}".format(
        int(shared_progress.value * 100 / shared_num_runs.value),
        bag_file_path))
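# The shared_* names used above are not defined in this snippet. A minimal driver
# sketch, assuming they are multiprocessing shared values installed into each worker
# by a pool initializer (all names below are illustrative):
import multiprocessing

def init_worker(terminate, progress, num_runs):
    global shared_terminate, shared_progress, shared_num_runs
    shared_terminate, shared_progress, shared_num_runs = terminate, progress, num_runs

if __name__ == '__main__':
    save_laser_scan_msgs_args_list = list()  # tuples of (bag_file_path, scans_file_path, topic_name)
    terminate = multiprocessing.Value('b', False)
    progress = multiprocessing.Value('i', 0)
    num_runs = multiprocessing.Value('i', len(save_laser_scan_msgs_args_list))
    with multiprocessing.Pool(initializer=init_worker, initargs=(terminate, progress, num_runs)) as pool:
        pool.map(parallel_save_laser_scan_msgs, save_laser_scan_msgs_args_list)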
    def ps_snapshot_timer_callback(self):
        ps_snapshot_file_path = path.join(
            self.ps_output_folder,
            "ps_{i:08d}.pkl".format(i=self.ps_snapshot_count))

        processes_dicts_list = list()
        for process in self.ps_processes:
            try:
                process_copy = copy.deepcopy(
                    process.as_dict())  # get all information about the process
            except psutil.NoSuchProcess:  # processes may have died, causing this exception to be raised from psutil.Process.as_dict
                continue
            try:
                # delete uninteresting values
                del process_copy['connections']
                del process_copy['memory_maps']
                del process_copy['environ']

                processes_dicts_list.append(process_copy)
            except KeyError:
                pass
        try:
            with open(ps_snapshot_file_path, 'wb') as ps_snapshot_file:
                pickle.dump(processes_dicts_list, ps_snapshot_file)
        except TypeError:
            print_error(traceback.format_exc())

        self.ps_snapshot_count += 1
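    # ps_output_folder, ps_snapshot_count and ps_processes are assumed to be set up
    # elsewhere in this class; one plausible initialization with psutil (illustrative
    # sketch, not the original code):
    def init_ps_snapshot_state(self, run_output_folder, component_pids):
        self.ps_output_folder = path.join(run_output_folder, "ps_snapshots")
        self.ps_snapshot_count = 0
        self.ps_processes = list()
        for component_pid in component_pids:
            component_process = psutil.Process(component_pid)
            self.ps_processes.append(component_process)
            self.ps_processes += component_process.children(recursive=True)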
    def get_result_callback(self, future):
        status = future.result().status

        if status == GoalStatus.STATUS_SUCCEEDED:
            goal_position = self.current_goal.pose.pose.position
            current_position = self.latest_ground_truth_pose_msg.pose.pose.position
            distance_from_goal = np.sqrt(
                (goal_position.x - current_position.x)**2 +
                (goal_position.y - current_position.y)**2)
            if distance_from_goal < self.goal_tolerance:
                self.write_event(self.get_clock().now(), 'target_pose_reached')
                self.goal_succeeded_count += 1
            else:
                print_error(
                    "goal status succeeded but current position farther from goal position than tolerance"
                )
                self.write_event(self.get_clock().now(),
                                 'target_pose_not_reached')
                self.goal_failed_count += 1
        else:
            print_info(
                'navigation action failed with status {}'.format(status))
            self.write_event(self.get_clock().now(), 'target_pose_not_reached')
            self.goal_failed_count += 1

        self.current_goal = None

        # if all goals have been sent end the run, otherwise send the next goal
        if len(self.traversal_path_poses) == 0:
            self.write_event(self.get_clock().now(), 'run_completed')
            rclpy.shutdown()
        else:
            self.send_goal()
def cpu_and_memory_usage_metrics(ps_snapshots_folder_path):
    cpu_and_memory_usage_dict = dict()
    cpu_and_memory_usage_list_dict = defaultdict(list)

    # get list of all ps snapshots
    ps_snapshot_paths_list = sorted(
        glob.glob(path.join(ps_snapshots_folder_path, "ps_*.pkl")))

    for ps_snapshot_path in ps_snapshot_paths_list:
        try:
            with open(ps_snapshot_path, 'rb') as ps_snapshot_file:
                ps_snapshot = pickle.load(ps_snapshot_file)
        except EOFError as e:
            print_error(
                "Could not load pickled ps snapshot. Error: {t} {e}. Pickle file: {f}"
                .format(e=e, t=type(e), f=ps_snapshot_path))
            continue

        for process_info in ps_snapshot:
            process_name = process_info['name']

            cpu_and_memory_usage_list_dict["{p}_uss".format(
                p=process_name)].append(process_info['memory_full_info'].uss)
            cpu_and_memory_usage_list_dict["{p}_rss".format(
                p=process_name)].append(process_info['memory_full_info'].rss)
            cpu_and_memory_usage_list_dict["{p}_accumulated_cpu_time".format(
                p=process_name)].append(process_info['cpu_times'].user +
                                        process_info['cpu_times'].system)

    for metric_name, metric_values in cpu_and_memory_usage_list_dict.items():
        cpu_and_memory_usage_dict[metric_name] = max(metric_values)

    return cpu_and_memory_usage_dict
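# Usage sketch, assuming a folder of ps_*.pkl files written by
# ps_snapshot_timer_callback above (the path and node name are illustrative):
cpu_and_memory_usage = cpu_and_memory_usage_metrics(path.expanduser("~/run_folder/ps_snapshots"))
for metric_name, metric_value in sorted(cpu_and_memory_usage.items()):
    print(metric_name, metric_value)  # e.g. amcl_uss, amcl_rss, amcl_accumulated_cpu_time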
Example #5
    def __init__(self, map_info_path):

        with open(map_info_path) as map_info_file:
            map_info = yaml.safe_load(map_info_file)

        if path.isabs(map_info['image']):
            map_image_path = map_info['image']
        else:
            map_image_path = path.join(path.dirname(map_info_path), map_info['image'])

        self.map_image = Image.open(map_image_path)
        self.width, self.height = self.map_image.size
        self.map_offset_meters = np.array(map_info['origin'][0:2], dtype=float)

        map_rotation_offset = float(map_info['origin'][2])
        if map_rotation_offset != 0:
            print_error("map rotation not supported")

        self.resolution = float(map_info['resolution'])  # meter/pixel
        self.map_size_pixels = np.array(self.map_image.size)
        self.map_size_meters = self.map_size_pixels * self.resolution
        self.map_frame_meters = -self.map_offset_meters

        if 'initial_pose' in map_info:
            self.initial_position = map_info['initial_pose']
        else:
            self.initial_position = [0.0, 0.0]

        self._complete_free_voronoi_graph_file_path = path.join(path.dirname(map_info_path), "complete_free_voronoi_graph_cache.pkl")

        self._occupancy_map = None
        self._complete_free_voronoi_graph = None
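    # Illustrative helper (an assumption, not part of the original class): the
    # meters-to-pixels conversion implied by map_frame_meters and resolution,
    # mirroring the expression used in black_white_to_ground_truth_map below.
    def map_frame_meters_to_image_pixels(self, position_meters):
        # the y axis is flipped because image rows grow downward while map y grows upward
        p = (self.map_frame_meters + np.asarray(position_meters, dtype=float)) / self.resolution
        return int(p[0]), int(self.height - p[1])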
Example #6
def save_laser_scan_msgs(bag_file_path, scans_file_path, topic_name):
    if not path.exists(bag_file_path):
        print_error("file not found: {}".format(bag_file_path))
        return

    if path.exists(scans_file_path):
        print_info("scans file already exists: {}".format(scans_file_path))
        return

    scans_file_dir = path.dirname(scans_file_path)
    if not path.exists(scans_file_dir):
        os.makedirs(scans_file_dir)

    bag = rosbag.Bag(bag_file_path)

    with open(scans_file_path, 'w') as scans_file:
        for _, laser_scan_msg, _ in bag.read_messages(topics=[topic_name]):
            scans_file.write(
                "{t}, {angle_min}, {angle_max}, {angle_increment}, {range_min}, {range_max}, {ranges}\n"
                .format(t=laser_scan_msg.header.stamp.to_sec(),
                        angle_min=laser_scan_msg.angle_min,
                        angle_max=laser_scan_msg.angle_max,
                        angle_increment=laser_scan_msg.angle_increment,
                        range_min=laser_scan_msg.range_min,
                        range_max=laser_scan_msg.range_max,
                        ranges=', '.join(map(str, laser_scan_msg.ranges))))

    bag.close()
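# A minimal reader for the flat text format written above; each line holds six scalar
# fields followed by a variable number of ranges, all separated by ", " (sketch):
def read_laser_scan_lines(scans_file_path):
    with open(scans_file_path) as scans_file:
        for line in scans_file:
            fields = list(map(float, line.split(', ')))
            t, angle_min, angle_max, angle_increment, range_min, range_max = fields[0:6]
            ranges = fields[6:]
            yield t, angle_min, angle_max, angle_increment, range_min, range_max, ranges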
    def localization_node_transition_event_callback(
            self, transition_event_msg: lifecycle_msgs.msg.TransitionEvent):
        # send the initial pose as soon as the localization node activates the first time
        if transition_event_msg.goal_state.label == 'active' and not self.localization_node_activated:
            if self.initial_pose is None:
                print_error("initial_pose is still None")
                return

            self.localization_node_activated = True
            self.initial_pose_publisher.publish(self.initial_pose)
Example #8
    def shutdown(self):
        if self._launch_task is None:
            print_error(
                "ComponentsLauncher.shutdown: components launcher has not been launched"
            )
            return

        if not self._launch_task.done():
            asyncio.ensure_future(self._launch_service.shutdown(),
                                  loop=self._loop)
            self._loop.run_until_complete(self._launch_task)
Example #9
def absolute_localization_error_metrics(estimated_poses_file_path, ground_truth_poses_file_path):

    # check required files exist
    if not path.isfile(estimated_poses_file_path):
        print_error("absolute_localization_error_metrics: estimated_poses file not found {}".format(estimated_poses_file_path))
        return

    if not path.isfile(ground_truth_poses_file_path):
        print_error("absolute_localization_error_metrics: ground_truth_poses file not found {}".format(ground_truth_poses_file_path))
        return

    estimated_poses_df = pd.read_csv(estimated_poses_file_path)
    if len(estimated_poses_df.index) < 2:
        print_error("not enough estimated poses data points")
        return

    ground_truth_poses_df = pd.read_csv(ground_truth_poses_file_path)
    df = compute_ground_truth_interpolated_poses(estimated_poses_df, ground_truth_poses_df)

    # if no matching ground truth data points are found, the metrics cannot be computed
    if len(df.index) < 2:
        print_error("no matching ground truth data points were found")
        return

    absolute_error_of_each_segment = np.sqrt((df['x_est'] - df['x_gt'])**2 + (df['y_est'] - df['y_gt'])**2)

    absolute_error_dict = dict()
    absolute_error_dict['sum'] = float(absolute_error_of_each_segment.sum())
    absolute_error_dict['mean'] = float(absolute_error_of_each_segment.mean())
    return absolute_error_dict
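# Toy illustration of the error computation above, using the column names produced by
# compute_ground_truth_interpolated_poses (the values are made up):
toy_df = pd.DataFrame({'x_est': [0.0, 1.0], 'y_est': [0.0, 1.0],
                       'x_gt': [0.0, 1.1], 'y_gt': [0.0, 0.9]})
toy_error = np.sqrt((toy_df['x_est'] - toy_df['x_gt'])**2 + (toy_df['y_est'] - toy_df['y_gt'])**2)
print(float(toy_error.sum()), float(toy_error.mean()))  # ~0.141, ~0.071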
Example #10
def trajectory_length_metric(ground_truth_poses_file_path):

    # check required files exist
    if not path.isfile(ground_truth_poses_file_path):
        print_error("compute_trajectory_length: ground_truth_poses file not found {}".format(ground_truth_poses_file_path))
        return None

    df = pd.read_csv(ground_truth_poses_file_path)

    squared_deltas = (df[['x', 'y']] - df[['x', 'y']].shift(periods=1)) ** 2  # equivalent to (x_2-x_1)**2, (y_2-y_1)**2, for each row
    sum_of_squared_deltas = squared_deltas['x'] + squared_deltas['y']  # equivalent to (x_2-x_1)**2 + (y_2-y_1)**2, for each row
    euclidean_distance_of_deltas = np.sqrt(sum_of_squared_deltas)  # equivalent to sqrt( (x_2-x_1)**2 + (y_2-y_1)**2 ), for each row
    trajectory_length = euclidean_distance_of_deltas.sum()

    return float(trajectory_length)
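# The shift/sum/sqrt pattern above is equivalent to np.hypot over np.diff; a worked
# check on a toy trajectory:
toy_poses_df = pd.DataFrame({'x': [0.0, 3.0, 3.0], 'y': [0.0, 4.0, 4.0]})
toy_length = float(np.hypot(np.diff(toy_poses_df['x']), np.diff(toy_poses_df['y'])).sum())
print(toy_length)  # 5.0: (0,0)->(3,4) contributes 5, (3,4)->(3,4) contributes 0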
    def log(self, event):  # append an event row to benchmark_log.csv (e.g. /home/furkan/ds/performance_modelling/output/test_planning/benchmark_log.csv)

        if not path.exists(self.benchmark_log_path):
            with open(self.benchmark_log_path, 'a') as output_file:
                output_file.write("timestamp, run_id, event\n")

        t = time.time()

        print_info("t: {t}, run: {run_id}, event: {event}".format(t=t, run_id=self.run_id, event=event))
        try:
            with open(self.benchmark_log_path, 'a') as output_file:
                output_file.write("{t}, {run_id}, {event}\n".format(t=t, run_id=self.run_id, event=event))
        except IOError as e:
            print_error("benchmark_log: could not write event to file: {t}, {run_id}, {event}".format(t=t, run_id=self.run_id, event=event))
            print_error(e)
    def log(self, event):

        if not path.exists(self.benchmark_log_path):
            with open(self.benchmark_log_path, 'a') as output_file:
                output_file.write("timestamp, run_id, event\n")

        t = time.time()

        print_info(f"t: {t}, run: {self.run_id}, event: {event}")
        try:
            with open(self.benchmark_log_path, 'a') as output_file:
                output_file.write(f"{t}, {self.run_id}, {event}\n")
        except IOError as e:
            print_error(
                f"benchmark_log: could not write event to file: {t}, {self.run_id}, {event}"
            )
            print_error(e)
    def goal_response_callback(self, future):
        goal_handle = future.result()

        # if the goal is rejected try with the next goal
        if not goal_handle.accepted:
            print_error('navigation action goal rejected')
            self.write_event(self.get_clock().now(), 'target_pose_rejected')
            self.goal_rejected_count += 1
            self.current_goal = None
            self.send_goal()
            return

        self.write_event(self.get_clock().now(), 'target_pose_accepted')

        self.navigate_to_pose_action_result_future = goal_handle.get_result_async()
        self.navigate_to_pose_action_result_future.add_done_callback(
            self.get_result_callback)
def main(args=None):
    rclpy.init(args=args)

    node = None

    # noinspection PyBroadException
    try:
        node = LocalizationBenchmarkSupervisor()
        node.start_run()
        rclpy.spin(node)

    except KeyboardInterrupt:
        if node is not None:
            node.ros_shutdown_callback()
    except RunFailException as e:
        print_error(e)
    except Exception:
        print_error(traceback.format_exc())

    finally:
        if node is not None:
            node.end_run()
Example #15
def log_packages_and_repos(source_workspace_path, log_dir_path):
    source_workspace_path = path.normpath(
        path.expanduser(source_workspace_path))
    log_dir_path = path.normpath(path.expanduser(log_dir_path))

    if path.isdir(log_dir_path):
        backup_file_if_exists(log_dir_path)
        os.makedirs(log_dir_path)
    else:
        if not path.exists(log_dir_path):
            os.makedirs(log_dir_path)
        else:
            print_error(
                "log_dir_path already exists but is not a directory [{}]".
                format(log_dir_path))
            return False

    source_repos_version_file_path = path.join(log_dir_path,
                                               "source_repos_version")
    source_repos_status_file_path = path.join(log_dir_path,
                                              "source_repos_status")
    installed_packages_version_file_path = path.join(
        log_dir_path, "installed_packages_version")
    ros_packages_file_path = path.join(log_dir_path, "ros_packages")

    if not path.isdir(source_workspace_path):
        print_error(
            "source_workspace_path does not exists or is not a directory [{}]".
            format(source_workspace_path))
        return False

    with open(source_repos_version_file_path,
              'w') as source_repos_version_list_file:
        source_repo_git_path_list = glob.glob(
            path.join(source_workspace_path, "**", ".git"))
        for source_repo_git_path in source_repo_git_path_list:
            source_repo_path = path.abspath(
                path.join(source_repo_git_path, ".."))
            head_hash_output = subprocess.check_output(
                ["git", "rev-parse", "HEAD"],
                cwd=source_repo_path).decode('utf-8')  # check_output returns bytes in Python 3
            head_hash = head_hash_output.rstrip().replace('\n', ';')
            source_repos_version_list_file.write("{}, {}\n".format(
                source_repo_path, head_hash))

    with open(source_repos_status_file_path,
              'w') as source_repos_status_list_file:
        source_repo_git_path_list = glob.glob(
            path.join(source_workspace_path, "**", ".git"))
        for source_repo_git_path in source_repo_git_path_list:
            source_repo_path = path.abspath(
                path.join(source_repo_git_path, ".."))
            status_output = subprocess.check_output(["git", "status"],
                                                    cwd=source_repo_path)
            diff_output = subprocess.check_output(["git", "diff"],
                                                  cwd=source_repo_path)
            source_repos_status_list_file.write(
                "STATUS_BEGIN {repo_path}\n{status}STATUS_END\nDIFF_BEGIN {repo_path}\n{diff}\nDIFF_END\n"
                .format(repo_path=source_repo_path,
                        status=status_output,
                        diff=diff_output))

    with open(installed_packages_version_file_path,
              'w') as installed_packages_version_file:
        dpkg_list_output = subprocess.check_output(["dpkg", "-l"]).decode('utf-8')
        installed_packages_version_file.write(dpkg_list_output)

    with open(ros_packages_file_path, 'w') as ros_packages_file:
        rospack_list_output = subprocess.check_output(
            ["rospack", "list"], cwd=source_workspace_path).decode('utf-8')
        ros_packages_file.write(rospack_list_output)

    return True
Example #16
def update_run_parameters(base_run_folder_path):

    base_run_folder = path.expanduser(base_run_folder_path)

    if not path.isdir(base_run_folder):
        print_error(
            "update_run_parameters: base_run_folder does not exists or is not a directory"
            .format(base_run_folder))
        return None, None

    run_folders = sorted(
        list(
            filter(path.isdir,
                   glob.glob(path.abspath(base_run_folder) + '/*'))))

    # collect results from runs not already cached
    print_info("update_run_parameters: reading run data")
    no_output = True
    for i, run_folder in enumerate(run_folders):
        current_run_info_file_path = path.join(run_folder, "run_info.yaml")
        original_run_info_file_path = path.join(run_folder,
                                                "run_info_original.yaml")

        if not path.exists(current_run_info_file_path) and not path.exists(
                original_run_info_file_path):
            print_error(
                "update_run_parameters: run_info file does not exists [{}]".
                format(run_folder))
            no_output = False
            continue

        # backup current run_info if there is not already a backup of the original
        # do not overwrite the original, since it will always be used to create the updated run_info
        if not path.exists(original_run_info_file_path):
            shutil.copyfile(current_run_info_file_path,
                            original_run_info_file_path)

        with open(original_run_info_file_path) as original_run_info_file:
            run_info = yaml.safe_load(original_run_info_file)

        if 'run_parameters' not in run_info:
            print_error(
                "update_run_parameters: run_parameters not in run_info [{}]".
                format(run_folder))
            no_output = False
            continue

        if 'slam_node' not in run_info['run_parameters']:
            print_error(
                "slam_node not in run_info['run_parameters'] [{}]".format(
                    run_folder))
            no_output = False
            continue

        slam_node = run_info['run_parameters']['slam_node']

        # linear_update, angular_update -> linear_angular_update
        if slam_node == 'slam_toolbox':
            slam_config_path = path.join(run_folder,
                                         "components_configuration",
                                         "slam_toolbox",
                                         "slam_toolbox_online_async.yaml")
            if not path.exists(slam_config_path):
                print_error("not path.exists(slam_config_path) [{}]".format(
                    slam_config_path))
                no_output = False
                continue
            slam_config = get_yaml(slam_config_path)
            linear_update = slam_config['minimum_travel_distance']
            angular_update = slam_config['minimum_travel_heading']
            run_info['run_parameters']['linear_angular_update'] = [
                linear_update, angular_update
            ]
        elif slam_node == 'gmapping':
            slam_config_path = path.join(run_folder,
                                         "components_configuration",
                                         "gmapping", "gmapping.yaml")
            if not path.exists(slam_config_path):
                print_error("not path.exists(slam_config_path) [{}]".format(
                    slam_config_path))
                no_output = False
                continue
            slam_config = get_yaml(slam_config_path)
            linear_update = slam_config['linearUpdate']
            angular_update = slam_config['angularUpdate']
            run_info['run_parameters']['linear_angular_update'] = [
                linear_update, angular_update
            ]
        else:
            print_error("slam_node = {}".format(slam_node))
            no_output = False
            continue

        if 'linear_update' in run_info['run_parameters']:
            del run_info['run_parameters']['linear_update']
        if 'angular_update' in run_info['run_parameters']:
            del run_info['run_parameters']['angular_update']

        # remove lidar_model
        if 'lidar_model' in run_info['run_parameters']:
            del run_info['run_parameters']['lidar_model']

        # add goal_tolerance
        nav_config_path = path.join(run_folder, "components_configuration",
                                    "move_base", "move_base_tb3.yaml")
        if not path.exists(nav_config_path):
            print_error("not path.exists(nav_config_path) [{}]".format(
                nav_config_path))
            no_output = False
            continue
        nav_config = get_yaml(nav_config_path)
        xy_goal_tolerance = nav_config['DWAPlannerROS']['xy_goal_tolerance']
        yaw_goal_tolerance = nav_config['DWAPlannerROS']['yaw_goal_tolerance']
        run_info['run_parameters']['goal_tolerance'] = [
            xy_goal_tolerance, yaw_goal_tolerance
        ]

        # add fewer_nav_goals (false if not specified in the configuration)
        sup_config_path = path.join(run_folder, "components_configuration",
                                    "slam_benchmark_supervisor",
                                    "slam_benchmark_supervisor.yaml")
        if not path.exists(sup_config_path):
            print_error("not path.exists(sup_config_path) [{}]".format(
                sup_config_path))
            no_output = False
            continue
        sup_config = get_yaml(sup_config_path)
        if 'fewer_nav_goals' in sup_config:
            fewer_nav_goals = sup_config['fewer_nav_goals']
        else:
            fewer_nav_goals = False
        run_info['run_parameters']['fewer_nav_goals'] = fewer_nav_goals

        with open(current_run_info_file_path, 'w') as current_run_info_file:
            yaml.dump(run_info,
                      current_run_info_file,
                      default_flow_style=False)

        print_info("update_run_parameters: {}%".format(
            int((i + 1) * 100 / len(run_folders))),
                   replace_previous_line=no_output)
        no_output = True
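# get_yaml is used above but not defined in these snippets; a minimal sketch of the
# assumed helper, consistent with how the other snippets read YAML files:
def get_yaml(yaml_file_path):
    with open(yaml_file_path) as yaml_file:
        return yaml.safe_load(yaml_file)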
Example #17
def execute_grid_benchmark(benchmark_run_object, grid_benchmark_configuration,
                           environment_folders, base_run_folder, num_runs,
                           ignore_executed_params_combinations, shuffle,
                           headless, show_ros_info):

    if not path.exists(base_run_folder):
        os.makedirs(base_run_folder)

    log_file_path = path.join(base_run_folder, "benchmark_log.csv")

    # get the list of param combinations already executed
    executed_params_combinations = defaultdict(int)
    run_folders = sorted(
        list(
            filter(path.isdir,
                   glob.glob(path.abspath(base_run_folder) + '/*'))))
    if not ignore_executed_params_combinations:
        for i, run_folder in enumerate(run_folders):
            run_info_file_path = path.join(run_folder, "run_info.yaml")
            if path.exists(run_info_file_path):
                # noinspection PyBroadException
                try:
                    with open(run_info_file_path) as run_info_file:
                        run_info = yaml.safe_load(run_info_file)
                        params_dict = run_info['run_parameters']
                        params_dict['environment_name'] = path.basename(
                            path.abspath(run_info['environment_folder']))
                        # convert any list into a tuple to allow hashing
                        for param_name, param_value in params_dict.items():
                            if isinstance(param_value, list):
                                params_dict[param_name] = tuple(param_value)
                        params_hashable_dict = hashable_dict(params_dict)
                        executed_params_combinations[params_hashable_dict] += 1
                except:
                    print_error(traceback.format_exc())

    with open(grid_benchmark_configuration, 'r') as f:
        grid_benchmark_configuration = yaml.safe_load(f)

    if isinstance(grid_benchmark_configuration['combinatorial_parameters'],
                  list):
        combinatorial_parameters_dict_list = grid_benchmark_configuration[
            'combinatorial_parameters']
    elif isinstance(grid_benchmark_configuration['combinatorial_parameters'],
                    dict):
        combinatorial_parameters_dict_list = [
            grid_benchmark_configuration['combinatorial_parameters']
        ]
    else:
        print_error(
            "grid_benchmark_configuration combinatorial_parameters must be a list or dict"
        )
        sys.exit(-1)

    environment_folders_by_name = dict()
    parameters_combinations_dict_list = list()
    for combinatorial_parameters_dict in combinatorial_parameters_dict_list:
        # add environment_name to the parameters and populate the environment_folders_by_name lookup dict
        combinatorial_parameters_dict['environment_name'] = list()
        for environment_folder in environment_folders:
            environment_name = path.basename(path.abspath(environment_folder))
            environment_folders_by_name[environment_name] = environment_folder
            combinatorial_parameters_dict['environment_name'].append(
                environment_name)

        # convert the dict with {parameter_name_1: [parameter_value_1, parameter_value_2], ...}
        # to the list [(parameter_name_1, parameter_value_1), (parameter_name_1, parameter_value_2), ...]
        parameters_alternatives = list()
        for parameter_name, parameter_values_list in combinatorial_parameters_dict.items():
            parameter_list_of_tuples = list(
                map(lambda parameter_value: (parameter_name, parameter_value),
                    parameter_values_list))
            parameters_alternatives.append(parameter_list_of_tuples)

        # obtain the list of combinations from the list of alternatives
        # ex: itertools.product([(a, 1), (a, 2)], [(b, 0.1), (b, 0.2)]) --> [ [(a, 1), (b, 0.1)], [(a, 1), (b, 0.2)], [(a, 2), (b, 0.1)], [(a, 2), (b, 0.2)] ]
        parameters_combinations_lists = list(
            itertools.product(*parameters_alternatives))

        # convert the list of lists to a list of dicts
        parameters_combinations_dict_list += list(
            map(dict, parameters_combinations_lists))

    remaining_params_combinations = list()
    for parameters_combination_dict in parameters_combinations_dict_list:
        parameters_combination_dict_copy = parameters_combination_dict.copy()
        # convert any list into a tuple to allow hashing
        for param_name, param_value in parameters_combination_dict_copy.items():
            if isinstance(param_value, list):
                parameters_combination_dict_copy[param_name] = tuple(param_value)
        parameters_combination_hashable_dict = hashable_dict(
            parameters_combination_dict_copy)
        executed_repetitions = executed_params_combinations[
            parameters_combination_hashable_dict]
        num_remaining_runs = num_runs - executed_repetitions
        if num_remaining_runs > 0:
            remaining_params_combinations += [parameters_combination_dict
                                              ] * num_remaining_runs
            executed_params_combinations[
                parameters_combination_hashable_dict] += num_remaining_runs  # count this combination in executed_params_combinations in case there are duplicated combinations in parameters_combinations_dict_list

    num_combinations = len(parameters_combinations_dict_list)
    num_runs_remaining = len(remaining_params_combinations)
    num_executed_runs = len(run_folders)
    num_executed_combinations = len(
        list(filter(lambda x: x > 0, executed_params_combinations.values())))

    if ignore_executed_params_combinations:
        print_info("ignoring previous runs")
    else:
        print_info(
            "found {num_executed_runs} executed runs with {num_executed_combinations} params combinations"
            .format(num_executed_runs=num_executed_runs,
                    num_executed_combinations=num_executed_combinations))
    print_info("number of parameter combinations: {}".format(num_combinations))
    print_info("number of repetition runs:        {}".format(num_runs))
    print_info("total number of runs:             {}".format(num_runs *
                                                             num_combinations))
    print_info(
        "remaining number of runs:         {}".format(num_runs_remaining))

    if shuffle:
        # shuffle the remaining params combinations, to avoid executing consecutively the run repetitions with the same combination
        print_info("shuffling remaining params combinations")
        random.shuffle(remaining_params_combinations)
    else:
        print_info("not shuffling remaining params combinations")

    # generate a session id
    session_id = datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S_%f')

    for n_executed_runs, parameters_combination_dict in enumerate(
            remaining_params_combinations):
        environment_folder = environment_folders_by_name[
            parameters_combination_dict['environment_name']]

        # find an available run folder path
        i = 0
        run_folder = path.join(
            base_run_folder,
            "session_{session_id}_run_{run_number:09d}".format(
                session_id=session_id, run_number=i))
        while path.exists(run_folder):
            i += 1
            run_folder = path.join(
                base_run_folder,
                "session_{session_id}_run_{run_number:09d}".format(
                    session_id=session_id, run_number=i))

        print_info(
            "\n\n\nbenchmark: starting run {run_index} ({remaining_runs} remaining)"
            .format(run_index=i,
                    remaining_runs=len(remaining_params_combinations) -
                    n_executed_runs))
        print_info("\tenvironment_folder:", environment_folder)
        print_info("\tparameters_combination_dict:")
        for k, v in parameters_combination_dict.items():
            print_info("\t\t{}: {}".format(k, v))

        # instantiate and execute the run
        # noinspection PyBroadException
        try:
            r = benchmark_run_object(
                run_id=i,
                run_output_folder=run_folder,
                benchmark_log_path=log_file_path,
                environment_folder=environment_folder,
                parameters_combination_dict=parameters_combination_dict,
                benchmark_configuration_dict=grid_benchmark_configuration,
                show_ros_info=show_ros_info,
                headless=headless,
            )

            r.execute_run()

            if r.aborted:
                print_info(
                    "benchmark: run {run_index} aborted".format(run_index=i))
                print_info("benchmark: interrupted")
                sys.exit(0)
            else:
                print_info(
                    "benchmark: run {run_index} completed".format(run_index=i))

        except (IOError, ValueError, ZeroDivisionError):
            print_fatal(traceback.format_exc())
        except SystemExit as system_exit_ex:
            print_info("System exit code: ", system_exit_ex.code)
            sys.exit(0)
        except:
            print_fatal(traceback.format_exc())
            sys.exit(0)

    print_info("benchmark: finished")
Example #18
def black_white_to_ground_truth_map(input_map_path,
                                    map_info_path,
                                    trim_borders=False,
                                    occupied_threshold=150,
                                    blur_filter_radius=0,
                                    do_not_recompute=False,
                                    backup_if_exists=False,
                                    map_files_dump_path=None):
    with open(map_info_path) as map_info_file:
        map_info = yaml.safe_load(map_info_file)

    if path.isabs(map_info['image']):
        map_image_path = map_info['image']
    else:
        map_image_path = path.join(path.dirname(map_info_path),
                                   map_info['image'])

    if path.exists(map_image_path) and do_not_recompute:
        print_info("do_not_recompute: will not recompute image {}".format(
            map_image_path))
        return

    input_image = Image.open(input_map_path)

    if input_image.mode != 'RGB':
        # remove alpha channel by pasting on white background
        background = Image.new("RGB", input_image.size,
                               GroundTruthMap.free_rgb)
        background.paste(input_image)
        input_image = background

    # apply a blur filter to reduce artifacts in images that have been saved to lossy formats (may cause the thickness of walls to change)
    if blur_filter_radius > 0:
        input_image = input_image.filter(
            ImageFilter.BoxBlur(blur_filter_radius))

    # saturate all colors below the threshold to black and all values above threshold to the unknown color value (they will be colored to free later)
    # this is needed because some images have been heavily compressed and contain all sort of colors
    threshold_image = input_image.point(
        lambda original_value: GroundTruthMap.occupied
        if original_value < occupied_threshold else GroundTruthMap.unknown)
    threshold_image.save(path.expanduser("~/tmp/threshold_image.pgm"))

    # trim borders not containing black pixels (some simulators ignore non-black borders while placing the pixels in the simulated map and computing its resolution, so they need to be ignored in the following calculations)
    if trim_borders:
        trimmed_image = trim(threshold_image, GroundTruthMap.unknown_rgb)
        trimmed_image.save(path.expanduser("~/tmp/trimmed_image.pgm"))
    else:
        trimmed_image = threshold_image

    map_offset_meters = np.array(map_info['origin'][0:2], dtype=float)

    map_rotation_offset = float(map_info['origin'][2])
    if map_rotation_offset != 0:
        print_error("black_white_to_ground_truth_map: map rotation not supported")

    resolution = float(map_info['resolution'])  # meter/pixel
    map_frame_meters = -map_offset_meters

    if 'initial_pose' in map_info:
        initial_position_list = map_info['initial_pose']
    else:
        initial_position_list = [0.0, 0.0]
    initial_position_meters = np.array(initial_position_list, dtype=float)

    w, h = trimmed_image.size
    i_x, i_y = initial_position_pixels = list(
        map(
            int,
            np.array([0, h]) + np.array([1, -1]) *
            (map_frame_meters + initial_position_meters) / resolution))

    if i_x < 0 or i_x >= w or i_y < 0 or i_y >= h:
        print_fatal("initial_position out of map bounds")
        return

    pixels = trimmed_image.load()
    if pixels[i_x, i_y] != GroundTruthMap.unknown_rgb:
        print_fatal("initial position in a wall pixel")
        return

    # rename variable for clarity
    map_image = trimmed_image

    # convert to free the pixels accessible from the initial position
    floodfill(map_image,
              initial_position_pixels,
              GroundTruthMap.free_rgb,
              thresh=10)

    if backup_if_exists:
        backup_file_if_exists(map_image_path)

    try:
        map_image.save(map_image_path)
        if map_files_dump_path is not None:
            print(map_files_dump_path)
            map_files_dump_path = path.abspath(
                path.expanduser(map_files_dump_path))
            print(map_files_dump_path)
            if not path.exists(map_files_dump_path):
                os.makedirs(map_files_dump_path)
            dataset_name = path.basename(
                path.dirname(path.dirname(map_image_path)))
            map_image.save(
                path.join(map_files_dump_path, dataset_name + '.pgm'))
    except (IOError, TypeError):
        print_fatal(
            "Error while saving image {img}:".format(img=map_image_path))
        print_error(traceback.format_exc())
Example #19
            progress * 100 // len(environment_folders), environment_folder))

        map_info_file_path = path.join(environment_folder, "data", "map.yaml")

        # compute GroundTruthMap data from source image
        source_map_file_path = None
        source_pgm_map_file_path = path.join(environment_folder, "data",
                                             "source_map.pgm")
        source_png_map_file_path = path.join(environment_folder, "data",
                                             "source_map.png")
        if path.exists(source_pgm_map_file_path):
            source_map_file_path = source_pgm_map_file_path
        elif path.exists(source_png_map_file_path):
            source_map_file_path = source_png_map_file_path
        else:
            print_error("source_map file not found")

        if source_map_file_path is not None:
            black_white_to_ground_truth_map(
                source_map_file_path,
                map_info_file_path,
                do_not_recompute=not recompute_data,
                map_files_dump_path=dump_path)

        if save_visualization_plots:
            # compute voronoi plot
            robot_radius_plot = 2.5 * 0.2
            voronoi_plot_file_path = path.join(environment_folder, "data",
                                               "visualization", "voronoi.svg")
            reduced_voronoi_plot_file_path = path.join(environment_folder,
                                                       "data", "visualization",
def collect_data(base_run_folder_path, invalidate_cache=False):

    base_run_folder = path.expanduser(base_run_folder_path)
    cache_file_path = path.join(base_run_folder, "run_data_per_waypoint_cache.pkl")

    if not path.isdir(base_run_folder):
        print_error("collect_data: base_run_folder does not exists or is not a directory".format(base_run_folder))
        return None, None

    run_folders = sorted(list(filter(path.isdir, glob.glob(path.abspath(base_run_folder) + '/*'))))

    record_list = list()
    if invalidate_cache or not path.exists(cache_file_path):
        df = pd.DataFrame()
        parameter_names = set()
        cached_run_folders = set()
    else:
        print_info("collect_data: updating cache")
        with open(cache_file_path, 'rb') as f:
            cache = pickle.load(f)
        df = cache['df']
        parameter_names = cache['parameter_names']
        cached_run_folders = set(df['run_folder'])

    # collect results from runs not already cached
    print_info("collect_data: reading run data")
    no_output = True
    for i, run_folder in enumerate(run_folders):
        metric_results_folder = path.join(run_folder, "metric_results")
        run_info_file_path = path.join(run_folder, "run_info.yaml")
        metrics_file_path = path.join(metric_results_folder, "metrics.yaml")

        if not path.exists(metric_results_folder):
            print_error("collect_data: metric_results_folder does not exists [{}]".format(metric_results_folder))
            no_output = False
            continue
        if not path.exists(run_info_file_path):
            print_error("collect_data: run_info file does not exists [{}]".format(run_info_file_path))
            no_output = False
            continue
        if run_folder in cached_run_folders:
            continue

        run_info = get_yaml(run_info_file_path)

        try:
            metrics_dict = get_yaml(metrics_file_path)
        except IOError:
            print_error("metric_results could not be read: {}".format(metrics_file_path))
            no_output = False
            continue

        run_record = dict()

        for parameter_name, parameter_value in run_info['run_parameters'].items():
            parameter_names.add(parameter_name)
            if type(parameter_value) == list:
                parameter_value = tuple(parameter_value)
            run_record[parameter_name] = parameter_value

        # parameter_names.add('environment_name')  # environment name is already inside
        run_record['environment_name'] = path.basename(path.abspath(run_info['environment_folder']))
        run_record['run_folder'] = run_folder


        # collect per waypoint metric results

        euclidean_length_over_voronoi_distance_per_waypoint_dict = dict()
        euclidean_length_over_voronoi_distance_per_waypoint_list = get_yaml_by_path(metrics_dict, ['euclidean_length_over_voronoi_distance'])
        if euclidean_length_over_voronoi_distance_per_waypoint_list is not None:
            for euclidean_length_over_voronoi_distance_per_waypoint in euclidean_length_over_voronoi_distance_per_waypoint_list:
                if euclidean_length_over_voronoi_distance_per_waypoint is not None and all(k in euclidean_length_over_voronoi_distance_per_waypoint for k in ('i_x', 'i_y', 'g_x', 'g_y')):
                    waypoint_key = (euclidean_length_over_voronoi_distance_per_waypoint['i_x'],
                                    euclidean_length_over_voronoi_distance_per_waypoint['i_y'],
                                    euclidean_length_over_voronoi_distance_per_waypoint['g_x'],
                                    euclidean_length_over_voronoi_distance_per_waypoint['g_y'])
                    euclidean_length_over_voronoi_distance_per_waypoint_dict[waypoint_key] = euclidean_length_over_voronoi_distance_per_waypoint

        planning_time_over_voronoi_distance_per_waypoint_dict = dict()
        planning_time_over_voronoi_distance_per_waypoint_list = get_yaml_by_path(metrics_dict, ['planning_time_over_voronoi_distance'])
        if planning_time_over_voronoi_distance_per_waypoint_list is not None:
            for planning_time_over_voronoi_distance_per_waypoint in planning_time_over_voronoi_distance_per_waypoint_list:
                if planning_time_over_voronoi_distance_per_waypoint is not None and all(k in planning_time_over_voronoi_distance_per_waypoint for k in ('i_x', 'i_y', 'g_x', 'g_y')):
                    waypoint_key = (planning_time_over_voronoi_distance_per_waypoint['i_x'],
                                    planning_time_over_voronoi_distance_per_waypoint['i_y'],
                                    planning_time_over_voronoi_distance_per_waypoint['g_x'],
                                    planning_time_over_voronoi_distance_per_waypoint['g_y'])
                    planning_time_over_voronoi_distance_per_waypoint_dict[waypoint_key] = planning_time_over_voronoi_distance_per_waypoint

        feasibility_rate_per_waypoint_dict = dict()
        feasibility_rate_per_waypoint_list = get_yaml_by_path(metrics_dict, ['feasibility_rate'])
        if feasibility_rate_per_waypoint_list is not None:
            for feasibility_rate_per_waypoint in feasibility_rate_per_waypoint_list:
                if feasibility_rate_per_waypoint is not None and all(k in feasibility_rate_per_waypoint for k in ('i_x', 'i_y', 'g_x', 'g_y')):
                    waypoint_key = (feasibility_rate_per_waypoint['i_x'],
                                    feasibility_rate_per_waypoint['i_y'],
                                    feasibility_rate_per_waypoint['g_x'],
                                    feasibility_rate_per_waypoint['g_y'])
                    feasibility_rate_per_waypoint_dict[waypoint_key] = feasibility_rate_per_waypoint

        mean_passage_width_per_waypoint_dict = dict()
        mean_passage_width_per_waypoint_list = get_yaml_by_path(metrics_dict, ['mean_passage_width'])
        if mean_passage_width_per_waypoint_list is not None:
            for mean_passage_width_per_waypoint in mean_passage_width_per_waypoint_list:
                if mean_passage_width_per_waypoint is not None and all(k in mean_passage_width_per_waypoint for k in ('i_x', 'i_y', 'g_x', 'g_y')):
                    waypoint_key = (mean_passage_width_per_waypoint['i_x'],
                                    mean_passage_width_per_waypoint['i_y'],
                                    mean_passage_width_per_waypoint['g_x'],
                                    mean_passage_width_per_waypoint['g_y'])
                    mean_passage_width_per_waypoint_dict[waypoint_key] = mean_passage_width_per_waypoint

        mean_normalized_passage_width_per_waypoint_dict = dict()
        mean_normalized_passage_width_per_waypoint_list = get_yaml_by_path(metrics_dict, ['mean_normalized_passage_width'])
        if mean_normalized_passage_width_per_waypoint_list is not None:
            for mean_normalized_passage_width_per_waypoint in mean_normalized_passage_width_per_waypoint_list:
                if mean_normalized_passage_width_per_waypoint is not None and all(k in mean_normalized_passage_width_per_waypoint for k in ('i_x', 'i_y', 'g_x', 'g_y')):
                    waypoint_key = (mean_normalized_passage_width_per_waypoint['i_x'],
                                    mean_normalized_passage_width_per_waypoint['i_y'],
                                    mean_normalized_passage_width_per_waypoint['g_x'],
                                    mean_normalized_passage_width_per_waypoint['g_y'])
                    mean_normalized_passage_width_per_waypoint_dict[waypoint_key] = mean_normalized_passage_width_per_waypoint

        minimum_passage_width_per_waypoint_dict = dict()
        minimum_passage_width_per_waypoint_list = get_yaml_by_path(metrics_dict, ['minimum_passage_width'])
        if minimum_passage_width_per_waypoint_list is not None:
            for minimum_passage_width_per_waypoint in minimum_passage_width_per_waypoint_list:
                if minimum_passage_width_per_waypoint is not None and all(k in minimum_passage_width_per_waypoint for k in ('i_x', 'i_y', 'g_x', 'g_y')):
                    waypoint_key = (minimum_passage_width_per_waypoint['i_x'],
                                    minimum_passage_width_per_waypoint['i_y'],
                                    minimum_passage_width_per_waypoint['g_x'],
                                    minimum_passage_width_per_waypoint['g_y'])
                    minimum_passage_width_per_waypoint_dict[waypoint_key] = minimum_passage_width_per_waypoint

        for waypoint_position in minimum_passage_width_per_waypoint_dict.keys():
            run_record_per_waypoint = run_record.copy()

            run_record_per_waypoint['i_x'] = waypoint_position[0]   # position i_x
            run_record_per_waypoint['i_y'] = waypoint_position[1]   # position i_y
            run_record_per_waypoint['g_x'] = waypoint_position[2]   # position g_x
            run_record_per_waypoint['g_y'] = waypoint_position[3]   # position g_y

            all_euclidean_length_over_voronoi_distance_metrics = get_yaml_by_path(euclidean_length_over_voronoi_distance_per_waypoint_dict, [waypoint_position])
            if all_euclidean_length_over_voronoi_distance_metrics is not None:
                for euclidean_length_over_voronoi_distance_metric_name, euclidean_length_over_voronoi_distance_metric_value in all_euclidean_length_over_voronoi_distance_metrics.items():
                    run_record_per_waypoint['euclidean_length_over_voronoi_distance_' + euclidean_length_over_voronoi_distance_metric_name] = euclidean_length_over_voronoi_distance_metric_value

            all_planning_time_over_voronoi_distance_metrics = get_yaml_by_path(planning_time_over_voronoi_distance_per_waypoint_dict, [waypoint_position])
            if all_planning_time_over_voronoi_distance_metrics is not None:
                for planning_time_over_voronoi_distance_metric_name, planning_time_over_voronoi_distance_metric_value in all_planning_time_over_voronoi_distance_metrics.items():
                    run_record_per_waypoint['planning_time_over_voronoi_distance_' + planning_time_over_voronoi_distance_metric_name] = planning_time_over_voronoi_distance_metric_value

            all_feasibility_rate_metrics = get_yaml_by_path(feasibility_rate_per_waypoint_dict, [waypoint_position])
            if all_feasibility_rate_metrics is not None:
                for feasibility_rate_metric_name, feasibility_rate_metric_value in all_feasibility_rate_metrics.items():
                    run_record_per_waypoint['feasibility_rate_' + feasibility_rate_metric_name] = feasibility_rate_metric_value

            all_mean_passage_width_metrics = get_yaml_by_path(mean_passage_width_per_waypoint_dict, [waypoint_position])
            if all_mean_passage_width_metrics is not None:
                for mean_passage_width_metric_name, mean_passage_width_metric_value in all_mean_passage_width_metrics.items():
                    run_record_per_waypoint['mean_passage_width_' + mean_passage_width_metric_name] = mean_passage_width_metric_value

            all_mean_normalized_passage_width_metrics = get_yaml_by_path(mean_normalized_passage_width_per_waypoint_dict, [waypoint_position])
            if all_mean_normalized_passage_width_metrics is not None:
                for mean_normalized_passage_width_metric_name, mean_normalized_passage_width_metric_value in all_mean_normalized_passage_width_metrics.items():
                    run_record_per_waypoint[
                        'mean_normalized_passage_width_' + mean_normalized_passage_width_metric_name] = mean_normalized_passage_width_metric_value

            minimum_passage_width_metrics = get_yaml_by_path(minimum_passage_width_per_waypoint_dict, [waypoint_position])
            if minimum_passage_width_metrics is not None:
                for minimum_passage_width_metrics_name, minimum_passage_width_metric_value in minimum_passage_width_metrics.items():
                    run_record_per_waypoint['minimum_passage_width_' + minimum_passage_width_metrics_name] = minimum_passage_width_metric_value

            record_list.append(run_record_per_waypoint)

        print_info("collect_data: reading run data: {}% {}/{} {}".format(int((i + 1)*100/len(run_folders)), i, len(run_folders), path.basename(run_folder)), replace_previous_line=no_output)
        no_output = True

    df = pd.DataFrame(record_list)

    # save cache
    if cache_file_path is not None:
        cache = {'df': df, 'parameter_names': parameter_names}
        with open(cache_file_path, 'wb') as f:
            pickle.dump(cache, f, protocol=2)

    return df, parameter_names
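# Usage sketch (the base run folder path is illustrative):
df, parameter_names = collect_data("~/ds/performance_modelling/output/test_planning")
if df is not None:
    print(len(df.index), "per-waypoint records,", len(parameter_names), "run parameters")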
Example #21
def relative_localization_error_metrics(log_output_folder, estimated_poses_file_path, ground_truth_poses_file_path, alpha=0.99, max_error=0.02, compute_sequential_relations=False):
    """
    Generates the ordered and the random relations files and computes the metrics
    """

    if not path.exists(log_output_folder):
        os.makedirs(log_output_folder)

    # check required files exist
    if not path.isfile(estimated_poses_file_path):
        print_error("compute_relative_localization_error: estimated_poses file not found {}".format(estimated_poses_file_path))
        return

    if not path.isfile(ground_truth_poses_file_path):
        print_error("compute_relative_localization_error: ground_truth_poses file not found {}".format(ground_truth_poses_file_path))
        return

    relative_errors_dict = dict()

    # find the metricEvaluator executable
    metric_evaluator_exec_path = path.join(path.dirname(path.abspath(__file__)), "metricEvaluator", "metricEvaluator")

    estimated_poses_df = pd.read_csv(estimated_poses_file_path)
    if len(estimated_poses_df.index) < 2:
        print_error("not enough estimated poses data points")
        return

    ground_truth_poses_df = pd.read_csv(ground_truth_poses_file_path)
    interpolated_ground_truth_df = compute_ground_truth_interpolated_poses(estimated_poses_df, ground_truth_poses_df)

    # if no matching ground truth data points are found, the metrics cannot be computed
    if len(interpolated_ground_truth_df.index) < 2:
        print_error("no matching ground truth data points were found")
        return

    # convert estimated_poses_file to the CARMEN format
    estimated_poses_carmen_file_path = path.join(log_output_folder, "estimated_poses_carmen_format")
    with open(estimated_poses_carmen_file_path, "w") as estimated_poses_carmen_file:
        for index, row in estimated_poses_df.iterrows():
            estimated_poses_carmen_file.write("FLASER 0 0.0 0.0 0.0 {x} {y} {theta} {t}\n".format(x=row['x'], y=row['y'], theta=row['theta'], t=row['t']))

    # random relations
    relations_re_file_path = path.join(log_output_folder, "re_relations")
    with open(relations_re_file_path, "w") as relations_file_re:

        n_samples = 500
        for _ in range(n_samples):
            two_random_poses = interpolated_ground_truth_df[['t', 'x_gt', 'y_gt', 'theta_gt']].sample(n=2)
            t_1, x_1, y_1, theta_1 = two_random_poses.iloc[0]
            t_2, x_2, y_2, theta_2 = two_random_poses.iloc[1]

            # reorder data so that t_1 is before t_2
            if t_1 > t_2:
                # swap 1 and 2
                t_1, x_1, y_1, theta_1, t_2, x_2, y_2, theta_2 = t_2, x_2, y_2, theta_2, t_1, x_1, y_1, theta_1

            rel = get_matrix_diff((x_1, y_1, theta_1), (x_2, y_2, theta_2))

            x_relative = rel[0, 3]
            y_relative = rel[1, 3]
            theta_relative = math.atan2(rel[1, 0], rel[0, 0])

            relations_file_re.write("{t_1} {t_2} {x} {y} 0.000000 0.000000 0.000000 {theta}\n".format(t_1=t_1, t_2=t_2, x=x_relative, y=y_relative, theta=theta_relative))

    # Run the metric evaluator on this relations file, read the sample standard deviation and exploit it to rebuild a better sample
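    # (sample size from the confidence-interval bound n = t_{alpha, n-1}^2 * var / delta^2,
    # with var estimated from the pilot sample of 500 relations and delta = max_error)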

    # Compute translational sample size
    summary_t_file_path = path.join(log_output_folder, "summary_t_errors")
    metric_evaluator(exec_path=metric_evaluator_exec_path,
                     poses_path=estimated_poses_carmen_file_path,
                     relations_path=relations_re_file_path,
                     weights="{1, 1, 1, 0, 0, 0}",
                     log_path=path.join(log_output_folder, "summary_t.log"),
                     errors_path=summary_t_file_path)

    with open(summary_t_file_path, "r") as error_file:
        content = error_file.readlines()
    words = content[1].split(", ")
    std = float(words[1])
    var = math.pow(std, 2)
    z_a_2 = t.ppf(alpha, n_samples - 1)
    delta = max_error
    n_samples_t = int(math.pow(z_a_2, 2) * var / math.pow(delta, 2))

    # Compute rotational sample size
    summary_r_file_path = path.join(log_output_folder, "summary_r_errors")
    metric_evaluator(exec_path=metric_evaluator_exec_path,
                     poses_path=estimated_poses_carmen_file_path,
                     relations_path=relations_re_file_path,
                     weights="{0, 0, 0, 1, 1, 1}",
                     log_path=path.join(log_output_folder, "summary_r.log"),
                     errors_path=summary_r_file_path)

    with open(summary_r_file_path, "r") as error_file:
        content = error_file.readlines()
    words = content[1].split(", ")
    std = float(words[1])
    var = math.pow(std, 2)
    z_a_2 = t.ppf(alpha, n_samples - 1)
    delta = max_error
    n_samples_r = int(math.pow(z_a_2, 2) * var / math.pow(delta, 2))

    # Select the biggest of the two
    n_samples = max(n_samples_t, n_samples_r)
    if n_samples < 10:
        print_error("n_samples too low", n_samples, n_samples_t, n_samples_r)
        return

    with open(relations_re_file_path, "w") as relations_file_re:
        for _ in range(n_samples):
            two_random_poses = interpolated_ground_truth_df[['t', 'x_gt', 'y_gt', 'theta_gt']].sample(n=2)
            t_1, x_1, y_1, theta_1 = two_random_poses.iloc[0]
            t_2, x_2, y_2, theta_2 = two_random_poses.iloc[1]

            # reorder data so that t_1 is before t_2
            if t_1 > t_2:
                # swap 1 and 2
                t_1, x_1, y_1, theta_1, t_2, x_2, y_2, theta_2 = t_2, x_2, y_2, theta_2, t_1, x_1, y_1, theta_1

            rel = get_matrix_diff((x_1, y_1, theta_1), (x_2, y_2, theta_2))

            x_relative = rel[0, 3]
            y_relative = rel[1, 3]
            theta_relative = math.atan2(rel[1, 0], rel[0, 0])

            relations_file_re.write("{t_1} {t_2} {x} {y} 0.000000 0.000000 0.000000 {theta}\n".format(t_1=t_1, t_2=t_2, x=x_relative, y=y_relative, theta=theta_relative))

    relative_errors_dict['random_relations'] = dict()

    metric_evaluator_re_t_results_csv_path = path.join(log_output_folder, "re_t.csv")
    metric_evaluator(exec_path=metric_evaluator_exec_path,
                     poses_path=estimated_poses_carmen_file_path,
                     relations_path=relations_re_file_path,
                     weights="{1, 1, 1, 0, 0, 0}",
                     log_path=path.join(log_output_folder, "re_t.log"),
                     errors_path=metric_evaluator_re_t_results_csv_path,
                     unsorted_errors_path=path.join(log_output_folder, "re_t_unsorted_errors"))

    metric_evaluator_re_t_results_df = pd.read_csv(metric_evaluator_re_t_results_csv_path, sep=', ', engine='python')
    relative_errors_dict['random_relations']['translation'] = dict()
    relative_errors_dict['random_relations']['translation']['mean'] = float(metric_evaluator_re_t_results_df['Mean'][0])
    relative_errors_dict['random_relations']['translation']['std'] = float(metric_evaluator_re_t_results_df['Std'][0])
    relative_errors_dict['random_relations']['translation']['min'] = float(metric_evaluator_re_t_results_df['Min'][0])
    relative_errors_dict['random_relations']['translation']['max'] = float(metric_evaluator_re_t_results_df['Max'][0])
    relative_errors_dict['random_relations']['translation']['n'] = float(metric_evaluator_re_t_results_df['NumMeasures'][0])

    metric_evaluator_re_r_results_csv_path = path.join(log_output_folder, "re_r.csv")
    metric_evaluator(exec_path=metric_evaluator_exec_path,
                     poses_path=estimated_poses_carmen_file_path,
                     relations_path=relations_re_file_path,
                     weights="{0, 0, 0, 1, 1, 1}",
                     log_path=path.join(log_output_folder, "re_r.log"),
                     errors_path=metric_evaluator_re_r_results_csv_path,
                     unsorted_errors_path=path.join(log_output_folder, "re_r_unsorted_errors"))

    metric_evaluator_re_r_results_df = pd.read_csv(metric_evaluator_re_r_results_csv_path, sep=', ', engine='python')
    relative_errors_dict['random_relations']['rotation'] = dict()
    relative_errors_dict['random_relations']['rotation']['mean'] = float(metric_evaluator_re_r_results_df['Mean'][0])
    relative_errors_dict['random_relations']['rotation']['std'] = float(metric_evaluator_re_r_results_df['Std'][0])
    relative_errors_dict['random_relations']['rotation']['min'] = float(metric_evaluator_re_r_results_df['Min'][0])
    relative_errors_dict['random_relations']['rotation']['max'] = float(metric_evaluator_re_r_results_df['Max'][0])
    relative_errors_dict['random_relations']['rotation']['n'] = float(metric_evaluator_re_r_results_df['NumMeasures'][0])

    # sequential relations
    if compute_sequential_relations:
        ordered_relations_file_path = path.join(log_output_folder, "ordered_relations")
        with open(ordered_relations_file_path, "w") as relations_file_ordered:

            idx_delta = max(1, int(len(interpolated_ground_truth_df.index) / n_samples))
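            # pair each subsampled pose with the pose idx_delta rows later, so the
            # ordered relations span the trajectory with roughly n_samples relations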

            for idx, first_row in interpolated_ground_truth_df.iloc[::idx_delta].iloc[0: -1].iterrows():
                second_row = interpolated_ground_truth_df.iloc[idx + idx_delta]

                rel = get_matrix_diff((first_row['x_gt'], first_row['y_gt'], first_row['theta_gt']), (second_row['x_gt'], second_row['y_gt'], second_row['theta_gt']))
                x_relative = rel[0, 3]
                y_relative = rel[1, 3]
                theta_relative = math.atan2(rel[1, 0], rel[0, 0])

                relations_file_ordered.write("{t_1} {t_2} {x} {y} 0.000000 0.000000 0.000000 {theta}\n".format(t_1=first_row['t'], t_2=second_row['t'], x=x_relative, y=y_relative, theta=theta_relative))

        relative_errors_dict['sequential_relations'] = dict()

        metric_evaluator_ordered_t_results_csv_path = path.join(log_output_folder, "ordered_t.csv")
        metric_evaluator(exec_path=metric_evaluator_exec_path,
                         poses_path=estimated_poses_carmen_file_path,
                         relations_path=ordered_relations_file_path,
                         weights="{1, 1, 1, 0, 0, 0}",
                         log_path=path.join(log_output_folder, "ordered_t.log"),
                         errors_path=metric_evaluator_ordered_t_results_csv_path,
                         unsorted_errors_path=path.join(log_output_folder, "ordered_t_unsorted_errors"))

        metric_evaluator_ordered_t_results_df = pd.read_csv(metric_evaluator_ordered_t_results_csv_path, sep=', ', engine='python')
        relative_errors_dict['sequential_relations']['translation'] = dict()
        relative_errors_dict['sequential_relations']['translation']['mean'] = float(metric_evaluator_ordered_t_results_df['Mean'][0])
        relative_errors_dict['sequential_relations']['translation']['std'] = float(metric_evaluator_ordered_t_results_df['Std'][0])
        relative_errors_dict['sequential_relations']['translation']['min'] = float(metric_evaluator_ordered_t_results_df['Min'][0])
        relative_errors_dict['sequential_relations']['translation']['max'] = float(metric_evaluator_ordered_t_results_df['Max'][0])
        relative_errors_dict['sequential_relations']['translation']['n'] = float(metric_evaluator_ordered_t_results_df['NumMeasures'][0])

        metric_evaluator_ordered_r_results_csv_path = path.join(log_output_folder, "ordered_r.csv")
        metric_evaluator(exec_path=metric_evaluator_exec_path,
                         poses_path=estimated_poses_carmen_file_path,
                         relations_path=ordered_relations_file_path,
                         weights="{0, 0, 0, 1, 1, 1}",
                         log_path=path.join(log_output_folder, "ordered_r.log"),
                         errors_path=metric_evaluator_ordered_r_results_csv_path,
                         unsorted_errors_path=path.join(log_output_folder, "ordered_r_unsorted_errors"))

        metric_evaluator_ordered_r_results_df = pd.read_csv(metric_evaluator_ordered_r_results_csv_path, sep=', ', engine='python')
        relative_errors_dict['sequential_relations']['rotation'] = dict()
        relative_errors_dict['sequential_relations']['rotation']['mean'] = float(metric_evaluator_ordered_r_results_df['Mean'][0])
        relative_errors_dict['sequential_relations']['rotation']['std'] = float(metric_evaluator_ordered_r_results_df['Std'][0])
        relative_errors_dict['sequential_relations']['rotation']['min'] = float(metric_evaluator_ordered_r_results_df['Min'][0])
        relative_errors_dict['sequential_relations']['rotation']['max'] = float(metric_evaluator_ordered_r_results_df['Max'][0])
        relative_errors_dict['sequential_relations']['rotation']['n'] = float(metric_evaluator_ordered_r_results_df['NumMeasures'][0])

    return relative_errors_dict
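# get_matrix_diff is defined elsewhere in this module. Given how rel is consumed
# above (translation read from rel[0:2, 3], rotation from atan2(rel[1, 0], rel[0, 0])),
# it is assumed to compute the 4x4 homogeneous transform from the first planar pose
# to the second. A minimal sketch under that assumption; the name and helper below
# are hypothetical, for illustration only.
import math

import numpy as np


def get_matrix_diff_sketch(pose_1, pose_2):
    # build a planar pose (x, y, theta) as a 4x4 homogeneous transform
    def to_homogeneous(x, y, theta):
        c, s = math.cos(theta), math.sin(theta)
        return np.array([[c, -s, 0.0, x],
                         [s, c, 0.0, y],
                         [0.0, 0.0, 1.0, 0.0],
                         [0.0, 0.0, 0.0, 1.0]])

    x_1, y_1, theta_1 = pose_1
    x_2, y_2, theta_2 = pose_2
    # T_rel = T_1^-1 * T_2: the second pose expressed in the frame of the first
    return np.linalg.inv(to_homogeneous(x_1, y_1, theta_1)).dot(to_homogeneous(x_2, y_2, theta_2))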
Example #22
0
def absolute_error_vs_scan_range(estimated_poses_file_path, ground_truth_poses_file_path, scans_file_path, samples_per_second=1.0):

    # check required files exist
    if not path.isfile(estimated_poses_file_path):
        print_error("absolute_error_vs_scan_range: estimated_poses file not found {}".format(estimated_poses_file_path))
        return

    if not path.isfile(ground_truth_poses_file_path):
        print_error("absolute_error_vs_scan_range: ground_truth_poses file not found {}".format(ground_truth_poses_file_path))
        return

    if not path.isfile(scans_file_path):
        print_error("absolute_error_vs_scan_range: scans file not found {}".format(scans_file_path))
        return

    start_time = time.time()
    estimated_poses_df = pd.read_csv(estimated_poses_file_path)
    if len(estimated_poses_df.index) < 2:
        print_error("not enough estimated poses data points")
        return
    print_info("estimated_poses_df", time.time() - start_time)

    with open(scans_file_path) as scans_file:
        scan_lines = scans_file.read().split('\n')

    start_time = time.time()
    scan_records = list()
    for scan_line in scan_lines:
        scan_fields = scan_line.split(', ')
        if len(scan_fields) > 1:
            t, angle_min, angle_max, angle_increment, range_min, range_max = map(float, scan_fields[0:6])
            # materialise the ranges in a list: in python 3, map returns a one-shot
            # iterator that would be exhausted by the first aggregation below
            ranges = list(map(float, scan_fields[6:]))
            num_valid_ranges = sum(map(lambda r: range_min < r < range_max, ranges))
            scan_records.append({
                't': t,
                'min_range': min(ranges),
                'max_range': max(ranges),
                'median_range': np.median(ranges),
                'num_valid_ranges': num_valid_ranges,
                'sensor_range_min': range_min,
                'sensor_range_max': range_max,
                'num_ranges': len(ranges),
                'angle_increment': angle_increment,
                'fov_rad': angle_max - angle_min
            })
    # build the data frame in one go, avoiding the cost of repeated DataFrame.append calls
    scans_df = pd.DataFrame(scan_records, columns=['t', 'min_range', 'max_range', 'median_range', 'num_valid_ranges', 'sensor_range_min', 'sensor_range_max', 'num_ranges', 'angle_increment', 'fov_rad'])
    print_info("scans_df", time.time() - start_time)

    # print(scans_df)

    start_time = time.time()
    ground_truth_poses_df = pd.read_csv(ground_truth_poses_file_path)
    print_info("ground_truth_poses_df", time.time() - start_time)

    start_time = time.time()
    ground_truth_interpolated_poses_file_path = path.splitext(estimated_poses_file_path)[0] + "ground_truth_interpolated.csv"
    if path.exists(ground_truth_interpolated_poses_file_path):
        complete_trajectory_df = pd.read_csv(ground_truth_interpolated_poses_file_path)
    else:
        complete_trajectory_df = compute_ground_truth_interpolated_poses(estimated_poses_df, ground_truth_poses_df)
        complete_trajectory_df.to_csv(ground_truth_interpolated_poses_file_path)

    run_duration = ground_truth_poses_df.iloc[-1]['t'] - ground_truth_poses_df.iloc[0]['t']
    resample_n = max(1, int(run_duration * samples_per_second))  # guard against a zero sample count on very short runs
    resample_ratio = max(1, len(complete_trajectory_df) // resample_n)
    trajectory_df = complete_trajectory_df.iloc[::resample_ratio].copy(deep=False)
    trajectory_df['abs_err'] = np.sqrt((trajectory_df['x_est'] - trajectory_df['x_gt'])**2 + (trajectory_df['y_est'] - trajectory_df['y_gt'])**2)
    # print(trajectory_df)
    print_info("trajectory_df", time.time() - start_time)

    start_time = time.time()
    merge_tolerance = 0.25
    tolerance = pd.Timedelta('{}s'.format(merge_tolerance))
    trajectory_df['t_datetime'] = pd.to_datetime(trajectory_df['t'], unit='s')
    scans_df['t_datetime'] = pd.to_datetime(scans_df['t'], unit='s')
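    # match each scan to the nearest trajectory sample in time; pairs farther apart
    # than merge_tolerance are left unmatched and dropped just below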
    near_matches_df = pd.merge_asof(
        left=scans_df,
        right=trajectory_df,
        on='t_datetime',
        direction='nearest',
        tolerance=tolerance,
        suffixes=('_scan', '_gt')
    )
    trajectory_and_scan_df = near_matches_df[(pd.notnull(near_matches_df['t_scan'])) & (pd.notnull(near_matches_df['t_gt']))].copy(deep=False)
    print_info("trajectory_and_scan_df", time.time() - start_time)

    # import matplotlib.pyplot as plt
    # plt.scatter(trajectory_and_scan_df['min_range'], trajectory_and_scan_df['abs_err'])  # , color='red', s=4.0, marker='o')
    # plt.xlabel("min_range")
    # plt.ylabel("abs_err")
    # plt.show()
    # plt.cla()
    #
    # plt.plot(trajectory_and_scan_df['t_gt'], trajectory_and_scan_df['abs_err'])
    # plt.plot(trajectory_and_scan_df['t_scan'], trajectory_and_scan_df['min_range']/trajectory_and_scan_df['sensor_range_max'])
    # plt.plot(trajectory_and_scan_df['t_scan'], trajectory_and_scan_df['num_valid_ranges']/trajectory_and_scan_df['num_ranges'])
    # plt.legend()
    # plt.show()
    # plt.cla()

    return trajectory_and_scan_df
    def __init__(self, run_id, run_output_folder, benchmark_log_path, environment_folder, parameters_combination_dict, benchmark_configuration_dict, show_ros_info, headless):
        # print("PRINT:\n")
        # print(run_id,'\n')                        # example: 0
        # print(run_output_folder,'\n')             # example: /home/furkan/ds/performance_modelling/output/test_planning/session_2020-09-30_17-09-01_964405_run_000000000
        # print(benchmark_log_path,'\n')            # example: /home/furkan/ds/performance_modelling/output/test_planning/benchmark_log.csv
        # print(environment_folder,'\n')            # example: /home/furkan/ds/performance_modelling/test_datasets/dataset/airlab
        # print(parameters_combination_dict,'\n')   # example: {'use_dijkstra': True, 'environment_name': 'airlab', 'global_planner_name': 'GlobalPlanner'}
        # print(benchmark_configuration_dict,'\n')  # example: {'components_configurations_folder': '~/turtlebot3_melodic_ws/src/global_planning_performance_modelling/config/component_configurations',
        #                                           #          'supervisor_component': 'global_planning_benchmark_supervisor',
        #                                           #          'components_configuration': {'move_base': {'GlobalPlanner': 'move_base/globalPlanner.yaml'},
        #                                           #                                       'supervisor': 'global_planning_benchmark_supervisor/global_planning_benchmark_supervisor.yaml',
        #                                           #                                       'rviz': 'rviz/default_view.rviz'},
        #                                           #          'combinatorial_parameters': {'use_dijkstra': [True, False], 'environment_name': ['airlab'], 'global_planner_name': ['GlobalPlanner']}}
        # print(show_ros_info,'\n')                 # example: False
        # print(headless,'\n')                      # example: False

        # run configuration
        self.run_id = run_id
        self.run_output_folder = run_output_folder
        self.benchmark_log_path = benchmark_log_path
        self.run_parameters = parameters_combination_dict
        self.benchmark_configuration = benchmark_configuration_dict
        self.components_ros_output = 'screen' if show_ros_info else 'log'
        self.headless = headless

        # environment parameters
        self.environment_folder = environment_folder
        self.map_info_file_path = path.join(environment_folder, "data", "map.yaml")

        # take run parameters from parameters_combination_dictionary
        global_planner_name = self.run_parameters['global_planner_name']

        if global_planner_name == 'GlobalPlanner':
            robot_kinematic = self.run_parameters['robot_kinematic']
            robot_radius = self.run_parameters['robot_radius']
            robot_major_radius = self.run_parameters['robot_major_radius']
            planner_type = self.run_parameters['planner_type']
            use_dijkstra = planner_type == 'dijkstra'
            # use_dijkstra = self.run_parameters['use_dijkstra']
            lethal_cost = self.run_parameters['lethal_cost']
        elif global_planner_name == 'SBPLLatticePlanner':
            sbpl_primitives_directory_path = path.expanduser(self.benchmark_configuration['sbpl_primitives_path'])
            planner_type = self.run_parameters['planner_type']
            robot_kinematic = self.run_parameters['robot_kinematic']
            robot_radius = self.run_parameters['robot_radius']
            robot_major_radius = self.run_parameters['robot_major_radius']
            resolution = self.run_parameters['resolution']
            if resolution == 0.05:
                resolution_str = "005"
            elif resolution == 0.1:
                resolution_str = "01"
            elif resolution == 0.25:
                resolution_str = "025"
            else:
                raise ValueError()
            if robot_kinematic == 'unicycle':
                primitives_per_angle = self.run_parameters['primitives_per_angle']
                sbpl_primitives_name = "res" + str(resolution_str) + robot_kinematic + "16prim" + str(primitives_per_angle) + ".mprim"
            elif robot_kinematic == 'bicycle':
                direction = self.run_parameters['direction']
                max_steering_angle = self.run_parameters['max_steering_angle']
                sbpl_primitives_name = "res" + str(resolution_str) + robot_kinematic + "16prim" + str(max_steering_angle) + str(direction) + ".mprim"
            else:
                print_error("Wrong robot kinematic model defined. Only unicycle or bicycle model can be used.")
                raise ValueError()
            sbpl_primitives_file_path = path.join(sbpl_primitives_directory_path, sbpl_primitives_name)
        elif global_planner_name == 'OmplGlobalPlanner':
            robot_kinematic = self.run_parameters['robot_kinematic']
            robot_radius = self.run_parameters['robot_radius']
            robot_major_radius = self.run_parameters['robot_major_radius']
            planner_type = self.run_parameters['planner_type']
            lethal_cost = self.run_parameters['lethal_cost']
            time_out = self.run_parameters['time_out']
            # TODO add some parameters for OMPL
        else:
            raise ValueError()

        # run variables
        self.aborted = False

        # prepare folder structure
        run_configuration_path = path.join(self.run_output_folder, "components_configuration")
        run_info_file_path = path.join(self.run_output_folder, "run_info.yaml")
        source_workspace_path = self.benchmark_configuration['source_workspace_path']
        software_versions_log_path = path.join(self.run_output_folder, "software_versions_log")
        backup_file_if_exists(self.run_output_folder)
        os.mkdir(self.run_output_folder)
        os.mkdir(run_configuration_path)

        # benchmark_configuration_parameters
        self.run_timeout = self.benchmark_configuration['run_timeout']
        self.robot_footprint_turtlebot3 = self.benchmark_configuration['robot_model_turtlebot3']
        self.robot_footprint_agilex_hunter = self.benchmark_configuration['robot_model_agilex_hunter']

        # components original configuration paths (inside your workspace path)
        components_configurations_folder = path.expanduser(self.benchmark_configuration['components_configurations_folder'])
        original_supervisor_configuration_path = path.join(components_configurations_folder, self.benchmark_configuration['components_configuration']['supervisor'])
        original_move_base_configuration_path = path.join(components_configurations_folder, self.benchmark_configuration['components_configuration']['move_base'])
        if global_planner_name == 'GlobalPlanner':
            original_move_base_global_planner_configuration_path = path.join(components_configurations_folder, self.benchmark_configuration['components_configuration']['move_base_global_planner'])
        elif global_planner_name == 'SBPLLatticePlanner':
            original_move_base_global_planner_configuration_path = path.join(components_configurations_folder, self.benchmark_configuration[ 'components_configuration']['move_base_sbpl_planner'])
        elif global_planner_name == 'OmplGlobalPlanner':
            original_move_base_global_planner_configuration_path = path.join(components_configurations_folder, self.benchmark_configuration['components_configuration']['move_base_ompl_planner'])
        else:
            raise ValueError()
        self.original_rviz_configuration_path = path.join(components_configurations_folder, self.benchmark_configuration['components_configuration']['rviz'])
        original_robot_urdf_path = path.join(environment_folder, "gazebo", "robot.urdf")

        # components configuration relative paths
        supervisor_configuration_relative_path = path.join("components_configuration", self.benchmark_configuration['components_configuration']['supervisor'])
        move_base_configuration_relative_path = path.join("components_configuration", self.benchmark_configuration['components_configuration']['move_base'])
        if global_planner_name == 'GlobalPlanner':
            move_base_global_planner_configuration_relative_path = path.join("components_configuration", self.benchmark_configuration['components_configuration']['move_base_global_planner'])
        elif global_planner_name == 'SBPLLatticePlanner':
            move_base_global_planner_configuration_relative_path = path.join("components_configuration", self.benchmark_configuration[ 'components_configuration']['move_base_sbpl_planner'])
        elif global_planner_name == 'OmplGlobalPlanner':
            move_base_global_planner_configuration_relative_path = path.join("components_configuration", self.benchmark_configuration['components_configuration']['move_base_ompl_planner'])
        else:
            raise ValueError()
        robot_realistic_urdf_relative_path = path.join("components_configuration", "gazebo", "robot_realistic.urdf")

        # components configuration paths in run folder (inside ds output file path)
        self.supervisor_configuration_path = path.join(self.run_output_folder, supervisor_configuration_relative_path)
        self.move_base_configuration_path = path.join(self.run_output_folder, move_base_configuration_relative_path)
        self.move_base_global_planner_configuration_path = path.join(self.run_output_folder, move_base_global_planner_configuration_relative_path)
        self.robot_realistic_urdf_path = path.join(self.run_output_folder, robot_realistic_urdf_relative_path)

        # copy the configuration of the supervisor to the run folder and update its parameters
        with open(original_supervisor_configuration_path) as supervisor_configuration_file:
            supervisor_configuration = yaml.safe_load(supervisor_configuration_file)
        supervisor_configuration['run_output_folder'] = self.run_output_folder
        supervisor_configuration['pid_father'] = os.getpid()
        supervisor_configuration['ground_truth_map_info_path'] = self.map_info_file_path
        supervisor_configuration['run_timeout'] = self.run_timeout
        

        # copy the configuration of move_base to the run folder
        # move_base global planner config
        with open(original_move_base_global_planner_configuration_path) as move_base_global_planner_configuration_file:
            move_base_global_planner_configuration = yaml.safe_load(move_base_global_planner_configuration_file)
            move_base_global_planner_configuration['planner_patience'] = self.run_timeout
            move_base_global_planner_configuration['controller_patience'] = self.run_timeout
            if robot_kinematic == 'unicycle':
                move_base_global_planner_configuration['footprint'] = self.robot_footprint_turtlebot3
            elif robot_kinematic == 'bicycle':
                move_base_global_planner_configuration['footprint'] = self.robot_footprint_agilex_hunter
            else:
                raise ValueError()

        if global_planner_name == 'GlobalPlanner':
            supervisor_configuration['robot_kinematic'] = robot_kinematic
            supervisor_configuration['robot_radius'] = robot_radius
            supervisor_configuration['robot_major_radius'] = robot_major_radius
            move_base_global_planner_configuration['GlobalPlanner']['use_dijkstra'] = use_dijkstra
            move_base_global_planner_configuration['GlobalPlanner']['use_grid_path'] = not use_dijkstra
            move_base_global_planner_configuration['GlobalPlanner']['lethal_cost'] = lethal_cost
            # todo we can add neutral_cost and cost_factor
        elif global_planner_name == 'SBPLLatticePlanner':
            supervisor_configuration['robot_kinematic'] = robot_kinematic
            supervisor_configuration['robot_radius'] = robot_radius
            supervisor_configuration['robot_major_radius'] = robot_major_radius
            move_base_global_planner_configuration['SBPLLatticePlanner']['planner_type'] = planner_type
            move_base_global_planner_configuration['SBPLLatticePlanner']['primitive_filename'] = sbpl_primitives_file_path
            # todo
        elif global_planner_name == 'OmplGlobalPlanner':
            supervisor_configuration['robot_kinematic'] = robot_kinematic
            supervisor_configuration['robot_radius'] = robot_radius
            supervisor_configuration['robot_major_radius'] = robot_major_radius
            move_base_global_planner_configuration['OmplGlobalPlanner']['planner_type'] = planner_type
            move_base_global_planner_configuration['OmplGlobalPlanner']['lethal_cost'] = lethal_cost
            move_base_global_planner_configuration['OmplGlobalPlanner']['time_out'] = time_out
        else:
            raise ValueError()
        
        # move_base global planner config
        if not path.exists(path.dirname(self.move_base_global_planner_configuration_path)):
            os.makedirs(path.dirname(self.move_base_global_planner_configuration_path))
        with open(self.move_base_global_planner_configuration_path, 'w') as move_base_global_planner_configuration_file:
            yaml.dump(move_base_global_planner_configuration, move_base_global_planner_configuration_file, default_flow_style=False)

        # supervisor config
        if not path.exists(path.dirname(self.supervisor_configuration_path)):
            os.makedirs(path.dirname(self.supervisor_configuration_path))
        with open(self.supervisor_configuration_path, 'w') as supervisor_configuration_file:
            yaml.dump(supervisor_configuration, supervisor_configuration_file, default_flow_style=False)

        # move_base general config (costmaps, local_planner, etc)
        if not path.exists(path.dirname(self.move_base_configuration_path)):
            os.makedirs(path.dirname(self.move_base_configuration_path))
        shutil.copyfile(original_move_base_configuration_path, self.move_base_configuration_path)

        # copy the configuration of the robot urdf to the run folder and update the link names for realistic data
        if not path.exists(path.dirname(self.robot_realistic_urdf_path)):
            os.makedirs(path.dirname(self.robot_realistic_urdf_path))
        shutil.copyfile(original_robot_urdf_path, self.robot_realistic_urdf_path)

        # write run info to file
        run_info_dict = dict()
        run_info_dict["run_id"] = self.run_id
        run_info_dict["run_folder"] = self.run_output_folder
        run_info_dict["environment_folder"] = environment_folder
        run_info_dict["run_parameters"] = self.run_parameters
        run_info_dict["local_components_configuration"] = {
            'supervisor': supervisor_configuration_relative_path,
            'move_base': move_base_configuration_relative_path,
            'move_base_global_planner': move_base_global_planner_configuration_relative_path,
            'robot_realistic_urdf': robot_realistic_urdf_relative_path,
        }

        with open(run_info_file_path, 'w') as run_info_file:
            yaml.dump(run_info_dict, run_info_file, default_flow_style=False)

        # log packages and software versions and status
        log_packages_and_repos(source_workspace_path=source_workspace_path, log_dir_path=software_versions_log_path)
Example #24
0
def explored_area_metrics(map_file_path, map_info_file_path,
                          ground_truth_map_file_path,
                          ground_truth_map_info_file_path):

    # check required files exist
    if not path.isfile(map_file_path):
        print_error("explored_area_metrics: map file not found {}".format(map_file_path))
        return

    if not path.isfile(map_info_file_path):
        print_error("explored_area_metrics: map_info file not found {}".format(map_info_file_path))
        return

    if not path.isfile(ground_truth_map_file_path):
        print_error("explored_area_metrics: ground_truth_map file not found {}".format(ground_truth_map_file_path))
        return

    if not path.isfile(ground_truth_map_info_file_path):
        print_error("explored_area_metrics: ground_truth_map_info file not found {}".format(ground_truth_map_info_file_path))
        return

    with open(ground_truth_map_info_file_path) as stage_info_file:
        stage_info_yaml = yaml.safe_load(stage_info_file)

    ground_truth_map = Image.open(ground_truth_map_file_path)

    ground_truth_map_size_meters = np.array([
        float(stage_info_yaml['map']['size']['x']),
        float(stage_info_yaml['map']['size']['y'])
    ])
    ground_truth_map_size_pixels = np.array(ground_truth_map.size)
    ground_truth_resolution = ground_truth_map_size_meters / ground_truth_map_size_pixels  # meters/pixel, on both axes (note that the y axis is inverted in the image)
    ground_truth_cell_area = float(ground_truth_resolution[0] * ground_truth_resolution[1])  # width × height of one pixel, meters^2

    ground_truth_free_cell_count = 0
    ground_truth_occupied_cell_count = 0
    ground_truth_unknown_cell_count = 0
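    # the maps are assumed to follow the usual map_server colour convention:
    # white (254) for free cells, black (0) for occupied, grey (205) for unknown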
    ground_truth_total_cell_count = ground_truth_map.size[0] * ground_truth_map.size[1]
    ground_truth_pixels = ground_truth_map.load()
    for i in range(ground_truth_map.size[0]):
        for j in range(ground_truth_map.size[1]):
            ground_truth_free_cell_count += ground_truth_pixels[i, j] == (254, 254, 254)
            ground_truth_occupied_cell_count += ground_truth_pixels[i, j] == (0, 0, 0)
            ground_truth_unknown_cell_count += ground_truth_pixels[i, j] == (205, 205, 205)

    explorable_area = ground_truth_free_cell_count * ground_truth_cell_area

    result_map = Image.open(map_file_path)

    with open(map_info_file_path) as result_map_info_file:
        result_map_info_yaml = yaml.safe_load(result_map_info_file)

    # ground_truth_map_size_pixels = np.array(result_map.size)
    result_map_resolution = float(result_map_info_yaml['info']['resolution'])  # meters/pixel, on both axes
    result_cell_area = result_map_resolution**2  # area of one pixel, meters^2

    result_free_cell_count = 0
    result_occupied_cell_count = 0
    result_unknown_cell_count = 0
    result_total_cell_count = result_map.size[0] * result_map.size[1]
    result_map_pixels = result_map.load()
    for i in range(result_map.size[0]):
        for j in range(result_map.size[1]):
            result_free_cell_count += result_map_pixels[i, j] == 254
            result_occupied_cell_count += result_map_pixels[i, j] == 0
            result_unknown_cell_count += result_map_pixels[i, j] == 205

    explored_area = result_free_cell_count * result_cell_area

    explored_area_dict = {
        'result_map': {
            'count': {
                'free': result_free_cell_count,
                'occupied': result_occupied_cell_count,
                'unknown': result_unknown_cell_count,
                'total': result_total_cell_count,
            },
            'area': {
                'free': explored_area,
                'occupied': result_occupied_cell_count * result_cell_area,
                'unknown': result_unknown_cell_count * result_cell_area,
                'total': result_total_cell_count * result_cell_area,
            },
        },
        'ground_truth_map': {
            'count': {
                'free': ground_truth_free_cell_count,
                'occupied': ground_truth_occupied_cell_count,
                'unknown': ground_truth_unknown_cell_count,
                'total': ground_truth_total_cell_count,
            },
            'area': {
                'free': explorable_area,
                'occupied': ground_truth_occupied_cell_count * ground_truth_cell_area,
                'unknown': ground_truth_unknown_cell_count * ground_truth_cell_area,
                'total': ground_truth_total_cell_count * ground_truth_cell_area,
            },
        },
        'normalised_explored_area': float(explored_area / explorable_area)
    }

    return explored_area_dict
    def execute_run(self):

        # components parameters
        rviz_params = {
            'rviz_config_file': self.original_rviz_configuration_path,
        }
        environment_params = {
            'gazebo_model_path_env_var': self.gazebo_model_path_env_var,
            'gazebo_plugin_path_env_var': self.gazebo_plugin_path_env_var,
            'world_model_file': self.gazebo_world_model_path,
            'robot_gt_urdf_file': self.robot_gt_urdf_path,
            'robot_realistic_urdf_file': self.robot_realistic_urdf_path,
            'headless': self.headless,
        }
        localization_params = {
            'params_file': self.nav2_amcl_configuration_path,
            'map': self.map_info_file_path,
            'use_sim_time': self.use_sim_time,
        }
        navigation_params = {
            'params_file': self.nav2_navigation_configuration_path,
            'use_sim_time': self.use_sim_time,
            'map_subscribe_transient_local': True,
        }
        supervisor_params = {
            'configuration': self.supervisor_configuration_path,
            'use_sim_time': self.use_sim_time
        }

        # declare components
        rviz = Component('rviz', 'localization_performance_modelling',
                         'rviz.launch.py', rviz_params)
        # recorder = Component('recorder', 'localization_performance_modelling', 'rosbag_recorder.launch.py', recorder_params)
        environment = Component('gazebo', 'localization_performance_modelling',
                                'gazebo.launch.py', environment_params)
        localization = Component('nav2_amcl',
                                 'localization_performance_modelling',
                                 'nav2_amcl.launch.py', localization_params)
        navigation = Component('nav2_navigation',
                               'localization_performance_modelling',
                               'nav2_navigation.launch.py', navigation_params)
        supervisor = Component('supervisor',
                               'localization_performance_modelling',
                               'localization_benchmark_supervisor.launch.py',
                               supervisor_params)

        # TODO manage launch exceptions in Component.__init__

        # add components to launcher
        components_launcher = ComponentsLauncher()
        if not self.headless:
            components_launcher.add_component(rviz)
        # recorder.launch()
        components_launcher.add_component(environment)
        components_launcher.add_component(navigation)
        components_launcher.add_component(localization)
        components_launcher.add_component(supervisor)

        # launch components and wait for the supervisor to finish
        self.log(event="waiting_supervisor_finish")
        components_launcher.launch()
        self.log(event="supervisor_shutdown")

        # make sure remaining components have shutdown
        components_launcher.shutdown()
        print_info("execute_run: components shutdown completed")

        # compute all relevant metrics and visualisations
        # noinspection PyBroadException
        try:
            self.log(event="start_compute_metrics")
            compute_metrics(self.run_output_folder)
        except:
            print_error("failed metrics computation")
            print_error(traceback.format_exc())

        self.log(event="run_end")
        print_info(f"run {self.run_id} completed")
Example #26
0
def absolute_error_vs_geometric_similarity(estimated_poses_file_path, ground_truth_poses_file_path, ground_truth_map, horizon_length=3.5, samples_per_second=10.0, max_iterations=20):
    # import matplotlib.pyplot as plt
    from icp import iterative_closest_point
    from skimage.draw import line, circle_perimeter
    # plt.rcParams["figure.figsize"] = (10, 10)

    start_time = time.time()
    estimated_poses_df = pd.read_csv(estimated_poses_file_path)
    if len(estimated_poses_df.index) < 2:
        print_error("not enough estimated poses data points")
        return
    print_info("estimated_poses_df", time.time() - start_time)

    start_time = time.time()
    ground_truth_poses_df = pd.read_csv(ground_truth_poses_file_path)
    print_info("ground_truth_poses_df", time.time() - start_time)

    start_time = time.time()
    ground_truth_interpolated_poses_file_path = path.splitext(estimated_poses_file_path)[0] + "ground_truth_interpolated.csv"
    if path.exists(ground_truth_interpolated_poses_file_path):
        print_info("using cached ground_truth_interpolated_poses [{}]".format(ground_truth_interpolated_poses_file_path))
        complete_trajectory_df = pd.read_csv(ground_truth_interpolated_poses_file_path)
    else:
        complete_trajectory_df = compute_ground_truth_interpolated_poses(estimated_poses_df, ground_truth_poses_df)
        complete_trajectory_df.to_csv(ground_truth_interpolated_poses_file_path)

    run_duration = ground_truth_poses_df.iloc[-1]['t'] - ground_truth_poses_df.iloc[0]['t']
    resample_n = max(1, int(run_duration * samples_per_second))  # guard against a zero sample count on very short runs
    resample_ratio = max(1, len(complete_trajectory_df) // resample_n)
    trajectory_df = complete_trajectory_df.iloc[::resample_ratio].copy(deep=False)
    trajectory_df['abs_err'] = np.sqrt((trajectory_df['x_est'] - trajectory_df['x_gt'])**2 + (trajectory_df['y_est'] - trajectory_df['y_gt'])**2)
    print_info("trajectory_df", time.time() - start_time)

    delta_length = 0.2  # displacement of x and x_prime
    ic = ground_truth_map.map_frame_to_image_coordinates
    mf = ground_truth_map.image_to_map_frame_coordinates

    map_image_array = np.array(ground_truth_map.map_image.convert(mode="L")).transpose()
    occupancy_grid = map_image_array == 0

    translation_score_0_column = list()
    translation_score_column = list()
    rotation_score_0_column = list()
    rotation_score_column = list()
    plot_lines = list()
    icp_time = 0
    ray_tracing_time = 0

    rows_count = len(trajectory_df[['x_gt', 'y_gt']])
    progress_count = 0

    for i, row_df in trajectory_df[['x_gt', 'y_gt']].iterrows():
        x_mf = np.array(row_df)
        p_x, p_y = ic(x_mf)

        ray_tracing_start_time = time.time()
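        # ray-cast from the pose to every pixel on a circle of radius
        # horizon_length + delta_length: the first occupied cell met along each ray
        # is a visible obstacle point, roughly simulating an ideal laser scan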
        visible_points_x = set()
        perimeter_points = np.array(circle_perimeter(p_x, p_y, int((horizon_length + delta_length)/ground_truth_map.resolution), method='andres')).transpose()
        for perimeter_x, perimeter_y in perimeter_points:

            ray_points = np.array(line(p_x, p_y, perimeter_x, perimeter_y)).transpose()
            for ray_x, ray_y in ray_points:
                if occupancy_grid[ray_x, ray_y]:
                    visible_points_x.add((ray_x, ray_y))
                    break

        visible_points_x_mf = np.array(list(map(mf, visible_points_x)))
        if len(visible_points_x_mf) == 0:
            translation_score_0_column.append(np.nan)
            rotation_score_0_column.append(np.nan)
            translation_score_column.append(np.nan)
            rotation_score_column.append(np.nan)
            continue

        visible_points_x_mf_o = visible_points_x_mf - x_mf
        ray_tracing_time += time.time() - ray_tracing_start_time

        delta_trans_list = delta_length * np.array([
            (1, 0),
            (0, 1),
            (-1, 0),
            (0, -1),
            (.7, .7),
            (.7, -.7),
            (-.7, .7),
            (-.7, -.7),
        ])

        delta_theta_list = [-0.1, -0.05, 0.05, 0.1]
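        # perturbation scoring: displace (or rotate) the visible points by a known
        # offset and let ICP recover it; the score is the relative error of the
        # recovered offset, so low values indicate geometrically distinctive
        # surroundings and values near 1 indicate ambiguous ones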

        translation_score_0_list = list()
        translation_score_list = list()
        for delta_trans in delta_trans_list:
            x_prime_mf = x_mf + delta_trans
            p_x_prime, p_y_prime = ic(x_prime_mf)

            ray_tracing_start_time = time.time()
            visible_points_x_prime = set()
            perimeter_points_prime = np.array(circle_perimeter(p_x_prime, p_y_prime, int(horizon_length / ground_truth_map.resolution), method='andres')).transpose()
            for perimeter_x_prime, perimeter_y_prime in perimeter_points_prime:

                ray_points_prime = np.array(line(p_x_prime, p_y_prime, perimeter_x_prime, perimeter_y_prime)).transpose()
                for ray_x_prime, ray_y_prime in ray_points_prime:
                    if occupancy_grid[ray_x_prime, ray_y_prime]:
                        visible_points_x_prime.add((ray_x_prime, ray_y_prime))
                        break

            visible_points_x_prime_mf = np.array(list(map(mf, visible_points_x_prime)))
            ray_tracing_time += time.time() - ray_tracing_start_time

            if len(visible_points_x_prime_mf) == 0:
                continue

            visible_points_x_prime_mf_o = visible_points_x_prime_mf - x_prime_mf

            start_time = time.time()
            transform, scores, transformed_points_prime, num_iterations = iterative_closest_point(visible_points_x_prime_mf_o, visible_points_x_mf_o, max_iterations=max_iterations, tolerance=0.001)

            translation_score_0_list.append(scores[0])

            translation = transform[:2, 2]
            translation_score = float(np.sqrt(np.sum((translation - delta_trans) ** 2)) / np.sqrt(np.sum(delta_trans ** 2)))

            # print("translation score: {s:1.4f} \t translation score_0: {s0:1.4f} \t i: {i}".format(s=translation_score, s0=scores[0], i=num_iterations))

            translation_score_list.append(translation_score)
            icp_time += time.time() - start_time

            # if translation_score > 0.5:
            #     plt.scatter(*visible_points_x_mf_o.transpose(), color='black', marker='x')
            #     plt.scatter(*visible_points_x_prime_mf_o.transpose(), color='red', marker='x')
            #     plt.scatter(*transformed_points_prime.transpose(), color='blue', marker='x')
            #     hl = horizon_length + delta_length
            #     plt.xlim(-hl, hl)
            #     plt.ylim(-hl, hl)
            #     plt.gca().set_aspect('equal', adjustable='box')
            #     plt.show()

            plot_lines.append((x_mf, x_prime_mf, translation_score))

        if len(translation_score_0_list) == 0:
            translation_score_0_column.append(np.nan)
            translation_score_column.append(np.nan)
        else:
            translation_score_0_column.append(np.mean(translation_score_0_list))
            translation_score_column.append(np.mean(translation_score_list))

        rotation_score_0_list = list()
        rotation_score_list = list()
        for delta_theta in delta_theta_list:
            rot_mat = np.array([
                [np.cos(delta_theta), np.sin(delta_theta)],
                [-np.sin(delta_theta), np.cos(delta_theta)]
            ])
            visible_points_x_prime_mf_o = np.dot(visible_points_x_mf_o, rot_mat)

            start_time = time.time()
            transform, scores, transformed_points_prime, num_iterations = iterative_closest_point(visible_points_x_prime_mf_o, visible_points_x_mf_o, max_iterations=max_iterations, tolerance=0.001)

            rotation_score_0_list.append(scores[0])

            rotation_mat = transform[:2, :2].T
            rotation = math.atan2(rotation_mat[0, 1], rotation_mat[0, 0])
            rotation_score = float(np.fabs(rotation + delta_theta)) / np.fabs(delta_theta)

            # print("rotation_score: {s:1.4f} \t rotation_score_0: {s0:1.4f} \t i: {i} \t transform angle: {r:1.4f} \t delta theta: {dt:1.4f}".format(s=rotation_score, s0=scores[0], i=num_iterations, r=rotation, dt=delta_theta))

            rotation_score_list.append(rotation_score)
            icp_time += time.time() - start_time

            # if rotation_score > 0.5:
            #     plt.scatter(*visible_points_x_mf_o.transpose(), color='black', marker='x')
            #     plt.scatter(*visible_points_x_prime_mf_o.transpose(), color='red', marker='x')
            #     plt.scatter(*transformed_points_prime.transpose(), color='blue', marker='x')
            #     hl = horizon_length + delta_length
            #     plt.xlim(-hl, hl)
            #     plt.ylim(-hl, hl)
            #     plt.gca().set_aspect('equal', adjustable='box')
            #     plt.show()

            # plot_lines.append((x_mf, x_prime_mf, translation_score))

        if len(rotation_score_0_list) == 0:
            rotation_score_0_column.append(np.nan)
            rotation_score_column.append(np.nan)
        else:
            rotation_score_0_column.append(np.mean(rotation_score_0_list))
            rotation_score_column.append(np.mean(rotation_score_list))

        progress_count += 1
        print_info(int(progress_count / float(rows_count) * 100), "%", replace_previous_line=True)

    trajectory_df['translation_score_0'] = translation_score_0_column
    trajectory_df['translation_score'] = translation_score_column
    trajectory_df['rotation_score_0'] = rotation_score_0_column
    trajectory_df['rotation_score'] = rotation_score_column

    # print(trajectory_df)

    print_info("icp_time", icp_time)
    print_info("ray_tracing_time", ray_tracing_time)

    # for (x_0, y_0), (x_1, y_1), score in plot_lines:
    #     plt.plot([x_0, x_1], [y_0, y_1], linewidth=3*score, color='black')
    # plt.plot([trajectory_df['x_gt'], trajectory_df['x_est']], [trajectory_df['y_gt'], trajectory_df['y_est']], color='red')
    # plt.show()

    # plt.plot(trajectory_df['t'], trajectory_df['abs_err'], label='abs_err')
    # plt.plot(trajectory_df['t'], trajectory_df['rotation_score_0'], label='rotation_score_0')
    # plt.legend()
    # plt.show()
    #
    # plt.scatter(trajectory_df['rotation_score_0'], trajectory_df['abs_err'])
    # plt.xlabel("rotation_score_0")
    # plt.ylabel("abs_err")
    # plt.legend()
    # plt.show()
    #
    # plt.plot(trajectory_df['t'], trajectory_df['abs_err'], label='abs_err')
    # plt.plot(trajectory_df['t'], trajectory_df['rotation_score'], label='rotation_score')
    # plt.legend()
    # plt.show()
    #
    # plt.scatter(trajectory_df['rotation_score'], trajectory_df['abs_err'], label='abs_err vs rotation_score')
    # plt.xlabel("rotation_score")
    # plt.ylabel("abs_err")
    # plt.legend()
    # plt.show()
    #
    # plt.plot(trajectory_df['t'], trajectory_df['abs_err'], label='abs_err')
    # plt.plot(trajectory_df['t'], trajectory_df['translation_score_0'], label='translation_score_0')
    # plt.plot(trajectory_df['t'], trajectory_df['translation_score'], label='translation_score')
    # plt.legend()
    # plt.show()
    #
    # plt.scatter(trajectory_df['translation_score_0'], trajectory_df['abs_err'], label='abs_err vs translation_score_0')
    # plt.xlabel("translation_score_0")
    # plt.ylabel("abs_err")
    # plt.legend()
    # plt.show()
    #
    # plt.scatter(trajectory_df['translation_score'], trajectory_df['abs_err'], label='abs_err vs translation_score')
    # plt.xlabel("translation_score")
    # plt.ylabel("abs_err")
    # plt.legend()
    # plt.show()

    return trajectory_df
Example #27
0
        '-t',
        dest='gazebo_template_path',
        help='Folder containing the gazebo template. Example: {}'.format(
            default_gazebo_template_path),
        type=str,
        required=True)

    args = parser.parse_args()

    gazebo_template_path = path.expanduser(args.gazebo_template_path)
    target_dataset_path = path.expanduser(args.target_dataset_path)
    source_environment_paths = filter(path.isdir,
                                      glob.glob(target_dataset_path))

    if not path.exists(gazebo_template_path):
        print_error("gazebo_template_path does not exists:",
                    gazebo_template_path)
        sys.exit(-1)

    for target_environment_path in source_environment_paths:
        environment_name = path.basename(target_environment_path)
        print("environment_name", environment_name)
        print("target_environment_path", target_environment_path)

        gazebo_file_relative_paths = map(
            lambda ap: path.relpath(ap, gazebo_template_path),
            filter(path.isfile,
                   glob.glob(gazebo_template_path + '/**', recursive=True)))

        for gazebo_file_relative_path in gazebo_file_relative_paths:
            source_gazebo_file_path = path.join(gazebo_template_path,
                                                gazebo_file_relative_path)
    def execute_run(self):

        # components parameters
        rviz_params = {
            'rviz_config_file': self.original_rviz_configuration_path,
            'headless': self.headless,
            'output': "log"
        }
        state_publisher_param = {
            'robot_realistic_urdf_file': self.robot_realistic_urdf_path,
            'output': "log"
        }
        navigation_params = {
            'params_file': self.move_base_configuration_path,
            'global_planner_params_file': self.move_base_global_planner_configuration_path,
            'map_file': self.map_info_file_path,
            'output': "log"
        }
        supervisor_params = {
            'params_file': self.supervisor_configuration_path,
            'output': "screen"
        }
        recorder_params = {
            'bag_file_path': path.join(self.run_output_folder, "benchmark_data.bag"),
            'not_recorded_topics': "/move_base/DWAPlannerROS/(.*)",
            'output': "log"
        }
        # recorder_params2 = {
        #     'bag_file_path': path.join(self.run_output_folder, "benchmark_data2.bag"),
        #     'topics': "/cmd_vel /initialpose /map /map_metadata /map_updates /move_base/DWAPlannerROS/cost_cloud /move_base/DWAPlannerROS/global_plan \
        #                  /move_base/DWAPlannerROS/local_plan /move_base/DWAPlannerROS/parameter_descriptions /move_base/DWAPlannerROS/parameter_updates \
        #                  /move_base/DWAPlannerROS/trajectory_cloud /move_base/(.*)/plan /move_base/SBPLLatticePlanner/sbpl_lattice_planner_stats /move_base/cancel /move_base/current_goal \
        #                  /move_base/feedback /move_base/global_costmap/costmap /move_base/global_costmap/costmap_updates /move_base/global_costmap/footprint \
        #                  /move_base/global_costmap/inflation_layer/parameter_descriptions /move_base/global_costmap/inflation_layer/parameter_updates \
        #                  /move_base/global_costmap/parameter_descriptions /move_base/global_costmap/parameter_updates /move_base/global_costmap/static_layer/parameter_descriptions \
        #                  /move_base/global_costmap/static_layer/parameter_updates /move_base/goal /move_base/local_costmap/costmap /move_base/local_costmap/costmap_updates \
        #                  /move_base/local_costmap/footprint /move_base/local_costmap/inflation_layer/parameter_descriptions /move_base/local_costmap/inflation_layer/parameter_updates \
        #                  /move_base/local_costmap/parameter_descriptions /move_base/local_costmap/parameter_updates /move_base/local_costmap/static_layer/parameter_descriptions \
        #                  /move_base/local_costmap/static_layer/parameter_updates /move_base/parameter_descriptions /move_base/parameter_updates /move_base/result /move_base/status \
        #                  /move_base_simple/goal /odom /rosout /rosout_agg /tf /tf_static",
        #     'output': "log"
        # }


        # declare components
        roscore = Component('roscore', 'global_planning_performance_modelling', 'roscore.launch')
        state_publisher = Component('state_publisher', 'global_planning_performance_modelling', 'robot_state_launcher.launch', state_publisher_param)
        rviz = Component('rviz', 'global_planning_performance_modelling', 'rviz.launch', rviz_params)
        navigation = Component('move_base', 'global_planning_performance_modelling', 'move_base.launch', navigation_params)
        supervisor = Component('supervisor', 'global_planning_performance_modelling', 'global_planning_benchmark_supervisor.launch', supervisor_params)
        recorder = Component('recorder', 'global_planning_performance_modelling', 'rosbag_recorder.launch', recorder_params)
        # recorder2 = Component('recorder', 'global_planning_performance_modelling', 'rosbag_recorder2.launch', recorder_params2)

        # launch roscore and setup a node to monitor ros
        roscore.launch()
        rospy.init_node("benchmark_monitor", anonymous=True)

        # launch components
        rviz.launch()
        navigation.launch()
        supervisor.launch()
        recorder.launch()
        # recorder2.launch()

        # launch components and wait for the supervisor to finish
        self.log(event="waiting_supervisor_finish")
        supervisor.wait_to_finish()
        self.log(event="supervisor_shutdown")

        # check that the ros node is still ok; otherwise the ros infrastructure has been shut down and the benchmark is aborted
        if rospy.is_shutdown():
            print_error("execute_run: supervisor finished by ros_shutdown")
            self.aborted = True


        # shut down components
        navigation.shutdown()
        rviz.shutdown()
        roscore.shutdown()
        recorder.shutdown()
        # recorder2.shutdown()
        print_info("execute_run: components shutdown completed")

        # compute all relevant metrics and visualisations
        # try:
        #     self.log(event="start_compute_metrics")
        #     compute_metrics(self.run_output_folder)                      # uncomment to enable metrics computation
        # except:
        #     print_error("failed metrics computation")
        #     print_error(traceback.format_exc())

        self.log(event="run_end")
        print_info("run {run_id} completed".format(run_id=self.run_id))
def collect_data(base_run_folder_path, invalidate_cache=False):

    base_run_folder = path.expanduser(base_run_folder_path)
    cache_file_path = path.join(base_run_folder, "run_data_cache.pkl")

    if not path.isdir(base_run_folder):
        print_error(
            "collect_data: base_run_folder does not exist or is not a directory [{}]"
            .format(base_run_folder))
        return None, None

    run_folders = sorted(
        list(
            filter(path.isdir,
                   glob.glob(path.abspath(base_run_folder) + '/*'))))

    if invalidate_cache or not path.exists(cache_file_path):
        df = pd.DataFrame()
        parameter_names = set()
        cached_run_folders = set()
    else:
        print_info("collect_data: updating cache")
        with open(cache_file_path, 'rb') as f:
            cache = pickle.load(f)
        df = cache['df']
        parameter_names = cache['parameter_names']
        cached_run_folders = set(df['run_folder'])

    # collect results from runs not already cached
    print_info("collect_data: reading run data")
    no_output = True
    for i, run_folder in enumerate(run_folders):
        try:
            metric_results_folder = path.join(run_folder, "metric_results")
            benchmark_data_folder = path.join(run_folder, "benchmark_data")
            run_info_file_path = path.join(run_folder, "run_info.yaml")

            if not path.exists(metric_results_folder):
                print_error(
                    "collect_data: metric_results_folder does not exist [{}]".
                    format(metric_results_folder))
                no_output = False
                continue
            if not path.exists(run_info_file_path):
                print_error(
                    "collect_data: run_info file does not exists [{}]".format(
                        run_info_file_path))
                no_output = False
                continue
            if run_folder in cached_run_folders:
                continue

            run_info = get_yaml(run_info_file_path)

            run_record = dict()

            for parameter_name, parameter_value in run_info[
                    'run_parameters'].items():
                parameter_names.add(parameter_name)
                if isinstance(parameter_value, list):
                    # lists are not hashable, convert them to tuples
                    parameter_value = tuple(parameter_value)
                run_record[parameter_name] = parameter_value

            # in this run_info.yaml, environment_name is already one of the
            # run_parameters, so it is read from there rather than from the
            # environment folder path
            run_record['environment_name'] = run_info['run_parameters'][
                'environment_name']
            run_record['run_folder'] = run_folder
            run_record['failure_rate'] = 0

            try:
                run_events = pd.read_csv(
                    path.join(benchmark_data_folder, "run_events.csv"))
                metrics_dict = get_yaml(
                    path.join(metric_results_folder, "metrics.yaml"))
            except IOError:
                run_record['failure_rate'] = 1
                df = df.append(run_record, ignore_index=True)
                continue

            feasibility_rate = metrics_dict.get('feasibility_rate')
            run_record['feasibility_rate'] = feasibility_rate
            if feasibility_rate is None:
                run_record['failure_rate'] = 1
                df = df.append(run_record, ignore_index=True)
                continue

            for metric_name in ('average_planning_time',
                                'normalised_plan_length',
                                'normalised_planning_time'):
                if metrics_dict.get(metric_name) is not None:
                    run_record[metric_name] = metrics_dict[metric_name]

            for event_name in ('target_pose_set', 'target_pose_reached',
                               'target_pose_aborted', 'initial_pose_true',
                               'away_from_initial_pose', 'goal_pose_true',
                               'away_from_goal_pose'):
                run_record['num_' + event_name] = len(
                    run_events[run_events["event"] == event_name])

            run_start_events = run_events[run_events["event"] == "run_start"]
            run_completed_events = run_events[run_events["event"] ==
                                              "run_completed"]

            if len(run_start_events) == 0:
                print_error("collect_data: run_start event does not exist")
                no_output = False
                run_record['failure_rate'] = 1
                df = df.append(run_record, ignore_index=True)
                continue

            if len(run_completed_events) == 0:
                print_error(
                    "collect_data: run_completed event does not exist")
                no_output = False
                run_record['failure_rate'] = 1
                df = df.append(run_record, ignore_index=True)
                continue

            run_start_time = float(run_start_events["time"].iloc[0])
            supervisor_finish_time = float(
                run_completed_events["time"].iloc[0])
            run_execution_time = supervisor_finish_time - run_start_time
            run_record['run_execution_time'] = run_execution_time

            # if metrics_dict['cpu_and_memory_usage'] is not None and 'amcl_accumulated_cpu_time' in metrics_dict['cpu_and_memory_usage']:
            #     run_record['normalised_cpu_time'] = metrics_dict['cpu_and_memory_usage']['amcl_accumulated_cpu_time'] / run_execution_time
            #
            # if metrics_dict['cpu_and_memory_usage'] is not None and 'amcl_uss' in metrics_dict['cpu_and_memory_usage']:
            #     run_record['max_memory'] = metrics_dict['cpu_and_memory_usage']['amcl_uss']
            #
            # if metrics_dict['cpu_and_memory_usage'] is not None and 'localization_slam_toolbox_node_accumulated_cpu_time' in metrics_dict['cpu_and_memory_usage']:
            #     run_record['normalised_cpu_time'] = metrics_dict['cpu_and_memory_usage']['localization_slam_toolbox_node_accumulated_cpu_time'] / run_execution_time
            #
            # if metrics_dict['cpu_and_memory_usage'] is not None and 'localization_slam_toolbox_node_uss' in metrics_dict['cpu_and_memory_usage']:
            #     run_record['max_memory'] = metrics_dict['cpu_and_memory_usage']['localization_slam_toolbox_node_uss']

            df = df.append(run_record, ignore_index=True)

            print_info("collect_data: reading run data: {}%".format(
                int((i + 1) * 100 / len(run_folders))),
                       replace_previous_line=no_output)
            no_output = True
        except KeyboardInterrupt:
            raise
        except:
            print_error("collect_data: failed reading data of run {}".format(
                run_folder))
            print_error(traceback.format_exc())
            no_output = False

    # save cache
    if cache_file_path is not None:
        cache = {'df': df, 'parameter_names': parameter_names}
        with open(cache_file_path, 'wb') as f:
            pickle.dump(cache, f, protocol=2)

    return df, parameter_names
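
A minimal usage sketch for the function above; the output folder path is an assumption, not part of the original code:

if __name__ == '__main__':
    # read (or update) the cached run data from the assumed output folder
    run_data_df, params = collect_data("~/performance_modelling_output/test_planning/")
    if run_data_df is not None:
        # aggregate failure and feasibility rates for each environment
        print(run_data_df.groupby('environment_name')[['failure_rate', 'feasibility_rate']].mean())
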
def collect_data(base_run_folder_path, invalidate_cache=False):

    base_run_folder = path.expanduser(base_run_folder_path)
    cache_file_path = path.join(base_run_folder, "run_data_cache.pkl")

    if not path.isdir(base_run_folder):
        print_error(
            "collect_data: base_run_folder does not exist or is not a directory [{}]"
            .format(base_run_folder))
        return None, None

    run_folders = sorted(
        list(
            filter(path.isdir,
                   glob.glob(path.abspath(base_run_folder) + '/*'))))

    if invalidate_cache or not path.exists(cache_file_path):
        df = pd.DataFrame()
        parameter_names = set()
        cached_run_folders = set()
    else:
        print_info("collect_data: updating cache")
        with open(cache_file_path, 'rb') as f:
            cache = pickle.load(f)
        df = cache['df']
        parameter_names = cache['parameter_names']
        cached_run_folders = set(df['run_folder'])

    # collect results from runs not already cached
    print_info("collect_data: reading run data")
    no_output = True
    for i, run_folder in enumerate(run_folders):
        metric_results_folder = path.join(run_folder, "metric_results")
        benchmark_data_folder = path.join(run_folder, "benchmark_data")
        run_info_file_path = path.join(run_folder, "run_info.yaml")

        if not path.exists(metric_results_folder):
            print_error(
                "collect_data: metric_results_folder does not exist [{}]".
                format(metric_results_folder))
            no_output = False
            continue
        if not path.exists(run_info_file_path):
            print_error(
                "collect_data: run_info file does not exist [{}]".format(
                    run_info_file_path))
            no_output = False
            continue
        if run_folder in cached_run_folders:
            continue

        run_info = get_yaml(run_info_file_path)

        run_record = dict()

        for parameter_name, parameter_value in run_info[
                'run_parameters'].items():
            parameter_names.add(parameter_name)
            if isinstance(parameter_value, list):
                # lists are not hashable, convert them to tuples
                parameter_value = tuple(parameter_value)
            run_record[parameter_name] = parameter_value

        parameter_names.add('environment_name')
        run_record['environment_name'] = path.basename(
            path.abspath(run_info['environment_folder']))
        run_record['run_folder'] = run_folder
        run_record['failure_rate'] = 0

        try:
            run_events = get_csv(
                path.join(benchmark_data_folder, "run_events.csv"))
            metrics_dict = get_yaml(
                path.join(metric_results_folder, "metrics.yaml"))
        except IOError:
            run_record['failure_rate'] = 1
            df = df.append(run_record, ignore_index=True)
            continue

        trajectory_length = get_yaml_by_path(metrics_dict,
                                             ['trajectory_length'])
        run_record['trajectory_length'] = trajectory_length
        # if trajectory_length is None or trajectory_length < 3.0:
        #     run_record['failure_rate'] = 1
        #     df = df.append(run_record, ignore_index=True)
        #     continue

        run_record['mean_absolute_error'] = get_yaml_by_path(
            metrics_dict, ['absolute_localization_error', 'mean'])
        run_record['mean_relative_translation_error'] = get_yaml_by_path(
            metrics_dict, [
                'relative_localization_error', 'random_relations',
                'translation', 'mean'
            ])
        run_record['mean_relative_rotation_error'] = get_yaml_by_path(
            metrics_dict, [
                'relative_localization_error', 'random_relations', 'rotation',
                'mean'
            ])

        run_record['num_target_pose_set'] = len(
            run_events[run_events["event"] == "target_pose_set"])
        run_record['num_target_pose_reached'] = len(
            run_events[run_events["event"] == "target_pose_reached"])
        run_record['num_target_pose_not_reached'] = len(
            run_events[run_events["event"] == "target_pose_not_reached"])

        run_start_events = run_events[run_events["event"] == "run_start"]
        run_completed_events = run_events[run_events["event"] ==
                                          "run_completed"]

        if len(run_start_events) == 0:
            print_error("collect_data: run_start event does not exist")
            no_output = False
            run_record['failure_rate'] = 1
            df = df.append(run_record, ignore_index=True)
            continue

        if len(run_completed_events) == 0:
            print_error("collect_data: run_completed event does not exist")
            no_output = False
            run_record['failure_rate'] = 1
            df = df.append(run_record, ignore_index=True)
            continue

        run_start_time = float(run_start_events["timestamp"].iloc[0])
        supervisor_finish_time = float(
            run_completed_events["timestamp"].iloc[0])
        run_execution_time = supervisor_finish_time - run_start_time
        run_record['run_execution_time'] = run_execution_time

        # record the normalised cpu time and maximum memory of whichever
        # localization or slam node was running; the metrics keys follow the
        # pattern <node_name>_accumulated_cpu_time and <node_name>_uss
        for node_name in ('amcl', 'localization_slam_toolbox_node',
                          'slam_gmapping', 'async_slam_toolbox_node'):
            accumulated_cpu_time = get_yaml_by_path(
                metrics_dict,
                ['cpu_and_memory_usage', node_name + '_accumulated_cpu_time'])
            if accumulated_cpu_time is not None:
                run_record[
                    'normalised_cpu_time'] = accumulated_cpu_time / run_execution_time
                run_record['max_memory'] = get_yaml_by_path(
                    metrics_dict,
                    ['cpu_and_memory_usage', node_name + '_uss'])

        df = df.append(run_record, ignore_index=True)

        print_info("collect_data: reading run data: {}%".format(
            int((i + 1) * 100 / len(run_folders))),
                   replace_previous_line=no_output)
        no_output = True

    # save cache (protocol=2 keeps the cache file readable from Python 2)
    if cache_file_path is not None:
        cache = {'df': df, 'parameter_names': parameter_names}
        with open(cache_file_path, 'wb') as f:
            pickle.dump(cache, f, protocol=2)

    return df, parameter_names
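
As above, a minimal usage sketch; the output folder path is an assumption, not part of the original code:

if __name__ == '__main__':
    # rebuild the cache from scratch and aggregate localization errors per environment
    run_data_df, params = collect_data("~/performance_modelling_output/test_localization/", invalidate_cache=True)
    if run_data_df is not None:
        print(run_data_df.groupby('environment_name')[['mean_absolute_error', 'normalised_cpu_time']].mean())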