def test_generate_kpis(self):
    self.assertIn('recording.bag', os.listdir(ROOT_PATH),
                  'recording.bag cannot be found')
    sim_eval = Evaluation(ROSBAG, RESULTS_DIR)
    sim_eval.compute_kpis()
    self.assertTrue(type(sim_eval.get_kpis()) == dict,
                    'KPIs structure is not a dictionary')
def test_generate_kpis(self):
    runner = SimulationRunner(PARAMS, TASK, RESULTS_DIR, True)
    runner.run(PARAMS)
    self.assertIn('recording.bag',
                  os.listdir(runner.current_sim_results_dir),
                  'recording.bag cannot be found')
    sim_eval = Evaluation(runner.recording_filename,
                          runner.current_sim_results_dir)
    sim_eval.compute_kpis()
    self.assertTrue(type(sim_eval.get_kpis()) == dict,
                    'KPIs structure is not a dictionary')
def gen_evaluation(output_dir, bag_filename, task_filename):
    """Create a new evaluation object for a ROS bag."""
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    sim_eval = Evaluation(bag_filename, output_dir)
    sim_eval.save_evaluation()
    if os.path.isfile(task_filename):
        shutil.copy(task_filename, output_dir)
    del sim_eval
def gen_evaluations(bags, output_dir):
    """Generate evaluation instances for each ROS bag file in the bags array."""
    sim_evals = list()
    for bag in bags:
        print '\tOPENING BAG: ', bag
        sim_evals.append(Evaluation(bag, output_dir))
    return sim_evals
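# Hedged usage sketch (not part of the original sources): one way the two
# helpers above could be driven. The results folder layout and the task file
# name 'task.yml' are assumptions for illustration only.
import glob
import os

results_root = '/tmp/results'  # assumed root folder with one sub-folder per run
bag_files = sorted(glob.glob(os.path.join(results_root, '*', 'recording.bag')))

# One evaluation folder per bag, copying the task file along when it exists
for bag in bag_files:
    run_dir = os.path.dirname(bag)
    gen_evaluation(os.path.join(run_dir, 'evaluation'), bag,
                   os.path.join(run_dir, 'task.yml'))

# In-memory evaluation objects, e.g. for comparison plots across runs
sim_evals = gen_evaluations(bag_files, os.path.join(results_root, 'comparison'))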
def test_store_kpis(self):
    self.assertIn('recording.bag', os.listdir(ROOT_PATH),
                  'recording.bag cannot be found')
    sim_eval = Evaluation(ROSBAG, RESULTS_DIR)
    sim_eval.compute_kpis()
    sim_eval.save_kpis()
    self.assertIn('computed_kpis.yaml', os.listdir(RESULTS_DIR),
                  'KPIs were not stored in file computed_kpis.yaml')
    self.assertIn('kpi_labels.yaml', os.listdir(RESULTS_DIR),
                  'KPI labels were not stored in file kpi_labels.yaml')
def test_store_kpis(self):
    runner = SimulationRunner(PARAMS, TASK, RESULTS_DIR, True)
    runner.run(PARAMS)
    self.assertIn('recording.bag',
                  os.listdir(runner.current_sim_results_dir),
                  'recording.bag cannot be found')
    sim_eval = Evaluation(runner.recording_filename,
                          runner.current_sim_results_dir)
    sim_eval.compute_kpis()
    sim_eval.save_kpis()
    self.assertIn('computed_kpis.yaml',
                  os.listdir(runner.current_sim_results_dir),
                  'KPIs were not stored in file computed_kpis.yaml')
    self.assertIn('kpi_labels.yaml',
                  os.listdir(runner.current_sim_results_dir),
                  'KPI labels were not stored in file kpi_labels.yaml')
def test_store_images(self):
    self.assertIn('recording.bag', os.listdir(ROOT_PATH),
                  'recording.bag cannot be found')
    sim_eval = Evaluation(ROSBAG, RESULTS_DIR)
    sim_eval.compute_kpis()
    sim_eval.save_evaluation()
    pdf_files = list()
    for f in os.listdir(RESULTS_DIR):
        if '.pdf' in f:
            pdf_files.append(f)
    self.assertGreater(len(pdf_files), 0, 'PDF files were not generated')
def test_store_images(self):
    runner = SimulationRunner(PARAMS, TASK, RESULTS_DIR, True)
    runner.run(PARAMS)
    self.assertIn('recording.bag',
                  os.listdir(runner.current_sim_results_dir),
                  'recording.bag cannot be found')
    sim_eval = Evaluation(runner.recording_filename,
                          runner.current_sim_results_dir)
    sim_eval.compute_kpis()
    sim_eval.save_evaluation()
    pdf_files = list()
    for f in os.listdir(runner.current_sim_results_dir):
        if '.pdf' in f:
            pdf_files.append(f)
    self.assertGreater(len(pdf_files), 0, 'PDF files were not generated')
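# Hedged sketch of the module-level fixtures the tests above rely on. The
# values below are assumptions for illustration only; the original test
# module defines its own ROOT_PATH, ROSBAG, RESULTS_DIR, PARAMS and TASK.
import os

ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
ROSBAG = os.path.join(ROOT_PATH, 'recording.bag')    # pre-recorded bag file
RESULTS_DIR = os.path.join(ROOT_PATH, 'results')     # evaluation output folder
TASK = os.path.join(ROOT_PATH, 'example_task.yml')   # hypothetical task description
PARAMS = dict()                                      # hypothetical controller parameters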
task = opt_config['task']
results_dir = opt_config['output_dir']
record_all = False
params = parse_input(args, opt_config['input_map'])
if 'store_all_results' in opt_config:
    record_all = opt_config['store_all_results']
try:
    runner = SimulationRunner(params, task, results_dir, record_all)
    runner.run(params)
    sim_eval = Evaluation(runner.recording_filename,
                          runner.current_sim_results_dir)
    output_path = deepcopy(runner.current_sim_results_dir)
    sim_eval.compute_kpis()
    if 'store_kpis_only' in opt_config:
        if opt_config['store_kpis_only']:
            sim_eval.save_kpis()
        else:
            sim_eval.save_evaluation()
    else:
        sim_eval.save_kpis()
    cost = 0.0
    for tag in opt_config['cost_fcn']:
        cost += sim_eval.get_kpi(tag) * opt_config['cost_fcn'][tag]
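# Minimal sketch of the weighted-sum cost computed above, assuming the KPIs
# were already written to a computed_kpis.yaml file. The KPI tags and weights
# in the commented example are hypothetical, not taken from the original
# configuration.
import yaml

def compute_cost(kpi_filename, cost_fcn):
    """Return the sum of weight * KPI value for the KPIs stored in a YAML file."""
    with open(kpi_filename, 'r') as k_file:
        kpis = yaml.safe_load(k_file)
    return sum(cost_fcn[tag] * kpis[tag] for tag in cost_fcn)

# Example (hypothetical tags and weights):
#   cost = compute_cost('computed_kpis.yaml',
#                       {'rmse_position': 1.0, 'rmse_yaw': 0.5})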
def plot_comparison_pose_error(output_dir, bags, labels, title, filename):
    """Generate comparative plots for the ROS bags in the bags array
    regarding the position and heading errors.
    """
    assert len(labels) == len(bags), 'Number of labels and bags is different'
    fig = plt.figure(figsize=(PLOT_CONFIGS['plot']['figsize'][0],
                              PLOT_CONFIGS['plot']['figsize'][1]))
    ax = fig.gca()
    min_t = None
    max_t = None
    min_pos = 0.0
    max_pos = 0.0
    for i in range(len(bags)):
        sim_eval = Evaluation(bags[i], output_dir)
        t = sim_eval.get_error_time()
        if min_t is None:
            min_t = np.min(t)
            max_t = np.max(t)
        else:
            min_t = np.min([np.min(t), min_t])
            max_t = np.min([np.max(t), max_t])
        error = sim_eval.get_error_from_data('position')
        min_pos = np.min([np.min(error), min_pos])
        max_pos = np.max([np.max(error), max_pos])
        ax.plot(sim_eval.get_error_time(),
                sim_eval.get_error_from_data('position'),
                linewidth=PLOT_CONFIGS['plot']['linewidth'],
                label=labels[i],
                zorder=len(bags) - i)
        fig.canvas.draw()
        del sim_eval
    sim_eval = Evaluation(bags[0], output_dir)
    plot_disturbance_areas(fig, ax, sim_eval, min_pos, max_pos)
    del sim_eval
    ax.set_xlabel('Time [s]',
                  fontsize=PLOT_CONFIGS['plot']['label_fontsize'])
    ax.set_ylabel('Position error [m]',
                  fontsize=PLOT_CONFIGS['plot']['label_fontsize'])
    ax.legend(fancybox=True, framealpha=0.5, loc='upper left',
              fontsize=PLOT_CONFIGS['plot']['legend']['fontsize'])
    ax.grid(True)
    ax.tick_params(axis='both',
                   labelsize=PLOT_CONFIGS['plot']['tick_fontsize'])
    ax.set_xlim(min_t, max_t)
    ax.set_ylim(min_pos, max_pos)
    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'position_' + filename))
    plt.close(fig)

    # Plotting heading error
    fig = plt.figure(figsize=(PLOT_CONFIGS['plot']['figsize'][0],
                              PLOT_CONFIGS['plot']['figsize'][1]))
    ax = fig.gca()
    min_t = None
    max_t = None
    min_yaw = 0.0
    max_yaw = 0.0
    for i in range(len(bags)):
        sim_eval = Evaluation(bags[i], output_dir)
        t = sim_eval.get_error_time()
        if min_t is None:
            min_t = np.min(t)
            max_t = np.max(t)
        else:
            min_t = np.min([np.min(t), min_t])
            max_t = np.min([np.max(t), max_t])
        error = sim_eval.get_error_set_data('yaw')
        min_yaw = np.min([np.min(error), min_yaw])
        max_yaw = np.max([np.max(error), max_yaw])
        ax.plot(sim_eval.get_error_time(),
                sim_eval.get_error_set_data('yaw'),
                linewidth=PLOT_CONFIGS['plot']['linewidth'],
                label=labels[i],
                zorder=len(bags) - i)
        fig.canvas.draw()
        del sim_eval
    sim_eval = Evaluation(bags[0], output_dir)
    plot_disturbance_areas(fig, ax, sim_eval, min_yaw, max_yaw)
    del sim_eval
    ax.set_xlabel('Time [s]',
                  fontsize=PLOT_CONFIGS['plot']['label_fontsize'])
    ax.set_ylabel('Heading error [rad]',
                  fontsize=PLOT_CONFIGS['plot']['label_fontsize'])
    ax.legend(fancybox=True, framealpha=0.5, loc='upper left',
              fontsize=PLOT_CONFIGS['plot']['legend']['fontsize'])
    ax.grid(True)
    ax.tick_params(axis='both',
                   labelsize=PLOT_CONFIGS['plot']['tick_fontsize'])
    ax.set_xlim(min_t, max_t)
    ax.set_ylim(min_yaw, max_yaw)
    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'heading_' + filename))
    plt.close(fig)
def plot_paths(output_dir, bags, labels, title, filename):
    """Generate path plots for the ROS bags provided."""
    assert len(labels) == len(bags), 'Number of labels and bags is different'
    fig = plt.figure(figsize=(PLOT_CONFIGS['paths']['figsize'][0],
                              PLOT_CONFIGS['paths']['figsize'][1]))
    ax = fig.gca(projection='3d')
    target_path = False
    min_z = None
    max_z = None
    for i in range(len(bags)):
        sim_eval = Evaluation(bags[i], output_dir)
        if not target_path:
            traj = sim_eval.get_trajectory_coord('desired')
            ax.plot(traj[0], traj[1], traj[2], 'g--',
                    label='Reference path',
                    linewidth=PLOT_CONFIGS['paths']['linewidth'])
            fig.canvas.draw()
            target_path = True
        traj = sim_eval.get_trajectory_coord('actual')
        ax.plot(traj[0], traj[1], traj[2],
                label=labels[i],
                linewidth=PLOT_CONFIGS['paths']['linewidth'])
        if min_z is None:
            min_z = np.min(traj[2])
            max_z = np.max(traj[2])
        else:
            min_z = min(np.min(traj[2]), min_z)
            max_z = max(np.max(traj[2]), max_z)
        fig.canvas.draw()
    ax.set_xlabel('X [m]', fontsize=PLOT_CONFIGS['paths']['label_fontsize'])
    ax.set_ylabel('Y [m]', fontsize=PLOT_CONFIGS['paths']['label_fontsize'])
    ax.set_zlabel('Z [m]', fontsize=PLOT_CONFIGS['paths']['label_fontsize'])
    ax.tick_params(axis='x',
                   labelsize=PLOT_CONFIGS['paths']['tick_fontsize'], pad=15)
    ax.tick_params(axis='y',
                   labelsize=PLOT_CONFIGS['paths']['tick_fontsize'], pad=15)
    ax.tick_params(axis='z',
                   labelsize=PLOT_CONFIGS['paths']['tick_fontsize'], pad=15)
    ax.xaxis.labelpad = 30
    ax.yaxis.labelpad = 30
    ax.zaxis.labelpad = 30
    ax.set_zlim(min_z - 1, max_z + 1)
    ax.set_title(title, fontsize=PLOT_CONFIGS['paths']['title_fontsize'])
    ax.legend(loc=PLOT_CONFIGS['paths']['legend']['loc'],
              fancybox=True, framealpha=0.8,
              fontsize=PLOT_CONFIGS['paths']['legend']['fontsize'])
    ax.grid(True)
    fig.tight_layout()
    fig.savefig(os.path.join(output_dir, filename))
    plt.close(fig)
logger.info('Processing directory = ' + d)
logger.info('Directory label = ' + label)
tasks[d] = list()
kpis[d] = list()
tasks_cost_fcn[d] = list()
for item in sorted(os.listdir(d)):
    p = os.path.join(d, item)
    if os.path.isdir(p):
        if 'recording.bag' not in os.listdir(p):
            continue
        cur_kpi = None
        try:
            if 'computed_kpis.yaml' not in os.listdir(p):
                logger.info('KPIs are not yet available')
                logger.info('Computing KPIs')
                sim_eval = Evaluation(os.path.join(p, 'recording.bag'), p)
                sim_eval.save_kpis()
                del sim_eval
            for f in os.listdir(p):
                if 'computed_kpis' in f:
                    kpi_filename = os.path.join(p, f)
                    with open(kpi_filename, 'r') as k_file:
                        cur_kpi = yaml.load(k_file)
            if cost_fcn is not None:
                tasks_cost_fcn[d].append(0.0)
                for tag in cost_fcn:
                    tasks_cost_fcn[d][-1] += cost_fcn[tag] * cur_kpi[tag]
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import argparse
import numpy as np
import os
import yaml
import sys

from bag_evaluation import Evaluation

import roslib
import rospy

roslib.load_manifest('uuv_control_evaluation')

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Analyze bag file")
    parser.add_argument("bagfile", type=str)
    parser.add_argument("output_dir", type=str)
    args = parser.parse_args(rospy.myargv()[1:])

    sim_eval = Evaluation(args.bagfile, args.output_dir)
    sim_eval.compute_kpis()
    sim_eval.save_evaluation()
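# Hedged usage note (not from the original sources): the script above is a
# command-line entry point; the script file name below is a placeholder and
# the paths are illustrative only.
#
#   rosrun uuv_control_evaluation <evaluation_script>.py \
#       /path/to/recording.bag /path/to/output_dir
#
# It opens the bag, computes the KPIs and writes the full evaluation output
# (KPI YAML files and plots) into the given output directory.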
tag, value = item.split('=')
parsed_params[tag] = float(value[1:-1])
params = parse_input(parsed_params, opt_config['input_map'])
task = os.path.join(args.input_dir, opt_config['task'])
print idx, sub_result_folder
runner = SimulationRunner(params, task, sub_result_folder, True,
                          add_folder_timestamp=False)
runner.run(params)
sim_eval = Evaluation(runner.recording_filename,
                      runner.current_sim_results_dir)
sim_eval.compute_kpis()
sim_eval.save_evaluation()
if desired is None:
    desired = sim_eval.get_trajectory_coord('desired')
traj.append(sim_eval.get_trajectory_coord('actual'))
error_t.append(sim_eval._error_set.get_time())
error_vec.append(
    KPI.get_error(sim_eval._error_set.get_data('position')))
error_yaw_vec.append(sim_eval._error_set.get_data('yaw'))
if t_cur is None:
    t_cur, vec_cur = sim_eval._bag.get_current_vel()