def replay_on_holdout(args, action_selection, reg_and_traj_transferer, lfd_env, sim):
    loadresultfile = h5py.File(args.replay.loadresultfile, 'r')
    loadresult_items = eval_util.get_indexed_items(loadresultfile, task_list=args.tasks, task_file=args.taskfile,
                                                   i_start=args.i_start, i_end=args.i_end)

    num_successes = 0
    num_total = 0

    for i_task, task_info in loadresult_items:
        redprint("task %s" % i_task)

        for i_step in range(len(task_info)):
            redprint("task %s step %i" % (i_task, i_step))

            replay_results = eval_util.load_task_results_step(args.replay.loadresultfile, i_task, i_step)
            sim_state = replay_results['sim_state']

            if i_step > 0:  # sanity check for reproducibility
                sim_util.reset_arms_to_side(sim)
                if sim.simulation_state_equal(sim_state, sim.get_state()):
                    yellowprint("Reproducible results OK")
                else:
                    yellowprint("The replayed simulation state doesn't match the one from the result file")

            sim.set_state(sim_state)

            if args.replay.simulate_traj_steps is not None and i_step not in args.replay.simulate_traj_steps:
                continue

            if i_step in args.replay.compute_traj_steps:
                # re-compute the trajectory for this step from the recorded best action and scene state
                best_root_action = replay_results['best_action']
                scene_state = replay_results['scene_state']

                # plot the point cloud of the test scene
                handles = []
                if args.plotting:
                    handles.append(sim.env.plot3(scene_state.cloud[:, :3], 2,
                                                 scene_state.color if scene_state.color is not None else (0, 0, 1)))
                    sim.viewer.Step()

                test_aug_traj = reg_and_traj_transferer.transfer(GlobalVars.demos[best_root_action], scene_state,
                                                                 plotting=args.plotting)
            else:
                # otherwise, replay the trajectory saved in the result file
                test_aug_traj = replay_results['aug_traj']

            feasible, misgrasp = lfd_env.execute_augmented_trajectory(test_aug_traj, step_viewer=args.animation,
                                                                      interactive=args.interactive,
                                                                      check_feasible=args.eval.check_feasible)

        # a task counts as a success if the last replayed step resulted in a knot
        if replay_results['knot']:
            num_successes += 1
        num_total += 1

        redprint('REPLAY Successes / Total: ' + str(num_successes) + '/' + str(num_total))
def estimate_performance(fname):
    results_file = h5py.File(fname, 'r')
    loadresult_items = eval_util.get_indexed_items(results_file)

    num_knots = 0
    num_misgrasps = 0
    num_infeasible = 0
    action_time = 0
    exec_time = 0

    for i_task, task_info in loadresult_items:
        knot_exists = False
        infeasible = False
        misgrasp = False

        for i_step in range(len(task_info)):
            results = eval_util.load_task_results_step(fname, i_task, i_step)
            eval_stats = results['eval_stats']

            misgrasp |= eval_stats.misgrasp
            infeasible |= not eval_stats.feasible
            action_time += eval_stats.action_elapsed_time
            exec_time += eval_stats.exec_elapsed_time

            if results['knot']:
                knot_exists = True
            elif i_step == len(task_info) - 1:
                # print the ids of tasks that never resulted in a knot
                print i_task

        if infeasible:
            num_infeasible += 1
        if misgrasp:
            num_misgrasps += 1
        if knot_exists:
            num_knots += 1

    num_tasks = len(loadresult_items)
    print "# Misgrasps:", num_misgrasps
    print "# Infeasible:", num_infeasible
    print "Time taken to choose demo:", action_time, "seconds"
    print "Time taken to warp and execute demo:", exec_time, "seconds"
    return num_knots, num_tasks
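# Example usage of estimate_performance (illustrative sketch only; the results
# path below is hypothetical). The returned (num_knots, num_tasks) pair can be
# combined into a success rate, e.g.:
#   num_knots, num_tasks = estimate_performance('data/results/holdout_results.h5')
#   if num_tasks > 0:
#       print "Success rate: %d/%d (%.1f%%)" % (num_knots, num_tasks, 100.0 * num_knots / num_tasks)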