def main(args):
    """Run a trained policy, either in the simulator (with GUI) or on real robots.

    Loops forever, resetting the env at the end of each episode; the env is
    always closed on exit.
    """
    # Resolve the config path, falling back to interactive run selection.
    config_path = args.config_path
    if config_path is None:
        config_path = utils.select_run()
    if config_path is None:
        return
    print(config_path)
    cfg = utils.load_config(config_path)

    # Set up the environment (real robots vs. simulated GUI env)
    if args.real:
        robot_indices = [int(token) for token in args.real_robot_indices.split(',')]
        cube_indices = [int(token) for token in args.real_cube_indices.split(',')]
        env = utils.get_env_from_cfg(
            cfg, real=True,
            real_robot_indices=robot_indices,
            real_cube_indices=cube_indices)
    else:
        env = utils.get_env_from_cfg(cfg, show_gui=True)

    # Set up the policy
    policy = utils.get_policy_from_cfg(cfg, env.get_robot_group_types())

    # Step the policy forever, resetting whenever an episode finishes
    state = env.reset()
    try:
        while True:
            state, _, done, _ = env.step(policy.step(state))
            if done:
                state = env.reset()
    finally:
        env.close()
def main(args):
    """Evaluate a trained run and save the results to an .npy file.

    The run is chosen via --config-path, falling back to interactive run
    selection; results are written to <eval_dir>/<run_name>.npy.
    """
    config_path = args.config_path
    if config_path is None:
        config_path = utils.select_run()
    if config_path is None:
        return
    cfg = utils.load_config(config_path)

    eval_dir = utils.get_eval_dir()
    # Fix: create the output directory unconditionally and up front.
    # exist_ok=True already tolerates an existing directory, so the previous
    # `if not eval_dir.exists()` pre-check was redundant and race-prone, and
    # creating the directory before the (potentially long) evaluation means a
    # permissions/path error surfaces immediately instead of after the run.
    eval_dir.mkdir(parents=True, exist_ok=True)
    eval_path = eval_dir / '{}.npy'.format(cfg.run_name)

    data = run_eval(cfg)
    # dtype=object: eval data is heterogeneous; np.load will need allow_pickle=True
    np.save(eval_path, np.array(data, dtype=object))
    print(eval_path)
def main(args):
    """Run a trained policy in a GUI simulation, looping over episodes forever."""
    # Resolve the config path; fall back to interactive run selection.
    config_path = args.config_path
    if config_path is None:
        config_path = utils.select_run()
        if config_path is None:
            print('Please provide a config path')
            return

    cfg = utils.read_config(config_path)
    env = utils.get_env_from_cfg(cfg, use_gui=True)
    policy = utils.get_policy_from_cfg(cfg, env.get_action_space())

    # Run episodes back to back, resetting whenever one finishes
    obs = env.reset()
    while True:
        selected_action, _ = policy.step(obs)
        obs, _, episode_done, _ = env.step(selected_action)
        if episode_done:
            obs = env.reset()
def main(args):
    """Evaluate a trained run and save the results to <logs_dir>/../eval/<run_name>.npy."""
    config_path = args.config_path
    if config_path is None:
        config_path = utils.select_run()
    if config_path is None:
        print('Please provide a config path')
        return
    cfg = utils.read_config(config_path)

    # The eval dir lives alongside the logs dir
    eval_dir = Path(cfg.logs_dir).parent / 'eval'
    # Fix: exist_ok=True already tolerates an existing directory, so the
    # previous `if not eval_dir.exists()` pre-check was redundant and
    # race-prone (TOCTOU); create unconditionally.
    eval_dir.mkdir(parents=True, exist_ok=True)
    eval_path = eval_dir / '{}.npy'.format(cfg.run_name)

    data = _run_eval(cfg)
    np.save(eval_path, data)
    print(eval_path)
def main(args):
    """Run trained TF policies in a GUI simulation, one policy per robot group."""
    # Resolve the config path; fall back to interactive run selection.
    config_path = args.config_path
    if config_path is None:
        config_path = utils.select_run()
        if config_path is None:
            return
    print(config_path)
    cfg = utils.load_config(config_path)

    # Build the simulation env and wrap it for TF-Agents
    sim_env = utils.get_env_from_cfg(cfg, show_gui=True)
    tf_env = components.get_tf_py_env(sim_env, cfg.num_input_channels)

    # One policy per robot group
    policies = components.load_policies(cfg)

    # Step forever, dispatching each step to the policy of the active robot group
    time_step = tf_env.reset()
    while True:
        group_index = tf_env.pyenv.envs[0].current_robot_group_index()
        action_step = policies[group_index].action(time_step)
        time_step = tf_env.step(action_step.action)
    # NOTE(review): fragment — the `]` below closes a literal whose opening
    # (and the enclosing function's definition) is above this chunk; the
    # surrounding indentation here is reconstructed and should be confirmed.
    ]
    # Persist the training checkpoint to disk
    torch.save(checkpoint, str(checkpoint_path))

    # Save updated config file so the run records its latest policy/checkpoint paths
    cfg.policy_path = str(policy_path)
    cfg.checkpoint_path = str(checkpoint_path)
    utils.save_config(log_dir / 'config.yml', cfg)

    # Remove old checkpoints, keeping only the one just written
    checkpoint_paths = list(
        checkpoint_dir.glob('checkpoint_*.pth.tar'))
    checkpoint_paths.remove(checkpoint_path)
    for old_checkpoint_path in checkpoint_paths:
        old_checkpoint_path.unlink()

    env.close()

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config-path')
    config_path = parser.parse_args().config_path
    if config_path is None:
        # On macOS (local dev), default to a local config; otherwise pick a run interactively
        if sys.platform == 'darwin':
            config_path = 'config/local/lifting_4-small_empty-local.yml'
        else:
            config_path = utils.select_run()
    if config_path is not None:
        config_path = utils.setup_run(config_path)
        main(utils.load_config(config_path))
def main(args):
    """Run a trained policy on a physical robot using pose estimates from the aruco server.

    Mirrors the physical state into a simulation env (via the received poses),
    steps the policy in the sim, and executes the selected actions on the real
    robot through the action executor. Loops until interrupted.
    """
    # Connect to aruco server for pose estimates
    try:
        conn = Client(('localhost', 6000), authkey=b'secret password')
    except ConnectionRefusedError:
        print('Could not connect to aruco server for pose estimates')
        return

    try:
        # Create action executor for the physical robot
        action_executor = vector_action_executor.VectorActionExecutor(
            args.robot_index)

        # Create env
        config_path = args.config_path
        if config_path is None:
            config_path = utils.select_run()
        if config_path is None:
            print('Please provide a config path')
            return
        cfg = utils.read_config(config_path)
        kwargs = {'num_cubes': args.num_cubes}
        if args.debug:
            kwargs['use_gui'] = True
        cube_indices = list(range(args.num_cubes))
        env = utils.get_env_from_cfg(cfg, physical_env=True, robot_index=action_executor.robot_index, cube_indices=cube_indices, **kwargs)
        env.reset()

        # Create policy
        policy = utils.get_policy_from_cfg(cfg, env.get_action_space())

        # Debug visualization
        if args.debug:
            cv2.namedWindow('out', cv2.WINDOW_NORMAL)
            #cv2.resizeWindow('out', 960, 480)

        try:
            while True:
                # Get new pose estimates; drain the pipe so only the latest is kept
                poses = None
                while conn.poll():  # ensure up-to-date data
                    poses = conn.recv()
                if poses is None:
                    continue

                # Update poses in the simulation
                env.update_poses(poses)

                # Get new action
                state = env.get_state()
                if action_executor.is_action_completed() and args.debug:
                    action, info = policy.step(state, debug=True)

                    # Visualize
                    assert not cfg.use_steering_commands
                    output = info['output'].cpu().numpy()
                    cv2.imshow(
                        'out',
                        utils.get_state_and_output_visualization(
                            state, output)[:, :, ::-1])
                    cv2.waitKey(1)
                else:
                    action, _ = policy.step(state)

                # Run selected action through simulation
                try_action_result = env.try_action(action)

                if action_executor.is_action_completed():
                    # Update action executor
                    action_executor.update_try_action_result(try_action_result)

                # Run action executor
                action_executor.step(poses)
        finally:
            action_executor.disconnect()
    finally:
        # Fix: close the aruco-server connection on every exit path
        # (previously leaked on the early config-path return and on loop exit)
        conn.close()