import argparse
import logging
import sys

import chainer
from chainer import optimizers
import gym
from gym import spaces
import numpy as np

import chainerrl
from chainerrl.agents.ddpg import DDPG
from chainerrl.agents.ddpg import DDPGModel
from chainerrl import experiments
from chainerrl import explorers
from chainerrl import misc
from chainerrl import policy
from chainerrl import q_functions
from chainerrl import replay_buffer


def main():
    logging.basicConfig(level=logging.DEBUG)

    parser = argparse.ArgumentParser()
    parser.add_argument('--outdir', type=str, default='results',
                        help='Directory path to save output files.'
                             ' If it does not exist, it will be created.')
    parser.add_argument('--env', type=str, default='Humanoid-v2')
    parser.add_argument('--seed', type=int, default=0,
                        help='Random seed [0, 2 ** 32)')
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--final-exploration-steps', type=int, default=10**6)
    parser.add_argument('--actor-lr', type=float, default=1e-4)
    parser.add_argument('--critic-lr', type=float, default=1e-3)
    parser.add_argument('--load', type=str, default='')
    parser.add_argument('--steps', type=int, default=10**7)
    parser.add_argument('--n-hidden-channels', type=int, default=300)
    parser.add_argument('--n-hidden-layers', type=int, default=3)
    parser.add_argument('--replay-start-size', type=int, default=5000)
    parser.add_argument('--n-update-times', type=int, default=1)
    parser.add_argument('--target-update-interval', type=int, default=1)
    parser.add_argument('--target-update-method', type=str, default='soft',
                        choices=['hard', 'soft'])
    parser.add_argument('--soft-update-tau', type=float, default=1e-2)
    parser.add_argument('--update-interval', type=int, default=4)
    parser.add_argument('--eval-n-runs', type=int, default=100)
    parser.add_argument('--eval-interval', type=int, default=10**5)
    parser.add_argument('--gamma', type=float, default=0.995)
    parser.add_argument('--minibatch-size', type=int, default=200)
    parser.add_argument('--render', action='store_true')
    parser.add_argument('--demo', action='store_true')
    parser.add_argument('--use-bn', action='store_true', default=False)
    parser.add_argument('--monitor', action='store_true')
    parser.add_argument('--reward-scale-factor', type=float, default=1e-2)
    args = parser.parse_args()

    args.outdir = experiments.prepare_output_dir(
        args, args.outdir, argv=sys.argv)
    print('Output files are saved in {}'.format(args.outdir))

    # Set a random seed used in ChainerRL
    misc.set_random_seed(args.seed, gpus=(args.gpu,))

    def clip_action_filter(a):
        return np.clip(a, action_space.low, action_space.high)

    def reward_filter(r):
        return r * args.reward_scale_factor

    def make_env(test):
        env = gym.make(args.env)
        # Use different random seeds for train and test envs
        env_seed = 2**32 - 1 - args.seed if test else args.seed
        env.seed(env_seed)
        # Cast observations to float32 because our model uses float32
        env = chainerrl.wrappers.CastObservationToFloat32(env)
        if args.monitor:
            env = chainerrl.wrappers.Monitor(env, args.outdir)
        if isinstance(env.action_space, spaces.Box):
            misc.env_modifiers.make_action_filtered(env, clip_action_filter)
        if not test:
            # Scale rewards (and thus returns) to a reasonable range so that
            # training is easier
            env = chainerrl.wrappers.ScaleReward(
                env, args.reward_scale_factor)
        if args.render and not test:
            env = chainerrl.wrappers.Render(env)
        return env

    env = make_env(test=False)
    timestep_limit = env.spec.tags.get(
        'wrapper_config.TimeLimit.max_episode_steps')
    obs_size = np.asarray(env.observation_space.shape).prod()
    action_space = env.action_space
    action_size = np.asarray(action_space.shape).prod()

    if args.use_bn:
        q_func = q_functions.FCBNLateActionSAQFunction(
            obs_size, action_size,
            n_hidden_channels=args.n_hidden_channels,
            n_hidden_layers=args.n_hidden_layers,
            normalize_input=True)
        pi = policy.FCBNDeterministicPolicy(
            obs_size, action_size=action_size,
            n_hidden_channels=args.n_hidden_channels,
            n_hidden_layers=args.n_hidden_layers,
            min_action=action_space.low, max_action=action_space.high,
            bound_action=True,
            normalize_input=True)
    else:
        q_func = q_functions.FCSAQFunction(
            obs_size, action_size,
            n_hidden_channels=args.n_hidden_channels,
            n_hidden_layers=args.n_hidden_layers)
        pi = policy.FCDeterministicPolicy(
            obs_size, action_size=action_size,
            n_hidden_channels=args.n_hidden_channels,
            n_hidden_layers=args.n_hidden_layers,
            min_action=action_space.low, max_action=action_space.high,
            bound_action=True)
    model = DDPGModel(q_func=q_func, policy=pi)
    opt_a = optimizers.Adam(alpha=args.actor_lr)
    opt_c = optimizers.Adam(alpha=args.critic_lr)
    opt_a.setup(model['policy'])
    opt_c.setup(model['q_function'])
    opt_a.add_hook(chainer.optimizer.GradientClipping(1.0), 'hook_a')
    opt_c.add_hook(chainer.optimizer.GradientClipping(1.0), 'hook_c')

    rbuf = replay_buffer.ReplayBuffer(5 * 10**5)

    def random_action():
        a = action_space.sample()
        if isinstance(a, np.ndarray):
            a = a.astype(np.float32)
        return a

    ou_sigma = (action_space.high - action_space.low) * 0.2
    explorer = explorers.AdditiveOU(sigma=ou_sigma)

    agent = DDPG(model, opt_a, opt_c, rbuf, gamma=args.gamma,
                 explorer=explorer,
                 replay_start_size=args.replay_start_size,
                 target_update_method=args.target_update_method,
                 target_update_interval=args.target_update_interval,
                 update_interval=args.update_interval,
                 soft_update_tau=args.soft_update_tau,
                 n_times_update=args.n_update_times,
                 gpu=args.gpu, minibatch_size=args.minibatch_size)

    if len(args.load) > 0:
        agent.load(args.load)

    eval_env = make_env(test=True)
    if args.demo:
        eval_stats = experiments.eval_performance(
            env=eval_env,
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_runs,
            max_episode_len=timestep_limit)
        print('n_runs: {} mean: {} median: {} stdev {}'.format(
            args.eval_n_runs, eval_stats['mean'], eval_stats['median'],
            eval_stats['stdev']))
    else:
        experiments.train_agent_with_evaluation(
            agent=agent, env=env, steps=args.steps,
            eval_env=eval_env,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            outdir=args.outdir,
            train_max_episode_len=timestep_limit)
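# A minimal sketch of the soft target update selected above via
# target_update_method='soft' (illustrative, not ChainerRL's implementation).
# After each gradient step, the target network's parameters track the online
# network's by Polyak averaging with rate tau (soft_update_tau above).
import numpy as np

def soft_update(target_params, online_params, tau=1e-2):
    """In-place Polyak averaging: target <- (1 - tau) * target + tau * online."""
    for name, w in online_params.items():
        target_params[name] = (1.0 - tau) * target_params[name] + tau * w

# Example with toy parameter dictionaries:
target = {'W': np.zeros(3)}
online = {'W': np.ones(3)}
soft_update(target, online, tau=0.01)  # target['W'] is now [0.01, 0.01, 0.01]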
import argparse
import logging
import sys

import chainer
from chainer import optimizers
import gym
from gym import spaces
import numpy as np

from chainerrl.agents.ddpg import DDPG
from chainerrl.agents.ddpg import DDPGModel
from chainerrl import experiments
from chainerrl import explorers
from chainerrl import misc
from chainerrl import policy
from chainerrl import q_functions
from chainerrl import replay_buffer


def main():
    logging.basicConfig(level=logging.DEBUG)

    parser = argparse.ArgumentParser()
    parser.add_argument('--outdir', type=str, default='out')
    parser.add_argument('--env', type=str, default='Humanoid-v1')
    parser.add_argument('--seed', type=int, default=None)
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--final-exploration-steps', type=int, default=10**6)
    parser.add_argument('--actor-lr', type=float, default=1e-4)
    parser.add_argument('--critic-lr', type=float, default=1e-3)
    parser.add_argument('--load', type=str, default='')
    parser.add_argument('--steps', type=int, default=10**7)
    parser.add_argument('--n-hidden-channels', type=int, default=300)
    parser.add_argument('--n-hidden-layers', type=int, default=3)
    parser.add_argument('--replay-start-size', type=int, default=5000)
    parser.add_argument('--n-update-times', type=int, default=1)
    parser.add_argument('--target-update-frequency', type=int, default=1)
    parser.add_argument('--target-update-method', type=str, default='soft',
                        choices=['hard', 'soft'])
    parser.add_argument('--soft-update-tau', type=float, default=1e-2)
    parser.add_argument('--update-frequency', type=int, default=4)
    parser.add_argument('--eval-n-runs', type=int, default=100)
    parser.add_argument('--eval-frequency', type=int, default=10**5)
    parser.add_argument('--gamma', type=float, default=0.995)
    parser.add_argument('--minibatch-size', type=int, default=200)
    parser.add_argument('--render', action='store_true')
    parser.add_argument('--demo', action='store_true')
    parser.add_argument('--use-bn', action='store_true', default=False)
    parser.add_argument('--monitor', action='store_true')
    parser.add_argument('--reward-scale-factor', type=float, default=1e-2)
    args = parser.parse_args()

    args.outdir = experiments.prepare_output_dir(
        args, args.outdir, argv=sys.argv)
    print('Output files are saved in {}'.format(args.outdir))

    if args.seed is not None:
        misc.set_random_seed(args.seed)

    def clip_action_filter(a):
        return np.clip(a, action_space.low, action_space.high)

    def reward_filter(r):
        return r * args.reward_scale_factor

    def make_env():
        env = gym.make(args.env)
        if args.monitor:
            env = gym.wrappers.Monitor(env, args.outdir)
        if isinstance(env.action_space, spaces.Box):
            misc.env_modifiers.make_action_filtered(env, clip_action_filter)
        misc.env_modifiers.make_reward_filtered(env, reward_filter)
        if args.render:
            misc.env_modifiers.make_rendered(env)

        def __exit__(self, *args):
            pass
        env.__exit__ = __exit__
        return env

    env = make_env()
    timestep_limit = env.spec.tags.get(
        'wrapper_config.TimeLimit.max_episode_steps')
    obs_size = np.asarray(env.observation_space.shape).prod()
    action_space = env.action_space
    action_size = np.asarray(action_space.shape).prod()

    if args.use_bn:
        q_func = q_functions.FCBNLateActionSAQFunction(
            obs_size, action_size,
            n_hidden_channels=args.n_hidden_channels,
            n_hidden_layers=args.n_hidden_layers,
            normalize_input=True)
        pi = policy.FCBNDeterministicPolicy(
            obs_size, action_size=action_size,
            n_hidden_channels=args.n_hidden_channels,
            n_hidden_layers=args.n_hidden_layers,
            min_action=action_space.low, max_action=action_space.high,
            bound_action=True,
            normalize_input=True)
    else:
        q_func = q_functions.FCSAQFunction(
            obs_size, action_size,
            n_hidden_channels=args.n_hidden_channels,
            n_hidden_layers=args.n_hidden_layers)
        pi = policy.FCDeterministicPolicy(
            obs_size, action_size=action_size,
            n_hidden_channels=args.n_hidden_channels,
            n_hidden_layers=args.n_hidden_layers,
            min_action=action_space.low, max_action=action_space.high,
            bound_action=True)
    model = DDPGModel(q_func=q_func, policy=pi)
    opt_a = optimizers.Adam(alpha=args.actor_lr)
    opt_c = optimizers.Adam(alpha=args.critic_lr)
    opt_a.setup(model['policy'])
    opt_c.setup(model['q_function'])
    opt_a.add_hook(chainer.optimizer.GradientClipping(1.0), 'hook_a')
    opt_c.add_hook(chainer.optimizer.GradientClipping(1.0), 'hook_c')

    rbuf = replay_buffer.ReplayBuffer(5 * 10**5)

    def phi(obs):
        return obs.astype(np.float32)

    def random_action():
        a = action_space.sample()
        if isinstance(a, np.ndarray):
            a = a.astype(np.float32)
        return a

    ou_sigma = (action_space.high - action_space.low) * 0.2
    explorer = explorers.AdditiveOU(sigma=ou_sigma)

    agent = DDPG(model, opt_a, opt_c, rbuf, gamma=args.gamma,
                 explorer=explorer,
                 replay_start_size=args.replay_start_size,
                 target_update_method=args.target_update_method,
                 target_update_frequency=args.target_update_frequency,
                 update_frequency=args.update_frequency,
                 soft_update_tau=args.soft_update_tau,
                 n_times_update=args.n_update_times,
                 phi=phi, gpu=args.gpu,
                 minibatch_size=args.minibatch_size)
    agent.logger.setLevel(logging.DEBUG)

    if len(args.load) > 0:
        agent.load(args.load)

    if args.demo:
        mean, median, stdev = experiments.eval_performance(
            env=env,
            agent=agent,
            n_runs=args.eval_n_runs,
            max_episode_len=timestep_limit)
        print('n_runs: {} mean: {} median: {} stdev {}'.format(
            args.eval_n_runs, mean, median, stdev))
    else:
        experiments.train_agent_with_evaluation(
            agent=agent, env=env, steps=args.steps,
            eval_n_runs=args.eval_n_runs,
            eval_frequency=args.eval_frequency,
            outdir=args.outdir,
            max_episode_len=timestep_limit)
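# A minimal sketch of the Ornstein-Uhlenbeck process behind
# explorers.AdditiveOU used above (illustrative, not ChainerRL's exact
# implementation): temporally correlated noise is added to the deterministic
# policy's action, which explores more smoothly than i.i.d. Gaussian noise.
import numpy as np

class OUNoise:
    def __init__(self, size, theta=0.15, sigma=0.3):
        self.theta = theta  # mean-reversion rate
        self.sigma = sigma  # noise scale (cf. ou_sigma above)
        self.x = np.zeros(size, dtype=np.float32)

    def sample(self):
        # dx = -theta * x + sigma * N(0, 1); the state drifts back toward 0
        self.x = self.x - self.theta * self.x + self.sigma * np.random.randn(
            *self.x.shape).astype(np.float32)
        return self.x

# Usage: noisy_action = greedy_action + OUNoise(action_size).sample()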
import argparse
import logging
import os
import sys

import chainer
from chainer import functions as F
from chainer import links as L
from chainer import optimizers
import gym
import numpy as np

import chainerrl
from chainerrl.agents.ddpg import DDPG
from chainerrl.agents.ddpg import DDPGModel
from chainerrl import experiments
from chainerrl import explorers
from chainerrl import misc
from chainerrl import replay_buffer


def concat_obs_and_action(obs, action):
    """Concat observation and action to feed the critic."""
    return F.concat((obs, action), axis=-1)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--outdir', type=str, default='results',
                        help='Directory path to save output files.'
                             ' If it does not exist, it will be created.')
    parser.add_argument('--env', type=str, default='Hopper-v2',
                        help='OpenAI Gym MuJoCo env to perform algorithm on.')
    parser.add_argument('--seed', type=int, default=0,
                        help='Random seed [0, 2 ** 32)')
    parser.add_argument('--gpu', type=int, default=0,
                        help='GPU to use, set to -1 if no GPU.')
    parser.add_argument('--load', type=str, default='',
                        help='Directory to load agent from.')
    parser.add_argument('--steps', type=int, default=10**6,
                        help='Total number of timesteps to train the agent.')
    parser.add_argument('--eval-n-runs', type=int, default=10,
                        help='Number of episodes run for each evaluation.')
    parser.add_argument('--eval-interval', type=int, default=5000,
                        help='Interval in timesteps between evaluations.')
    parser.add_argument('--replay-start-size', type=int, default=10000,
                        help='Minimum replay buffer size before '
                             'performing gradient updates.')
    parser.add_argument('--batch-size', type=int, default=100,
                        help='Minibatch size')
    parser.add_argument('--render', action='store_true',
                        help='Render env states in a GUI window.')
    parser.add_argument('--demo', action='store_true',
                        help='Just run evaluation, not training.')
    parser.add_argument('--monitor', action='store_true',
                        help='Wrap env with gym.wrappers.Monitor.')
    parser.add_argument('--logger-level', type=int, default=logging.INFO,
                        help='Level of the root logger.')
    args = parser.parse_args()

    logging.basicConfig(level=args.logger_level)

    args.outdir = experiments.prepare_output_dir(
        args, args.outdir, argv=sys.argv)
    print('Output files are saved in {}'.format(args.outdir))

    # Set a random seed used in ChainerRL
    misc.set_random_seed(args.seed, gpus=(args.gpu,))

    def make_env(test):
        env = gym.make(args.env)
        # Unwrap TimeLimit wrapper
        assert isinstance(env, gym.wrappers.TimeLimit)
        env = env.env
        # Use different random seeds for train and test envs
        env_seed = 2**32 - 1 - args.seed if test else args.seed
        env.seed(env_seed)
        # Cast observations to float32 because our model uses float32
        env = chainerrl.wrappers.CastObservationToFloat32(env)
        if args.monitor:
            env = gym.wrappers.Monitor(env, args.outdir)
        if args.render and not test:
            env = chainerrl.wrappers.Render(env)
        return env

    env = make_env(test=False)
    timestep_limit = env.spec.tags.get(
        'wrapper_config.TimeLimit.max_episode_steps')
    obs_space = env.observation_space
    action_space = env.action_space
    print('Observation space:', obs_space)
    print('Action space:', action_space)

    action_size = action_space.low.size

    winit = chainer.initializers.LeCunUniform(3**-0.5)
    q_func = chainer.Sequential(
        concat_obs_and_action,
        L.Linear(None, 400, initialW=winit),
        F.relu,
        L.Linear(None, 300, initialW=winit),
        F.relu,
        L.Linear(None, 1, initialW=winit),
    )
    policy = chainer.Sequential(
        L.Linear(None, 400, initialW=winit),
        F.relu,
        L.Linear(None, 300, initialW=winit),
        F.relu,
        L.Linear(None, action_size, initialW=winit),
        F.tanh,
        chainerrl.distribution.ContinuousDeterministicDistribution,
    )
    model = DDPGModel(q_func=q_func, policy=policy)

    # Draw the computational graph and save it in the output directory.
    fake_obs = chainer.Variable(
        model.xp.zeros_like(obs_space.low, dtype=np.float32)[None],
        name='observation')
    fake_action = chainer.Variable(
        model.xp.zeros_like(action_space.low, dtype=np.float32)[None],
        name='action')
    chainerrl.misc.draw_computational_graph(
        [policy(fake_obs)], os.path.join(args.outdir, 'policy'))
    chainerrl.misc.draw_computational_graph(
        [q_func(fake_obs, fake_action)], os.path.join(args.outdir, 'q_func'))

    opt_a = optimizers.Adam()
    opt_c = optimizers.Adam()
    opt_a.setup(model['policy'])
    opt_c.setup(model['q_function'])

    rbuf = replay_buffer.ReplayBuffer(10**6)

    explorer = explorers.AdditiveGaussian(
        scale=0.1, low=action_space.low, high=action_space.high)

    def burnin_action_func():
        """Select random actions until model is updated one or more times."""
        return np.random.uniform(
            action_space.low, action_space.high).astype(np.float32)

    # Hyperparameters in http://arxiv.org/abs/1802.09477
    agent = DDPG(
        model,
        opt_a,
        opt_c,
        rbuf,
        gamma=0.99,
        explorer=explorer,
        replay_start_size=args.replay_start_size,
        target_update_method='soft',
        target_update_interval=1,
        update_interval=1,
        soft_update_tau=5e-3,
        n_times_update=1,
        gpu=args.gpu,
        minibatch_size=args.batch_size,
        burnin_action_func=burnin_action_func,
    )

    if len(args.load) > 0:
        agent.load(args.load)

    eval_env = make_env(test=True)
    if args.demo:
        eval_stats = experiments.eval_performance(
            env=eval_env,
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_runs,
            max_episode_len=timestep_limit)
        print('n_runs: {} mean: {} median: {} stdev {}'.format(
            args.eval_n_runs, eval_stats['mean'], eval_stats['median'],
            eval_stats['stdev']))
    else:
        experiments.train_agent_with_evaluation(
            agent=agent,
            env=env,
            steps=args.steps,
            eval_env=eval_env,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            outdir=args.outdir,
            train_max_episode_len=timestep_limit)
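# A minimal sketch of what explorers.AdditiveGaussian(scale=0.1, low=..., high=...)
# does at action-selection time (illustrative, not ChainerRL's exact code):
# add i.i.d. Gaussian noise to the greedy action, then clip the result to the
# action space's bounds.
import numpy as np

def additive_gaussian_action(greedy_action, low, high, scale=0.1):
    noise = np.random.normal(scale=scale, size=greedy_action.shape)
    return np.clip(greedy_action + noise, low, high).astype(np.float32)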
agent = DDPG(model, opt_actor, opt_critic, rbuf,
             gamma=args.gamma,
             explorer=explorer,
             replay_start_size=5000,
             target_update_method='soft',
             target_update_interval=1,
             update_interval=4,
             soft_update_tau=1e-2,
             n_times_update=1,
             phi=phi,
             minibatch_size=128)  # args.minibatch_size

# Load the saved model
agent.load("DDPG_last_model_rew8k")

for i in range(args.test_episodes):
    obs = env.reset()
    done = False
    reward_Alan = 0
    t = 0
    while not done and t < 200:
        # env.render()
        # agent.load("DDPG_best_model")
        action = agent.act(obs)
        obs, r, done, info = env.step(action)
        reward_Alan += r
        t += 1
    print('Test episode:', i, 'Reward obtained:', reward_Alan)
    # agent.stop_episode()
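# The manual loop above can also be expressed with ChainerRL's built-in
# evaluation helper (a sketch; it assumes the same `env` and `agent` objects
# and caps episodes at 200 steps like the loop above):
from chainerrl import experiments

eval_stats = experiments.eval_performance(
    env=env, agent=agent, n_steps=None,
    n_episodes=args.test_episodes, max_episode_len=200)
print('mean: {} median: {} stdev: {}'.format(
    eval_stats['mean'], eval_stats['median'], eval_stats['stdev']))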
import argparse
import logging
import sys

import chainer
from chainer import optimizers
import gym
from gym import spaces
import numpy as np

import chainerrl
from chainerrl.agents.ddpg import DDPG
from chainerrl.agents.ddpg import DDPGModel
from chainerrl import experiments
from chainerrl import misc
from chainerrl import policy
from chainerrl import q_functions
from chainerrl import replay_buffer

# HEREnvWrapper and HERExplorer are project-specific helpers assumed to be
# defined elsewhere in this repository; they are not part of ChainerRL.


def main():
    logging.basicConfig(level=logging.DEBUG)

    parser = argparse.ArgumentParser()
    parser.add_argument('--outdir', type=str, default='results',
                        help='Directory path to save output files.'
                             ' If it does not exist, it will be created.')
    parser.add_argument('--env', type=str, default='FetchPickAndPlace-v1')
    parser.add_argument('--seed', type=int, default=0,
                        help='Random seed [0, 2 ** 32)')
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--final-exploration-steps', type=int, default=10 ** 6)
    parser.add_argument('--actor-lr', type=float, default=1e-3)
    parser.add_argument('--critic-lr', type=float, default=1e-3)
    parser.add_argument('--load', type=str, default='')
    parser.add_argument('--steps', type=int, default=200 * 50 * 16 * 50)
    parser.add_argument('--n-hidden-channels', type=int, default=64)
    parser.add_argument('--n-hidden-layers', type=int, default=3)
    parser.add_argument('--replay-start-size', type=int, default=10000)
    parser.add_argument('--n-update-times', type=int, default=40)
    parser.add_argument('--target-update-interval', type=int,
                        default=16 * 50)
    parser.add_argument('--target-update-method', type=str, default='soft',
                        choices=['hard', 'soft'])
    parser.add_argument('--soft-update-tau', type=float, default=1 - 0.95)
    parser.add_argument('--update-interval', type=int, default=16 * 50)
    parser.add_argument('--eval-n-runs', type=int, default=30)
    parser.add_argument('--eval-interval', type=int, default=50 * 16 * 50)
    parser.add_argument('--gamma', type=float, default=0.98)
    parser.add_argument('--minibatch-size', type=int, default=128)
    parser.add_argument('--render', action='store_true')
    parser.add_argument('--demo', action='store_true')
    parser.add_argument('--monitor', action='store_true')
    parser.add_argument('--epsilon', type=float, default=0.05)
    parser.add_argument('--noise-std', type=float, default=0.05)
    parser.add_argument('--clip-threshold', type=float, default=5.0)
    parser.add_argument('--num-envs', type=int, default=1)
    args = parser.parse_args()

    args.outdir = experiments.prepare_output_dir(
        args, args.outdir, argv=sys.argv)
    print('Output files are saved in {}'.format(args.outdir))

    # Set a random seed used in ChainerRL
    misc.set_random_seed(args.seed, gpus=(args.gpu,))

    def clip_action_filter(a):
        return np.clip(a, action_space.low, action_space.high)

    # Set different random seeds for different subprocesses.
    # If seed=0 and processes=4, subprocess seeds are [0, 1, 2, 3].
    # If seed=1 and processes=4, subprocess seeds are [4, 5, 6, 7].
    process_seeds = np.arange(args.num_envs) + args.seed * args.num_envs
    assert process_seeds.max() < 2 ** 32

    def make_env(idx, test):
        env = gym.make(args.env)
        # Use different random seeds for train and test envs
        process_seed = int(process_seeds[idx])
        env_seed = 2 ** 32 - 1 - process_seed if test else process_seed
        env.seed(env_seed)
        if args.monitor:
            env = gym.wrappers.Monitor(env, args.outdir)
        if isinstance(env.action_space, spaces.Box):
            misc.env_modifiers.make_action_filtered(env, clip_action_filter)
        if args.render and not test:
            env = chainerrl.wrappers.Render(env)
        if test:
            env = HEREnvWrapper(env, args.outdir)
        return env

    def make_batch_env(test):
        # Bind idx as a default argument so each lambda keeps its own index;
        # a bare `lambda: make_env(idx, test)` would make every subprocess use
        # the loop variable's final value.
        return chainerrl.envs.MultiprocessVectorEnv(
            [(lambda idx=idx: make_env(idx, test))
             for idx in range(args.num_envs)])

    sample_env = make_env(0, test=False)

    def reward_function(state, action, goal):
        return sample_env.compute_reward(
            achieved_goal=state['achieved_goal'],
            desired_goal=goal,
            info=None)

    timestep_limit = sample_env.spec.tags.get(
        'wrapper_config.TimeLimit.max_episode_steps')
    space_dict = sample_env.observation_space.spaces
    observation_space = space_dict['observation']
    goal_space = space_dict['desired_goal']
    obs_size = np.asarray(observation_space.shape).prod()
    goal_size = np.asarray(goal_space.shape).prod()
    action_space = sample_env.action_space
    action_size = np.asarray(action_space.shape).prod()

    q_func = q_functions.FCSAQFunction(
        obs_size + goal_size, action_size,
        n_hidden_channels=args.n_hidden_channels,
        n_hidden_layers=args.n_hidden_layers)
    pi = policy.FCDeterministicPolicy(
        obs_size + goal_size, action_size=action_size,
        n_hidden_channels=args.n_hidden_channels,
        n_hidden_layers=args.n_hidden_layers,
        min_action=action_space.low, max_action=action_space.high,
        bound_action=True)
    model = DDPGModel(q_func=q_func, policy=pi)
    opt_a = optimizers.Adam(alpha=args.actor_lr)
    opt_c = optimizers.Adam(alpha=args.critic_lr)
    opt_a.setup(model['policy'])
    opt_c.setup(model['q_function'])
    opt_a.add_hook(chainer.optimizer.GradientClipping(1.0), 'hook_a')
    opt_c.add_hook(chainer.optimizer.GradientClipping(1.0), 'hook_c')

    rbuf = replay_buffer.HindsightReplayBuffer(
        reward_function, 10 ** 6, future_k=4)

    def phi(dict_state):
        # Concatenate the raw observation and the desired goal into a single
        # float32 vector fed to both the actor and the critic.
        return np.concatenate(
            (dict_state['observation'].astype(np.float32, copy=False),
             dict_state['desired_goal'].astype(np.float32, copy=False)), 0)

    # Normalize observations based on their empirical mean and variance
    obs_normalizer = chainerrl.links.EmpiricalNormalization(
        obs_size + goal_size, clip_threshold=args.clip_threshold)

    explorer = HERExplorer(args.noise_std, args.epsilon, action_space)

    agent = DDPG(model, opt_a, opt_c, rbuf,
                 obs_normalizer=obs_normalizer,
                 gamma=args.gamma,
                 explorer=explorer,
                 replay_start_size=args.replay_start_size,
                 phi=phi,
                 target_update_method=args.target_update_method,
                 target_update_interval=args.target_update_interval,
                 update_interval=args.update_interval,
                 soft_update_tau=args.soft_update_tau,
                 n_times_update=args.n_update_times,
                 gpu=args.gpu,
                 minibatch_size=args.minibatch_size,
                 clip_critic_tgt=(-1.0 / (1.0 - args.gamma), 0.0))

    if len(args.load) > 0:
        agent.load(args.load)

    if args.demo:
        eval_stats = experiments.eval_performance(
            env=make_batch_env(test=True),
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_runs,
            max_episode_len=timestep_limit)
        print('n_runs: {} mean: {} median: {} stdev {}'.format(
            args.eval_n_runs, eval_stats['mean'], eval_stats['median'],
            eval_stats['stdev']))
    else:
        experiments.train_agent_batch_with_evaluation(
            agent=agent,
            env=make_batch_env(test=False),
            steps=args.steps,
            eval_env=make_batch_env(test=True),
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            outdir=args.outdir,
            max_episode_len=timestep_limit)
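# A minimal sketch of the 'future' goal-relabeling strategy behind
# HindsightReplayBuffer(reward_function, 10 ** 6, future_k=4) above
# (illustrative, not ChainerRL's exact implementation). Each stored transition
# is duplicated up to future_k times with its desired goal replaced by an
# achieved goal from a later step of the same episode, and its reward
# recomputed for the substituted goal. Transitions are assumed here to be
# dicts with 'state', 'action', 'reward', and 'next_state' keys, where states
# are the goal-conditioned dict observations consumed by phi() above.
import random

def relabel_episode(episode, reward_function, future_k=4):
    transitions = []
    for t, step in enumerate(episode):
        transitions.append(step)  # keep the original transition
        for _ in range(future_k):
            # Sample an achieved goal from the future of the same episode
            f = random.randrange(t, len(episode))
            new_goal = episode[f]['next_state']['achieved_goal']
            relabeled = dict(step)
            relabeled['state'] = dict(step['state'], desired_goal=new_goal)
            relabeled['next_state'] = dict(
                step['next_state'], desired_goal=new_goal)
            relabeled['reward'] = reward_function(
                step['next_state'], step['action'], new_goal)
            transitions.append(relabeled)
    return transitions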