def setup_env(self):
    """Create and wrap the training environment.

    On repeated calls, reuses the existing normalization wrapper so that
    running observation statistics are preserved.
    """
    env_id = self.args.env
    if self.env_kwargs:
        env_id = auto_tune_env(env_id, self.env_kwargs)

    env = GymEnv(
        env_id,
        log_dir=os.path.join(self.args.load_path, "movie")
        if self.args.render
        else None,
        record_video=self.args.record,
    )
    env.env.seed(self.args.seed)

    if self.args.c2d:
        env = C2DEnv(env)

    if self.env is None:
        self.env = NormalizedEnv(env)
        if self.args.mirror is True:
            if hasattr(env.unwrapped, "mirror_sizes"):
                self.env.stats = SymmetricStats(
                    *env.unwrapped.mirror_sizes[:3], max_obs=4000)
            else:
                # environment does not expose mirroring info
                self.args.mirror = False
        elif self.args.mirror == "new":
            self.env = SymEnv(self.env)
    else:
        # don't want to override the normalization
        self.env.replace_wrapped_env(env)
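# Illustrative sketch of the idea behind NormalizedEnv (assumed semantics,
# not this repo's implementation): observations are whitened with running
# mean/variance statistics, which is why setup_env reuses the existing
# wrapper via replace_wrapped_env instead of constructing a fresh one.
import numpy as np

class RunningNorm:
    def __init__(self, shape, eps=1e-8):
        self.mean = np.zeros(shape)
        self.var = np.ones(shape)
        self.count = 0
        self.eps = eps

    def update(self, x):
        # exact streaming update of population mean and variance (Welford)
        self.count += 1
        delta = x - self.mean
        self.mean = self.mean + delta / self.count
        self.var = self.var + (delta * (x - self.mean) - self.var) / self.count

    def normalize(self, x):
        return (x - self.mean) / np.sqrt(self.var + self.eps)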
def test_continuous2discrete():
    continuous_env = GymEnv('Pendulum-v0', record_video=False)
    discrete_env = C2DEnv(continuous_env, n_bins=10)
    # Pendulum has a single action dimension, discretized into 10 bins
    assert np.all(discrete_env.action_space.nvec == np.array([10]))
    discrete_env.reset()
    # one bin index per action dimension, each in [0, n_bins)
    out = discrete_env.step([3])
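# Sketch of the continuous-to-discrete mapping the test above exercises
# (assumed behavior inferred from the test, not the library source): each
# Box dimension is split into n_bins evenly spaced values, and a discrete
# action selects one bin per dimension.
import numpy as np

def bin_to_continuous(indices, low, high, n_bins=10):
    """Map per-dimension bin indices back to continuous actions."""
    grids = [np.linspace(lo, hi, n_bins) for lo, hi in zip(low, high)]
    return np.array([grid[i] for grid, i in zip(grids, indices)])

# Pendulum-v0's single torque dimension spans [-2, 2], so bin 3 of 10
# maps to roughly -0.667:
# bin_to_continuous([3], low=[-2.0], high=[2.0])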
@classmethod
def setUpClass(cls):
    env = GymEnv('Pendulum-v0')
    cls.env = env
    random_pol = RandomPol(env.observation_space, env.action_space)
    sampler = EpiSampler(env, random_pol, num_parallel=1)
    epis = sampler.sample(random_pol, max_steps=32)
    traj = Traj()
    traj.add_epis(epis)
    traj.register_epis()
    cls.num_step = traj.num_step

    make_redis('localhost', '6379')
    cls.r = get_redis()

    # redis stores bytes, so the env and traj are pickled like everything else
    cls.r.set('env', cloudpickle.dumps(env))
    cls.r.set('traj', cloudpickle.dumps(traj))

    pol_net = PolNet(env.observation_space, env.action_space)
    gpol = GaussianPol(env.observation_space, env.action_space, pol_net)

    pol_net = PolNet(env.observation_space, env.action_space,
                     deterministic=True)
    dpol = DeterministicActionNoisePol(
        env.observation_space, env.action_space, pol_net)

    model_net = ModelNet(env.observation_space, env.action_space)
    mpcpol = MPCPol(env.observation_space, env.action_space,
                    model_net, rew_func)

    q_net = QNet(env.observation_space, env.action_space)
    qfunc = DeterministicSAVfunc(
        env.observation_space, env.action_space, q_net)
    aqpol = ArgmaxQfPol(env.observation_space, env.action_space, qfunc)

    v_net = VNet(env.observation_space)
    vfunc = DeterministicSVfunc(env.observation_space, v_net)

    cls.r.set('gpol', cloudpickle.dumps(gpol))
    cls.r.set('dpol', cloudpickle.dumps(dpol))
    cls.r.set('mpcpol', cloudpickle.dumps(mpcpol))
    cls.r.set('qfunc', cloudpickle.dumps(qfunc))
    cls.r.set('aqpol', cloudpickle.dumps(aqpol))
    cls.r.set('vfunc', cloudpickle.dumps(vfunc))

    c2d = C2DEnv(env)
    pol_net = PolNet(c2d.observation_space, c2d.action_space)
    # the multi-categorical policy must use the discretized env's spaces
    mcpol = MultiCategoricalPol(
        c2d.observation_space, c2d.action_space, pol_net)
    cls.r.set('mcpol', cloudpickle.dumps(mcpol))
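# Hypothetical worker-side counterpart of setUpClass: a remote sampler
# process would pull the pickled objects back out of Redis. The key names
# match the setup above; the helper itself is an assumption for illustration.
import cloudpickle

def load_from_redis(r, key):
    """Unpickle an object previously stored via cloudpickle.dumps."""
    payload = r.get(key)
    if payload is None:
        raise KeyError("nothing stored under key {!r}".format(key))
    return cloudpickle.loads(payload)

# r = get_redis()
# gpol = load_from_redis(r, 'gpol')
# traj = load_from_redis(r, 'traj')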
torch.manual_seed(args.seed)

# run on CPU when CUDA is disabled or when using TRPO
device_name = 'cpu' if args.cuda < 0 or args.rl_type == 'trpo' \
    else "cuda:{}".format(args.cuda)
device = torch.device(device_name)
set_device(device)

score_file = os.path.join(args.log, 'progress.csv')
logger.add_tabular_output(score_file)

env = GymEnv(args.env_name,
             log_dir=os.path.join(args.log, 'movie'),
             record_video=args.record)
env.env.seed(args.seed)
if args.c2d:
    env = C2DEnv(env)

observation_space = env.observation_space
action_space = env.action_space

pol_net = PolNet(observation_space, action_space)
if isinstance(action_space, gym.spaces.Box):
    pol = GaussianPol(observation_space, action_space, pol_net,
                      data_parallel=args.data_parallel)
elif isinstance(action_space, gym.spaces.Discrete):
    pol = CategoricalPol(observation_space, action_space, pol_net,
                         data_parallel=args.data_parallel)
elif isinstance(action_space, gym.spaces.MultiDiscrete):
    pol = MultiCategoricalPol(observation_space, action_space, pol_net,
                              data_parallel=args.data_parallel)
else:
    raise ValueError('Only Box, Discrete, and MultiDiscrete are supported')
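# A slightly more defensive variant of the device selection above (a sketch
# using the same args fields): it additionally falls back to CPU when CUDA
# is requested but not available on the machine.
import torch

def pick_device(cuda_index, rl_type=None):
    if cuda_index < 0 or rl_type == 'trpo' or not torch.cuda.is_available():
        return torch.device('cpu')
    return torch.device('cuda:{}'.format(cuda_index))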
def main(args):
    init_ray(args.num_cpus, args.num_gpus, args.ray_redis_address)

    if not os.path.exists(args.log):
        os.makedirs(args.log)
    if not os.path.exists(os.path.join(args.log, 'models')):
        os.mkdir(os.path.join(args.log, 'models'))

    score_file = os.path.join(args.log, 'progress.csv')
    logger.add_tabular_output(score_file)
    logger.add_tensorboard_output(args.log)
    with open(os.path.join(args.log, 'args.json'), 'w') as f:
        json.dump(vars(args), f)
    pprint(vars(args))

    # when doing the distributed training, disable video recordings
    env = GymEnv(args.env_name)
    env.env.seed(args.seed)
    if args.c2d:
        env = C2DEnv(env)

    observation_space = env.observation_space
    action_space = env.action_space

    pol_net = PolNet(observation_space, action_space)
    rnn = False
    # pol_net = PolNetLSTM(observation_space, action_space)
    # rnn = True
    if isinstance(action_space, gym.spaces.Box):
        pol = GaussianPol(observation_space, action_space, pol_net, rnn=rnn)
    elif isinstance(action_space, gym.spaces.Discrete):
        pol = CategoricalPol(observation_space, action_space, pol_net)
    elif isinstance(action_space, gym.spaces.MultiDiscrete):
        pol = MultiCategoricalPol(observation_space, action_space, pol_net)
    else:
        raise ValueError('Only Box, Discrete, and MultiDiscrete are supported')

    vf_net = VNet(observation_space)
    vf = DeterministicSVfunc(observation_space, vf_net)

    trainer = TrainManager(Trainer, args.num_trainer, args.master_address,
                           args=args, vf=vf, pol=pol)
    sampler = EpiSampler(env, pol, args.num_parallel, seed=args.seed)

    total_epi = 0
    total_step = 0
    max_rew = -1e6
    start_time = time.time()

    while args.max_epis > total_epi:
        with measure('sample'):
            sampler.set_pol_state(trainer.get_state("pol"))
            epis = sampler.sample(max_steps=args.max_steps_per_iter)

        with measure('train'):
            result_dict = trainer.train(epis=epis)

        step = result_dict["traj_num_step"]
        total_step += step
        total_epi += result_dict["traj_num_epi"]
        rewards = [np.sum(epi['rews']) for epi in epis]
        mean_rew = np.mean(rewards)
        elapsed_time = time.time() - start_time
        logger.record_tabular('ElapsedTime', elapsed_time)
        logger.record_results(args.log, result_dict, score_file,
                              total_epi, step, total_step,
                              rewards,
                              plot_title=args.env_name)

        with measure('save'):
            pol_state = trainer.get_state("pol")
            vf_state = trainer.get_state("vf")
            optim_pol_state = trainer.get_state("optim_pol")
            optim_vf_state = trainer.get_state("optim_vf")

            torch.save(pol_state,
                       os.path.join(args.log, 'models', 'pol_last.pkl'))
            torch.save(vf_state,
                       os.path.join(args.log, 'models', 'vf_last.pkl'))
            torch.save(optim_pol_state,
                       os.path.join(args.log, 'models', 'optim_pol_last.pkl'))
            torch.save(optim_vf_state,
                       os.path.join(args.log, 'models', 'optim_vf_last.pkl'))

            if mean_rew > max_rew:
                torch.save(pol_state,
                           os.path.join(args.log, 'models', 'pol_max.pkl'))
                torch.save(vf_state,
                           os.path.join(args.log, 'models', 'vf_max.pkl'))
                torch.save(optim_pol_state,
                           os.path.join(args.log, 'models', 'optim_pol_max.pkl'))
                torch.save(optim_vf_state,
                           os.path.join(args.log, 'models', 'optim_vf_max.pkl'))
                max_rew = mean_rew

    del sampler
    del trainer
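# Hypothetical entry point for main() above. The flag names mirror the
# attributes the function actually reads; the repo's real argparse block
# may use different names and defaults.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_cpus', type=int, default=4)
    parser.add_argument('--num_gpus', type=int, default=0)
    parser.add_argument('--ray_redis_address', type=str, default=None)
    parser.add_argument('--log', type=str, default='log')
    parser.add_argument('--env_name', type=str, default='Pendulum-v0')
    parser.add_argument('--seed', type=int, default=256)
    parser.add_argument('--c2d', action='store_true')
    parser.add_argument('--num_trainer', type=int, default=1)
    parser.add_argument('--master_address', type=str, default=None)
    parser.add_argument('--num_parallel', type=int, default=4)
    parser.add_argument('--max_epis', type=int, default=1000000)
    parser.add_argument('--max_steps_per_iter', type=int, default=10000)
    main(parser.parse_args())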
np.random.seed(args.seed)
torch.manual_seed(args.seed)

device_name = 'cpu' if args.cuda < 0 else "cuda:{}".format(args.cuda)
device = torch.device(device_name)
set_device(device)

score_file = os.path.join(args.log, 'progress.csv')
logger.add_tabular_output(score_file)

# first task: standard humanoid locomotion
env1 = GymEnv('HumanoidBulletEnv-v0')
env1.original_env.seed(args.seed)
env1 = AcInObEnv(env1)   # presumably appends the last action to the observation
env1 = RewInObEnv(env1)  # presumably appends the last reward to the observation
env1 = C2DEnv(env1)

# second task: flag-run variant of the same humanoid
env2 = GymEnv('HumanoidFlagrunBulletEnv-v0')
env2.original_env.seed(args.seed)
env2 = AcInObEnv(env2)
env2 = RewInObEnv(env2)
env2 = C2DEnv(env2)

# both tasks must expose identical spaces so a single policy can act in both
assert env1.observation_space == env2.observation_space
assert env1.action_space.shape == env2.action_space.shape

ob_space = env1.observation_space
ac_space = env1.action_space

# the original call was truncated here; the hidden sizes are an assumption
pol_net = PolNetLSTM(ob_space, ac_space, h_size=256, cell_size=256)
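# Illustrative gym-style wrapper in the spirit of AcInObEnv (assumed
# semantics inferred from the name; the repo's actual wrapper may differ,
# and a complete version would also widen observation_space accordingly).
import gym
import numpy as np

class ActionInObWrapper(gym.Wrapper):
    """Appends the previous action to every observation."""

    def __init__(self, env):
        super().__init__(env)
        self._last_action = np.zeros(env.action_space.shape)

    def reset(self, **kwargs):
        self._last_action = np.zeros(self.env.action_space.shape)
        ob = self.env.reset(**kwargs)
        return np.concatenate([ob, self._last_action])

    def step(self, action):
        ob, rew, done, info = self.env.step(action)
        self._last_action = np.asarray(action, dtype=np.float64).ravel()
        return np.concatenate([ob, self._last_action]), rew, done, info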