def train_argmax_policy(
    load_dir: pyrado.PathLike,
    env_sim: MetaDomainRandWrapper,
    subrtn: Algorithm,
    num_restarts: int,
    num_samples: int,
    policy_param_init: to.Tensor = None,
    valuefcn_param_init: to.Tensor = None,
    subrtn_snapshot_mode: str = "best",
) -> Policy:
    """
    Train a policy based on the maximizer of the posterior mean.

    :param load_dir: directory to load from
    :param env_sim: simulation environment
    :param subrtn: algorithm which performs the policy / value-function optimization
    :param num_restarts: number of restarts for the optimization of the acquisition function
    :param num_samples: number of samples for the optimization of the acquisition function
    :param policy_param_init: initial policy parameter values for the subroutine, set `None` to be random
    :param valuefcn_param_init: initial value function parameter values for the subroutine, set `None` to be random
    :param subrtn_snapshot_mode: snapshot mode for saving during training of the subroutine
    :return: the final BayRn policy
    """
    # Load the required data
    cands = pyrado.load("candidates.pt", load_dir)
    cands_values = pyrado.load("candidates_values.pt", load_dir).unsqueeze(1)
    ddp_space = pyrado.load("ddp_space.pkl", load_dir)

    if cands.shape[0] > cands_values.shape[0]:
        print_cbt(
            f"There are {cands.shape[0]} candidates but only {cands_values.shape[0]} evaluations. Ignoring the "
            f"candidates without evaluation for computing the argmax.",
            "y",
        )
        cands = cands[: cands_values.shape[0], :]

    # Find the maximizer
    argmax_cand = BayRn.argmax_posterior_mean(cands, cands_values, ddp_space, num_restarts, num_samples)

    # Set the domain randomizer
    env_sim.adapt_randomizer(argmax_cand.numpy())

    # Reset the subroutine algorithm which includes resetting the exploration
    subrtn.reset()

    # Do a warm start
    subrtn.init_modules(warmstart=True, policy_param_init=policy_param_init, valuefcn_param_init=valuefcn_param_init)

    subrtn.train(snapshot_mode=subrtn_snapshot_mode, meta_info=dict(suffix="argmax"))
    return subrtn.policy
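
# Hedged usage sketch (not part of the original sources): how `train_argmax_policy` above could
# be called once a BayRn run has written `candidates.pt`, `candidates_values.pt`, and
# `ddp_space.pkl`. The experiment directory `ex_dir`, the wrapped simulation environment
# `env_sim`, and the subroutine `subrtn` are assumed to exist from that run; the acquisition
# settings (500 restarts, 1000 samples) are illustrative placeholders.
argmax_policy = train_argmax_policy(
    load_dir=ex_dir,          # assumption: directory of the finished BayRn experiment
    env_sim=env_sim,          # assumption: the MetaDomainRandWrapper used during training
    subrtn=subrtn,            # assumption: the policy-optimization subroutine, e.g. PPO or PoWER
    num_restarts=500,
    num_samples=1000,
    subrtn_snapshot_mode="best",
)
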
def train_argmax_policy(load_dir: str,
                        env_sim: MetaDomainRandWrapper,
                        subroutine: Algorithm,
                        num_restarts: int,
                        num_samples: int,
                        policy_param_init: to.Tensor = None,
                        valuefcn_param_init: to.Tensor = None) -> Policy:
    """
    Train a policy based on the maximizer of the posterior mean.

    :param load_dir: directory to load from
    :param env_sim: simulation environment
    :param subroutine: algorithm which performs the policy / value-function optimization
    :param num_restarts: number of restarts for the optimization of the acquisition function
    :param num_samples: number of samples for the optimization of the acquisition function
    :param policy_param_init: initial policy parameter values for the subroutine, set `None` to be random
    :param valuefcn_param_init: initial value function parameter values for the subroutine, set `None` to be random
    :return: the final BayRn policy
    """
    # Load the required data
    cands = to.load(osp.join(load_dir, 'candidates.pt'))
    cands_values = to.load(osp.join(load_dir, 'candidates_values.pt')).unsqueeze(1)
    bounds = to.load(osp.join(load_dir, 'bounds.pt'))
    uc_normalizer = UnitCubeProjector(bounds[0, :], bounds[1, :])

    # Find the maximizer
    argmax_cand = BayRn.argmax_posterior_mean(cands, cands_values, uc_normalizer, num_restarts, num_samples)

    # Set the domain randomizer given the hyper-parameters
    env_sim.adapt_randomizer(argmax_cand.numpy())

    # Reset the subroutine's algorithm which includes resetting the exploration
    subroutine.reset()

    # Reset the subroutine's policy (and value function)
    subroutine.policy.init_param(policy_param_init)
    if isinstance(subroutine, ActorCritic):
        subroutine.critic.value_fcn.init_param(valuefcn_param_init)
    if policy_param_init is None:
        print_cbt('Learning the argmax solution from scratch', 'y')
    else:
        print_cbt('Learning the argmax solution given an initialization', 'y')

    subroutine.train(snapshot_mode='best')  # meta_info=dict(prefix='final')

    return subroutine.policy
def create_qqsu_setup():
    # Environments
    env_hparams = dict(dt=1 / 100.0, max_steps=600)
    env_real = QQubeSwingUpSim(**env_hparams)
    env_real.domain_param = dict(
        mass_rot_pole=0.095 * 0.9,  # 0.095*0.9 = 0.0855
        mass_pend_pole=0.024 * 1.1,  # 0.024*1.1 = 0.0264
        length_rot_pole=0.085 * 0.9,  # 0.085*0.9 = 0.0765
        length_pend_pole=0.129 * 1.1,  # 0.129*1.1 = 0.1419
    )

    env_sim = QQubeSwingUpSim(**env_hparams)
    randomizer = DomainRandomizer(
        NormalDomainParam(name="mass_rot_pole", mean=0.0, std=1e-9, clip_lo=1e-3),
        NormalDomainParam(name="mass_pend_pole", mean=0.0, std=1e-9, clip_lo=1e-3),
        NormalDomainParam(name="length_rot_pole", mean=0.0, std=1e-9, clip_lo=1e-3),
        NormalDomainParam(name="length_pend_pole", mean=0.0, std=1e-9, clip_lo=1e-3),
    )
    env_sim = DomainRandWrapperLive(env_sim, randomizer)
    dp_map = {
        0: ("mass_rot_pole", "mean"),
        1: ("mass_rot_pole", "std"),
        2: ("mass_pend_pole", "mean"),
        3: ("mass_pend_pole", "std"),
        4: ("length_rot_pole", "mean"),
        5: ("length_rot_pole", "std"),
        6: ("length_pend_pole", "mean"),
        7: ("length_pend_pole", "std"),
    }
    # trafo_mask = [False, True, False, True, False, True, False, True]
    trafo_mask = [True] * 8
    env_sim = MetaDomainRandWrapper(env_sim, dp_map)

    # Policies (the behavioral policy needs to be deterministic)
    behavior_policy = QQubeSwingUpAndBalanceCtrl(env_sim.spec)
    prior = DomainRandomizer(
        NormalDomainParam(name="mass_rot_pole", mean=0.095, std=0.095 / 10),
        NormalDomainParam(name="mass_pend_pole", mean=0.024, std=0.024 / 10),
        NormalDomainParam(name="length_rot_pole", mean=0.085, std=0.085 / 10),
        NormalDomainParam(name="length_pend_pole", mean=0.129, std=0.129 / 10),
    )
    ddp_policy = DomainDistrParamPolicy(mapping=dp_map, trafo_mask=trafo_mask, prior=prior, scale_params=False)

    return env_sim, env_real, env_hparams, dp_map, behavior_policy, ddp_policy
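
# Hedged sketch (an assumption, not taken from the original script): the tuple returned by
# `create_qqsu_setup` is shaped for a SimOpt-style pipeline, where `ddp_policy` parametrizes the
# domain distribution and `behavior_policy` generates the rollouts used for system identification
# (compare `test_simopt_cem_ppo` further below). `ex_dir` and the CEM settings are placeholders.
env_sim, env_real, env_hparams, dp_map, behavior_policy, ddp_policy = create_qqsu_setup()
subsubrtn_distr = CEM(ex_dir, env_sim, ddp_policy, max_iter=20, pop_size=200,
                      num_init_states_per_domain=1, num_is_samples=10, expl_std_init=1e-2, num_workers=4)
subrtn_distr = SysIdViaEpisodicRL(subsubrtn_distr, behavior_policy=behavior_policy, metric=None,
                                  obs_dim_weight=[1, 1, 1, 1, 10, 10], num_rollouts_per_distr=50, num_workers=4)
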
def test_bayrn_power(ex_dir, env: SimEnv, bayrn_hparam: dict):
    pyrado.set_seed(0)

    # Environments and domain randomization
    env_real = deepcopy(env)
    env_sim = DomainRandWrapperLive(env, create_zero_var_randomizer(env))
    dp_map = create_default_domain_param_map_qq()
    env_sim = MetaDomainRandWrapper(env_sim, dp_map)
    env_real.domain_param = dict(mass_pend_pole=0.024 * 1.1, mass_rot_pole=0.095 * 1.1)
    env_real = wrap_like_other_env(env_real, env_sim)

    # Policy and subroutine
    policy_hparam = dict(energy_gain=0.587, ref_energy=0.827)
    policy = QQubeSwingUpAndBalanceCtrl(env_sim.spec, **policy_hparam)
    subrtn_hparam = dict(
        max_iter=1,
        pop_size=8,
        num_init_states_per_domain=1,
        num_is_samples=4,
        expl_std_init=0.1,
        num_workers=1,
    )
    subrtn = PoWER(ex_dir, env_sim, policy, **subrtn_hparam)

    # Set the boundaries for the GP
    dp_nom = inner_env(env_sim).get_nominal_domain_param()
    ddp_space = BoxSpace(
        bound_lo=np.array([0.8 * dp_nom["mass_pend_pole"], 1e-8, 0.8 * dp_nom["mass_rot_pole"], 1e-8]),
        bound_up=np.array([1.2 * dp_nom["mass_pend_pole"], 1e-7, 1.2 * dp_nom["mass_rot_pole"], 1e-7]),
    )

    # Create algorithm and train
    algo = BayRn(ex_dir, env_sim, env_real, subrtn, ddp_space, **bayrn_hparam, num_workers=1)
    algo.train()

    assert algo.curr_iter == algo.max_iter or algo.stopping_criterion_met()
def create_bob_setup():
    # Environments
    env_hparams = dict(dt=1 / 100., max_steps=500)
    env_real = BallOnBeamSim(**env_hparams)
    env_real.domain_param = dict(
        # l_beam=1.95,
        # ang_offset=-0.03,
        g=10.81
    )

    env_sim = BallOnBeamSim(**env_hparams)
    randomizer = DomainRandomizer(
        # NormalDomainParam(name='l_beam', mean=0, std=1e-12, clip_lo=1.5, clip_up=3.5),
        # UniformDomainParam(name='ang_offset', mean=0, halfspan=1e-12),
        NormalDomainParam(name='g', mean=0, std=1e-12),
    )
    env_sim = DomainRandWrapperLive(env_sim, randomizer)
    dp_map = {
        # 0: ('l_beam', 'mean'), 1: ('l_beam', 'std'),
        # 2: ('ang_offset', 'mean'), 3: ('ang_offset', 'halfspan')
        0: ('g', 'mean'), 1: ('g', 'std')
    }
    env_sim = MetaDomainRandWrapper(env_sim, dp_map)

    # Policies (the behavioral policy needs to be deterministic)
    behavior_policy = LinearPolicy(env_sim.spec, feats=FeatureStack([identity_feat, sin_feat]))
    behavior_policy.param_values = to.tensor([3.8090, -3.8036, -1.0786, -2.4510, -0.9875, -1.3252, 3.1503, 1.4443])
    prior = DomainRandomizer(
        # NormalDomainParam(name='l_beam', mean=2.05, std=2.05/10),
        # UniformDomainParam(name='ang_offset', mean=0.03, halfspan=0.03/10),
        NormalDomainParam(name='g', mean=8.81, std=8.81 / 10),
    )
    # trafo_mask = [False, True, False, True]
    trafo_mask = [True, True]
    ddp_policy = DomainDistrParamPolicy(mapping=dp_map, trafo_mask=trafo_mask, prior=prior, scale_params=True)

    return env_sim, env_real, env_hparams, dp_map, behavior_policy, ddp_policy
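
# Hedged sanity check (an assumption, not part of the original script): roll out the fixed
# behavioral policy once in the nominal simulation and once in the perturbed "real" environment
# to confirm that the gravity offset (g=10.81 vs. the simulator's nominal value) changes the return.
env_sim, env_real, _, _, behavior_policy, _ = create_bob_setup()
ro_sim = rollout(env_sim, behavior_policy, eval=True)
ro_real = rollout(env_real, behavior_policy, eval=True)
print_cbt(f"return sim: {ro_sim.undiscounted_return():.3f} | return real: {ro_real.undiscounted_return():.3f}", "y")
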
        std=1e6, clip_lo=1e-3),
    )
    env_sim = DomainRandWrapperLive(env_sim, randomizer)
    dp_map = {
        0: ("mass_rot_pole", "mean"),
        1: ("mass_rot_pole", "std"),
        2: ("mass_pend_pole", "mean"),
        3: ("mass_pend_pole", "std"),
        4: ("length_rot_pole", "mean"),
        5: ("length_rot_pole", "std"),
        6: ("length_pend_pole", "mean"),
        7: ("length_pend_pole", "std"),
    }
    trafo_mask = [False, True, False, True, False, True, False, True]
    env_sim = MetaDomainRandWrapper(env_sim, dp_map)

    # Subroutine for policy improvement
    behav_policy_hparam = dict(hidden_sizes=[64, 64], hidden_nonlin=to.tanh)
    behav_policy = FNNPolicy(spec=env_sim.spec, **behav_policy_hparam)
    vfcn_hparam = dict(hidden_sizes=[32, 32], hidden_nonlin=to.relu)
    vfcn = FNNPolicy(spec=EnvSpec(env_sim.obs_space, ValueFunctionSpace), **vfcn_hparam)
    critic_hparam = dict(
        gamma=0.9844224855479998,
        lamda=0.9700148505302241,
        num_epoch=5,
        batch_size=500,
        standardize_adv=False,
        lr=7.058326426522811e-4,
        max_grad_norm=6.0,
def test_simopt_cem_ppo(ex_dir, env: SimEnv):
    pyrado.set_seed(0)

    # Environments
    env_real = deepcopy(env)
    env_real = ActNormWrapper(env_real)
    env_sim = ActNormWrapper(env)
    randomizer = DomainRandomizer(
        NormalDomainParam(name="mass_rot_pole", mean=0.0, std=1e6, clip_lo=1e-3),
        NormalDomainParam(name="mass_pend_pole", mean=0.0, std=1e6, clip_lo=1e-3),
        NormalDomainParam(name="length_rot_pole", mean=0.0, std=1e6, clip_lo=1e-3),
        NormalDomainParam(name="length_pend_pole", mean=0.0, std=1e6, clip_lo=1e-3),
    )
    env_sim = DomainRandWrapperLive(env_sim, randomizer)
    dp_map = {
        0: ("mass_rot_pole", "mean"),
        1: ("mass_rot_pole", "std"),
        2: ("mass_pend_pole", "mean"),
        3: ("mass_pend_pole", "std"),
        4: ("length_rot_pole", "mean"),
        5: ("length_rot_pole", "std"),
        6: ("length_pend_pole", "mean"),
        7: ("length_pend_pole", "std"),
    }
    trafo_mask = [True] * 8
    env_sim = MetaDomainRandWrapper(env_sim, dp_map)

    # Subroutine for policy improvement
    behav_policy_hparam = dict(hidden_sizes=[16, 16], hidden_nonlin=to.tanh)
    behav_policy = FNNPolicy(spec=env_sim.spec, **behav_policy_hparam)
    vfcn_hparam = dict(hidden_sizes=[16, 16], hidden_nonlin=to.relu)
    vfcn = FNNPolicy(spec=EnvSpec(env_sim.obs_space, ValueFunctionSpace), **vfcn_hparam)
    critic_hparam = dict(
        gamma=0.99,
        lamda=0.98,
        num_epoch=2,
        batch_size=128,
        standardize_adv=True,
        lr=8e-4,
        max_grad_norm=5.0,
    )
    critic = GAE(vfcn, **critic_hparam)
    subrtn_policy_hparam = dict(
        max_iter=2,
        eps_clip=0.13,
        min_steps=4 * env_sim.max_steps,
        num_epoch=3,
        batch_size=128,
        std_init=0.75,
        lr=3e-04,
        max_grad_norm=1.0,
        num_workers=1,
    )
    subrtn_policy = PPO(ex_dir, env_sim, behav_policy, critic, **subrtn_policy_hparam)

    prior = DomainRandomizer(
        NormalDomainParam(name="mass_rot_pole", mean=0.095, std=0.095 / 10),
        NormalDomainParam(name="mass_pend_pole", mean=0.024, std=0.024 / 10),
        NormalDomainParam(name="length_rot_pole", mean=0.085, std=0.085 / 10),
        NormalDomainParam(name="length_pend_pole", mean=0.129, std=0.129 / 10),
    )
    ddp_policy_hparam = dict(mapping=dp_map, trafo_mask=trafo_mask, scale_params=True)
    ddp_policy = DomainDistrParamPolicy(prior=prior, **ddp_policy_hparam)
    subsubrtn_distr_hparam = dict(
        max_iter=2,
        pop_size=10,
        num_init_states_per_domain=1,
        num_is_samples=8,
        expl_std_init=1e-2,
        expl_std_min=1e-5,
        extra_expl_std_init=1e-2,
        extra_expl_decay_iter=5,
        num_workers=1,
    )
    subsubrtn_distr = CEM(ex_dir, env_sim, ddp_policy, **subsubrtn_distr_hparam)
    subrtn_distr_hparam = dict(
        metric=None,
        obs_dim_weight=[1, 1, 1, 1, 10, 10],
        num_rollouts_per_distr=3,
        num_workers=1,
    )
    subrtn_distr = SysIdViaEpisodicRL(subsubrtn_distr, behavior_policy=behav_policy, **subrtn_distr_hparam)

    # Algorithm
    algo_hparam = dict(
        max_iter=1,
        num_eval_rollouts=5,
        warmstart=True,
    )
    algo = SimOpt(ex_dir, env_sim, env_real, subrtn_policy, subrtn_distr, **algo_hparam)
    algo.train()

    assert algo.curr_iter == algo.max_iter
def test_sysidasrl_reps(ex_dir, env: SimEnv, num_eval_rollouts: int):
    pyrado.set_seed(0)

    def eval_ddp_policy(rollouts_real):
        init_states_real = np.array([ro.states[0, :] for ro in rollouts_real])
        rollouts_sim = []
        for i, _ in enumerate(range(num_eval_rollouts)):
            rollouts_sim.append(
                rollout(env_sim, behavior_policy, eval=True, reset_kwargs=dict(init_state=init_states_real[i, :]))
            )

        # Clip the rollouts, yielding two lists of pairwise equally long rollouts
        ros_real_tr, ros_sim_tr = algo.truncate_rollouts(rollouts_real, rollouts_sim, replicate=False)
        assert len(ros_real_tr) == len(ros_sim_tr)
        assert all(
            [np.allclose(r.states[0, :], s.states[0, :]) for r, s in zip(ros_real_tr, ros_sim_tr)]
        )

        # Return the average loss
        losses = [algo.loss_fcn(ro_r, ro_s) for ro_r, ro_s in zip(ros_real_tr, ros_sim_tr)]
        return float(np.mean(np.asarray(losses)))

    # Environments
    env_real = deepcopy(env)
    env_real.domain_param = dict(ang_offset=-2 * np.pi / 180)

    env_sim = deepcopy(env)
    randomizer = DomainRandomizer(
        UniformDomainParam(name="ang_offset", mean=0, halfspan=1e-6),
    )
    env_sim = DomainRandWrapperLive(env_sim, randomizer)
    dp_map = {0: ("ang_offset", "mean"), 1: ("ang_offset", "halfspan")}
    env_sim = MetaDomainRandWrapper(env_sim, dp_map)

    assert env_real is not env_sim

    # Policies (the behavioral policy needs to be deterministic)
    behavior_policy = LinearPolicy(env_sim.spec, feats=FeatureStack(identity_feat))
    prior = DomainRandomizer(
        UniformDomainParam(name="ang_offset", mean=1 * np.pi / 180, halfspan=1 * np.pi / 180),
    )
    ddp_policy = DomainDistrParamPolicy(mapping=dp_map, trafo_mask=[False, True], prior=prior)

    # Subroutine
    subrtn_hparam = dict(
        max_iter=2,
        eps=1.0,
        pop_size=100,
        num_init_states_per_domain=1,
        expl_std_init=5e-2,
        expl_std_min=1e-4,
        num_workers=1,
    )
    subrtn = REPS(ex_dir, env_sim, ddp_policy, **subrtn_hparam)

    algo_hparam = dict(
        metric=None,
        obs_dim_weight=np.ones(env_sim.obs_space.shape),
        num_rollouts_per_distr=5,
        num_workers=1,
    )
    algo = SysIdViaEpisodicRL(subrtn, behavior_policy, **algo_hparam)

    rollouts_real_tst = []
    for _ in range(num_eval_rollouts):
        rollouts_real_tst.append(rollout(env_real, behavior_policy, eval=True))
    loss_pre = eval_ddp_policy(rollouts_real_tst)

    # Mimic training
    while algo.curr_iter < algo.max_iter and not algo.stopping_criterion_met():
        algo.logger.add_value(algo.iteration_key, algo.curr_iter)

        # Create fake real-world data
        rollouts_real = []
        for _ in range(num_eval_rollouts):
            rollouts_real.append(rollout(env_real, behavior_policy, eval=True))

        algo.step(snapshot_mode="latest", meta_info=dict(rollouts_real=rollouts_real))
        algo.logger.record_step()
        algo._curr_iter += 1

    loss_post = eval_ddp_policy(rollouts_real_tst)
    assert loss_post <= loss_pre  # don't have to be better every step
def train_and_eval(trial: optuna.Trial, study_dir: str, seed: int):
    """
    Objective function for the Optuna `Study` to maximize.

    .. note::
        Optuna expects only the `trial` argument, thus we use `functools.partial` to sneak in custom arguments.

    :param trial: Optuna Trial object for hyper-parameter optimization
    :param study_dir: the parent directory for all trials in this study
    :param seed: seed value for the random number generators, pass `None` for no seeding
    :return: objective function value
    """
    # Synchronize seeds between Optuna trials
    pyrado.set_seed(seed)

    # Environments
    env_hparams = dict(dt=1 / 100., max_steps=600)
    env_real = QQubeSwingUpSim(**env_hparams)
    env_real.domain_param = dict(
        Mr=0.095 * 0.9,  # 0.095*0.9 = 0.0855
        Mp=0.024 * 1.1,  # 0.024*1.1 = 0.0264
        Lr=0.085 * 0.9,  # 0.085*0.9 = 0.0765
        Lp=0.129 * 1.1,  # 0.129*1.1 = 0.1419
    )

    env_sim = QQubeSwingUpSim(**env_hparams)
    randomizer = DomainRandomizer(
        NormalDomainParam(name='Mr', mean=0., std=1e6, clip_lo=1e-3),
        NormalDomainParam(name='Mp', mean=0., std=1e6, clip_lo=1e-3),
        NormalDomainParam(name='Lr', mean=0., std=1e6, clip_lo=1e-3),
        NormalDomainParam(name='Lp', mean=0., std=1e6, clip_lo=1e-3),
    )
    env_sim = DomainRandWrapperLive(env_sim, randomizer)
    dp_map = {
        0: ('Mr', 'mean'), 1: ('Mr', 'std'),
        2: ('Mp', 'mean'), 3: ('Mp', 'std'),
        4: ('Lr', 'mean'), 5: ('Lr', 'std'),
        6: ('Lp', 'mean'), 7: ('Lp', 'std')
    }
    trafo_mask = [True] * 8
    env_sim = MetaDomainRandWrapper(env_sim, dp_map)

    # Subroutine for policy improvement
    behav_policy_hparam = dict(hidden_sizes=[64, 64], hidden_nonlin=to.tanh)
    behav_policy = FNNPolicy(spec=env_sim.spec, **behav_policy_hparam)
    vfcn_hparam = dict(hidden_sizes=[64, 64], hidden_nonlin=to.tanh)
    vfcn = FNNPolicy(spec=EnvSpec(env_sim.obs_space, ValueFunctionSpace), **vfcn_hparam)
    critic_hparam = dict(
        gamma=0.9885,
        lamda=0.9648,
        num_epoch=2,
        batch_size=500,
        standardize_adv=False,
        lr=5.792e-4,
        max_grad_norm=1.,
    )
    critic = GAE(vfcn, **critic_hparam)
    subrtn_policy_hparam = dict(
        max_iter=200,
        min_steps=3 * 23 * env_sim.max_steps,
        num_epoch=7,
        eps_clip=0.0744,
        batch_size=500,
        std_init=0.9074,
        lr=3.446e-04,
        max_grad_norm=1.,
        num_workers=1,
    )
    subrtn_policy = PPO(study_dir, env_sim, behav_policy, critic, **subrtn_policy_hparam)

    # Subroutine for system identification
    prior_std_denom = trial.suggest_uniform('prior_std_denom', 5, 20)
    prior = DomainRandomizer(
        NormalDomainParam(name='Mr', mean=0.095, std=0.095 / prior_std_denom),
        NormalDomainParam(name='Mp', mean=0.024, std=0.024 / prior_std_denom),
        NormalDomainParam(name='Lr', mean=0.085, std=0.085 / prior_std_denom),
        NormalDomainParam(name='Lp', mean=0.129, std=0.129 / prior_std_denom),
    )
    ddp_policy = DomainDistrParamPolicy(
        mapping=dp_map,
        trafo_mask=trafo_mask,
        prior=prior,
        scale_params=trial.suggest_categorical('ddp_policy_scale_params', [True, False]),
    )
    subsubrtn_distr_hparam = dict(
        max_iter=trial.suggest_categorical('subsubrtn_distr_max_iter', [20]),
        pop_size=trial.suggest_int('pop_size', 50, 500),
        num_rollouts=1,
        num_is_samples=trial.suggest_int('num_is_samples', 5, 20),
        expl_std_init=trial.suggest_loguniform('expl_std_init', 1e-3, 1e-1),
        expl_std_min=trial.suggest_categorical('expl_std_min', [1e-4]),
        extra_expl_std_init=trial.suggest_loguniform('extra_expl_std_init', 1e-3, 1e-1),
        extra_expl_decay_iter=trial.suggest_int('extra_expl_decay_iter', 0, 10),
        num_workers=1,
    )
    csv_logger = create_csv_step_logger(osp.join(study_dir, f'trial_{trial.number}'))
    subsubrtn_distr = CEM(study_dir, env_sim, ddp_policy, **subsubrtn_distr_hparam, logger=csv_logger)
    obs_vel_weight = trial.suggest_loguniform('obs_vel_weight', 1, 100)
    subrtn_distr_hparam = dict(
        metric=None,
        obs_dim_weight=[1, 1, 1, 1, obs_vel_weight, obs_vel_weight],
        num_rollouts_per_distr=trial.suggest_int('num_rollouts_per_distr', 20, 100),
        num_workers=1,
    )
    subrtn_distr = SysIdViaEpisodicRL(subsubrtn_distr, behav_policy, **subrtn_distr_hparam)

    # Algorithm
    algo_hparam = dict(
        max_iter=trial.suggest_categorical('algo_max_iter', [10]),
        num_eval_rollouts=trial.suggest_categorical('algo_num_eval_rollouts', [5]),
        warmstart=trial.suggest_categorical('algo_warmstart', [True]),
        thold_succ_subrtn=trial.suggest_categorical('algo_thold_succ_subrtn', [50]),
        subrtn_snapshot_mode='latest',
    )
    algo = SimOpt(study_dir, env_sim, env_real, subrtn_policy, subrtn_distr, **algo_hparam, logger=csv_logger)

    # Jeeeha
    algo.train(seed=seed)

    # Evaluate
    min_rollouts = 1000
    sampler = ParallelRolloutSampler(
        env_real, algo.policy, num_workers=1, min_rollouts=min_rollouts
    )  # parallelize via optuna n_jobs
    ros = sampler.sample()
    mean_ret = sum([r.undiscounted_return() for r in ros]) / min_rollouts

    return mean_ret
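
# Hedged sketch of how the objective above might be handed to Optuna (the study name, the study
# directory, and the trial counts are placeholders, not taken from the original script). As the
# docstring notes, `functools.partial` binds `study_dir` and `seed` so that Optuna only passes in `trial`.
import functools

study_dir = "experiments/qq-su/simopt_hparam_search"  # hypothetical parent directory for all trials
study = optuna.create_study(study_name="simopt_qq-su", direction="maximize")
study.optimize(functools.partial(train_and_eval, study_dir=study_dir, seed=0), n_trials=100, n_jobs=1)
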