def main():
  unused_sess = utils.make_tf_session(gpu_mode=False)

  env = envs.make_clfbandit_env(verbose=True)
  env.expert_policy = env.make_expert_policy()
  env.random_policy = utils.make_random_policy(env)
  trans_env = envs.make_clfbandit_trans_env(env)

  utils.run_ep(env.expert_policy, env)
  utils.run_ep(env.random_policy, env)
  utils.run_ep(trans_env.expert_policy, trans_env)
  utils.run_ep(trans_env.random_policy, trans_env)

  logging.info('OK')
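# A minimal sketch (an addition, not part of the original test) of how a
# rollout returned by utils.run_ep can be inspected. It assumes the convention
# used elsewhere in these tests: a rollout is a sequence of per-step tuples
# whose first element is the observation.
def summarize_rollout(rollout):
  """Returns the rollout length and its first observation."""
  first_obs = rollout[0][0]
  return len(rollout), first_obs


# Hypothetical usage:
#   n_steps, obs = summarize_rollout(utils.run_ep(env.random_policy, env))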
random_policy = utils.make_random_policy(env)

dynamics_model = MLPDynamicsModel(
    sess,
    env,
    n_layers=1,
    layer_size=32,
    scope=str(uuid.uuid4()),
    scope_file=os.path.join(utils.pointmass_data_dir, 'dyn_scope.pkl'),
    tf_file=os.path.join(utils.pointmass_data_dir, 'dyn.tf'))

n_unif_rollouts = 1000
unif_env = envs.make_pointmass_env()
unif_rollouts = [
    utils.run_ep(random_policy, unif_env, max_ep_len=1)
    for _ in range(n_unif_rollouts)
]

with open(os.path.join(utils.pointmass_data_dir, 'demo_rollouts.pkl'),
          'rb') as f:
  demo_rollouts = pickle.load(f)

offpol_eval_rollouts = unif_rollouts
demo_rollouts_for_reward_model = None
pref_logs_for_reward_model = None
sketch_rollouts_for_reward_model = demo_rollouts[:1]

reward_model = RewardModel(sess,
def main():
  sess = utils.make_tf_session(gpu_mode=False)

  env = envs.make_carracing_env(sess)
  trans_env = envs.make_carracing_trans_env(sess)
  random_policy = utils.make_random_policy(env)

  utils.run_ep(random_policy, env, max_ep_len=3, render=False)
  trans_rollout = utils.run_ep(
      random_policy, trans_env, max_ep_len=3, render=False)
  logging.info('envs and policies OK')

  raw_demo_rollouts = [
      utils.run_ep(random_policy, env, max_ep_len=3, render=False)
      for _ in range(n_demo_rollouts)
  ]
  raw_aug_rollouts = [
      utils.run_ep(random_policy, env, max_ep_len=3, render=False)
      for _ in range(n_aug_rollouts)
  ]
  raw_aug_rollouts += raw_demo_rollouts

  raw_aug_obses = []
  for rollout in raw_aug_rollouts:
    for x in rollout:
      raw_aug_obses.append(x[0])
  raw_aug_obses = np.array(raw_aug_obses)
  raw_aug_obs_data = utils.split_rollouts({'obses': raw_aug_obses})
  logging.info('data collection OK')

  encoder = VAEModel(
      sess,
      env,
      learning_rate=0.0001,
      kl_tolerance=0.5,
      scope=str(uuid.uuid4()),
      scope_file=os.path.join(test_data_dir, 'enc_scope.pkl'),
      tf_file=os.path.join(test_data_dir, 'enc.tf'))
  encoder.train(
      raw_aug_obs_data,
      iterations=1,
      ftol=1e-4,
      learning_rate=1e-3,
      val_update_freq=1,
      verbose=False)
  encoder = load_wm_pretrained_vae(sess, env)
  encoder.save()
  encoder.load()

  obs = raw_aug_rollouts[0][0][0]
  latent = encoder.encode_frame(obs)
  unused_recon = encoder.decode_latent(latent)
  logging.info('encoder OK')

  raw_aug_traj_data = utils.split_rollouts(
      utils.vectorize_rollouts(
          raw_aug_rollouts, env.max_ep_len, preserve_trajs=True))
  abs_model = AbsorptionModel(
      sess,
      env,
      n_layers=1,
      layer_size=32,
      scope=str(uuid.uuid4()),
      scope_file=os.path.join(test_data_dir, 'abs_scope.pkl'),
      tf_file=os.path.join(test_data_dir, 'abs.tf'))
  dynamics_model = MDNRNNDynamicsModel(
      encoder,
      sess,
      env,
      scope=str(uuid.uuid4()),
      tf_file=os.path.join(test_data_dir, 'dyn.tf'),
      scope_file=os.path.join(test_data_dir, 'dyn_scope.pkl'),
      abs_model=abs_model)
  dynamics_model.train(
      raw_aug_traj_data,
      iterations=1,
      learning_rate=1e-3,
      ftol=1e-4,
      batch_size=2,
      val_update_freq=1,
      verbose=False)
  dynamics_model = load_wm_pretrained_rnn(encoder, sess, env)
  dynamics_model.save()
  dynamics_model.load()

  demo_traj_data = utils.rnn_encode_rollouts(raw_demo_rollouts, env, encoder,
                                             dynamics_model)
  aug_traj_data = utils.rnn_encode_rollouts(raw_aug_rollouts, env, encoder,
                                            dynamics_model)
  demo_rollouts = utils.rollouts_of_traj_data(demo_traj_data)
  aug_rollouts = utils.rollouts_of_traj_data(aug_traj_data)
  demo_data = utils.split_rollouts(utils.flatten_traj_data(demo_traj_data))
  aug_data = utils.split_rollouts(utils.flatten_traj_data(aug_traj_data))
  env.default_init_obs = aug_rollouts[0][0][0]
  trans_rollouts = utils.rollouts_of_traj_data(
      utils.rnn_encode_rollouts([trans_rollout], trans_env, encoder,
                                dynamics_model))
  trans_env.default_init_obs = trans_rollouts[0][0][0]
  logging.info('mdnrnn dynamics OK')

  demo_data_for_reward_model = demo_data
  demo_rollouts_for_reward_model = demo_rollouts
  sketch_data_for_reward_model = aug_data
  sketch_rollouts_for_reward_model = aug_rollouts

  reward_init_kwargs = {
      'n_rew_nets_in_ensemble': 2,
      'n_layers': 1,
      'layer_size': 32,
      'scope': str(uuid.uuid4()),
      'scope_file': os.path.join(test_data_dir, 'true_rew_scope.pkl'),
      'tf_file': os.path.join(test_data_dir, 'true_rew.tf'),
      'rew_func_input': "s'",
      'use_discrete_rewards': True
  }
  reward_train_kwargs = {
      'demo_coeff': 1.,
      'sketch_coeff': 1.,
      'iterations': 1,
      'ftol': 1e-4,
      'batch_size': 2,
      'learning_rate': 1e-3,
      'val_update_freq': 1,
      'verbose': False
  }
  data = envs.make_carracing_rew(
      sess,
      env,
      sketch_data=sketch_data_for_reward_model,
      reward_init_kwargs=reward_init_kwargs,
      reward_train_kwargs=reward_train_kwargs)
  env.__dict__.update(data)
  trans_env.__dict__.update(data)

  autolabels = reward_models.autolabel_prefs(
      aug_rollouts, env, segment_len=env.max_ep_len + 1)
  pref_logs_for_reward_model = autolabels
  pref_data_for_reward_model = utils.split_prefs(autolabels)
  logging.info('autolabels OK')

  for rew_func_input in ['s', 'sa', "s'"]:
    reward_model = reward_models.RewardModel(
        sess,
        env,
        n_rew_nets_in_ensemble=2,
        n_layers=1,
        layer_size=32,
        scope=str(uuid.uuid4()),
        scope_file=os.path.join(test_data_dir, 'rew_scope.pkl'),
        tf_file=os.path.join(test_data_dir, 'rew.tf'),
        rew_func_input=rew_func_input,
        use_discrete_rewards=True)
    for demo_data in [None, demo_data_for_reward_model]:
      for sketch_data in [None, sketch_data_for_reward_model]:
        for pref_data in [None, pref_data_for_reward_model]:
          if pref_data is None and sketch_data is None:
            continue
          reward_model.train(
              demo_data=demo_data,
              sketch_data=sketch_data,
              pref_data=pref_data,
              demo_coeff=1.,
              sketch_coeff=1.,
              iterations=1,
              ftol=1e-4,
              batch_size=2,
              learning_rate=1e-3,
              val_update_freq=1,
              verbose=False)
  reward_model.save()
  reward_model.load()
  logging.info('reward models OK')

  for query_loss_opt in [
      'pref_uncertainty', 'rew_uncertainty', 'max_rew', 'min_rew', 'max_nov'
  ]:
    for init_obs in [None, env.default_init_obs]:
      for join_trajs_at_init_state in [True, False]:
        for shoot_steps in [1, 2]:
          if (shoot_steps > 1 and
              np.array(init_obs == env.default_init_obs).all()):
            continue
          traj_opt = GDTrajOptimizer(
              sess,
              env,
              reward_model,
              dynamics_model,
              traj_len=2,
              n_trajs=2,
              prior_coeff=1.,
              diversity_coeff=0.,
              query_loss_opt=query_loss_opt,
              opt_init_obs=(init_obs is None),
              join_trajs_at_init_state=join_trajs_at_init_state,
              shoot_steps=shoot_steps,
              learning_rate=1e-2)
          traj_opt.run(
              init_obs=init_obs,
              iterations=1,
              ftol=1e-4,
              verbose=False,
          )
  logging.info('grad descent traj opt OK')

  imitation_kwargs = {'plan_horizon': 10, 'n_blind_steps': 2, 'test_mode': True}
  for n_eval_rollouts in [0, 1]:
    reward_models.evaluate_reward_model(
        sess,
        env,
        trans_env,
        reward_model,
        dynamics_model,
        offpol_eval_rollouts=sketch_rollouts_for_reward_model,
        n_eval_rollouts=n_eval_rollouts,
        imitation_kwargs=imitation_kwargs)
  logging.info('reward eval OK')

  for query_loss_opt in [
      'pref_uncertainty', 'rew_uncertainty', 'max_rew', 'min_rew', 'max_nov',
      'unif'
  ]:
    for use_rand_policy in [False, True]:
      traj_opt = StochTrajOptimizer(
          sess,
          env,
          reward_model,
          dynamics_model,
          traj_len=2,
          rollout_len=2,
          query_loss_opt=query_loss_opt,
          use_rand_policy=use_rand_policy)
      for init_obs in [None, env.default_init_obs]:
        traj_opt.run(n_trajs=2, n_samples=2, init_obs=init_obs, verbose=False)
  logging.info('stoch traj opt OK')

  reward_model = reward_models.RewardModel(
      sess,
      env,
      n_rew_nets_in_ensemble=2,
      n_layers=1,
      layer_size=32,
      scope=str(uuid.uuid4()),
      scope_file=os.path.join(test_data_dir, 'rew_scope.pkl'),
      tf_file=os.path.join(test_data_dir, 'rew.tf'),
      rew_func_input="s'",
      use_discrete_rewards=True)
  rew_optimizer = InteractiveRewardOptimizer(sess, env, trans_env,
                                             reward_model, dynamics_model)

  reward_train_kwargs = {
      'demo_coeff': 1.,
      'sketch_coeff': 1.,
      'iterations': 1,
      'ftol': 1e-4,
      'batch_size': 2,
      'learning_rate': 1e-3,
      'val_update_freq': 1,
      'verbose': False
  }
  dynamics_train_kwargs = {
      'iterations': 1,
      'batch_size': 2,
      'learning_rate': 1e-3,
      'ftol': 1e-4,
      'val_update_freq': 1,
      'verbose': False
  }
  gd_traj_opt_init_kwargs = {
      'traj_len': env.max_ep_len,
      'n_trajs': 2,
      'prior_coeff': 1.,
      'diversity_coeff': 1.,
      'query_loss_opt': 'pref_uncertainty',
      'opt_init_obs': False,
      'learning_rate': 1e-2,
      'join_trajs_at_init_state': False
  }
  gd_traj_opt_run_kwargs = {
      'init_obs': env.default_init_obs,
      'iterations': 1,
      'ftol': 1e-4,
      'verbose': False,
  }
  unused_stoch_traj_opt_init_kwargs = {
      'traj_len': 2,
      'rollout_len': 2,
      'query_loss_opt': 'pref_uncertainty'
  }
  unused_stoch_traj_opt_run_kwargs = {
      'n_samples': 2,
      'init_obs': None,
      'verbose': False
  }
  eval_kwargs = {'n_eval_rollouts': 1}

  for init_train in [True, False]:
    for query_type in ['pref', 'sketch']:
      rew_optimizer.run(
          demo_rollouts=demo_rollouts_for_reward_model,
          sketch_rollouts=sketch_rollouts_for_reward_model,
          pref_logs=pref_logs_for_reward_model,
          rollouts_for_dyn=raw_aug_rollouts,
          reward_train_kwargs=reward_train_kwargs,
          dynamics_train_kwargs=dynamics_train_kwargs,
          traj_opt_cls=GDTrajOptimizer,
          traj_opt_run_kwargs=gd_traj_opt_run_kwargs,
          traj_opt_init_kwargs=gd_traj_opt_init_kwargs,
          imitation_kwargs=imitation_kwargs,
          eval_kwargs=eval_kwargs,
          init_train_dyn=init_train,
          init_train_rew=init_train,
          n_imitation_rollouts_per_dyn_update=1,
          n_queries=1,
          reward_update_freq=1,
          reward_eval_freq=1,
          dyn_update_freq=1,
          verbose=False,
          query_type=query_type)
  rew_optimizer.save()
  rew_optimizer.load()
  logging.info('rqst OK')
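# A small helper (an addition, not part of the original test) that factors out
# the scope/tf checkpoint-path pattern repeated by every model constructor
# above; `test_data_dir` is the same directory the test already uses.
def make_ckpt_paths(name, data_dir=test_data_dir):
  """Returns (scope_file, tf_file) paths for a model named `name`."""
  return (os.path.join(data_dir, '%s_scope.pkl' % name),
          os.path.join(data_dir, '%s.tf' % name))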
def main():
  sess = utils.make_tf_session(gpu_mode=False)

  env = envs.make_pointmass_env()
  trans_env = envs.make_pointmass_trans_env(env)
  expert_policy = env.make_expert_policy()
  random_policy = utils.make_random_policy(env)
  default_init_obs = env.default_init_obs

  utils.run_ep(expert_policy, env)
  utils.run_ep(random_policy, env)
  utils.run_ep(expert_policy, trans_env)
  utils.run_ep(random_policy, trans_env)
  logging.info('envs and policies OK')

  demo_rollouts = [
      utils.run_ep(expert_policy, env) for _ in range(n_demo_rollouts)
  ]
  aug_rollouts = demo_rollouts + [
      utils.run_ep(random_policy, env) for _ in range(n_aug_rollouts)
  ]
  demo_data = utils.split_rollouts(
      utils.vectorize_rollouts(demo_rollouts, env.max_ep_len))
  aug_data = utils.split_rollouts(
      utils.vectorize_rollouts(aug_rollouts, env.max_ep_len))
  unused_demo_traj_data = utils.split_rollouts(
      utils.vectorize_rollouts(
          demo_rollouts, env.max_ep_len, preserve_trajs=True))
  unused_aug_traj_data = utils.split_rollouts(
      utils.vectorize_rollouts(
          aug_rollouts, env.max_ep_len, preserve_trajs=True))
  logging.info('data collection OK')

  abs_model = AbsorptionModel(
      sess,
      env,
      n_layers=1,
      layer_size=32,
      scope=str(uuid.uuid4()),
      scope_file=os.path.join(test_data_dir, 'abs_scope.pkl'),
      tf_file=os.path.join(test_data_dir, 'abs.tf'))
  dynamics_model = MLPDynamicsModel(
      sess,
      env,
      n_layers=1,
      layer_size=32,
      scope=str(uuid.uuid4()),
      scope_file=os.path.join(test_data_dir, 'dyn_scope.pkl'),
      tf_file=os.path.join(test_data_dir, 'dyn.tf'),
      abs_model=abs_model)
  dynamics_model.train(
      aug_data,
      iterations=1,
      ftol=1e-4,
      learning_rate=1e-3,
      batch_size=4,
      val_update_freq=1,
      verbose=False)
  dynamics_model.save()
  dynamics_model.load()
  logging.info('dynamics model OK')

  demo_data_for_reward_model = demo_data
  demo_rollouts_for_reward_model = demo_rollouts
  sketch_data_for_reward_model = aug_data
  sketch_rollouts_for_reward_model = aug_rollouts

  autolabels = reward_models.autolabel_prefs(
      sketch_rollouts_for_reward_model, env, segment_len=env.max_ep_len + 1)
  pref_logs_for_reward_model = autolabels
  pref_data_for_reward_model = utils.split_prefs(autolabels)
  logging.info('autolabels OK')

  for rew_func_input in ['sa', 's', "s'"]:
    reward_model = reward_models.RewardModel(
        sess,
        env,
        n_rew_nets_in_ensemble=4,
        n_layers=1,
        layer_size=64,
        scope=str(uuid.uuid4()),
        scope_file=os.path.join(test_data_dir, 'rew_scope.pkl'),
        tf_file=os.path.join(test_data_dir, 'rew.tf'),
        rew_func_input=rew_func_input,
        use_discrete_rewards=True)
    for demo_data in [None, demo_data_for_reward_model]:
      for sketch_data in [None, sketch_data_for_reward_model]:
        for pref_data in [None, pref_data_for_reward_model]:
          if pref_data is None and sketch_data is None:
            continue
          reward_model.train(
              demo_data=demo_data,
              sketch_data=sketch_data,
              pref_data=pref_data,
              demo_coeff=1.,
              sketch_coeff=1.,
              iterations=1,
              ftol=1e-4,
              batch_size=4,
              learning_rate=1e-3,
              val_update_freq=1,
              verbose=False)
  reward_model.save()
  reward_model.load()
  logging.info('reward models OK')

  for query_loss_opt in [
      'pref_uncertainty', 'rew_uncertainty', 'max_rew', 'min_rew', 'max_nov'
  ]:
    for init_obs in [None, default_init_obs]:
      for join_trajs_at_init_state in [True, False]:
        for query_type in ['pref', 'demo', 'sketch']:
          if query_type == 'pref' and query_loss_opt == 'max_nov':
            continue
          for shoot_steps in [1, 2]:
            traj_optimizer = GDTrajOptimizer(
                sess,
                env,
                reward_model,
                dynamics_model,
                traj_len=2,
                n_trajs=2,
                prior_coeff=1.,
                diversity_coeff=0.,
                query_loss_opt=query_loss_opt,
                opt_init_obs=(init_obs is None),
                join_trajs_at_init_state=join_trajs_at_init_state,
                shoot_steps=shoot_steps,
                learning_rate=1e-2,
                query_type=query_type)
            traj_optimizer.run(
                init_obs=init_obs,
                iterations=1,
                ftol=1e-4,
                verbose=False,
            )
  logging.info('grad descent traj opt OK')

  imitation_kwargs = {
      'plan_horizon': 10,
      'n_blind_steps': 2,
      'test_mode': True
  }
  for n_eval_rollouts in [0, 1]:
    reward_models.evaluate_reward_model(
        sess,
        env,
        trans_env,
        reward_model,
        dynamics_model,
        offpol_eval_rollouts=sketch_rollouts_for_reward_model,
        n_eval_rollouts=n_eval_rollouts,
        imitation_kwargs=imitation_kwargs)
  logging.info('reward eval OK')

  for query_loss_opt in [
      'pref_uncertainty', 'rew_uncertainty', 'max_rew', 'min_rew', 'max_nov',
      'unif'
  ]:
    for use_rand_policy in [False, True]:
      traj_optimizer = StochTrajOptimizer(
          sess,
          env,
          reward_model,
          dynamics_model,
          traj_len=2,
          rollout_len=2,
          query_loss_opt=query_loss_opt,
          use_rand_policy=use_rand_policy)
      for init_obs in [None, default_init_obs]:
        traj_optimizer.run(
            n_trajs=2, n_samples=2, init_obs=init_obs, verbose=False)
  logging.info('stoch traj opt OK')

  reward_model = reward_models.RewardModel(
      sess,
      env,
      n_rew_nets_in_ensemble=4,
      n_layers=1,
      layer_size=64,
      scope=str(uuid.uuid4()),
      scope_file=os.path.join(test_data_dir, 'rew_scope.pkl'),
      tf_file=os.path.join(test_data_dir, 'rew.tf'),
      use_discrete_rewards=True)
  rew_optimizer = InteractiveRewardOptimizer(sess, env, trans_env,
                                             reward_model, dynamics_model)

  reward_train_kwargs = {
      'demo_coeff': 1.,
      'sketch_coeff': 1.,
      'iterations': 1,
      'ftol': 1e-4,
      'batch_size': 4,
      'learning_rate': 1e-3,
      'val_update_freq': 1,
      'verbose': False
  }
  dynamics_train_kwargs = {
      'iterations': 1,
      'batch_size': 4,
      'learning_rate': 1e-3,
      'ftol': 1e-4,
      'val_update_freq': 1,
      'verbose': False
  }
  gd_traj_opt_init_kwargs = {
      'traj_len': env.max_ep_len,
      'n_trajs': 2,
      'prior_coeff': 1.,
      'diversity_coeff': 1.,
      'query_loss_opt': 'pref_uncertainty',
      'opt_init_obs': False,
      'learning_rate': 1e-2,
      'join_trajs_at_init_state': False
  }
  gd_traj_opt_run_kwargs = {
      'init_obs': default_init_obs,
      'iterations': 1,
      'ftol': 1e-4,
      'verbose': False
  }
  unused_stoch_traj_opt_init_kwargs = {
      'traj_len': 2,
      'rollout_len': 2,
      'query_loss_opt': 'pref_uncertainty'
  }
  unused_stoch_traj_opt_run_kwargs = {
      'n_samples': 2,
      'init_obs': None,
      'verbose': False
  }
  eval_kwargs = {'n_eval_rollouts': 1}

  for init_train in [True, False]:
    for query_type in ['pref', 'sketch']:
      rew_optimizer.run(
          demo_rollouts=demo_rollouts_for_reward_model,
          sketch_rollouts=sketch_rollouts_for_reward_model,
          pref_logs=pref_logs_for_reward_model,
          rollouts_for_dyn=aug_rollouts,
          reward_train_kwargs=reward_train_kwargs,
          dynamics_train_kwargs=dynamics_train_kwargs,
          traj_opt_cls=GDTrajOptimizer,
          traj_opt_run_kwargs=gd_traj_opt_run_kwargs,
          traj_opt_init_kwargs=gd_traj_opt_init_kwargs,
          imitation_kwargs=imitation_kwargs,
          eval_kwargs=eval_kwargs,
          init_train_dyn=init_train,
          init_train_rew=init_train,
          n_imitation_rollouts_per_dyn_update=1,
          n_queries=1,
          reward_update_freq=1,
          reward_eval_freq=1,
          dyn_update_freq=1,
          verbose=False,
          query_type=query_type)
  rew_optimizer.save()
  rew_optimizer.load()
  logging.info('rqst OK')
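# A condensed sketch (an addition, not part of the original test) of the
# preference-learning path exercised above, using only calls that already
# appear in this file: autolabel segment pairs, split them into preference
# data, and fit the reward model on preferences alone.
def train_reward_from_prefs(env, rollouts, reward_model):
  """Autolabels preferences over rollout segments and trains on them."""
  autolabels = reward_models.autolabel_prefs(
      rollouts, env, segment_len=env.max_ep_len + 1)
  pref_data = utils.split_prefs(autolabels)
  reward_model.train(
      demo_data=None,
      sketch_data=None,
      pref_data=pref_data,
      demo_coeff=1.,
      sketch_coeff=1.,
      iterations=1,
      ftol=1e-4,
      batch_size=4,
      learning_rate=1e-3,
      val_update_freq=1,
      verbose=False)
  return pref_data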
  def run_ep(self, *args, **kwargs):
    # Delegate to utils.run_ep, preprocessing observations with the VAE's
    # encode_frame.
    return utils.run_ep(*args, **kwargs, proc_obs=self.encoder.encode_frame)
  def run_ep(self, *args, **kwargs):
    # Delegate to utils.run_ep without any observation preprocessing.
    return utils.run_ep(*args, **kwargs)
  def run(self,
          n_trajs=10,
          act_seq=None,
          n_samples=None,
          init_obs=None,
          verbose=False):
    """
    Args:
      n_samples: num rollouts to sample from env
      n_trajs: num trajs to return; n_trajs <= n_samples
    """
    if n_samples is None:
      n_samples = n_trajs

    if self.query_loss_opt == 'pref_uncertainty' and not (n_trajs == 2 and
                                                          n_samples >= 2):
      raise ValueError
    if n_trajs > n_samples:
      raise ValueError

    clfbandit_pref = (self.query_loss_opt == 'pref_uncertainty' and
                      self.env.name == 'clfbandit')

    if init_obs is None:
      # rollout in real world
      rollouts = [
          self.dynamics_model.run_ep(
              self.imitator,
              self.env,
              max_ep_len=self.rollout_len,
              store_raw_obs=False) for _ in range(n_samples)
      ]
      trajs = [utils.traj_of_rollout(rollout) for rollout in rollouts]
      act_seqs = [utils.act_seq_of_rollout(rollout) for rollout in rollouts]
      # for clfbandit preference queries, pair each sampled traj with every
      # possible class action
      if clfbandit_pref:
        n_classes = self.env.n_act_dim
        if act_seq is None:
          new_trajs = []
          act_seqs = []
          for i in range(n_classes):
            act_seq = utils.onehot_encode(i,
                                          n_classes)[np.newaxis, :] * utils.inf
            new_trajs.extend(trajs)
            act_seqs.extend([act_seq] * len(trajs))
          trajs = new_trajs
        else:
          act_seqs = [act_seq] * len(trajs)
    else:
      if self.env.name.endswith('bandit'):
        raise ValueError
      # rollout in dynamics model
      dream_env = utils.DreamEnv(self.env, self.dynamics_model)
      dream_rollouts = [
          utils.run_ep(self.imitator, dream_env, max_ep_len=self.rollout_len)
          for _ in range(n_samples)
      ]
      trajs = [utils.traj_of_rollout(rollout) for rollout in dream_rollouts]
      act_seqs = [
          utils.act_seq_of_rollout(rollout) for rollout in dream_rollouts
      ]

    trajs, act_seqs = utils.segment_trajs(
        trajs, act_seqs, seg_len=self.traj_len)

    if self.query_loss_opt != 'pref_uncertainty':
      # find best trajs
      idxes = [(i, self.query_loss(traj, act_seq))
               for i, (traj, act_seq) in enumerate(zip(trajs, act_seqs))]
      sorted_idxes = sorted(idxes, key=lambda x: x[1])
      best_trajs = [trajs[i] for i, _ in sorted_idxes[:n_trajs]]
      best_act_seqs = [act_seqs[i] for i, _ in sorted_idxes[:n_trajs]]
      best_loss = sum(x[1] for x in sorted_idxes[:n_trajs])
    else:
      # find best traj pair
      best_ref_idx = None
      best_idx = None
      best_loss = None
      for ref_idx, (ref_traj, ref_act_seq) in enumerate(zip(trajs, act_seqs)):
        for idx, (traj, act_seq) in enumerate(
            zip(trajs[ref_idx + 1:], act_seqs[ref_idx + 1:])):
          if clfbandit_pref and (ref_traj[0] != traj[0]).any():
            continue
          loss = self.query_loss([ref_traj, traj], [ref_act_seq, act_seq])
          if best_loss is None or loss < best_loss:
            best_loss = loss
            best_ref_idx = ref_idx
            best_idx = idx + ref_idx + 1
      best_trajs = [trajs[best_ref_idx], trajs[best_idx]]
      best_act_seqs = [act_seqs[best_ref_idx], act_seqs[best_idx]]

    return {
        'traj': best_trajs,
        'act_seq': best_act_seqs,
        'loss': best_loss
    }
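  # A minimal usage sketch (an addition, not part of the original class), with
  # arguments taken from the smoke tests; `sess`, `env`, `reward_model`, and
  # `dynamics_model` are assumed to already exist:
  #
  #   traj_opt = StochTrajOptimizer(
  #       sess, env, reward_model, dynamics_model,
  #       traj_len=2, rollout_len=2,
  #       query_loss_opt='pref_uncertainty', use_rand_policy=True)
  #   query = traj_opt.run(n_trajs=2, n_samples=2, init_obs=None, verbose=False)
  #
  # `query['traj']` and `query['act_seq']` hold the selected trajectories and
  # their action sequences, and `query['loss']` the corresponding query loss.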
  def _run(
      self,
      init_obs=None,
      act_seq=None,
      iterations=10000,
      ftol=1e-6,
      min_iters=2,
      verbose=False,
      warm_start=False,
      init_with_lbfgs=False,
      init_act_seq=None,
      init_traj=None,
  ):
    if (init_obs is not None) == self.opt_init_obs:
      raise ValueError
    if (act_seq is not None) == self.opt_act_seq:
      raise ValueError
    if act_seq is not None and init_act_seq is not None:
      raise ValueError
    if init_act_seq is not None and warm_start:
      raise ValueError

    # 'unif' baseline: return uniform random trajectories instead of optimizing
    if self.query_loss_opt == 'unif':
      if self.env.name == 'clfbandit':
        std = np.exp(-self.prior_coeff)

        def rand_traj():
          obs = np.random.normal(0, std, self.env.n_z_dim)[np.newaxis, :]
          next_obs = self.env.absorbing_state[np.newaxis, :]
          return np.concatenate((obs, next_obs), axis=0)

        trajs_eval = [rand_traj() for _ in range(self.n_trajs)]
        act_seqs_eval = [[self.env.action_space.sample()]
                         for _ in range(self.n_trajs)]
      elif self.env.name == 'pointmass':
        unif_env = envs.make_pointmass_env()
        random_policy = utils.make_random_policy(unif_env)
        unif_rollouts = [
            utils.run_ep(random_policy, unif_env, max_ep_len=1)
            for _ in range(self.n_trajs)
        ]
        trajs_eval = [
            utils.traj_of_rollout(rollout) for rollout in unif_rollouts
        ]
        act_seqs_eval = [
            utils.act_seq_of_rollout(rollout) for rollout in unif_rollouts
        ]
      else:
        raise ValueError
      loss_eval = 0.
      return {
          'traj': trajs_eval,
          'act_seq': act_seqs_eval,
          'loss': loss_eval
      }

    scopes = [self.opt_scope]
    if not warm_start:
      scopes.append(self.traj_scope)
    utils.init_tf_vars(self.sess, scopes, use_cache=True)

    feed_dict = {}
    assign_ops = []
    if init_act_seq is not None:
      feed_dict[self.init_act_seq_ph] = init_act_seq
      assign_ops.append(self.assign_init_act_seq)
    if init_traj is not None:
      self.obs_dim = (self.env.n_z_dim
                      if self.env.name == 'carracing' else self.env.n_obs_dim)
      feed_dict[self.init_traj_ph] = init_traj[1:, :self.obs_dim]
      assign_ops.append(self.assign_init_traj)
    if assign_ops:
      self.sess.run(assign_ops, feed_dict=feed_dict)

    feed_dict = {}
    if init_obs is not None:
      feed_dict[self.init_obs_ph] = init_obs() if callable(
          init_obs) else init_obs
    if act_seq is not None:
      feed_dict[self.act_seq_ph] = act_seq

    if verbose:
      print('iters loss')

    if init_with_lbfgs:
      self.lbfgs_optimizer.minimize(self.sess, feed_dict=feed_dict)

    loss_evals = []
    loss_eval, trajs_eval, act_seqs_eval = self.sess.run(
        [self.loss, self.trajs, self.act_seqs], feed_dict=feed_dict)
    best_eval = {
        'traj': trajs_eval,
        'act_seq': act_seqs_eval,
        'loss': loss_eval
    }
    #start_time = time.time()  # uncomment for profiling
    for t in range(iterations):
      loss_eval, trajs_eval, act_seqs_eval, _ = self.sess.run(
          [self.loss, self.trajs, self.act_seqs, self.update_op],
          feed_dict=feed_dict)
      if verbose:
        print('%d %f' % (t, loss_eval))
      loss_evals.append(loss_eval)
      if loss_eval < best_eval['loss']:
        best_eval = {
            'traj': trajs_eval,
            'act_seq': act_seqs_eval,
            'loss': loss_eval
        }
      if ftol is not None and utils.converged(
          loss_evals, ftol, min_iters=min_iters):
        break
    # uncomment for profiling
    #print('call to update_op: %0.3f' % ((time.time() - start_time) / t))
    #print('iterations: %d' % t)

    if verbose:
      plt.plot(loss_evals)
      plt.show()

    return best_eval
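  # A minimal usage sketch (an addition, not part of the original class); the
  # constructor and run arguments mirror the gd_traj_opt_* kwargs used in the
  # smoke tests, and `sess`, `env`, `reward_model`, and `dynamics_model` are
  # assumed to already exist. The public `run` method is assumed to forward
  # these keyword arguments to `_run`:
  #
  #   traj_opt = GDTrajOptimizer(
  #       sess, env, reward_model, dynamics_model,
  #       traj_len=2, n_trajs=2, prior_coeff=1., diversity_coeff=0.,
  #       query_loss_opt='pref_uncertainty', opt_init_obs=False,
  #       join_trajs_at_init_state=False, shoot_steps=1, learning_rate=1e-2)
  #   query = traj_opt.run(
  #       init_obs=env.default_init_obs, iterations=1, ftol=1e-4, verbose=False)
  #
  # `query` has the same 'traj' / 'act_seq' / 'loss' structure returned above.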