def experiment(variant):
    env = NormalizedBoxEnv(HalfCheetahEnv())
    # Or for a specific version:
    # import gym
    # env = NormalizedBoxEnv(gym.make('HalfCheetah-v1'))
    obs_dim = int(np.prod(env.observation_space.shape))
    action_dim = int(np.prod(env.action_space.shape))

    net_size = variant['net_size']
    qf = FlattenMlp(
        hidden_sizes=[net_size, net_size],
        input_size=obs_dim + action_dim,
        output_size=1,
    )
    vf = FlattenMlp(
        hidden_sizes=[net_size, net_size],
        input_size=obs_dim,
        output_size=1,
    )
    policy = TanhGaussianPolicy(
        hidden_sizes=[net_size, net_size],
        obs_dim=obs_dim,
        action_dim=action_dim,
    )
    algorithm = SoftActorCritic(
        env=env,
        policy=policy,
        qf=qf,
        vf=vf,
        **variant['algo_params']
    )
    algorithm.to(ptu.device)
    algorithm.train()
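# A minimal usage sketch (not part of the original file): illustrative values
# showing the shape of the `variant` dict that experiment() above expects.
# The specific hyperparameter names inside algo_params are assumptions about
# what this SoftActorCritic constructor accepts.
if __name__ == "__main__":
    variant = dict(
        net_size=300,  # hidden layer width; hypothetical value
        algo_params=dict(
            num_epochs=1000,
            num_steps_per_epoch=1000,
            num_steps_per_eval=1000,
            batch_size=128,
            max_path_length=999,
            discount=0.99,
            soft_target_tau=0.001,
            policy_lr=3e-4,
            qf_lr=3e-4,
            vf_lr=3e-4,
        ),
    )
    experiment(variant)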
def experiment(variant):
    env = SawyerXYZEnv(**variant['env_kwargs'])
    env = MultitaskToFlatEnv(env)
    if variant['normalize']:
        env = NormalizedBoxEnv(env)
    obs_dim = env.observation_space.low.size
    action_dim = env.action_space.low.size
    qf = ConcatMlp(
        input_size=obs_dim + action_dim,
        output_size=1,
        **variant['qf_kwargs']
    )
    vf = ConcatMlp(
        input_size=obs_dim,
        output_size=1,
        **variant['vf_kwargs']
    )
    policy = TanhGaussianPolicy(
        obs_dim=obs_dim,
        action_dim=action_dim,
        **variant['policy_kwargs']
    )
    algorithm = SoftActorCritic(
        env=env,
        policy=policy,
        qf=qf,
        vf=vf,
        **variant['algo_kwargs']
    )
    algorithm.to(ptu.device)
    algorithm.train()
def experiment(variant):
    env = NormalizedBoxEnv(CartpoleSwingupSparseEnv())
    # env = NormalizedBoxEnv(HalfCheetahEnv())
    # env = NormalizedBoxEnv(Continuous_MountainCarEnv())
    # env = DIAYNWrappedEnv(NormalizedBoxEnv(HumanoidEnv()))
    # Or for a specific version:
    # import gym
    # env = NormalizedBoxEnv(gym.make('HalfCheetah-v1'))
    skill_dim = 0  # 50
    obs_dim = int(np.prod(env.observation_space.shape))
    action_dim = int(np.prod(env.action_space.shape))

    net_size = variant['net_size']
    qf1 = FlattenMlp(
        hidden_sizes=[net_size, net_size],
        input_size=obs_dim + skill_dim + action_dim,
        output_size=1,
    )
    qf2 = FlattenMlp(
        hidden_sizes=[net_size, net_size],
        input_size=obs_dim + skill_dim + action_dim,
        output_size=1,
    )
    vf = FlattenMlp(
        hidden_sizes=[net_size, net_size],
        input_size=obs_dim + skill_dim,
        output_size=1,
    )
    policy = TanhGaussianPolicy(
        hidden_sizes=[net_size, net_size],
        obs_dim=obs_dim + skill_dim,
        action_dim=action_dim,
        # k=4,
    )
    disc = FlattenMlp(
        hidden_sizes=[net_size, net_size],
        input_size=obs_dim,
        output_size=skill_dim if skill_dim > 0 else 1,
    )
    algorithm = SoftActorCritic(
        env=env,
        policy=policy,
        qf1=qf1,
        qf2=qf2,
        vf=vf,
        # disc=disc,
        # skill_dim=skill_dim,
        **variant['algo_params']
    )
    algorithm.to(ptu.device)
    algorithm.train()
def experiment(variant, env_name, record_name, record_every_episode):
    # env = CartPoleEnv()
    env = gym.make(env_name)
    # A workaround to give this info later on
    # (Such naughty business...)
    randomize_settings = {
        "turnframes": [10, 10],
        "engagement_distance": [100, 200]
    }
    env.record_name = record_name
    env.record_every_episode = record_every_episode
    env.randomize_settings = randomize_settings
    env = OneHotsToDecimalsAndRecordAndRandomize(env)
    obs_dim = int(np.prod(env.observation_space.shape))
    num_categoricals = len(env.action_space.nvec)
    num_categories = env.action_space.nvec[0]

    net_size = variant['net_size']
    qf = FlattenMlp(
        hidden_sizes=[net_size, net_size],
        # Action is fed in as a raveled one-hot vector
        input_size=obs_dim + int(np.sum(env.action_space.nvec)),
        output_size=1,
        hidden_activation=F.sigmoid,
    )
    vf = FlattenMlp(
        hidden_sizes=[net_size, net_size],
        input_size=obs_dim,
        output_size=1,
        hidden_activation=F.sigmoid,
    )
    # For multi-discrete action spaces
    policy = MultiCategoricalPolicy(
        hidden_sizes=[net_size, net_size],
        obs_dim=obs_dim,
        num_categoricals=num_categoricals,
        num_categories=num_categories,
        hidden_activation=F.sigmoid,
    )
    algorithm = SoftActorCritic(
        env=env,
        policy=policy,
        qf=qf,
        vf=vf,
        **variant['algo_params']
    )
    algorithm.to(ptu.device)
    algorithm.train()
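# Illustrative sketch (an assumption, not code from the original project): how a
# MultiDiscrete action can be raveled into the concatenated one-hot vector that
# the q-function above expects, matching input_size = obs_dim + sum(nvec).
import numpy as np

def action_to_raveled_one_hot(action, nvec):
    """Turn e.g. action=[2, 0] with nvec=[3, 4] into a length-7 one-hot vector."""
    chunks = []
    for a, n in zip(action, nvec):
        one_hot = np.zeros(int(n), dtype=np.float32)
        one_hot[int(a)] = 1.0
        chunks.append(one_hot)
    return np.concatenate(chunks)

# Example: action_to_raveled_one_hot([2, 0], [3, 4])
# -> array([0., 0., 1., 1., 0., 0., 0.], dtype=float32)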
def experiment(variant):
    env = NormalizedBoxEnv(MultiGoalEnv(
        actuation_cost_coeff=10,
        distance_cost_coeff=1,
        goal_reward=10,
    ))
    obs_dim = int(np.prod(env.observation_space.shape))
    action_dim = int(np.prod(env.action_space.shape))
    qf = ConcatMlp(
        hidden_sizes=[100, 100],
        input_size=obs_dim + action_dim,
        output_size=1,
    )
    vf = ConcatMlp(
        hidden_sizes=[100, 100],
        input_size=obs_dim,
        output_size=1,
    )
    policy = TanhGaussianPolicy(
        hidden_sizes=[100, 100],
        obs_dim=obs_dim,
        action_dim=action_dim,
    )
    plotter = QFPolicyPlotter(
        qf=qf,
        policy=policy,
        obs_lst=np.array([[-2.5, 0.0], [0.0, 0.0], [2.5, 2.5]]),
        default_action=[np.nan, np.nan],
        n_samples=100,
    )
    algorithm = SoftActorCritic(
        env=env,
        policy=policy,
        qf=qf,
        vf=vf,
        # plotter=plotter,
        # render_eval_paths=True,
        **variant['algo_params']
    )
    algorithm.to(ptu.device)
    algorithm.train()
def experiment(variant):
    env = variant['env_class']()
    env = NormalizedBoxEnv(env)
    obs_dim = int(np.prod(env.observation_space.shape))
    action_dim = int(np.prod(env.action_space.shape))
    qf = ConcatMlp(
        input_size=obs_dim + action_dim,
        output_size=1,
        **variant['qf_kwargs']
    )
    vf = ConcatMlp(
        input_size=obs_dim,
        output_size=1,
        **variant['vf_kwargs']
    )
    policy = TanhGaussianPolicy(
        obs_dim=obs_dim,
        action_dim=action_dim,
        **variant['policy_kwargs']
    )
    algorithm = SoftActorCritic(
        env=env,
        policy=policy,
        qf=qf,
        vf=vf,
        **variant['algo_kwargs']
    )
    algorithm.to(ptu.device)
    algorithm.train()
def experiment(variant):
    # env = normalize(GymEnv(
    #     'HalfCheetah-v1',
    #     force_reset=True,
    #     record_video=False,
    #     record_log=False,
    # ))
    env = NormalizedBoxEnv(gym.make('HalfCheetah-v1'))
    obs_dim = int(np.prod(env.observation_space.shape))
    action_dim = int(np.prod(env.action_space.shape))

    net_size = variant['net_size']
    qf = ConcatMlp(
        hidden_sizes=[net_size, net_size],
        input_size=obs_dim + action_dim,
        output_size=1,
    )
    vf = ConcatMlp(
        hidden_sizes=[net_size, net_size],
        input_size=obs_dim,
        output_size=1,
    )
    policy = TanhGaussianPolicy(
        hidden_sizes=[net_size, net_size],
        obs_dim=obs_dim,
        action_dim=action_dim,
    )
    algorithm = SoftActorCritic(
        env=env,
        policy=policy,
        qf=qf,
        vf=vf,
        **variant['algo_params']
    )
    algorithm.to(ptu.device)
    algorithm.train()
def experiment(variant):
    env = NormalizedBoxEnv(MultiGoalEnv(
        actuation_cost_coeff=10,
        distance_cost_coeff=1,
        goal_reward=10,
    ))
    obs_dim = int(np.prod(env.observation_space.shape))
    action_dim = int(np.prod(env.action_space.shape))
    qf = ConcatMlp(
        hidden_sizes=[100, 100],
        input_size=obs_dim + action_dim,
        output_size=1,
    )
    vf = ConcatMlp(
        hidden_sizes=[100, 100],
        input_size=obs_dim,
        output_size=1,
    )
    policy = TanhGaussianPolicy(
        hidden_sizes=[100, 100],
        obs_dim=obs_dim,
        action_dim=action_dim,
    )
    algorithm = SoftActorCritic(
        env=env,
        policy=policy,
        qf=qf,
        vf=vf,
        **variant['algo_params']
    )
    algorithm.to(ptu.device)
    with torch.autograd.profiler.profile() as prof:
        algorithm.train()
    prof.export_chrome_trace("tmp-torch-chrome-trace.prof")
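# Optional follow-up (an assumption about the intended workflow, not part of the
# original script): the exported trace can be opened in chrome://tracing, and a
# per-op summary can also be printed right after training, e.g. inside
# experiment() while `prof` is still in scope:
#
#     print(prof.key_averages().table(sort_by="cpu_time_total"))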
def experiment(log_dir, variant_overwrite, cpu=False):
    if not cpu:
        ptu.set_gpu_mode(True)  # optionally set the GPU (default=False)

    # Load experiment from file.
    env, _, data, variant = load_experiment(log_dir, variant_overwrite)
    assert all([
        a == b for a, b in
        zip(env.sampled_goal, variant['env_kwargs']['goal_prior'])
    ])

    # Set log directory.
    exp_id = 'eval/ne{}-mpl{}-{}-rs{}/nhp{}'.format(
        variant['algo_kwargs']['num_episodes'],
        variant['algo_kwargs']['max_path_length'],
        ','.join(variant_overwrite['env_kwargs']['shaped_rewards']),
        variant['algo_kwargs']['reward_scale'],
        variant['historical_policies_kwargs']['num_historical_policies'],
    )
    exp_id = create_exp_name(exp_id)
    out_dir = os.path.join(log_dir, exp_id)
    print('Logging to:', out_dir)
    setup_logger(
        log_dir=out_dir,
        variant=variant,
        snapshot_mode='none',
        snapshot_gap=50,
    )

    # Load trained model from file.
    policy = data['policy']
    vf = data['vf']
    qf = data['qf']
    algorithm = SoftActorCritic(
        env=env,
        training_env=env,  # can't clone box2d env cause of swig
        save_environment=False,  # can't save box2d env cause of swig
        policy=policy,
        qf=qf,
        vf=vf,
        **variant['algo_kwargs'],
    )

    # Overwrite algorithm for p(z) adaptation (if model is SMM).
    if variant['intrinsic_reward'] == 'smm':
        discriminator = data['discriminator']
        density_model = data['density_model']
        SMMHook(
            base_algorithm=algorithm,
            discriminator=discriminator,
            density_model=density_model,
            **variant['smm_kwargs'])

    # Overwrite algorithm for historical averaging.
    if variant['historical_policies_kwargs']['num_historical_policies'] > 0:
        HistoricalPoliciesHook(
            base_algorithm=algorithm,
            log_dir=log_dir,
            **variant['historical_policies_kwargs'],
        )

    algorithm.to(ptu.device)
    algorithm.train()
def experiment(log_dir, variant_overwrite, cpu=False):
    if not cpu:
        ptu.set_gpu_mode(True)  # optionally set the GPU (default=False)

    # Load experiment from file.
    env, _, data, variant = load_experiment(log_dir, variant_overwrite)
    # assert all([a == b for a, b in
    #             zip(env.sampled_goal, variant['env_kwargs']['goal_prior'])])

    # Set log directory.
    exp_id = 'eval/ne{}-mpl{}-{}-rs{}/nhp{}'.format(
        variant['algo_kwargs']['num_episodes'],
        variant['algo_kwargs']['max_path_length'],
        ','.join(variant_overwrite['env_kwargs']['shaped_rewards']),
        variant['algo_kwargs']['reward_scale'],
        variant['historical_policies_kwargs']['num_historical_policies'],
    )
    exp_id = create_exp_name(exp_id)
    out_dir = os.path.join(log_dir, exp_id)
    print('Logging to:', out_dir)
    setup_logger(
        log_dir=out_dir,
        variant=variant,
        snapshot_mode='none',
        snapshot_gap=50,
    )

    # Load trained model from file.
    policy = data['policy']
    vf = data['vf']
    qf = data['qf']
    algorithm = SoftActorCritic(
        env=env,
        training_env=env,  # can't clone box2d env cause of swig
        save_environment=False,  # can't save box2d env cause of swig
        policy=policy,
        qf=qf,
        vf=vf,
        **variant['algo_kwargs'],
    )

    # Overwrite algorithm for p(z) adaptation (if model is SMM).
    if variant['intrinsic_reward'] == 'smm':
        discriminator = data['discriminator']
        density_model = data['density_model']
        SMMHook(
            base_algorithm=algorithm,
            discriminator=discriminator,
            density_model=density_model,
            **variant['smm_kwargs'])

    # Overwrite algorithm for historical averaging.
    if variant['historical_policies_kwargs']['num_historical_policies'] > 0:
        HistoricalPoliciesHook(
            base_algorithm=algorithm,
            log_dir=log_dir,
            **variant['historical_policies_kwargs'],
        )

    algorithm.to(ptu.device)
    # algorithm.train()

    # Collect evaluation paths and dump each path's observations to disk.
    samples = algorithm.get_eval_paths()
    # for path in samples:
    #     print(path['observations'])
    # plt.figure()
    # plt.plot(samples[0]['observations'][:, 0], samples[0]['observations'][:, 1])
    # plt.plot(3, 2)
    # plt.show()
    print(env.reset())
    print(samples[0]['observations'])
    i = 0
    for path in samples:
        np.save('./outtem/out%i.npy' % i, path['observations'])
        i = i + 1
    # print(algorithm.policy.get_action(np.array([0, 0])))

    from rlkit.samplers.util import rollout
    from rlkit.samplers.in_place import InPlacePathSampler
    # path = rollout(env, algorithm.eval_policy, 50)
    eval_sampler = InPlacePathSampler(
        env=env,
        policy=algorithm.eval_policy,
        max_samples=100,
        max_path_length=50,
    )
    path = eval_sampler.obtain_samples()
    print(path[0]['observations'])
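# A small offline-analysis sketch (an assumption, not from the original source):
# load the observation arrays saved by the loop above and plot the first two
# observation dimensions of each path, mirroring the commented-out plotting code.
import glob
import numpy as np
import matplotlib.pyplot as plt

def plot_saved_paths(pattern='./outtem/out*.npy'):
    for fname in sorted(glob.glob(pattern)):
        obs = np.load(fname)
        plt.plot(obs[:, 0], obs[:, 1], label=fname)
    plt.legend()
    plt.show()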
def experiment(variant):
    intrinsic_reward = variant['intrinsic_reward']

    # Create environment.
    num_skills = (variant['smm_kwargs']['num_skills']
                  if variant['intrinsic_reward'] == 'smm' else 0)
    env, training_env = create_env(variant['env_id'], variant['env_kwargs'],
                                   num_skills)
    obs_dim = env.observation_space.low.size
    action_dim = env.action_space.low.size

    # Initialize networks.
    net_size = variant['net_size']
    qf = FlattenMlp(
        input_size=obs_dim + action_dim,
        hidden_sizes=[net_size, net_size],
        output_size=1,
    )
    vf = FlattenMlp(
        input_size=obs_dim,
        hidden_sizes=[net_size, net_size],
        output_size=1,
    )
    policy = TanhGaussianPolicy(
        obs_dim=obs_dim,
        hidden_sizes=[net_size, net_size],
        action_dim=action_dim,
    )
    algorithm = SoftActorCritic(
        env=env,
        training_env=training_env,  # can't clone box2d env cause of swig
        save_environment=False,  # can't save box2d env cause of swig
        policy=policy,
        qf=qf,
        vf=vf,
        **variant['algo_kwargs'])

    if intrinsic_reward == 'smm':
        discriminator = FlattenMlp(
            input_size=obs_dim - num_skills,
            hidden_sizes=[net_size, net_size],
            output_size=num_skills,
        )
        density_model = VAEDensity(
            input_size=obs_dim,
            num_skills=num_skills,
            code_dim=128,
            **variant['vae_density_kwargs'])
        # Overwrite appropriate functions of algorithm.
        smm_algorithm_hook = SMMHook(
            base_algorithm=algorithm,
            discriminator=discriminator,
            density_model=density_model,
            **variant['smm_kwargs'])
    elif intrinsic_reward == 'icm':
        embedding_model = FlattenMlp(
            input_size=obs_dim,
            hidden_sizes=[net_size, net_size],
            output_size=net_size,
        )
        forward_model = FlattenMlp(
            input_size=net_size + action_dim,
            hidden_sizes=[net_size, net_size],
            output_size=net_size,
        )
        inverse_model = FlattenMlp(
            input_size=net_size + net_size,
            hidden_sizes=[],
            output_size=action_dim,
        )
        # Overwrite appropriate functions of algorithm.
        ICMHook(
            base_algorithm=algorithm,
            embedding_model=embedding_model,
            forward_model=forward_model,
            inverse_model=inverse_model,
            **variant['icm_kwargs'])
    elif intrinsic_reward == 'count':
        count_algorithm_hook = CountHook(
            base_algorithm=algorithm,
            **variant['count_kwargs'])
    elif intrinsic_reward == 'pseudocount':
        density_model = VAEDensity(
            input_size=obs_dim,
            num_skills=0,
            code_dim=128,
            **variant['vae_density_kwargs'],
        )
        # Overwrite appropriate functions of algorithm.
        PseudocountHook(
            base_algorithm=algorithm,
            density_model=density_model,
            **variant['pseudocount_kwargs'])

    algorithm.to(ptu.device)
    algorithm.train()
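# A hedged configuration sketch (values are illustrative assumptions): the
# `intrinsic_reward` field selects which hook wraps the base SAC algorithm
# above ('smm', 'icm', 'count', or 'pseudocount'), and only the matching
# *_kwargs block is consumed. The keys inside algo_kwargs, vae_density_kwargs,
# icm_kwargs, count_kwargs, and pseudocount_kwargs depend on the project's own
# classes and are left minimal here.
example_variant = dict(
    env_id='ManipulationEnv',       # hypothetical environment id
    env_kwargs=dict(),
    net_size=300,                   # hypothetical hidden width
    intrinsic_reward='smm',         # one of: 'smm', 'icm', 'count', 'pseudocount'
    algo_kwargs=dict(),
    vae_density_kwargs=dict(),
    smm_kwargs=dict(num_skills=4),  # read only when intrinsic_reward == 'smm'
    icm_kwargs=dict(),
    count_kwargs=dict(),
    pseudocount_kwargs=dict(),
)
# experiment(example_variant)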
def experiment(args):
    if not args.cpu:
        ptu.set_gpu_mode(True)  # optionally set the GPU (default=False)

    variant_overwrite = dict(
        # Evaluate model on num_episodes.
        algo_kwargs=dict(
            reward_scale=args.reward_scale,
            collection_mode='episodic',
            num_episodes=args.num_episodes,
            max_path_length=args.max_path_length,
            render=args.render,
            # Evaluate without additional training
            num_updates_per_episode=0,
            min_num_steps_before_training=(
                args.max_path_length * args.num_episodes + 1),
        ),
        # Environment settings
        env_kwargs=dict(
            sample_goal=False,
            goal_prior=args.test_goal,
            shaped_rewards=[
                'object_off_table',
                'object_goal_indicator',
                'object_gripper_indicator',
                'action_penalty',
            ],
            terminate_upon_success=False,
            terminate_upon_failure=False,
        ),
        # SMM settings
        smm_kwargs=dict(
            # Posterior adaptation of latent skills p(z)
            update_p_z_prior_coeff=args.update_p_z_prior_coeff,
            # Turn off SMM reward.
            state_entropy_coeff=0,
            latent_entropy_coeff=0,
            latent_conditional_entropy_coeff=0,
            discriminator_lr=0,
        ),
    )

    # Load experiment from file.
    env, _, data, variant = load_experiment(args.logdir, variant_overwrite)
    assert all([a == b for a, b in zip(env.sampled_goal, args.test_goal)])
    variant.update(test_goal=list(env.sampled_goal))
    if args.num_historical_policies > 0:
        variant.update(historical_policies_kwargs=dict(
            log_dir=args.logdir,
            num_historical_policies=args.num_historical_policies,
            sample_strategy=args.sample_strategy,
            on_policy_prob=args.on_policy_prob,
        ))

    # Set log directory.
    exp_id = 'eval/ne{}-mpl{}-{}-rs{}/nhp{}-{}-opp{}'.format(
        args.num_episodes,
        args.max_path_length,
        ','.join(variant_overwrite['env_kwargs']['shaped_rewards']),
        args.reward_scale,
        args.num_historical_policies,
        args.sample_strategy,
        args.on_policy_prob,
    )
    exp_id = create_exp_name(exp_id)
    log_dir = os.path.join(args.logdir, exp_id)
    print('Logging to:', log_dir)
    setup_logger(
        log_dir=log_dir,
        variant=variant,
        snapshot_mode='none',
        snapshot_gap=50,
    )

    # Load trained model from file.
    policy = data['policy']
    vf = data['vf']
    qf = data['qf']
    algorithm = SoftActorCritic(
        env=env,
        training_env=env,  # can't clone box2d env cause of swig
        save_environment=False,  # can't save box2d env cause of swig
        policy=policy,
        qf=qf,
        vf=vf,
        **variant['algo_kwargs'],
    )

    # Overwrite algorithm for p(z) adaptation (if model is SMM).
    if 'smm_kwargs' in variant:
        discriminator = data['discriminator']
        density_model = data['density_model']
        SMMHook(
            base_algorithm=algorithm,
            discriminator=discriminator,
            density_model=density_model,
            **variant['smm_kwargs'])

    # Overwrite algorithm for historical averaging.
    if args.num_historical_policies > 0:
        HistoricalPoliciesHook(
            base_algorithm=algorithm,
            **variant['historical_policies_kwargs'],
        )

    algorithm.to(ptu.device)
    algorithm.train()
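# A possible command-line entry point (a sketch; the flag defaults are
# assumptions, but every attribute matches a field read from `args` in
# experiment() above).
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('logdir', type=str)
    parser.add_argument('--cpu', action='store_true')
    parser.add_argument('--reward-scale', type=float, default=1.0)
    parser.add_argument('--num-episodes', type=int, default=10)
    parser.add_argument('--max-path-length', type=int, default=100)
    parser.add_argument('--render', action='store_true')
    parser.add_argument('--test-goal', type=float, nargs='+', default=[0.0, 0.0])
    parser.add_argument('--update-p-z-prior-coeff', type=float, default=1.0)
    parser.add_argument('--num-historical-policies', type=int, default=0)
    parser.add_argument('--sample-strategy', type=str, default='uniform')
    parser.add_argument('--on-policy-prob', type=float, default=0.0)
    experiment(parser.parse_args())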