def train():
    """Resume a PyTorch PPO run from a saved snapshot via an ExperimentGrid."""
    grid = ExperimentGrid(name=experiment_name)
    # Env name is kept out of the save-name (in_name=False); the grid name covers it.
    grid.add('env_name', env_config['env_name'], '', False)
    # Pick up training from the earlier snapshot instead of starting fresh.
    grid.add(
        'resume',
        '/home/c2/src/tmp/spinningup/data/intersection_2_agents_lower_gamma_snapshot/intersection_2_agents_lower_gamma_s0_2020_03-12_12-07.37',
    )
    grid.add('reinitialize_optimizer_on_resume', False)
    # Lowered learning rates: Adam moment estimates are not restored on resume,
    # so play it safe (original author saw no observable effect either way).
    grid.add('pi_lr', 3e-5)
    grid.add('vf_lr', 1e-4)
    grid.add('epochs', 8000)
    grid.add('gamma', 0.95)
    grid.add('lam', 0.835)
    grid.add('ac_kwargs:hidden_sizes', (256, 256), 'hid')
    grid.add('ac_kwargs:activation', torch.nn.Tanh, '')
    # Bookkeeping stored with the run: free-form notes, launching file, env config.
    grid.add('notes', notes, '')
    grid.add('run_filename', os.path.realpath(__file__), '')
    grid.add('env_config', env_config, '')
    grid.run(ppo_pytorch)
def train():
    """Launch a fresh PyTorch PPO run configured through an ExperimentGrid."""
    grid = ExperimentGrid(name=experiment_name)
    # in_name=False keeps the env name out of the experiment save-name.
    grid.add('env_name', env_config['env_name'], '', False)
    grid.add('epochs', 8000)
    grid.add('ac_kwargs:hidden_sizes', (256, 256), 'hid')
    grid.add('ac_kwargs:activation', torch.nn.Tanh, '')
    # Bookkeeping stored with the run: free-form notes, launching file, env config.
    grid.add('notes', notes, '')
    grid.add('run_filename', os.path.realpath(__file__), '')
    grid.add('env_config', env_config, '')
    grid.run(ppo_pytorch)
def runExperiment(someEnv, someOptimizer, someActivation):
    """Run a small VPG grid for one env / optimizer / activation combination."""
    grid = ExperimentGrid(name='vpg-trece')
    grid.add('env_name', someEnv, '', True)
    grid.add('seed', [10 * run for run in range(args.num_runs)])
    grid.add('epochs', 10)
    grid.add('steps_per_epoch', [4000])
    # Single-element lists so these read as (degenerate) grid axes.
    grid.add('optimizer', [someOptimizer])
    grid.add('ac_kwargs:hidden_sizes', [(32,), (64, 64)], 'hid')
    grid.add('ac_kwargs:activation', [someActivation], '')
    grid.run(vpg, num_cpu=args.cpu)
def run_experiment(args):
    """Configure and launch a ppo/vpg ExperimentGrid run from parsed CLI args.

    Builds the grid from ``args`` fields, optionally loads FlexibilityEnv
    input for env versions >= 3, and dispatches to ppo or vpg per ``args.algo``.
    """
    # def env_fn():
    #     import flexibility  # register flexibility to gym env registry
    #     return gym.make(args.env_name)
    eg = ExperimentGrid(name=args.exp_name)
    # Default seeds 0, 10, 20, ... unless explicit seeds were given.
    eg.add('seed', [10*i for i in range(args.num_runs)] if args.seed is None else args.seed)
    eg.add('epochs', args.epochs)
    eg.add('steps_per_epoch', args.steps_per_epoch)
    eg.add('save_freq', args.save_freq)
    eg.add('max_ep_len', 200)
    # NOTE(review): eval() turns the CLI string (e.g. "tf.tanh") into a callable;
    # this executes arbitrary input — acceptable only for trusted local use.
    eg.add('ac_kwargs:activation', eval(args.act), '')
    eg.add('custom_h', args.custom_h)
    eg.add('do_checkpoint_eval', args.do_checkpoint_eval)
    eg.add('eval_episodes', args.eval_episodes)
    eg.add('train_v_iters', args.train_v_iters)
    eg.add('eval_temp', args.eval_temp)
    eg.add('train_starting_temp', args.train_starting_temp)
    eg.add('gamma', args.gamma)
    eg.add('env_version', args.env_version)
    eg.add('env_name', args.env_name)
    eg.add('env_subtract_full_flex', args.env_subtract_full_flex)
    eg.add('meta_learning', args.meta_learning)
    eg.add('finetune', args.finetune)
    eg.add('finetune_model_path', args.finetune_model_path)
    eg.add('lam', args.lam)
    eg.add('early_stop_epochs', args.early_stop_epochs)
    eg.add('save_all_eval', args.save_all_eval)
    if args.episodes_per_epoch is not None:
        eg.add('episodes_per_epoch', args.episodes_per_epoch)
    # Env versions >= 3 read their configuration from a FlexibilityEnv input file
    # located relative to the RL_flex_design checkout.
    if args.env_version >= 3:
        # args.file_path = "/home/user/git/spinningup/spinup/FlexibilityEnv/input_m8n12_cv0.8.pkl"
        prefix = os.getcwd().split('RL_flex_design')[0]
        args.file_path = prefix + "RL_flex_design/spinup/FlexibilityEnv_input/{}".format(args.env_input)
        # Only target_arcs is used here; the other unpacked values are discarded.
        m, n, mean_c, mean_d, sd_d, profit_mat, target_arcs, fixed_costs, flex_0 = load_FlexibilityEnv_input(args.file_path)
        eg.add('env_input', args.file_path)
        eg.add('env_n_sample', args.env_n_sample)
        if args.target_arcs is None:
            eg.add('target_arcs', target_arcs)
        else:
            # target_arcs is explicitly specified by the scripts, which overrides
            # the target_arc from the input file
            eg.add('target_arcs', args.target_arcs)
    if args.algo == "ppo":
        # train_pi_iters is PPO-specific.
        eg.add('train_pi_iters', args.train_pi_iters)
        eg.run(ppo)
    elif args.algo == "vpg":
        eg.run(vpg)
def run_experiment(args):
    """Launch a TF1 PPO grid on the env named by ``args.env_name``."""
    def make_env():
        # Importing HumanoidRL presumably registers its envs with gym — TODO confirm.
        import HumanoidRL
        return gym.make(args.env_name)
    grid = ExperimentGrid(name=args.exp_name)
    grid.add('env_fn', make_env)
    grid.add('seed', [10 * run for run in range(args.num_runs)])
    grid.add('epochs', 500)
    grid.add('steps_per_epoch', 10000)
    grid.add('save_freq', 20)
    grid.add('max_ep_len', 200)
    grid.add('ac_kwargs:activation', tf.tanh, '')
    grid.run(ppo_tf1)
def run_experiment(args):
    """Launch a VPG grid on the custom 'offload-v0' environment."""
    def make_env():
        import offload_env  # side effect: registers custom envs to gym env registry
        return gym.make('offload-v0')
    grid = ExperimentGrid(name=args.exp_name)
    grid.add('env_fn', make_env)
    grid.add('seed', [10 * run for run in range(args.num_runs)])
    grid.add('epochs', 10)
    grid.add('steps_per_epoch', 10000)
    grid.add('save_freq', 20)
    grid.add('ac_kwargs:hidden_sizes', [(32,), (64, 64)], 'hid')
    grid.add('ac_kwargs:activation', [tf.tanh, tf.nn.relu], '')
    grid.run(vpg, num_cpu=args.cpu)
def run_vpg_lava():
    """Benchmark PyTorch VPG on MiniGrid LavaCrossing."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--cpu', type=int, default=4)
    parser.add_argument('--num_runs', type=int, default=3)
    args = parser.parse_args()

    grid = ExperimentGrid(name='vpg-pt-bench')
    grid.add('env_name', 'MiniGrid-LavaCrossingS9N2-v1', '', True)
    grid.add('seed', [6 * run for run in range(args.num_runs)])
    grid.add('epochs', 500)
    grid.add('steps_per_epoch', 5000)
    grid.add('max_ep_len', 200)
    # Single small net / single activation; widen these lists to sweep more.
    grid.add('ac_kwargs:hidden_sizes', [(32,)], 'hid')
    grid.add('ac_kwargs:activation', [torch.nn.Tanh], '')
    grid.run(vpg_pytorch, num_cpu=args.cpu, datestamp=True)
def run_experiment(args, rl_model):
    """Run ``rl_model`` on a custom env built from ``args``."""
    def make_env():
        import envs  # side effect: registers custom envs to gym env registry
        return gym.make(args.env_name, desired_outputs=args.desired_outputs)
    grid = ExperimentGrid(name=args.exp_name)
    grid.add('env_fn', make_env)
    grid.add('epochs', 100)
    grid.add('steps_per_epoch', 6 * 500)  # FIXME
    grid.add('save_freq', 10)
    grid.add('max_ep_len', 6)  # FIXME get it from env
    # ppo
    grid.add('pi_lr', 3e-3)
    # actor-critic
    grid.add('ac_kwargs:activation', tf.tanh, '')
    grid.run(rl_model, num_cpu=args.cpu, data_dir=args.data_dir)
def train():
    """Resume a PyTorch PPO run from a best-checkpoint snapshot via an ExperimentGrid."""
    grid = ExperimentGrid(name=experiment_name)
    # in_name=False: the grid name already identifies the experiment.
    grid.add('env_name', env_config['env_name'], '', False)
    # Resume from the best-HorizonReturn checkpoint of the earlier run.
    grid.add(
        'resume',
        '/home/c2/src/tmp/spinningup/data/deepdrive-2d-intersection-no-constrained-controls-example/deepdrive-2d-intersection-no-constrained-controls-example_s0_2020_03-10_13-14.50/best_HorizonReturn/2020_03-11_11-36.27',
    )
    grid.add('reinitialize_optimizer_on_resume', False)
    # Lowered learning rates: Adam moment estimates are not restored on resume,
    # so play it safe (original author saw no observable effect either way).
    grid.add('pi_lr', 3e-5)
    grid.add('vf_lr', 1e-4)
    grid.add('epochs', 8000)
    grid.add('ac_kwargs:hidden_sizes', (256, 256), 'hid')
    grid.add('ac_kwargs:activation', torch.nn.Tanh, '')
    # Bookkeeping stored with the run: free-form notes, launching file, env config.
    grid.add('notes', notes, '')
    grid.add('run_filename', os.path.realpath(__file__), '')
    grid.add('env_config', env_config, '')
    grid.run(ppo_pytorch)
from spinup.utils.run_utils import ExperimentGrid

# Knobs kept from earlier direct naf()/ddpg() invocations; the grid below
# does not read them, but the (removed) commented calls did.
env_fn = lambda: gym.make('Pendulum-v0')
network_kwargs = dict(hidden_sizes=[400, 300], activation=tf.nn.relu)
logger_kwargs = dict(output_dir='logging/NAF', exp_name='naf - tests')
steps_per_epoch = 1000
epochs = 100
start_steps = 50
algorithm = 'naf'

if __name__ == '__main__':
    # DDPG benchmark on Pendulum: 4 seeds x 2 network sizes.
    bench = ExperimentGrid(name='ddpg-bench-long')
    bench.add('env_name', 'Pendulum-v0', '', True)
    bench.add('seed', [10 * run for run in range(4)])
    bench.add('epochs', 20)
    bench.add('steps_per_epoch', 1000)
    bench.add('ac_kwargs:hidden_sizes', [(100, 100), (400, 300)], 'hid')
    bench.add('ac_kwargs:activation', [tf.nn.relu], '')
    bench.run(ddpg, num_cpu=4, data_dir='logging/DDPG')
from spinup.utils.run_utils import ExperimentGrid
from spinup import ppo_pytorch
import torch

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--cpu', type=int, default=4)
    parser.add_argument('--num_runs', type=int, default=3)
    args = parser.parse_args()

    # PPO sweep on the multi-agent control env (earlier envs tried:
    # MountainCar-v0, CartPole-v0, gym_multiagent_control:foo-v0).
    grid = ExperimentGrid(name='ppo-test-lunar')
    grid.add('env_name', 'gym_multiagent_control:foo-v2', '', True)
    grid.add('seed', [10 * run for run in range(args.num_runs)])
    grid.add('epochs', 10)
    grid.add('steps_per_epoch', 4000)
    grid.add('ac_kwargs:hidden_sizes', [(32,), (64, 64)], 'hid')
    grid.add('ac_kwargs:activation', [torch.nn.Tanh, torch.nn.ReLU], '')
    grid.run(ppo_pytorch, num_cpu=args.cpu)
# Hidden sizes are left at each algorithm's own defaults
# (some are (64, 64), some are (300, 400)).
from spinup.utils.run_utils import ExperimentGrid
from spinup import vpg, trpo, ppo, ddpg, td3, sac
import tensorflow as tf

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    # 'auto' is a string so the value can adapt to whatever algorithm is used.
    parser.add_argument('--cpu', type=str, default='auto')
    parser.add_argument('--num_runs', type=int, default=1)
    args = parser.parse_args()

    names = ['vpg', 'trpo', 'ppo', 'ddpg', 'td3', 'sac']
    algos = [vpg, trpo, ppo, ddpg, td3, sac]
    # One grid per algorithm, all on MountainCarContinuous.
    for label, algorithm in zip(names, algos):
        grid = ExperimentGrid(name=label)
        grid.add('env_name', 'MountainCarContinuous-v0', '', True)
        grid.add('seed', [10 * run for run in range(args.num_runs)])
        grid.add('epochs', 10)
        grid.add('steps_per_epoch', 4000)
        grid.add('ac_kwargs:activation', [tf.nn.relu], '')
        grid.run(algorithm)  # num_cpu=args.cpu deliberately not passed
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--env', type=str, default='HalfCheetah-v2')
    parser.add_argument('--h', type=int, default=300)
    parser.add_argument('--l', type=int, default=1)
    parser.add_argument('--num_runs', '-n', type=int, default=3)
    parser.add_argument('--steps_per_epoch', '-s', type=int, default=5000)
    parser.add_argument('--total_steps', '-t', type=int, default=int(5e4))
    args = parser.parse_args()

    def ddpg_with_actor_critic(bugged, **kwargs):
        """Run DDPG with either the bugged or the correct actor-critic."""
        actor_critic = bugged_mlp_actor_critic if bugged else mlp_actor_critic
        return ddpg(actor_critic=actor_critic,
                    ac_kwargs=dict(hidden_sizes=[args.h] * args.l),
                    start_steps=5000,
                    max_ep_len=150,
                    batch_size=64,
                    polyak=0.95,
                    **kwargs)

    grid = ExperimentGrid(name='ex2-2_ddpg')
    grid.add('replay_size', int(args.total_steps))
    grid.add('env_name', args.env, '', True)
    grid.add('seed', [10 * run for run in range(args.num_runs)])
    grid.add('epochs', int(args.total_steps / args.steps_per_epoch))
    grid.add('steps_per_epoch', args.steps_per_epoch)
    # Compare the buggy and the fixed actor-critic head to head.
    grid.add('bugged', [False, True])
    grid.run(ddpg_with_actor_critic, datestamp=True)
# NOTE(review): this chunk begins mid-file; the dangling logger.dump_tabular()
# call below is the tail of an enclosing function whose header is not visible here.
logger.dump_tabular()

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--env', type=str, default='HalfCheetah-v2')
    parser.add_argument('--h', type=int, default=300)
    parser.add_argument('--l', type=int, default=1)
    parser.add_argument('--num_runs', '-n', type=int, default=3)
    parser.add_argument('--steps_per_epoch', '-s', type=int, default=5000)
    parser.add_argument('--total_steps', '-t', type=int, default=int(5e4))
    args = parser.parse_args()

    def td3_with_actor_critic(**kwargs):
        # Thin wrapper fixing network size and off-policy hyperparameters.
        # NOTE: no explicit return — td3's result is discarded.
        td3(ac_kwargs=dict(hidden_sizes=[args.h] * args.l),
            start_steps=5000,
            max_ep_len=150,
            batch_size=64,
            polyak=0.95,
            **kwargs)

    eg = ExperimentGrid(name='ex2-3_td3')
    eg.add('replay_size', int(args.total_steps))
    eg.add('env_name', args.env, '', True)
    eg.add('seed', [10 * i for i in range(args.num_runs)])
    eg.add('epochs', int(args.total_steps / args.steps_per_epoch))
    eg.add('steps_per_epoch', args.steps_per_epoch)
    # Sweep the flag to compare clipped vs unclipped actions.
    eg.add('remove_action_clip', [False, True])
    eg.run(td3_with_actor_critic, datestamp=True)
from spinup import vpg
import tensorflow as tf

# Candidate envs: BipedalWalkerHardcore-v2, LunarLanderContinuous-v2,
# MontezumaRevenge-ram-v0, Enduro-ram-v0, MsPacman-ram-v0, Ant-v2,
# HumanoidStandup-v2.

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--cpu', type=int, default=1)
    parser.add_argument('--num_runs', type=int, default=10)
    args = parser.parse_args()

    # Big optimizer sweep for VPG on Enduro (RAM observations).
    grid = ExperimentGrid(name='vpg-nueve-singular')
    grid.add('env_name', 'Enduro-ram-v0', '', True)
    grid.add('seed', [10 * run for run in range(args.num_runs)])
    grid.add('epochs', 10)
    grid.add('steps_per_epoch', [4000, 100])
    grid.add('optimizer', ['GradientDescentOptimizer', 'MomentumOptimizer',
                           'ProximalAdagradOptimizer',
                           'ProximalGradientDescentOptimizer', 'RMSPropOptimizer',
                           'AdaMaxOptimizer', 'AdamGSOptimizer', 'AdamWOptimizer',
                           'AddSignOptimizer', 'GGTOptimizer', 'LARSOptimizer',
                           'LazyAdamGSOptimizer', 'LazyAdamOptimizer',
                           'MomentumWOptimizer', 'NadamOptimizer',
                           'PowerSignOptimizer', 'RegAdagradOptimizer',
                           'ShampooOptimizer'])
    grid.add('ac_kwargs:hidden_sizes', [(32,), (64, 64)], 'hid')
    grid.add('ac_kwargs:activation', [tf.nn.sigmoid], '')
    grid.run(vpg, num_cpu=args.cpu)
# NOTE(review): collapsed fragment cut mid-definition — the leading statements
# (through `return indexes, actual_setting`) are the tail of a get_setting-style
# helper whose header/loop is not visible here. The remainder decodes a single
# job index into one concrete setting, fills an ExperimentGrid from the setting
# arrays (env_name/seed handled specially), runs it, and prints elapsed time.
remainder = remainder % division indexes.append(index) total = division actual_setting = {} for j in range(len(indexes)): actual_setting[setting_names[j]] = settings[j][indexes[j]] return indexes, actual_setting indexes, actual_setting = get_setting(args.setting, total, settings, setting_names) eg = ExperimentGrid(name=EXPERIMENT_NAME) # use eg.add to add parameters in the settings or add parameters tha apply to all jobs # we now automated this part, as long as you added settings correctly into the arrays at the start of this program # they should be added to experiment automatically for i in range(len(actual_setting)): setting_name = setting_names[i] if setting_name != 'env_name' and setting_name != 'seed': eg.add(setting_name, actual_setting[setting_name], setting_savename_prefix[i], whether_add_to_savename[i]) eg.add('env_name', actual_setting['env_name'], '', True) eg.add('seed', actual_setting['seed']) eg.run(function_to_run, num_cpu=args.cpu, data_dir=save_data_dir) print( '\n###################################### GRID EXP END ######################################' ) print('total time for grid experiment:', time.time() - start_time)
parser.add_argument('--num_runs', type=int, default=3)
args = parser.parse_args()

# Hidden-size sweep: for each width, every depth from 1 to `depth` layers,
# i.e. [64], [64, 64], ..., [64]*10, then the same for 128, 256 and 512.
# Replaces four copy-pasted append loops with a single comprehension that
# produces the identical list in the identical order.
depth = 10
layers_itr = [[width] * n_layers
              for width in (64, 128, 256, 512)
              for n_layers in range(1, depth + 1)]

eg = ExperimentGrid(name='td3-bench')
eg.add('env_name', 'MountainCarContinuous-v0', '', True)
eg.add('seed', [10 * i for i in range(args.num_runs)])
eg.add('epochs', 10)
eg.add('steps_per_epoch', 4000)
eg.add('ac_kwargs:hidden_sizes', layers_itr, 'hid')
eg.add('ac_kwargs:activation', [tf.nn.relu], '')
eg.run(td3, num_cpu=args.cpu)
# Candidate envs: Enduro-ram-v0, MsPacman-ram-v0, Ant-v2, HumanoidStandup-v2.

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--cpu', type=int, default=4)
    parser.add_argument('--num_runs', type=int, default=10)
    args = parser.parse_args()

    # VPG sweep on LunarLanderContinuous over optimizers and activations.
    grid = ExperimentGrid(name='vpg-ocho')
    grid.add('env_name', 'LunarLanderContinuous-v2', '', True)
    grid.add('seed', [10 * run for run in range(args.num_runs)])
    grid.add('epochs', 10)
    grid.add('steps_per_epoch', [4000, 100])
    grid.add('optimizer', ['NadamOptimizer', 'PowerSignOptimizer',
                           'RegAdagradOptimizer', 'ShampooOptimizer'])
    grid.add('ac_kwargs:hidden_sizes', [(32,), (64, 64)], 'hid')
    grid.add('ac_kwargs:activation', [tf.nn.relu, tf.nn.relu6, tf.nn.crelu,
                                      tf.nn.elu, tf.nn.selu, tf.nn.softplus,
                                      tf.nn.softsign, tf.sigmoid, tf.tanh], '')
    grid.run(vpg, num_cpu=args.cpu)
# NOTE(review): collapsed fragment cut mid-definition — the statements through
# `return indexes, actual_setting` are the tail of a get_setting-style helper
# whose header is not visible here. The remainder decodes a job index into one
# setting, builds a behavior-cloning (bc_ue_learn) ExperimentGrid with mostly
# fixed hyperparameters, runs it, and prints total elapsed time.
division = int(total / len(setting)) index = int(remainder / division) remainder = remainder % division indexes.append(index) total = division actual_setting = {} for j in range(len(indexes)): actual_setting[setting_names[j]] = settings[j][indexes[j]] return indexes, actual_setting indexes, actual_setting = get_setting(args.setting, total, settings, setting_names) #################################################################################################### ## use eg.add to add parameters in the settings or add parameters that apply to all jobs eg = ExperimentGrid(name=exp_name) eg.add('ue_seed', 21, 'ues', True) eg.add('lr', actual_setting['lr'], 'lr', True) eg.add('border', actual_setting['border'], 'border', True) eg.add('wd', 0, 'wd', True) eg.add('buffer_type', 'FinalSigma0.5', 'Buf-', True) eg.add('buffer_size', '500K', '', True) eg.add('eval_freq', 500) eg.add('max_timesteps', 100000) eg.add('env_set', actual_setting['env_set'], '', True) eg.add('seed', actual_setting['seed']) eg.run(bc_ue_learn, num_cpu=args.cpu) print('\n###################################### GRID EXP END ######################################') print('total time for grid experiment:',time.time()-start_time)
# NOTE(review): this chunk begins mid-script; `eg`, `args`, and
# `fgym_trunk_sac_discrete_v2` are defined earlier, outside this view.
# Discrete-SAC configuration for the financial gym image env.
eg.add('show_kwargs_json', True)
eg.add('env_name', 'Financial_gym_pic_daily', '', True)
eg.add('seed', [10 * i for i in range(args.num_runs)])
eg.add('epochs', 400)
eg.add('save_freq', 5)  # epoch save frequency
eg.add('steps_per_epoch', 400)  # default 4000
eg.add('start_steps', 500)  # default 10000, start store a=pi(obs)
eg.add('update_after', 500)  # default 1000, update
eg.add('use_gpu', True)  # default
eg.add('gpu_parallel', True)  # default
eg.add('update_times_every_step', 50)  # default 50
eg.add('automatic_entropy_tuning', True)  # default
eg.add('batch_size', 48)  # default
eg.add('num_test_episodes', 2)  # default
# Custom network classes for the discrete actor/critic; Xception_1 is the
# shared vision backbone.
from spinup.algos.pytorch.sac.core import Discrete_Actor, Discrete_Critic, Xception_1
eg.add('state_of_art_model', True)
eg.add('Actor', [Discrete_Actor])
eg.add('Critic', [Discrete_Critic])
eg.add('ac_kwargs:model', [Xception_1])
eg.add('ac_kwargs:num_classes', [2])
# eg.add('ac_kwargs:hidden_sizes', [(2048, 1024, 512, 256)], 'hid')
# eg.add('ac_kwargs:activation', [torch.nn.ReLU], '')
# Continue training from a previously saved model checkpoint.
eg.add(
    "last_save_path",
    "/media/zzw/Magic/py_work2019/RL/spinningup-master/data/sac-pyt_financial_gym_pic_daily/sac-pyt_financial_gym_pic_daily_s0/pyt_save/model.pt"
)
eg.run(fgym_trunk_sac_discrete_v2, num_cpu=args.cpu)
# Import packages and environment
import numpy as np
from spinup.utils.run_utils import ExperimentGrid
from spinup import soc_pytorch
# from spinup import sac_pytorch
# from spinup import ppo_pytorch
import torch as th

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_runs', type=int, default=3)
    args = parser.parse_args()

    # Run soc_pytorch on PyBullet Hopper; alpha is the only multi-valued axis.
    grid = ExperimentGrid(name='hac-hopper-soc')
    # grid.add('env_name', 'Walker2DBulletEnv-v0', '', True)
    grid.add('env_name', 'HopperBulletEnv-v0', '', True)
    grid.add('seed', [10 * run for run in range(args.num_runs)])
    grid.add('epochs', 250)
    grid.add('N_options', [2])  # also worth trying: 3
    grid.add('ac_kwargs:hidden_sizes', [[128, 256, 128]], 'hid')
    grid.add('alpha', [0.1, 0.2])
    grid.add('c', [0.2])  # candidates: 0.1, 0.2, 0.3 and maybe 0.0
    grid.run(soc_pytorch)
# NOTE(review): collapsed fragment cut mid-definition — the leading checkpoint
# save / `return agent.evaluate()` / `return main_loop(config)` statements are
# tails of training functions whose headers are not visible here. The final
# part defines mock_train and sweeps lr x gamma over `train` with an
# ExperimentGrid; whether the eg.* calls live inside mock_train cannot be
# determined from this collapsed line.
if args.save_model_interval > 0 and (i_iter+1) % args.save_model_interval == 0: to_device(torch.device('cpu'), policy_net, value_net) pickle.dump((policy_net, value_net, running_state), open(os.path.join(assets_dir(), 'learned_models/{}_ppo.p'.format(args.env_name)), 'wb')) to_device(device, policy_net, value_net) # """clean up gpu memory""" torch.cuda.empty_cache() return agent.evaluate() print('a') print(config) print(args) return main_loop(config) def mock_train(**kwargs): config = { "lr": kwargs['lr'], "gamma": kwargs['gamma'] } print('a') print(config) print(args) eg = ExperimentGrid('hopper') eg.add('lr', [1e-4]) eg.add('gamma', [0.99, 0.95]) eg.run(train)
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--cpu', type=int, default=4)
    parser.add_argument('--num_runs', type=int, default=10)
    args = parser.parse_args()

    # VPG sweep on MontezumaRevenge (RAM observations) over a large set of
    # TF optimizers and activations.
    grid = ExperimentGrid(name='vpg-nueve-singular')
    grid.add('env_name', 'MontezumaRevenge-ram-v0', '', True)
    grid.add('seed', [10 * run for run in range(args.num_runs)])
    grid.add('epochs', 10)
    grid.add('steps_per_epoch', [4000, 100])
    grid.add('optimizer', ['GradientDescentOptimizer', 'MomentumOptimizer',
                           'ProximalAdagradOptimizer',
                           'ProximalGradientDescentOptimizer', 'RMSPropOptimizer',
                           'AdaMaxOptimizer', 'AdamGSOptimizer', 'AdamWOptimizer',
                           'AddSignOptimizer', 'GGTOptimizer', 'LARSOptimizer',
                           'LazyAdamGSOptimizer', 'LazyAdamOptimizer',
                           'MomentumWOptimizer', 'NadamOptimizer',
                           'PowerSignOptimizer', 'RegAdagradOptimizer',
                           'ShampooOptimizer'])
    grid.add('ac_kwargs:hidden_sizes', [(32,), (64, 64)], 'hid')
    grid.add('ac_kwargs:activation', [tf.nn.relu, tf.nn.relu6, tf.nn.crelu,
                                      tf.nn.elu, tf.nn.selu, tf.nn.softplus,
                                      tf.nn.softsign, tf.sigmoid, tf.tanh], '')
    grid.run(vpg, num_cpu=args.cpu)
from spinup import vpg_pytorch
from spinup.utils.run_utils import ExperimentGrid
import torch

if __name__ == '__main__':
    # Quick CartPole sanity sweep over gamma for the torch VPG implementation.
    bench = ExperimentGrid(name='vpg-torch-cart-bench')
    bench.add('env_name', 'CartPole-v0')
    bench.add('seed', [0])
    bench.add('epochs', 2)
    bench.add('steps_per_epoch', 100)
    bench.add('gamma', [0, 0.5, 1])
    bench.add('ac_kwargs:hidden_sizes', [(32,), (64, 64)], 'hid')
    bench.add('ac_kwargs:activation', [torch.nn.Tanh], '')
    bench.run(vpg_pytorch, num_cpu=4)
# NOTE(review): collapsed fragment cut mid-statement — it opens with the tail
# of a parser.add_argument('--hidden_sizes', ...) call whose start is not
# visible here. The rest configures a superposition-SAC (psp) run on the
# Meta-World MT10 helper env; TASK_HORIZON / PATHS_PER_TASK / NUM_TASKS are
# constants defined earlier, outside this view.
nargs='+', default=(160, 160, 160, 160, 160, 160)) args = parser.parse_args() hidden_sizes_name = '_'.join([str(num) for num in args.hidden_sizes]) #eg = ExperimentGrid(name='superpos_sac-MT10_with_bias_%s_context_q_%s' % (args.psp_type, hidden_sizes_name)) eg = ExperimentGrid(name='TIMETEST') eg.add('env_name', 'MT10Helper-v0', '', True) eg.add('num_tasks', 10) eg.add('batch_size', 128) # This is per task, so real is 128 x 10 eg.add('psp_type', args.psp_type) eg.add('seed', [10 * i for i in range(args.num_runs)]) eg.add('epochs', 1000) eg.add('steps_per_epoch', TASK_HORIZON * PATHS_PER_TASK * NUM_TASKS) eg.add('update_after', TASK_HORIZON * NUM_TASKS * PATHS_PER_TASK) eg.add('lr', [3e-4]) eg.add('start_steps', TASK_HORIZON * PATHS_PER_TASK * NUM_TASKS) #eg.add('update_every', NUM_TASKS * ) eg.add('num_test_episodes', 10 * NUM_TASKS) eg.add('ac_kwargs:hidden_sizes', [tuple(args.hidden_sizes)], 'hid') eg.add('ac_kwargs:activation', [torch.nn.ReLU], '') eg.run(psp_sac_pytorch, num_cpu=args.cpu) #from metaworld.benchmarks import MT10 # #env_fn = lambda : MTEnv(MT10.get_train_tasks()) # #ac_kwargs = dict(hidden_sizes=[400,400], activation=torch.nn.ReLU) # #logger_kwargs = dict(output_dir='~/spinup/data/', exp_name='SAC_MT10') # #sac_pytorch(env_fn=env_fn, ac_kwargs=ac_kwargs, steps_per_epoch=128 * 10, epochs=1000, start_steps=1000, lr=3e-4, logger_kwargs=logger_kwargs)
# NOTE(review): collapsed fragment — the opening eg.add(...) calls up to the
# stray ''' are inside a triple-quoted (disabled) block whose opening quotes
# are not visible here; only the __main__ DDPG training script after the '''
# is live code. `ex_number` and `env_name` are defined earlier, outside this view.
eg.add('steps_per_epoch', 5000) # Use default hidden sizes in actor_critic function, comment below out eg.add('ac_kwargs:hidden_sizes', [(16,16)], 'hid') eg.add('ac_kwargs:activation', [tf.nn.relu], '') eg.run(algo[i], num_cpu=args.cpu) ''' #Training if __name__ == '__main__': import argparse parser = argparse.ArgumentParser() parser.add_argument('--cpu', type=int, default=1) parser.add_argument('--num_runs', type=int, default=1) args = parser.parse_args() eg = ExperimentGrid(name=ex_number+'_ddpg_100ep') eg.add('env_name', env_name, '', True) eg.add('seed', [10*i for i in range(args.num_runs)]) eg.add('epochs', 100) #eg.add('steps_per_epoch', 4000) eg.add('max_ep_len', 1500) eg.add('ac_kwargs:activation', [tf.nn.relu], '') eg.add('ac_kwargs:hidden_sizes', [(64,64)], 'hid') eg.run(ddpg, num_cpu=args.cpu)
from spinup.utils.run_utils import ExperimentGrid
from spinup import ppo
import torch

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--cpu', type=int, default=4)
    parser.add_argument('--num_runs', type=int, default=3)
    args = parser.parse_args()

    # PPO benchmark on CartPole: seeds x architectures x activations.
    bench = ExperimentGrid(name='ppo-bench')
    bench.add('env_name', 'CartPole-v0', '', True)
    bench.add('seed', [10 * run for run in range(args.num_runs)])
    bench.add('epochs', 10)
    bench.add('steps_per_epoch', 4000)
    bench.add('ac_kwargs:hidden_sizes', [(32,), (64, 64)], 'hid')
    bench.add('ac_kwargs:activation', [torch.tanh, torch.relu], '')
    bench.run(ppo, num_cpu=args.cpu)
from spinup.utils.run_utils import ExperimentGrid
from spinup import ppo_tf1
import tensorflow as tf

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--cpu', type=int, default=4)
    parser.add_argument('--num_runs', type=int, default=3)
    args = parser.parse_args()

    # TF1 PPO benchmark on CartPole: seeds x architectures x activations.
    bench = ExperimentGrid(name='ppo-tf1-bench')
    bench.add('env_name', 'CartPole-v0', '', True)
    bench.add('seed', [10 * run for run in range(args.num_runs)])
    bench.add('epochs', 10)
    bench.add('steps_per_epoch', 4000)
    bench.add('ac_kwargs:hidden_sizes', [(32,), (64, 64)], 'hid')
    bench.add('ac_kwargs:activation', [tf.tanh, tf.nn.relu], '')
    bench.run(ppo_tf1, num_cpu=args.cpu)
# NOTE(review): the three collapsed lines below are one function,
# parse_and_execute_grid_search(cmd, args): it resolves the algorithm name to
# a backend-specific spinup function, handles --help, parses the raw CLI arg
# list into a {flag: [values]} dict in several passes (empty flags become
# True, --key[kn] declares a shorthand, SUBSTITUTIONS aliases are renamed,
# RUN_KEYS are split off as run kwargs), validates num_cpu/env_name, and then
# builds and runs the ExperimentGrid. It is kept byte-identical because the
# collapsed formatting makes the exact whitespace of the dedent()-ed error
# string unrecoverable. NOTE: `eval` on cmd/args executes arbitrary input —
# acceptable only for a trusted local CLI. Bare `except:` in process() is
# deliberate best-effort ("keep the arg as a string if it doesn't eval").
def parse_and_execute_grid_search(cmd, args): """Interprets algorithm name and cmd line args into an ExperimentGrid.""" if cmd in BASE_ALGO_NAMES: backend = DEFAULT_BACKEND[cmd] print('\n\nUsing default backend (%s) for %s.\n'%(backend, cmd)) cmd = cmd + '_' + backend algo = eval('spinup.'+cmd) # Before all else, check to see if any of the flags is 'help'. valid_help = ['--help', '-h', 'help'] if any([arg in valid_help for arg in args]): print('\n\nShowing docstring for spinup.'+cmd+':\n') print(algo.__doc__) sys.exit() def process(arg): # Process an arg by eval-ing it, so users can specify more # than just strings at the command line (eg allows for # users to give functions as args). try: return eval(arg) except: return arg # Make first pass through args to build base arg_dict. Anything # with a '--' in front of it is an argument flag and everything after, # until the next flag, is a possible value. arg_dict = dict() for i, arg in enumerate(args): assert i > 0 or '--' in arg, \ friendly_err("You didn't specify a first flag.") if '--' in arg: arg_key = arg.lstrip('-') arg_dict[arg_key] = [] else: arg_dict[arg_key].append(process(arg)) # Make second pass through, to catch flags that have no vals. # Assume such flags indicate that a boolean parameter should have # value True. for k,v in arg_dict.items(): if len(v) == 0: v.append(True) # Third pass: check for user-supplied shorthands, where a key has # the form --keyname[kn]. The thing in brackets, 'kn', is the # shorthand. NOTE: modifying a dict while looping through its # contents is dangerous, and breaks in 3.6+. We loop over a fixed list # of keys to avoid this issue. given_shorthands = dict() fixed_keys = list(arg_dict.keys()) for k in fixed_keys: p1, p2 = k.find('['), k.find(']') if p1 >= 0 and p2 >= 0: # Both '[' and ']' found, so shorthand has been given k_new = k[:p1] shorthand = k[p1+1:p2] given_shorthands[k_new] = shorthand arg_dict[k_new] = arg_dict[k] del arg_dict[k] # Penultimate pass: sugar. 
Allow some special shortcuts in arg naming, # eg treat "env" the same as "env_name". This is super specific # to Spinning Up implementations, and may be hard to maintain. # These special shortcuts are described by SUBSTITUTIONS. for special_name, true_name in SUBSTITUTIONS.items(): if special_name in arg_dict: # swap it in arg dict arg_dict[true_name] = arg_dict[special_name] del arg_dict[special_name] if special_name in given_shorthands: # point the shortcut to the right name given_shorthands[true_name] = given_shorthands[special_name] del given_shorthands[special_name] # Final pass: check for the special args that go to the 'run' command # for an experiment grid, separate them from the arg dict, and make sure # that they have unique values. The special args are given by RUN_KEYS. run_kwargs = dict() for k in RUN_KEYS: if k in arg_dict: val = arg_dict[k] assert len(val) == 1, \ friendly_err("You can only provide one value for %s."%k) run_kwargs[k] = val[0] del arg_dict[k] # Determine experiment name. If not given by user, will be determined # by the algorithm name. if 'exp_name' in arg_dict: assert len(arg_dict['exp_name']) == 1, \ friendly_err("You can only provide one value for exp_name.") exp_name = arg_dict['exp_name'][0] del arg_dict['exp_name'] else: exp_name = 'cmd_' + cmd # Make sure that if num_cpu > 1, the algorithm being used is compatible # with MPI. if 'num_cpu' in run_kwargs and not(run_kwargs['num_cpu'] == 1): assert cmd in add_with_backends(MPI_COMPATIBLE_ALGOS), \ friendly_err("This algorithm can't be run with num_cpu > 1.") # Special handling for environment: make sure that env_name is a real, # registered gym environment. valid_envs = [e.id for e in list(gym.envs.registry.all())] assert 'env_name' in arg_dict, \ friendly_err("You did not give a value for --env_name! Add one and try again.") for env_name in arg_dict['env_name']: err_msg = dedent(""" %s is not registered with Gym. 
Recommendations: * Check for a typo (did you include the version tag?) * View the complete list of valid Gym environments at https://gym.openai.com/envs/ """%env_name) assert env_name in valid_envs, err_msg # Construct and execute the experiment grid. eg = ExperimentGrid(name=exp_name) for k,v in arg_dict.items(): eg.add(k, v, shorthand=given_shorthands.get(k)) eg.run(algo, **run_kwargs)
# NOTE(review): collapsed fragment — the opening eg.add(...) calls up to the
# stray ''' are inside a triple-quoted (disabled) block whose opening quotes
# are not visible here; only the __main__ TRPO-on-Acrobot script after the
# ''' is live code, sweeping six small hidden-size configurations.
eg.add('epochs', 10) eg.add('steps_per_epoch', 5000) # Use default hidden sizes in actor_critic function, comment below out eg.add('ac_kwargs:hidden_sizes', [(32,)], 'hid') eg.add('ac_kwargs:activation', [tf.nn.relu], '') eg.run(algo[i], num_cpu=args.cpu) ''' #Training if __name__ == '__main__': import argparse parser = argparse.ArgumentParser() parser.add_argument('--cpu', type=int, default=1) parser.add_argument('--num_runs', type=int, default=1) args = parser.parse_args() eg = ExperimentGrid(name='ex4_trpo_30ep') eg.add('env_name', 'Acrobot-v1', '', True) eg.add('seed', [10*i for i in range(args.num_runs)]) eg.add('epochs', 30) #eg.add('steps_per_epoch', 4000) eg.add('max_ep_len', 1500) eg.add('ac_kwargs:activation', [tf.nn.relu], '') eg.add('ac_kwargs:hidden_sizes', [(16,),(16,16),(8,),(8,8),(4,),(4,4)], 'hid') eg.run(trpo, num_cpu=args.cpu)