# Ant navigation variant: PPO with a small CNN policy over camera observations.
# (Module-level imports of MPI, the baselines utilities, and the Gibson env/policy
# modules are assumed, as in the original training scripts.)
def train(num_timesteps, seed):
    rank = MPI.COMM_WORLD.Get_rank()
    sess = U.single_threaded_session()
    sess.__enter__()

    # Only rank 0 writes full logs; the other MPI workers stay quiet.
    if rank == 0:
        logger.configure()
    else:
        logger.configure(format_strs=[])

    # Give each MPI worker its own seed.
    workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank()
    set_global_seeds(workerseed)

    config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               '..', 'configs', 'ant_navigate.yaml')
    print(config_file)
    env = AntNavigateEnv(config=config_file)

    def policy_fn(name, ob_space, ac_space):
        # return mlp_policy.MlpPolicy(name=name, ob_space=sensor_space, ac_space=ac_space, hid_size=64, num_hid_layers=2)
        return cnn_policy.CnnPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
                                    save_per_acts=10000, session=sess, kind='small')

    env.seed(workerseed)
    gym.logger.setLevel(logging.WARN)

    pposgd_simple.learn(env, policy_fn,
                        max_timesteps=int(num_timesteps * 1.1 * 5),
                        timesteps_per_actorbatch=6000,
                        clip_param=0.2, entcoeff=0.00,
                        optim_epochs=4, optim_stepsize=1e-4, optim_batchsize=64,
                        gamma=0.99, lam=0.95,
                        schedule='linear',
                        save_per_acts=500)
    env.close()
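# A minimal entry-point sketch for the ant variant above. The timestep budget and
# seed below are illustrative assumptions, not values taken from the original script.
if __name__ == '__main__':
    train(num_timesteps=1000000, seed=5)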
# Husky navigation variant: discrete actions, CNN policy (or MLP in SENSOR mode),
# with optional restore from a previously saved TensorFlow meta-graph checkpoint.
def train(num_timesteps, seed):
    rank = MPI.COMM_WORLD.Get_rank()
    # sess = U.single_threaded_session()
    sess = utils.make_gpu_session(args.num_gpu)
    sess.__enter__()

    # Optionally restore a saved graph and its latest checkpoint before training.
    if args.meta != "":
        saver = tf.train.import_meta_graph(args.meta)
        saver.restore(sess, tf.train.latest_checkpoint('./'))

    if rank == 0:
        logger.configure()
    else:
        logger.configure(format_strs=[])
    workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank()
    set_global_seeds(workerseed)
    use_filler = not args.disable_filler

    config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               '..', 'configs', 'husky_navigate.yaml')
    print(config_file)
    raw_env = HuskyNavigateEnv(is_discrete=True, gpu_count=args.gpu_count, config=config_file)

    # def policy_fn(name, ob_space, sensor_space, ac_space):
    def policy_fn(name, ob_space, ac_space):
        if args.mode == "SENSOR":
            return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
                                        hid_size=64, num_hid_layers=2)
        else:
            # return fuse_policy.FusePolicy(name=name, ob_space=ob_space, sensor_space=sensor_space, ac_space=ac_space, save_per_acts=10000, session=sess)
            return cnn_policy.CnnPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
                                        save_per_acts=10000, session=sess, kind='small')

    env = Monitor(raw_env, logger.get_dir() and osp.join(logger.get_dir(), str(rank)))
    env.seed(workerseed)
    gym.logger.setLevel(logging.WARN)

    pposgd_simple.learn(env, policy_fn,
                        max_timesteps=int(num_timesteps * 1.1),
                        timesteps_per_actorbatch=3000,
                        clip_param=0.2, entcoeff=0.01,
                        optim_epochs=4, optim_stepsize=3e-3, optim_batchsize=64,
                        gamma=0.996, lam=0.95,
                        schedule='linear',
                        save_name="husky_navigate_ppo_{}".format(args.mode),
                        save_per_acts=50,
                        sensor=args.mode == "SENSOR",
                        reload_name=args.reload_name)
    env.close()
# Current Husky variant: the training horizon comes from the environment config,
# and the observation mode selects between MLP, fused, ResNet, ODE, and CNN policies.
def train(seed):
    rank = MPI.COMM_WORLD.Get_rank()
    sess = utils.make_gpu_session(args.num_gpu)
    sess.__enter__()

    # Optionally restore a saved graph and its latest checkpoint before training.
    if args.meta != "":
        saver = tf.train.import_meta_graph(args.meta)
        saver.restore(sess, tf.train.latest_checkpoint('./'))

    if rank == 0:
        logger.configure()
    else:
        logger.configure(format_strs=[])
    workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank()
    set_global_seeds(workerseed)
    use_filler = not args.disable_filler

    config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               '..', 'configs', 'config_husky.yaml')
    print(config_file)
    raw_env = HuskyNavigateEnv(gpu_idx=args.gpu_idx, config=config_file)

    # Training horizon is defined in the environment config:
    # n_step steps per episode, n_episode episodes per batch, n_iter iterations.
    step = raw_env.config['n_step']
    episode = raw_env.config['n_episode']
    iteration = raw_env.config['n_iter']
    elm_policy = raw_env.config['elm_active']
    num_timesteps = step * episode * iteration
    tpa = step * episode  # timesteps per actor batch

    if args.mode == "SENSOR":
        # Blind mode: MLP policy over sensor observations only
        def policy_fn(name, ob_space, ac_space):
            return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
                                        hid_size=128, num_hid_layers=4, elm_mode=elm_policy)
    elif args.mode == "DEPTH" or args.mode == "RGB":
        # Fuse the sensor space with the image space
        def policy_fn(name, ob_space, sensor_space, ac_space):
            return fuse_policy.FusePolicy(name=name, ob_space=ob_space, sensor_space=sensor_space,
                                          ac_space=ac_space, save_per_acts=10000,
                                          hid_size=128, num_hid_layers=4,
                                          session=sess, elm_mode=elm_policy)
    elif args.mode == "RESNET":
        def policy_fn(name, ob_space, sensor_space, ac_space):
            return resnet_policy.ResPolicy(name=name, ob_space=ob_space, sensor_space=sensor_space,
                                           ac_space=ac_space, save_per_acts=10000,
                                           hid_size=128, num_hid_layers=4,
                                           session=sess, elm_mode=elm_policy)
    elif args.mode == "ODE":
        def policy_fn(name, ob_space, sensor_space, ac_space):
            return ode_policy.ODEPolicy(name=name, ob_space=ob_space, sensor_space=sensor_space,
                                        ac_space=ac_space, save_per_acts=10000,
                                        hid_size=128, num_hid_layers=4,
                                        session=sess, elm_mode=elm_policy)
    else:
        # Image-only CNN policy
        def policy_fn(name, ob_space, ac_space):
            return cnn_policy.CnnPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
                                        session=sess, kind='small')

    env = Monitor(raw_env, logger.get_dir() and osp.join(logger.get_dir(), str(rank)))
    env.seed(workerseed)
    gym.logger.setLevel(logging.WARN)

    # NOTE: the reload checkpoint is hard-coded here and overrides any --reload_name argument.
    args.reload_name = '/home/berk/PycharmProjects/Gibson_Exercise/gibson/utils/models/PPO_ODE_2020-12-05_500_50_137_150.model'
    print(args.reload_name)

    # Camera-based modes use the fused PPO trainer; the others use the simple trainer.
    modes_camera = ["DEPTH", "RGB", "RESNET", "ODE"]
    if args.mode in modes_camera:
        pposgd_fuse.learn(env, policy_fn,
                          max_timesteps=int(num_timesteps * 1.1),
                          timesteps_per_actorbatch=tpa,
                          clip_param=0.2, entcoeff=0.03, vfcoeff=0.01,
                          optim_epochs=4, optim_stepsize=1e-3, optim_batchsize=64,
                          gamma=0.99, lam=0.95,
                          schedule='linear',
                          save_name="PPO_{}_{}_{}_{}_{}".format(
                              args.mode, datetime.date.today(), step, episode, iteration),
                          save_per_acts=15,
                          reload_name=args.reload_name)
    else:
        sensor = (args.mode == "SENSOR")
        pposgd_simple.learn(env, policy_fn,
                            max_timesteps=int(num_timesteps * 1.1),
                            timesteps_per_actorbatch=tpa,
                            clip_param=0.2, entcoeff=0.03, vfcoeff=0.01,
                            optim_epochs=4, optim_stepsize=1e-3, optim_batchsize=64,
                            gamma=0.996, lam=0.95,
                            schedule='linear',
                            save_name="PPO_{}_{}_{}_{}_{}".format(
                                args.mode, datetime.date.today(), step, episode, iteration),
                            save_per_acts=15,
                            sensor=sensor,
                            reload_name=args.reload_name)
    env.close()
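# The Husky variants above read a module-level `args` namespace (mode, num_gpu, meta,
# disable_filler, gpu_idx, gpu_count, reload_name). A sketch of how it could be built
# with argparse follows; the flag names and defaults are assumptions inferred from the
# attributes the functions access, not the original launcher, and the Gibson/baselines
# imports are assumed to sit at the top of the module as in the original scripts.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', type=str, default="RGB",
                        help='SENSOR, DEPTH, RGB, RESNET, ODE, or anything else for the plain CNN policy')
    parser.add_argument('--num_gpu', type=int, default=1)
    parser.add_argument('--gpu_idx', type=int, default=0)
    parser.add_argument('--gpu_count', type=int, default=1)
    parser.add_argument('--meta', type=str, default="",
                        help='path to a TensorFlow .meta graph to restore before training')
    parser.add_argument('--reload_name', type=str, default=None,
                        help='checkpoint to resume from (overridden inside train())')
    parser.add_argument('--disable_filler', action='store_true', default=False)
    parser.add_argument('--seed', type=int, default=0)
    args = parser.parse_args()

    train(seed=args.seed)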