def main():
    args = parser.parse_args()

    env = GridWorld(
        display=args.render,
        obstacles=[np.asarray([1, 2])],
        step_wrapper=utils.step_wrapper,
        reset_wrapper=utils.reset_wrapper,
        stepReward=0.01,
    )

    model = ActorCritic(
        env,
        gamma=0.99,
        log_interval=100,
        max_episodes=10 ** 4,
        max_ep_length=30,
    )

    if args.policy_path is not None:
        model.policy.load(args.policy_path)

    if not args.play:
        model.train()

        if not args.dont_save:
            model.policy.save('./saved-models/')

    if args.play:
        env.tickSpeed = 15
        assert args.policy_path is not None, 'pass a policy to play from!'

        model.generate_trajectory(args.num_trajs, './trajs/ac_gridworld/')
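# The module-level `parser` used by the script above is defined elsewhere in the
# repository. The sketch below is a hypothetical reconstruction inferred only from
# the attributes accessed in main() (render, policy_path, play, dont_save,
# num_trajs); flag names, types, and defaults are assumptions, not the original
# definitions.
import argparse

parser = argparse.ArgumentParser(description="Actor-critic on the obstacle GridWorld")
parser.add_argument("--render", action="store_true", help="display the environment")
parser.add_argument("--policy-path", type=str, default=None, help="path to a saved policy to load")
parser.add_argument("--play", action="store_true", help="play trajectories instead of training")
parser.add_argument("--dont-save", action="store_true", help="skip saving the trained policy")
parser.add_argument("--num-trajs", type=int, default=10, help="trajectories to generate in play mode")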
def main():
    fe = DummyFeatureExtractor()

    env = EwapGridworld(
        ped_id=1,
        vision_radius=4,
    )

    rl = ActorCritic(env, feat_extractor=fe, max_episodes=10 ** 4)
    rl.train()
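# `DummyFeatureExtractor` is imported from elsewhere in the repository and is not
# shown here. Purely as an illustration of the extract_features() interface that
# the RL methods in these scripts rely on, a hypothetical pass-through extractor
# could look like this (the real class may differ):
import numpy as np

class PassThroughFeatureExtractor:
    """Hypothetical stand-in: returns the raw observation as a flat float array."""

    def extract_features(self, state):
        # No learned or hand-crafted features; just flatten the observation.
        return np.asarray(state, dtype=np.float32).ravel()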
def main(): """Runs experiment""" args = parser.parse_args() utils.seed_all(args.seed) ts = time.time() st = datetime.datetime.fromtimestamp(ts).strftime("%Y-%m-%d_%H:%M:%S") to_save = pathlib.Path(args.save_dir) dir_name = args.save_folder + "_" + st to_save = to_save / dir_name to_save = str(to_save.resolve()) log_file = "Experiment_info.txt" experiment_logger = Logger(to_save, log_file) experiment_logger.log_header("Arguments for the experiment :") experiment_logger.log_info(vars(args)) feat_ext = fe_utils.load_feature_extractor(args.feat_extractor, obs_width=args.pedestrian_width, agent_width=args.pedestrian_width) experiment_logger.log_header("Parameters of the feature extractor :") experiment_logger.log_info(feat_ext.__dict__) env = GridWorld( display=args.render, is_random=False, rows=576, cols=720, agent_width=args.pedestrian_width, step_size=2, obs_width=args.pedestrian_width, width=10, subject=args.subject, annotation_file=args.annotation_file, goal_state=None, step_wrapper=utils.step_wrapper, seed=args.seed, replace_subject=args.replace_subject, segment_size=args.segment_size, external_control=True, continuous_action=False, reset_wrapper=utils.reset_wrapper, consider_heading=True, is_onehot=False, show_orientation=True, show_comparison=True, show_trail=True, ) experiment_logger.log_header("Environment details :") experiment_logger.log_info(env.__dict__) if args.rl_method == "ActorCritic": rl_method = ActorCritic( env, feat_extractor=feat_ext, gamma=1, log_interval=args.rl_log_intervals, max_episode_length=args.rl_ep_length, hidden_dims=args.policy_net_hidden_dims, save_folder=to_save, lr=args.lr_rl, max_episodes=args.rl_episodes, ) if args.rl_method == "SAC": if not env.continuous_action: print("The action space needs to be continuous for SAC to work.") exit() replay_buffer = ReplayBuffer(args.replay_buffer_size) rl_method = SoftActorCritic( env, replay_buffer, feat_ext, play_interval=500, learning_rate=args.lr_rl, buffer_sample_size=args.replay_buffer_sample_size, ) if args.rl_method == "discrete_QSAC": if not isinstance(env.action_space, gym.spaces.Discrete): print("discrete SAC requires a discrete action space to work.") exit() replay_buffer = ReplayBuffer(args.replay_buffer_size) rl_method = QSAC( env, replay_buffer, feat_ext, args.replay_buffer_sample_size, learning_rate=args.lr_rl, entropy_tuning=True, entropy_target=args.entropy_target, play_interval=args.play_interval, tau=args.tau, gamma=args.gamma, ) if args.rl_method == "discrete_SAC": if not isinstance(env.action_space, gym.spaces.Discrete): print("discrete SAC requires a discrete action space to work.") exit() replay_buffer = ReplayBuffer(args.replay_buffer_size) rl_method = DiscreteSAC( env, replay_buffer, feat_ext, args.replay_buffer_sample_size, learning_rate=args.lr_rl, entropy_tuning=True, entropy_target=args.entropy_target, play_interval=args.play_interval, tau=args.tau, gamma=args.gamma, ) print("RL method initialized.") print(rl_method.policy) if args.policy_path is not None: rl_method.policy.load(args.policy_path) experiment_logger.log_header("Details of the RL method :") experiment_logger.log_info(rl_method.__dict__) expert_trajectories = read_expert_trajectories(args.exp_trajectory_path) irl_method = PerTrajGCL( rl=rl_method, env=env, expert_trajectories=expert_trajectories, learning_rate=args.lr_irl, l2_regularization=args.regularizer, save_folder=to_save, saving_interval=args.saving_interval, ) print("IRL method intialized.") print(irl_method.reward_net) experiment_logger.log_header("Details of the IRL 
method :") experiment_logger.log_info(irl_method.__dict__) irl_method.pre_train( args.pre_train_iterations, args.num_expert_samples, account_for_terminal_state=args.account_for_terminal_state, gamma=args.gamma, ) rl_method.train( args.pre_train_rl_iterations, args.rl_ep_length, reward_network=irl_method.reward_net, ) # save intermediate RL result rl_method.policy.save(to_save + "/policy") irl_method.train( args.irl_iterations, args.rl_episodes, args.rl_ep_length, args.rl_ep_length, reset_training=args.reset_training, account_for_terminal_state=args.account_for_terminal_state, gamma=args.gamma, stochastic_sampling=args.stochastic_sampling, num_expert_samples=args.num_expert_samples, num_policy_samples=args.num_policy_samples, ) metric_applicator = metric_utils.LTHMP2020() metric_results = metric_utils.collect_trajectories_and_metrics( env, feat_ext, rl_method.policy, len(expert_trajectories), args.rl_ep_length, metric_applicator, disregard_collisions=True, ) pd_metrics = pd.DataFrame(metric_results).T pd_metrics = pd_metrics.applymap(lambda x: x[0]) pd_metrics.to_pickle(to_save + "/metrics.pkl") with open(to_save + "/rl_data.csv", "a") as f: rl_method.data_table.write_csv(f) with open(to_save + "/irl_data.csv", "a") as f: irl_method.data_table.write_csv(f) with open(to_save + "/pre_irl_data.csv", "a") as f: irl_method.pre_data_table.write_csv(f)
def main():
    env = gym.make('CartPole-v0')
    model = ActorCritic(env, gamma=0.99, log_interval=1, max_ep_length=200)
    model.train()
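# A quick environment smoke test that can be run before handing the env to
# ActorCritic. This sketch assumes the classic gym (pre-0.26) step API implied by
# 'CartPole-v0' above: reset() returns an observation and step() returns
# (obs, reward, done, info). The helper name and step count are illustrative.
import gym

def smoke_test(env_name="CartPole-v0", steps=100):
    env = gym.make(env_name)
    obs = env.reset()
    total_reward = 0.0
    for _ in range(steps):
        # Random actions; we only check that the interaction loop runs.
        obs, reward, done, info = env.step(env.action_space.sample())
        total_reward += reward
        if done:
            obs = env.reset()
    env.close()
    return total_reward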
def main():
    ##### for the logger
    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime("%Y-%m-%d %H:%M:%S")
    ###################

    args = parser.parse_args()

    seed_all(args.seed)

    if args.on_server:
        matplotlib.use("Agg")
        # pygame without monitor
        os.environ["SDL_VIDEODRIVER"] = "dummy"

    from matplotlib import pyplot as plt

    mp.set_start_method("spawn")

    from rlmethods.b_actor_critic import ActorCritic
    from rlmethods.soft_ac import SoftActorCritic, QSoftActorCritic
    from rlmethods.rlutils import ReplayBuffer
    from envs.gridworld_drone import GridWorldDrone
    from featureExtractor.drone_feature_extractor import (
        DroneFeatureSAM1,
        DroneFeatureOccup,
        DroneFeatureRisk,
        DroneFeatureRisk_v2,
        DroneFeatureRisk_speed,
        DroneFeatureRisk_speedv2,
        VasquezF1,
        VasquezF2,
        VasquezF3,
        Fahad,
        GoalConditionedFahad,
    )
    from featureExtractor.gridworld_featureExtractor import (
        FrontBackSide,
        LocalGlobal,
        OneHot,
        SocialNav,
        FrontBackSideSimple,
    )

    save_folder = None

    if not args.dont_save and not args.play:
        if not args.save_folder:
            print("Provide save folder.")
            exit()

        policy_net_dims = "-policy_net-"
        for dim in args.policy_net_hidden_dims:
            policy_net_dims += str(dim)
            policy_net_dims += "-"

        reward_net_dims = "-reward_net-"
        for dim in args.reward_net_hidden_dims:
            reward_net_dims += str(dim)
            reward_net_dims += "-"

        save_folder = (
            "./results/"
            + args.save_folder
            + st
            + args.feat_extractor
            + "-seed-"
            + str(args.seed)
            + policy_net_dims
            + reward_net_dims
            + "-total-ep-"
            + str(args.total_episodes)
            + "-max-ep-len-"
            + str(args.max_ep_length)
        )

        experiment_logger = Logger(save_folder, "experiment_info.txt")
        experiment_logger.log_header("Arguments for the experiment :")
        repo = git.Repo(search_parent_directories=True)
        experiment_logger.log_info({'From branch : ': repo.active_branch.name})
        experiment_logger.log_info({'Commit number : ': repo.head.object.hexsha})
        experiment_logger.log_info(vars(args))

    window_size = 9
    step_size = 2
    agent_width = 10
    obs_width = 10
    grid_size = 10

    feat_ext = None

    # initialize the feature extractor to be used
    if args.feat_extractor == "Onehot":
        feat_ext = OneHot(grid_rows=10, grid_cols=10)

    if args.feat_extractor == "SocialNav":
        feat_ext = SocialNav(fieldList=["agent_state", "goal_state"])

    if args.feat_extractor == "FrontBackSideSimple":
        feat_ext = FrontBackSideSimple(
            thresh1=1,
            thresh2=2,
            thresh3=3,
            thresh4=4,
            step_size=step_size,
            agent_width=agent_width,
            obs_width=obs_width,
        )

    if args.feat_extractor == "LocalGlobal":
        feat_ext = LocalGlobal(
            window_size=11,
            grid_size=grid_size,
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
        )

    if args.feat_extractor == "DroneFeatureSAM1":
        feat_ext = DroneFeatureSAM1(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            thresh1=15,
            thresh2=30,
        )

    if args.feat_extractor == "DroneFeatureOccup":
        feat_ext = DroneFeatureOccup(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            window_size=window_size,
        )

    if args.feat_extractor == "DroneFeatureRisk":
        feat_ext = DroneFeatureRisk(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            show_agent_persp=False,
            thresh1=15,
            thresh2=30,
        )

    if args.feat_extractor == "DroneFeatureRisk_v2":
        feat_ext = DroneFeatureRisk_v2(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            show_agent_persp=False,
            thresh1=15,
            thresh2=30,
        )

    if args.feat_extractor == "DroneFeatureRisk_speed":
        feat_ext = DroneFeatureRisk_speed(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            show_agent_persp=False,
            return_tensor=False,
            thresh1=10,
            thresh2=15,
        )

    if args.feat_extractor == "DroneFeatureRisk_speedv2":
        feat_ext = DroneFeatureRisk_speedv2(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            show_agent_persp=False,
            return_tensor=False,
            thresh1=18,
            thresh2=30,
        )

    if args.feat_extractor == "VasquezF1":
        feat_ext = VasquezF1(agent_width * 6, 0.5, 1.0)

    if args.feat_extractor == "VasquezF2":
        feat_ext = VasquezF2(agent_width * 6, 0.5, 1.0)

    if args.feat_extractor == "VasquezF3":
        feat_ext = VasquezF3(agent_width)

    if args.feat_extractor == "Fahad":
        feat_ext = Fahad(36, 60, 0.5, 1.0)

    if args.feat_extractor == "GoalConditionedFahad":
        feat_ext = GoalConditionedFahad(36, 60, 0.5, 1.0)

    if feat_ext is None:
        print("Please enter proper feature extractor!")
        exit()

    # log feature extractor info
    if not args.dont_save and not args.play:
        experiment_logger.log_header("Parameters of the feature extractor :")
        experiment_logger.log_info(feat_ext.__dict__)

    # initialize the environment
    if args.replace_subject:
        replace_subject = True
    else:
        replace_subject = False

    env = GridWorldDrone(
        display=args.render,
        is_onehot=False,
        seed=args.seed,
        obstacles=None,
        show_trail=False,
        is_random=True,
        annotation_file=args.annotation_file,
        subject=args.subject,
        tick_speed=60,
        obs_width=10,
        step_size=step_size,
        agent_width=agent_width,
        replace_subject=replace_subject,
        segment_size=args.segment_size,
        external_control=True,
        step_reward=0.001,
        show_comparison=True,
        consider_heading=True,
        show_orientation=True,
        # rows=200, cols=200, width=grid_size)
        rows=576,
        cols=720,
        width=grid_size,
    )
    # env = gym.make('Acrobot-v1')

    # log environment info
    if not args.dont_save and not args.play:
        experiment_logger.log_header("Environment details :")
        experiment_logger.log_info(env.__dict__)

    # initialize RL
    if args.rl_method == "ActorCritic":
        model = ActorCritic(
            env,
            feat_extractor=feat_ext,
            gamma=1,
            log_interval=100,
            max_episode_length=args.max_ep_length,
            hidden_dims=args.policy_net_hidden_dims,
            save_folder=save_folder,
            lr=args.lr,
            entropy_coeff=args.entropy_coeff,
            max_episodes=args.total_episodes,
        )

    if args.rl_method == "SAC":
        replay_buffer = ReplayBuffer(args.replay_buffer_size)
        model = SoftActorCritic(
            env,
            replay_buffer,
            feat_ext,
            buffer_sample_size=args.replay_buffer_sample_size,
            entropy_tuning=True,
            play_interval=args.play_interval,
            entropy_target=args.entropy_target,
            gamma=args.gamma,
            learning_rate=args.lr,
        )

    if args.rl_method == "discrete_QSAC":
        replay_buffer = ReplayBuffer(args.replay_buffer_size)
        model = QSoftActorCritic(
            env,
            replay_buffer,
            feat_ext,
            buffer_sample_size=args.replay_buffer_sample_size,
            entropy_tuning=True,
            play_interval=args.play_interval,
            entropy_target=args.entropy_target,
            gamma=args.gamma,
            learning_rate=args.lr,
        )

    # log RL info
    if not args.dont_save and not args.play:
        experiment_logger.log_header("Details of the RL method :")
        experiment_logger.log_info(model.__dict__)

    if args.policy_path is not None:
        from debugtools import numericalSort

        policy_file_list = []
        reward_across_models = []
        # print(args.policy_path)
        if os.path.isfile(args.policy_path):
            policy_file_list.append(args.policy_path)
        if os.path.isdir(args.policy_path):
            policy_names = glob.glob(os.path.join(args.policy_path, "*.pt"))
            policy_file_list = sorted(policy_names, key=numericalSort)

        xaxis = np.arange(len(policy_file_list))

    if not args.play and not args.play_user:
        # no playing of any kind, so training
        if args.reward_path is None:
            if args.policy_path:
                model.policy.load(args.policy_path)

            if args.rl_method == "SAC" or args.rl_method == "discrete_QSAC":
                model.train(args.total_episodes, args.max_ep_length)
            else:
                model.train()
        else:
            from irlmethods.deep_maxent import RewardNet

            state_size = feat_ext.extract_features(env.reset()).shape[0]
            reward_net = RewardNet(state_size, args.reward_net_hidden_dims)
            reward_net.load(args.reward_path)
            print(next(reward_net.parameters()).is_cuda)
            model.train(reward_net=reward_net)

        if not args.dont_save:
            model.policy.save(save_folder + "/policy-models/")

    if args.play:
        # env.tickSpeed = 15
        from debugtools import compile_results

        xaxis = []
        counter = 1
        plt.figure(0)
        avg_reward_list = []
        frac_good_run_list = []
        print(policy_file_list)

        for policy_file in policy_file_list:
            print("Playing for policy :", policy_file)
            model.policy.load(policy_file)

            policy_folder = policy_file.strip().split("/")[0:-2]
            save_folder = ""
            for p in policy_folder:
                save_folder = save_folder + p + "/"

            print("The final save folder ", save_folder)
            # env.tickSpeed = 10
            assert args.policy_path is not None, "pass a policy to play from!"

            if args.exp_trajectory_path is not None:
                from irlmethods.irlUtils import calculate_expert_svf

                expert_svf = calculate_expert_svf(
                    args.exp_trajectory_path,
                    max_time_steps=args.max_ep_length,
                    feature_extractor=feat_ext,
                    gamma=1,
                )

            # reward_across_models.append(model.generate_trajectory(args.num_trajs, args.render))
            if args.exp_trajectory_path is None:
                if args.dont_save:
                    rewards, state_info, sub_info = model.generate_trajectory(
                        args.num_trajs, args.render
                    )
                else:
                    rewards, state_info, sub_info = model.generate_trajectory(
                        args.num_trajs,
                        args.render,
                        store_raw=args.store_raw_states,
                        path=save_folder + "/agent_generated_trajectories/",
                    )
            else:
                if args.dont_save:
                    rewards, state_info, sub_info = model.generate_trajectory(
                        args.num_trajs, args.render, expert_svf=expert_svf
                    )
                else:
                    rewards, state_info, sub_info = model.generate_trajectory(
                        args.num_trajs,
                        args.render,
                        path=save_folder + "/agent_generated_trajectories/",
                        expert_svf=expert_svf,
                    )

            avg_reward, good_run_frac = compile_results(
                rewards, state_info, sub_info
            )
            avg_reward_list.append(avg_reward)
            frac_good_run_list.append(good_run_frac)

        plt.plot(avg_reward_list, c="r")
        plt.plot(frac_good_run_list, c="g")
        plt.draw()
        plt.show()

    if args.play_user:
        env.tickSpeed = 200

        model.generate_trajectory_user(
            args.num_trajs, args.render, path="./user_generated_trajectories/"
        )
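# debugtools.numericalSort, used above as the sort key for checkpoint files, is
# defined elsewhere in the repository. A typical implementation (an assumption,
# not the repo's actual code) orders names like "policy_100.pt" after
# "policy_20.pt" by comparing the embedded integers numerically rather than
# lexicographically:
import re

_numbers = re.compile(r"(\d+)")

def numericalSort(value):
    # Split "policy_20.pt" into ['policy_', '20', '.pt'] and convert the digit
    # groups to ints so that sorted() compares them as numbers.
    parts = _numbers.split(value)
    parts[1::2] = map(int, parts[1::2])
    return parts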