Example #1
0
def main():
    """Train an actor-critic agent on the EWAP gridworld.

    Uses a dummy feature extractor and a single tracked pedestrian.
    """
    extractor = DummyFeatureExtractor()
    environment = EwapGridworld(ped_id=1, vision_radius=4)

    agent = ActorCritic(
        environment,
        feat_extractor=extractor,
        max_episodes=10 ** 4,
    )
    agent.train()
Example #2
0
def main():
    """Train an actor-critic agent on a gridworld, or replay a saved policy.

    CLI flags control rendering, policy load/save and playback mode.
    """
    args = parser.parse_args()

    environment = GridWorld(
        display=args.render,
        obstacles=[np.asarray([1, 2])],
        step_wrapper=utils.step_wrapper,
        reset_wrapper=utils.reset_wrapper,
        stepReward=.01,
    )

    agent = ActorCritic(
        environment,
        gamma=0.99,
        log_interval=100,
        max_episodes=10 ** 4,
        max_ep_length=30,
    )

    # Resume from a previously saved policy when one is supplied.
    if args.policy_path is not None:
        agent.policy.load(args.policy_path)

    if not args.play:
        agent.train()
        if not args.dont_save:
            agent.policy.save('./saved-models/')

    if args.play:
        environment.tickSpeed = 15
        assert args.policy_path is not None, 'pass a policy to play from!'
        agent.generate_trajectory(args.num_trajs, './trajs/ac_gridworld/')
Example #3
0
def main():
    """Train an actor-critic agent with multiprocess workers on a headless gridworld."""
    args = parser.parse_args()
    # 'spawn' is required so worker processes get a clean interpreter state.
    mp.set_start_method('spawn')

    environment = GridWorld(
        display=False,
        obstacles=[np.asarray([1, 2])],
        reset_wrapper=reset_wrapper,
        step_wrapper=step_wrapper,
    )

    agent = ActorCritic(
        environment,
        gamma=0.99,
        log_interval=100,
        max_episodes=10 ** 4,
        max_ep_length=30,
    )

    if args.policy_path is not None:
        agent.policy.load(args.policy_path)

    if not args.play:
        agent.train_mp(n_jobs=4)
        if not args.dont_save:
            agent.policy.save('./saved-models/')

    if args.play:
        assert args.policy_path is not None, 'pass a policy to play from!'
        agent.generate_trajectory(args.num_trajs, './trajs/mp_gridworld/')
Example #4
0
def main():
    """Train (or replay) an actor-critic agent on a gridworld, optionally
    shaping training with a pre-trained reward network from IRL.

    CLI flags control rendering, policy load/save, reward-network usage
    and trajectory playback.
    """
    args = parser.parse_args()

    # The clockless variant skips the pygame clock when not rendering.
    if args.render:
        from envs.gridworld import GridWorld
    else:
        from envs.gridworld_clockless import GridWorldClockless as GridWorld

    env = GridWorld(display=args.render,
                    obstacles=[np.asarray([1, 2])],
                    goal_state=np.asarray([5, 5]),
                    step_wrapper=step_wrapper,
                    reset_wrapper=reset_wrapper,
                    seed=3)
    # Stop training early once the recent loss window stabilizes.
    loss_t = LBT(list_size=100, stop_threshold=1.5, log_interval=100)
    model = ActorCritic(env,
                        gamma=0.99,
                        log_interval=200,
                        max_episodes=5000,
                        max_ep_length=20,
                        termination=loss_t)

    if args.policy_path is not None:
        model.policy.load(args.policy_path)

    if args.reward_net is not None:
        # Reward network input size is the flattened state dimension.
        reward_net = RewardNet(env.reset().shape[0])
        reward_net.to('cuda')
        # BUGFIX: load the checkpoint supplied via --reward-net instead of a
        # hard-coded './saved-models-rewards/0.pt' path that ignored the flag.
        reward_net.load(args.reward_net)
        reward_net.eval()
    else:
        reward_net = None

    if not args.play:
        model.train_mp(n_jobs=4, reward_net=reward_net, irl=args.irl)

        if not args.dont_save:
            model.policy.save('./saved-models/')

    if args.play:
        env.tickSpeed = 15
        assert args.policy_path is not None, 'pass a policy to play from!'

        model.generate_trajectory(args.num_trajs, './trajs/ac_gridworld/')
Example #5
0
def main():
    """Run Deep Maximum-Entropy IRL on MountainCar-v0 with an actor-critic
    RL method as the inner policy-optimization loop.

    Parses CLI arguments, prepares timestamped result folders and logging,
    builds the feature extractor, environment, RL and IRL methods, then
    trains the reward network.
    """
    args = parser.parse_args()

    if args.on_server:
        # matplotlib without monitor
        matplotlib.use('Agg')

        # pygame without monitor
        os.environ['SDL_VIDEODRIVER'] = 'dummy'

    # Timestamp makes the per-run result folders unique.
    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')

    parent_dir = './results/' + str(args.save_folder) + st
    to_save = './results/'+str(args.save_folder)+st+'-reg-'+str(args.regularizer)+ \
              '-seed-'+str(args.seed)+'-lr-'+str(args.lr)
    log_file = 'Experiment_info.txt'
    experiment_logger = Logger(to_save, log_file)

    experiment_logger.log_header('Arguments for the experiment :')
    experiment_logger.log_info(vars(args))

    # Deferred imports so the matplotlib backend selection above takes effect.
    from rlmethods.rlutils import LossBasedTermination
    from rlmethods.b_actor_critic import ActorCritic
    from irlmethods.deep_maxent import DeepMaxEnt
    import irlmethods.irlUtils as irlUtils

    # Initialize the feature extractor requested on the command line.
    feat_ext = None
    if args.feat_extractor == 'MCFeatures':
        feat_ext = MCFeatures(args.state_discretization[0],
                              args.state_discretization[1])

    if args.feat_extractor == 'MCFeaturesOnehot':
        feat_ext = MCFeaturesOnehot(args.state_discretization[0],
                                    args.state_discretization[1])

    # BUGFIX: fail fast on an unknown extractor name instead of raising a
    # NameError when feat_ext is first used below.
    if feat_ext is None:
        print('Specify a valid feature extractor.')
        exit()

    experiment_logger.log_header('Parameters of the feature extractor :')
    experiment_logger.log_info(feat_ext.__dict__)

    # Results can only be saved when a destination folder is given.
    if not args.dont_save and args.save_folder is None:
        print('Specify folder to save the results.')
        exit()

    goal_state = np.asarray([1, 5])  # kept for parity with the gridworld runs

    env = gym.make('MountainCar-v0')
    env = env.unwrapped
    experiment_logger.log_header('Environment details :')
    experiment_logger.log_info(env.__dict__)

    # Initialize the inner RL method.
    rlMethod = ActorCritic(env,
                           gamma=1,
                           log_interval=args.rl_log_intervals,
                           max_episodes=args.rl_episodes,
                           max_ep_length=args.rl_ep_length,
                           termination=None,
                           plot_loss=False,
                           save_folder=to_save,
                           hidden_dims=args.policy_net_hidden_dims,
                           feat_extractor=feat_ext)
    print("RL method initialized.")
    print(rlMethod.policy)
    if args.policy_path is not None:
        rlMethod.policy.load(args.policy_path)

    experiment_logger.log_header('Details of the RL method :')
    experiment_logger.log_info(rlMethod.__dict__)

    # Initialize the IRL method.
    trajectory_path = args.exp_trajectory_path

    irlMethod = DeepMaxEnt(trajectory_path,
                           rlmethod=rlMethod,
                           env=env,
                           iterations=args.irl_iterations,
                           log_intervals=5,
                           on_server=args.on_server,
                           regularizer=args.regularizer,
                           learning_rate=args.lr,
                           graft=False,
                           seed=args.seed,
                           scale_svf=args.scale_svf,
                           hidden_dims=args.reward_net_hidden_dims,
                           clipping_value=args.clipping_value,
                           save_folder=parent_dir)
    print("IRL method intialized.")
    experiment_logger.log_header('Details of the IRL method :')
    experiment_logger.log_info(irlMethod.__dict__)

    print(irlMethod.reward)
    rewardNetwork = irlMethod.train()

    if not args.dont_save:
        # TODO: persist the trained reward network.
        pass
Example #6
0
def main():
    """Train an actor-critic agent on the classic CartPole balancing task."""
    environment = gym.make('CartPole-v0')

    agent = ActorCritic(
        environment,
        gamma=0.99,
        log_interval=1,
        max_ep_length=200,
    )
    agent.train()
def main():
    """Train an actor-critic agent on a random gridworld with a configurable
    feature extractor.

    Supports multiprocess training, reward-network-guided training (IRL),
    policy playback, and user-driven trajectory recording.
    """
    args = parser.parse_args()

    experiment_logger = Logger('temp_save.txt')

    experiment_logger.log_header('Arguments for the experiment :')
    experiment_logger.log_info(vars(args))

    # 'spawn' gives worker processes a clean interpreter state.
    mp.set_start_method('spawn')

    # The clockless variant skips the pygame clock when not rendering.
    if args.render:
        from envs.gridworld import GridWorld
    else:
        from envs.gridworld_clockless import GridWorldClockless as GridWorld

    # Geometry of agent/obstacles in environment units.
    agent_width = 10
    step_size = 10
    obs_width = 10
    grid_size = 10

    # Build the requested feature extractor.
    feat_ext = None
    if args.feat_extractor == 'Onehot':
        feat_ext = OneHot(grid_rows=10, grid_cols=10)
    if args.feat_extractor == 'SocialNav':
        feat_ext = SocialNav(fieldList=['agent_state', 'goal_state'])
    if args.feat_extractor == 'FrontBackSideSimple':
        feat_ext = FrontBackSideSimple(
            thresh1=1,
            thresh2=2,
            thresh3=3,
            thresh4=4,
            step_size=step_size,
            agent_width=agent_width,
            obs_width=obs_width,
        )

    if args.feat_extractor == 'LocalGlobal':
        feat_ext = LocalGlobal(
            window_size=3,
            grid_size=grid_size,
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
        )

    # BUGFIX: fail fast on an unknown extractor name instead of raising a
    # NameError when feat_ext is first used below.
    if feat_ext is None:
        print('Specify a valid feature extractor.')
        exit()

    experiment_logger.log_header('Parameters of the feature extractor :')
    experiment_logger.log_info(feat_ext.__dict__)

    env = GridWorld(display=args.render,
                    is_onehot=False,
                    is_random=True,
                    rows=100,
                    agent_width=agent_width,
                    step_size=step_size,
                    obs_width=obs_width,
                    width=grid_size,
                    cols=100,
                    seed=7,
                    buffer_from_obs=0,
                    obstacles=3,
                    goal_state=np.asarray([5, 5]))

    experiment_logger.log_header('Environment details :')
    experiment_logger.log_info(env.__dict__)

    model = ActorCritic(env,
                        feat_extractor=feat_ext,
                        gamma=0.99,
                        log_interval=100,
                        max_ep_length=40,
                        hidden_dims=args.policy_net_hidden_dims,
                        max_episodes=4000)

    experiment_logger.log_header('Details of the RL method :')
    experiment_logger.log_info(model.__dict__)

    # BUGFIX: removed a stray pdb.set_trace() debugging breakpoint that
    # halted every run here.

    if args.policy_path is not None:
        model.policy.load(args.policy_path)

    if not args.play and not args.play_user:
        if args.reward_path is None:
            model.train_mp(n_jobs=4)
        else:
            from irlmethods.deep_maxent import RewardNet
            # BUGFIX: the extractor variable is feat_ext, not featExtract
            # (the old name raised NameError on this code path).
            state_size = feat_ext.extract_features(env.reset()).shape[0]
            reward_net = RewardNet(state_size)
            reward_net.load(args.reward_path)
            print(next(reward_net.parameters()).is_cuda)
            model.train_mp(reward_net=reward_net, n_jobs=4)

        if not args.dont_save:
            model.policy.save('./saved-models/')

    if args.play:
        env.tickSpeed = 15
        assert args.policy_path is not None, 'pass a policy to play from!'

        model.generate_trajectory(args.num_trajs,
                                  './trajs/ac_fbs_simple4_static_map7/')

    if args.play_user:
        env.tickSpeed = 200

        model.generate_trajectory_user(args.num_trajs,
                                       './trajs/ac_gridworld_user/')
Example #8
0
def main():
    """Run Deep Maximum-Entropy IRL on an annotation-driven gridworld with an
    actor-critic inner RL loop.

    Validates the CLI arguments, builds the feature extractor, environment,
    RL and IRL methods, then trains the reward network.
    """
    args = parser.parse_args()

    if args.on_server:
        # matplotlib without monitor
        matplotlib.use('Agg')

        # pygame without monitor
        os.environ['SDL_VIDEODRIVER'] = 'dummy'

    # Folder used by the experiment logger; encodes the key hyperparameters.
    base_folder = './results/' + str(args.save_folder) + '-reg-' + str(
        args.regularizer) + '-seed-' + str(args.seed) + '-lr-' + str(args.lr)
    log_file = 'Experiment_info.txt'
    experiment_logger = Logger(base_folder, log_file)

    experiment_logger.log_header('Arguments for the experiment :')
    experiment_logger.log_info(vars(args))

    # Deferred imports so the matplotlib backend selection above takes effect.
    from rlmethods.rlutils import LossBasedTermination
    from rlmethods.b_actor_critic import ActorCritic
    from irlmethods.deep_maxent import DeepMaxEnt
    import irlmethods.irlUtils as irlUtils
    from featureExtractor.gridworld_featureExtractor import OneHot, LocalGlobal, SocialNav, FrontBackSideSimple

    # Geometry of agent/obstacles in environment units.
    agent_width = 10
    step_size = 10
    obs_width = 10
    grid_size = 10

    if args.feat_extractor is None:

        print('Feature extractor missing.')
        exit()

    # Build the requested feature extractor.
    feat_ext = None
    if args.feat_extractor == 'Onehot':
        feat_ext = OneHot(grid_rows=10, grid_cols=10)
    if args.feat_extractor == 'SocialNav':
        feat_ext = SocialNav()
    if args.feat_extractor == 'FrontBackSideSimple':
        feat_ext = FrontBackSideSimple(
            thresh1=1,
            thresh2=2,
            thresh3=3,
            thresh4=4,
            step_size=step_size,
            agent_width=agent_width,
            obs_width=obs_width,
        )

    if args.feat_extractor == 'LocalGlobal':
        feat_ext = LocalGlobal(
            window_size=5,
            grid_size=grid_size,
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
        )

    # BUGFIX: an unrecognized extractor name previously caused a NameError
    # when feat_ext was first used below.
    if feat_ext is None:
        print('Feature extractor missing.')
        exit()

    experiment_logger.log_header('Parameters of the feature extractor :')
    experiment_logger.log_info(feat_ext.__dict__)

    # Validate the remaining required arguments before building anything.
    if not args.dont_save and args.save_folder is None:
        print('Specify folder to save the results.')
        exit()

    if args.annotation_file is None:
        print('Specify annotation file for the environment.')
        exit()

    if args.exp_trajectory_path is None:
        print('Specify expert trajectory folder.')
        exit()

    goal_state = np.asarray([1, 5])

    env = GridWorld(display=args.render,
                    is_random=True,
                    rows=576,
                    cols=720,
                    agent_width=agent_width,
                    step_size=step_size,
                    obs_width=obs_width,
                    width=grid_size,
                    annotation_file=args.annotation_file,
                    goal_state=goal_state,
                    step_wrapper=utils.step_wrapper,
                    seed=args.seed,
                    reset_wrapper=utils.reset_wrapper,
                    is_onehot=False)

    experiment_logger.log_header('Environment details :')
    experiment_logger.log_info(env.__dict__)

    # Initialize the inner RL method.
    rlMethod = ActorCritic(env,
                           gamma=0.99,
                           log_interval=args.rl_log_intervals,
                           max_episodes=args.rl_episodes,
                           max_ep_length=args.rl_ep_length,
                           termination=None,
                           hidden_dims=args.reward_net_hidden_dims,
                           feat_extractor=feat_ext)
    print("RL method initialized.")
    print(rlMethod.policy)
    if args.policy_path is not None:
        rlMethod.policy.load(args.policy_path)

    experiment_logger.log_header('Details of the RL method :')
    experiment_logger.log_info(rlMethod.__dict__)

    # Initialize the IRL method.
    trajectory_path = args.exp_trajectory_path

    # BUGFIX: save under the relative results folder ('./results/'), matching
    # the logger's base_folder above, instead of the filesystem root
    # ('/results/') which is usually not writable.
    folder_to_save = './results/' + args.save_folder
    irlMethod = DeepMaxEnt(trajectory_path,
                           rlmethod=rlMethod,
                           env=env,
                           iterations=args.irl_iterations,
                           log_intervals=5,
                           on_server=args.on_server,
                           regularizer=args.regularizer,
                           learning_rate=args.lr,
                           graft=True,
                           hidden_dims=args.reward_net_hidden_dims,
                           save_folder=folder_to_save)
    print("IRL method intialized.")
    print(irlMethod.reward)

    experiment_logger.log_header('Details of the IRL method :')
    experiment_logger.log_info(irlMethod.__dict__)
    rewardNetwork = irlMethod.train()

    if not args.dont_save:
        # TODO: persist the trained reward network.
        pass
Example #9
0
def main():
    """Run Deep Maximum-Entropy IRL on a pedestrian-annotation gridworld.

    Supports many feature extractors and three inner RL methods
    (ActorCritic, continuous SAC, discrete SAC), all selected via CLI
    arguments, then trains the IRL reward network.
    """
    args = parser.parse_args()

    utils.seed_all(args.seed)

    if args.on_server:
        # matplotlib without monitor
        matplotlib.use("Agg")

        # pygame without monitor
        os.environ["SDL_VIDEODRIVER"] = "dummy"

    # Timestamp makes the per-run result folders unique.
    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime("%Y-%m-%d_%H:%M:%S")

    if not args.save_folder:
        print("Provide save folder.")
        exit()

    # Encode the network architectures into the folder names.
    policy_net_dims = "-policy_net-"
    for dim in args.policy_net_hidden_dims:
        policy_net_dims += str(dim)
        policy_net_dims += "-"

    reward_net_dims = "-reward_net-"
    for dim in args.reward_net_hidden_dims:
        reward_net_dims += str(dim)
        reward_net_dims += "-"

    parent_dir = ("./results/" + str(args.save_folder) + st + policy_net_dims +
                  reward_net_dims)
    to_save = ("./results/" + str(args.save_folder) + st + policy_net_dims +
               reward_net_dims + "-reg-" + str(args.regularizer) + "-seed-" +
               str(args.seed) + "-lr-" + str(args.lr_irl))

    log_file = "Experiment_info.txt"

    experiment_logger = Logger(to_save, log_file)
    experiment_logger.log_header("Arguments for the experiment :")
    # Record the exact code version for reproducibility.
    repo = git.Repo(search_parent_directories=True)
    experiment_logger.log_info({'From branch : ': repo.active_branch.name})
    experiment_logger.log_info({'Commit number : ': repo.head.object.hexsha})
    experiment_logger.log_info(vars(args))

    # Deferred imports so the matplotlib backend selection above takes effect.
    # for rl
    from rlmethods.b_actor_critic import ActorCritic
    from rlmethods.soft_ac_pi import SoftActorCritic
    from rlmethods.soft_ac import SoftActorCritic as QSAC
    from rlmethods.rlutils import ReplayBuffer

    # for irl
    from irlmethods.deep_maxent import DeepMaxEnt
    import irlmethods.irlUtils as irlUtils
    from featureExtractor.gridworld_featureExtractor import (
        OneHot,
        LocalGlobal,
        SocialNav,
        FrontBackSideSimple,
    )

    # Geometry of agent/obstacles in environment units.
    agent_width = 10
    step_size = 2
    obs_width = 10
    grid_size = 10

    if args.feat_extractor is None:

        print("Feature extractor missing.")
        exit()

    # Build the requested feature extractor.
    feat_ext = None
    if args.feat_extractor == "Onehot":
        feat_ext = OneHot(grid_rows=10, grid_cols=10)
    if args.feat_extractor == "SocialNav":
        feat_ext = SocialNav()
    if args.feat_extractor == "FrontBackSideSimple":
        feat_ext = FrontBackSideSimple(
            thresh1=1,
            thresh2=2,
            thresh3=3,
            thresh4=4,
            step_size=step_size,
            agent_width=agent_width,
            obs_width=obs_width,
        )

    if args.feat_extractor == "LocalGlobal":
        feat_ext = LocalGlobal(
            window_size=5,
            grid_size=grid_size,
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
        )

    if args.feat_extractor == "DroneFeatureSAM1":

        feat_ext = DroneFeatureSAM1(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            thresh1=5,
            thresh2=10,
        )

    if args.feat_extractor == "DroneFeatureRisk":

        feat_ext = DroneFeatureRisk(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            thresh1=15,
            thresh2=30,
        )

    if args.feat_extractor == "DroneFeatureRisk_v2":

        feat_ext = DroneFeatureRisk_v2(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            thresh1=15,
            thresh2=30,
        )

    if args.feat_extractor == "DroneFeatureRisk_speed":

        feat_ext = DroneFeatureRisk_speed(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            thresh1=10,
            thresh2=15,
        )

    if args.feat_extractor == "DroneFeatureRisk_speedv2":

        feat_ext = DroneFeatureRisk_speedv2(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            thresh1=18,
            thresh2=30,
        )

    if args.feat_extractor == 'VasquezF1':
        feat_ext = VasquezF1(agent_width * 6, 0.5, 1.0)

    if args.feat_extractor == 'VasquezF2':
        # BUGFIX: this branch previously instantiated VasquezF1 (copy-paste
        # error), so requesting VasquezF2 silently used the wrong features.
        feat_ext = VasquezF2(agent_width * 6, 0.5, 1.0)

    if args.feat_extractor == 'VasquezF3':
        feat_ext = VasquezF3(agent_width)

    if args.feat_extractor == "Fahad":
        feat_ext = Fahad(36, 60, 0.5, 1.0)

    if args.feat_extractor == "GoalConditionedFahad":
        feat_ext = GoalConditionedFahad(36, 60, 0.5, 1.0)

    # BUGFIX: an unrecognized extractor name previously caused a NameError
    # when feat_ext was first used below.
    if feat_ext is None:
        print("Feature extractor missing.")
        exit()

    experiment_logger.log_header("Parameters of the feature extractor :")
    experiment_logger.log_info(feat_ext.__dict__)

    # Validate the remaining required arguments before building anything.
    if not args.dont_save and args.save_folder is None:
        print("Specify folder to save the results.")
        exit()
    # NOTE: the environment can now initialize without an annotation file,
    # so no check on args.annotation_file is performed here.
    if args.exp_trajectory_path is None:
        print("Specify expert trajectory folder.")
        exit()

    env = GridWorld(
        display=args.render,
        is_random=True,
        rows=576,
        cols=720,
        agent_width=agent_width,
        step_size=step_size,
        obs_width=obs_width,
        width=grid_size,
        subject=args.subject,
        annotation_file=args.annotation_file,
        goal_state=None,
        step_wrapper=utils.step_wrapper,
        seed=args.seed,
        replace_subject=args.replace_subject,
        segment_size=args.segment_size,
        external_control=True,
        continuous_action=False,
        reset_wrapper=utils.reset_wrapper,
        consider_heading=True,
        is_onehot=False,
    )

    experiment_logger.log_header("Environment details :")
    experiment_logger.log_info(env.__dict__)

    # Initialize the inner RL method requested on the command line.
    if args.rl_method == "ActorCritic":
        rl_method = ActorCritic(
            env,
            feat_extractor=feat_ext,
            gamma=1,
            log_interval=args.rl_log_intervals,
            max_episode_length=args.rl_ep_length,
            hidden_dims=args.policy_net_hidden_dims,
            save_folder=to_save,
            lr=args.lr_rl,
            max_episodes=args.rl_episodes,
        )

    if args.rl_method == "SAC":
        if not env.continuous_action:
            print("The action space needs to be continuous for SAC to work.")
            exit()
        replay_buffer = ReplayBuffer(args.replay_buffer_size)

        rl_method = SoftActorCritic(
            env,
            replay_buffer,
            feat_ext,
            play_interval=500,
            learning_rate=args.lr_rl,
            buffer_sample_size=args.replay_buffer_sample_size,
        )

    if args.rl_method == "discrete_SAC":
        if not isinstance(env.action_space, gym.spaces.Discrete):
            print(
                "discrete SAC requires a discrete action space environmnet to work."
            )
            exit()

        replay_buffer = ReplayBuffer(args.replay_buffer_size)

        rl_method = QSAC(
            env,
            replay_buffer,
            feat_ext,
            args.replay_buffer_sample_size,
            learning_rate=args.lr_rl,
            entropy_tuning=True,
            entropy_target=0.3,
            play_interval=args.play_interval,
        )

    print("RL method initialized.")
    print(rl_method.policy)
    if args.policy_path is not None:
        rl_method.policy.load(args.policy_path)

    experiment_logger.log_header("Details of the RL method :")
    experiment_logger.log_info(rl_method.__dict__)

    # Initialize the IRL method.
    trajectory_path = args.exp_trajectory_path

    # BUGFIX: 'scale' was left unbound when args.scale_svf was falsy but not
    # None (e.g. 0 or False), raising NameError below; default to False in
    # every non-truthy case.
    scale = args.scale_svf if args.scale_svf else False

    irl_method = DeepMaxEnt(
        trajectory_path,
        rlmethod=rl_method,
        env=env,
        iterations=args.irl_iterations,
        on_server=args.on_server,
        l1regularizer=args.regularizer,
        learning_rate=args.lr_irl,
        seed=args.seed,
        graft=False,
        scale_svf=scale,
        hidden_dims=args.reward_net_hidden_dims,
        clipping_value=args.clipping_value,
        enumerate_all=True,
        save_folder=parent_dir,
        rl_max_ep_len=args.rl_ep_length,
        rl_episodes=args.rl_episodes,
    )

    print("IRL method intialized.")
    print(irl_method.reward)

    experiment_logger.log_header("Details of the IRL method :")
    experiment_logger.log_info(irl_method.__dict__)

    # Optional smoothing of the state visitation frequencies.
    smoothing_flag = False
    if args.svf_smoothing:
        smoothing_flag = True

    irl_method.train(smoothing=smoothing_flag)

    if not args.dont_save:
        # TODO: persist the trained reward network.
        pass
Example #10
0
def plot_reward_across_policy_models(foldername,
                                     expert=None,
                                     feature_extractor=None,
                                     seed_list=None,
                                     iterations_per_model=50,
                                     compare_expert=True):
    """Plot mean +/- std reward of a folder of policy checkpoints across seeds.

    For each seed a fresh GridWorld is created, and every '*.pt' policy file
    in *foldername* is evaluated for *iterations_per_model* episodes; results
    are averaged across seeds and plotted against the expert's reward.

    Args:
        foldername: directory containing the '.pt' policy checkpoints.
        expert: optional expert policy file used as the baseline curve.
        feature_extractor: feature extractor handed to the RL method.
        seed_list: environment seeds to average over (defaults to none).
        iterations_per_model: evaluation episodes per checkpoint.
        compare_expert: kept for interface compatibility (currently unused).

    Returns:
        The list of per-checkpoint rewards from the last processed seed
        (empty when seed_list is empty).
    """
    # BUGFIX: use None as the default instead of a shared mutable list.
    if seed_list is None:
        seed_list = []

    color_list = ['r', 'g', 'b', 'c', 'm', 'y', 'k']
    counter = 0

    reward_across_seeds = []
    # BUGFIX: pre-bind these so the return/plot below cannot raise
    # NameError when seed_list is empty.
    reward_across_models = []
    reward_expert = []
    xaxis = None
    for seed in seed_list:

        env = GridWorld(display=False,
                        is_onehot=False,
                        is_random=True,
                        rows=10,
                        cols=10,
                        seed=seed,
                        obstacles=[
                            np.asarray([5, 1]),
                            np.array([5, 9]),
                            np.asarray([4, 1]),
                            np.array([6, 9]),
                            np.asarray([3, 1]),
                            np.array([7, 9])
                        ],
                        goal_state=np.asarray([1, 5]))

        max_ep_length = 20

        rl_method = ActorCritic(env,
                                feat_extractor=feature_extractor,
                                gamma=0.99,
                                max_ep_length=max_ep_length,
                                log_interval=50)

        model_names = glob.glob(os.path.join(foldername, '*.pt'))

        xaxis = np.arange(len(model_names))

        # Expert baseline reward, computed once per seed.
        reward_exp = get_rewards_for_model(expert,
                                           env=env,
                                           feature_extractor=feature_extractor,
                                           rl_method=rl_method,
                                           max_ep_length=max_ep_length,
                                           iterations=iterations_per_model)

        reward_across_models = []
        reward_expert = []
        for policy_file in sorted(model_names, key=numericalSort):

            print('asdfasfsa', policy_file)

            reward_per_model = get_rewards_for_model(
                policy_file,
                env=env,
                feature_extractor=feature_extractor,
                rl_method=rl_method,
                max_ep_length=max_ep_length,
                iterations=iterations_per_model)

            print('Average reward for the model:', reward_per_model)
            reward_across_models.append(reward_per_model)
            reward_expert.append(reward_exp)

        reward_across_seeds.append(reward_across_models)

    np_reward_across_seeds = np.array(reward_across_seeds)

    print(np_reward_across_seeds.shape)
    means_rewards = np.mean(np_reward_across_seeds, axis=0)

    print("the mean rewards :", means_rewards)

    print("The mean across all runs and seeds : ", np.mean(means_rewards))

    std_rewards = np.std(np_reward_across_seeds, axis=0)

    print('the std :', std_rewards)
    plt.xlabel('IRL iteration no.')
    plt.ylabel('Reward obtained')
    plt.plot(xaxis,
             means_rewards,
             color=color_list[counter],
             label='IRL trained agent')
    # Shade one standard deviation around the mean curve.
    plt.fill_between(xaxis,
                     means_rewards - std_rewards,
                     means_rewards + std_rewards,
                     alpha=0.5,
                     facecolor=color_list[counter])
    plt.plot(reward_expert, color='k', label='Expert agent')
    plt.legend()
    plt.draw()
    plt.pause(0.001)
    plt.show()
    return reward_across_models
Example #11
0
def main():
    """Train or evaluate an RL agent (ActorCritic, SAC or discrete QSAC)
    on the pedestrian-dataset GridWorldDrone environment.

    Modes selected by CLI flags:
      * default      -- train a policy, optionally against a learned IRL
                        reward network (``--reward-path``);
      * ``--play``   -- replay saved policies and plot their results;
      * ``--play-user`` -- record trajectories driven by a human user.

    Fixes: the ``"VasquezF2"`` branch previously constructed a
    ``VasquezF1`` extractor; it now builds ``VasquezF2``.  An explicit
    error is raised when ``--rl-method`` matches no known method (the
    original crashed later with a NameError on ``model``).
    """

    ##### for the logger
    # timestamp makes each results-folder name unique
    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime("%Y-%m-%d %H:%M:%S")
    ###################

    args = parser.parse_args()

    seed_all(args.seed)

    if args.on_server:
        # headless server: select non-interactive backends before
        # matplotlib / pygame are first used
        matplotlib.use("Agg")
        # pygame without monitor
        os.environ["SDL_VIDEODRIVER"] = "dummy"

    # pyplot must be imported only after the backend is chosen above
    from matplotlib import pyplot as plt

    # 'spawn' is required to share CUDA tensors across worker processes
    mp.set_start_method("spawn")

    from rlmethods.b_actor_critic import ActorCritic
    from rlmethods.soft_ac import SoftActorCritic, QSoftActorCritic
    from rlmethods.rlutils import ReplayBuffer

    from envs.gridworld_drone import GridWorldDrone
    from featureExtractor.drone_feature_extractor import (
        DroneFeatureSAM1,
        DroneFeatureOccup,
        DroneFeatureRisk,
        DroneFeatureRisk_v2,
        VasquezF1,
        VasquezF2,
        VasquezF3,
        Fahad,
        GoalConditionedFahad,
    )
    from featureExtractor.gridworld_featureExtractor import (
        FrontBackSide,
        LocalGlobal,
        OneHot,
        SocialNav,
        FrontBackSideSimple,
    )
    from featureExtractor.drone_feature_extractor import (
        DroneFeatureRisk_speed,
        DroneFeatureRisk_speedv2,
    )

    save_folder = None

    if not args.dont_save and not args.play:
        # build a results-folder name that encodes the main
        # hyper-parameters, then log the arguments and the git state
        if not args.save_folder:
            print("Provide save folder.")
            exit()

        policy_net_dims = "-policy_net-"
        for dim in args.policy_net_hidden_dims:
            policy_net_dims += str(dim)
            policy_net_dims += "-"

        reward_net_dims = "-reward_net-"
        for dim in args.reward_net_hidden_dims:
            reward_net_dims += str(dim)
            reward_net_dims += "-"

        save_folder = (
            "./results/"
            + args.save_folder
            + st
            + args.feat_extractor
            + "-seed-"
            + str(args.seed)
            + policy_net_dims
            + reward_net_dims
            + "-total-ep-"
            + str(args.total_episodes)
            + "-max-ep-len-"
            + str(args.max_ep_length)
        )

        experiment_logger = Logger(save_folder, "experiment_info.txt")
        experiment_logger.log_header("Arguments for the experiment :")
        repo = git.Repo(search_parent_directories=True)
        experiment_logger.log_info({'From branch : ' : repo.active_branch.name})
        experiment_logger.log_info({'Commit number : ' : repo.head.object.hexsha})
        experiment_logger.log_info(vars(args))

    # geometry shared by the feature extractors and the environment
    window_size = 9
    step_size = 2
    agent_width = 10
    obs_width = 10
    grid_size = 10

    feat_ext = None
    # initialize the feature extractor to be used
    if args.feat_extractor == "Onehot":
        feat_ext = OneHot(grid_rows=10, grid_cols=10)
    if args.feat_extractor == "SocialNav":
        feat_ext = SocialNav(fieldList=["agent_state", "goal_state"])
    if args.feat_extractor == "FrontBackSideSimple":
        feat_ext = FrontBackSideSimple(
            thresh1=1,
            thresh2=2,
            thresh3=3,
            thresh4=4,
            step_size=step_size,
            agent_width=agent_width,
            obs_width=obs_width,
        )

    if args.feat_extractor == "LocalGlobal":
        feat_ext = LocalGlobal(
            window_size=11,
            grid_size=grid_size,
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
        )

    if args.feat_extractor == "DroneFeatureSAM1":

        feat_ext = DroneFeatureSAM1(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            thresh1=15,
            thresh2=30,
        )

    if args.feat_extractor == "DroneFeatureOccup":

        feat_ext = DroneFeatureOccup(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            window_size=window_size,
        )

    if args.feat_extractor == "DroneFeatureRisk":

        feat_ext = DroneFeatureRisk(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            show_agent_persp=False,
            thresh1=15,
            thresh2=30,
        )

    if args.feat_extractor == "DroneFeatureRisk_v2":

        feat_ext = DroneFeatureRisk_v2(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            show_agent_persp=False,
            thresh1=15,
            thresh2=30,
        )

    if args.feat_extractor == "DroneFeatureRisk_speed":

        feat_ext = DroneFeatureRisk_speed(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            show_agent_persp=False,
            return_tensor=False,
            thresh1=10,
            thresh2=15,
        )

    if args.feat_extractor == "DroneFeatureRisk_speedv2":

        feat_ext = DroneFeatureRisk_speedv2(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            show_agent_persp=False,
            return_tensor=False,
            thresh1=18,
            thresh2=30,
        )

    if args.feat_extractor == "VasquezF1":
        feat_ext = VasquezF1(agent_width * 6, 0.5, 1.0)

    if args.feat_extractor == "VasquezF2":
        # fixed: this branch previously instantiated VasquezF1
        feat_ext = VasquezF2(agent_width * 6, 0.5, 1.0)

    if args.feat_extractor == "VasquezF3":
        feat_ext = VasquezF3(agent_width)

    if args.feat_extractor == "Fahad":
        feat_ext = Fahad(36, 60, 0.5, 1.0)

    if args.feat_extractor == "GoalConditionedFahad":
        feat_ext = GoalConditionedFahad(36, 60, 0.5, 1.0)

    if feat_ext is None:
        print("Please enter proper feature extractor!")
        exit()
    # log feature extractor info

    if not args.dont_save and not args.play:

        experiment_logger.log_header("Parameters of the feature extractor :")
        experiment_logger.log_info(feat_ext.__dict__)

    # initialize the environment
    replace_subject = bool(args.replace_subject)

    env = GridWorldDrone(
        display=args.render,
        is_onehot=False,
        seed=args.seed,
        obstacles=None,
        show_trail=False,
        is_random=True,
        annotation_file=args.annotation_file,
        subject=args.subject,
        tick_speed=60,
        obs_width=10,
        step_size=step_size,
        agent_width=agent_width,
        replace_subject=replace_subject,
        segment_size=args.segment_size,
        external_control=True,
        step_reward=0.001,
        show_comparison=True,
        consider_heading=True,
        show_orientation=True,
        # rows=200, cols=200, width=grid_size)
        rows=576,
        cols=720,
        width=grid_size,
    )

    # env = gym.make('Acrobot-v1')
    # log environment info
    if not args.dont_save and not args.play:

        experiment_logger.log_header("Environment details :")
        experiment_logger.log_info(env.__dict__)

    # initialize RL
    model = None

    if args.rl_method == "ActorCritic":
        model = ActorCritic(
            env,
            feat_extractor=feat_ext,
            gamma=1,
            log_interval=100,
            max_episode_length=args.max_ep_length,
            hidden_dims=args.policy_net_hidden_dims,
            save_folder=save_folder,
            lr=args.lr,
            entropy_coeff=args.entropy_coeff,
            max_episodes=args.total_episodes,
        )

    if args.rl_method == "SAC":

        replay_buffer = ReplayBuffer(args.replay_buffer_size)

        model = SoftActorCritic(
            env,
            replay_buffer,
            feat_ext,
            buffer_sample_size=args.replay_buffer_sample_size,
            entropy_tuning=True,
            play_interval=args.play_interval,
            entropy_target=args.entropy_target,
            gamma=args.gamma,
            learning_rate=args.lr,
        )

    if args.rl_method == "discrete_QSAC":

        replay_buffer = ReplayBuffer(args.replay_buffer_size)

        model = QSoftActorCritic(
            env,
            replay_buffer,
            feat_ext,
            buffer_sample_size=args.replay_buffer_sample_size,
            entropy_tuning=True,
            play_interval=args.play_interval,
            entropy_target=args.entropy_target,
            gamma=args.gamma,
            learning_rate=args.lr,
        )

    # mirror the feat_ext guard above: fail loudly on an unknown method
    # instead of crashing later with a NameError on `model`
    if model is None:
        print("Please enter proper RL method!")
        exit()

    # log RL info
    if not args.dont_save and not args.play:

        experiment_logger.log_header("Details of the RL method :")
        experiment_logger.log_info(model.__dict__)

    if args.policy_path is not None:

        from debugtools import numericalSort

        # collect the policy file(s): a single .pt file, or every .pt
        # file in a directory sorted numerically by filename
        policy_file_list = []
        reward_across_models = []
        if os.path.isfile(args.policy_path):
            policy_file_list.append(args.policy_path)
        if os.path.isdir(args.policy_path):
            policy_names = glob.glob(os.path.join(args.policy_path, "*.pt"))
            policy_file_list = sorted(policy_names, key=numericalSort)

        xaxis = np.arange(len(policy_file_list))

    if not args.play and not args.play_user:
        # no playing of any kind, so training

        if args.reward_path is None:

            if args.policy_path:
                model.policy.load(args.policy_path)

            if args.rl_method == "SAC" or args.rl_method == "discrete_QSAC":
                model.train(args.total_episodes, args.max_ep_length)

            else:
                model.train()

        else:
            # train against a learned IRL reward network instead of the
            # environment reward
            from irlmethods.deep_maxent import RewardNet

            state_size = feat_ext.extract_features(env.reset()).shape[0]
            reward_net = RewardNet(state_size, args.reward_net_hidden_dims)
            reward_net.load(args.reward_path)
            print(next(reward_net.parameters()).is_cuda)
            model.train(reward_net=reward_net)

        if not args.dont_save:
            model.policy.save(save_folder + "/policy-models/")

    if args.play:
        # evaluation mode: replay every stored policy and plot the
        # average reward and fraction of successful runs
        # env.tickSpeed = 15
        from debugtools import compile_results

        xaxis = []
        counter = 1
        plt.figure(0)
        avg_reward_list = []
        frac_good_run_list = []
        print(policy_file_list)
        for policy_file in policy_file_list:

            print("Playing for policy :", policy_file)
            model.policy.load(policy_file)
            # derive the experiment folder from the policy path
            # (two levels up from the policy file)
            policy_folder = policy_file.strip().split("/")[0:-2]
            save_folder = ""
            for p in policy_folder:
                save_folder = save_folder + p + "/"

            print("The final save folder ", save_folder)
            # env.tickSpeed = 10
            assert args.policy_path is not None, "pass a policy to play from!"
            if args.exp_trajectory_path is not None:
                from irlmethods.irlUtils import calculate_expert_svf

                # expert state-visitation frequencies, used for
                # comparison during trajectory generation
                expert_svf = calculate_expert_svf(
                    args.exp_trajectory_path,
                    max_time_steps=args.max_ep_length,
                    feature_extractor=feat_ext,
                    gamma=1,
                )
            # reward_across_models.append(model.generate_trajectory(args.num_trajs, args.render))
            if args.exp_trajectory_path is None:

                if args.dont_save:
                    rewards, state_info, sub_info = model.generate_trajectory(
                        args.num_trajs, args.render
                    )
                else:
                    rewards, state_info, sub_info = model.generate_trajectory(
                        args.num_trajs,
                        args.render,
                        store_raw=args.store_raw_states,
                        path=save_folder + "/agent_generated_trajectories/",
                    )
            else:

                if args.dont_save:
                    rewards, state_info, sub_info = model.generate_trajectory(
                        args.num_trajs, args.render, expert_svf=expert_svf
                    )
                else:
                    rewards, state_info, sub_info = model.generate_trajectory(
                        args.num_trajs,
                        args.render,
                        path=save_folder + "/agent_generated_trajectories/",
                        expert_svf=expert_svf,
                    )

            avg_reward, good_run_frac = compile_results(
                rewards, state_info, sub_info
            )

            avg_reward_list.append(avg_reward)
            frac_good_run_list.append(good_run_frac)
            plt.plot(avg_reward_list, c="r")
            plt.plot(frac_good_run_list, c="g")
            plt.draw()
        plt.show()

    if args.play_user:
        # let a human drive the agent and record the trajectories
        env.tickSpeed = 200

        model.generate_trajectory_user(
            args.num_trajs, args.render, path="./user_generated_trajectories/"
        )
def main():
    """Run deep max-entropy IRL on a small gridworld.

    Builds a 10x10 GridWorld with a single obstacle, wraps an
    actor-critic learner around a one-hot feature extractor, and hands
    both to DeepMaxEnt to recover a reward network from the expert
    trajectories stored on disk.
    """
    args = parser.parse_args()

    if args.on_server:
        # matplotlib without monitor
        matplotlib.use('Agg')

        # pygame without monitor
        os.environ['SDL_VIDEODRIVER'] = 'dummy'

    from rlmethods.rlutils import LossBasedTermination
    from rlmethods.b_actor_critic import ActorCritic
    from irlmethods.deep_maxent import DeepMaxEnt
    import irlmethods.irlUtils as irlUtils
    from featureExtractor.gridworld_featureExtractor import OneHot, LocalGlobal

    # environment: 10x10 grid, one obstacle, non-onehot states
    goal = np.asarray([5, 5])
    env = GridWorld(display=args.render,
                    obstacles=[np.asarray([3, 7])],
                    goal_state=goal,
                    step_wrapper=utils.step_wrapper,
                    seed=3,
                    reset_wrapper=utils.reset_wrapper,
                    is_onehot=False)

    # feature extractor (LocalGlobal variant kept for reference)
    feature_extractor = OneHot(grid_rows=10, grid_cols=10)
    #feat_ext = LocalGlobal(window_size=3,
    #                       fieldList = ['agent_state','goal_state','obstacles'])

    # inner-loop RL learner used by the IRL method
    rl_agent = ActorCritic(env,
                           gamma=0.99,
                           log_interval=args.rl_log_intervals,
                           max_episodes=args.rl_episodes,
                           max_ep_length=args.rl_ep_length,
                           termination=None,
                           feat_extractor=feature_extractor)
    print("RL method initialized.")
    if args.policy_path is not None:
        rl_agent.policy.load(args.policy_path)

    # outer-loop IRL method fed with expert trajectories from disk
    expert_traj_dir = './trajs/ac_gridworld_locglob_3/'
    irl_agent = DeepMaxEnt(expert_traj_dir,
                           rlmethod=rl_agent,
                           env=env,
                           iterations=args.irl_iterations,
                           log_intervals=5,
                           on_server=args.on_server,
                           plot_save_folder='./plots/')
    print("IRL method intialized.")
    reward_network = irl_agent.train()

    if not args.dont_save:
        # NOTE(review): nothing is saved here — presumably the trained
        # reward network should be persisted; confirm intended behaviour.
        pass
Exemple #13
0
def main():
    """Train or evaluate an actor-critic agent on MountainCar-v0 using a
    discretised feature extractor, optionally against a learned (IRL)
    reward network; in --play mode, replay stored policies and plot
    mean/std rewards.
    """

    args = parser.parse_args()

    utils.seed_all(args.seed)

    if args.on_server:
        # matplotlib without monitor
        matplotlib.use('Agg')

        # pygame without monitor
        os.environ['SDL_VIDEODRIVER'] = 'dummy'
    # pyplot must be imported only after the backend has been selected
    from matplotlib import pyplot as plt

    save_folder = None
    if not args.dont_save:
        # results folder + experiment-metadata logger
        save_folder = './results/'+ args.save_folder
        experiment_logger = Logger(save_folder,'experiment_info.txt')

        experiment_logger.log_header('Arguments for the experiment :')
        experiment_logger.log_info(vars(args))


    # 'spawn' start method is needed for the multiprocess trainers below
    mp.set_start_method('spawn')

    # the clockless variant runs without the pygame tick limiter
    if args.render:
        from envs.gridworld import GridWorld
    else:
        from envs.gridworld_clockless import GridWorldClockless as GridWorld


    # both extractors discretise the continuous (position, velocity)
    # MountainCar state into the requested number of bins
    if args.feat_extractor=='MCFeatures':
        feat_ext = MCFeatures(args.state_discretization[0], args.state_discretization[1])

    elif args.feat_extractor=='MCFeaturesOnehot':
        feat_ext = MCFeaturesOnehot(args.state_discretization[0], args.state_discretization[1])

    else:
        print('Enter proper feature extractor value.')
        exit()

    if not args.dont_save:
        experiment_logger.log_header('Parameters of the feature extractor :')
        experiment_logger.log_info(feat_ext.__dict__)

    '''
    np.asarray([2,2]),np.asarray([7,4]),np.asarray([3,5]),
                                np.asarray([5,2]),np.asarray([8,3]),np.asarray([7,5]),
                                np.asarray([3,3]),np.asarray([3,7]),np.asarray([5,7])
                                
    env = GridWorld(display=args.render, is_onehot= False,is_random=True,
                    rows=100, agent_width=agent_width,step_size=step_size,
                    obs_width=obs_width,width=grid_size,
                    cols=100,
                    seed=7,
                    buffer_from_obs=0,
                    obstacles=3,
                                
                    goal_state=np.asarray([5,5]))
    '''
    env = gym.make('MountainCar-v0')
    # unwrapped: bypass the TimeLimit wrapper so episode length is
    # governed by max_ep_length below instead
    env = env.unwrapped

    if not args.dont_save:

        experiment_logger.log_header('Environment details :')
        experiment_logger.log_info(env.__dict__)


    model = ActorCritic(env, feat_extractor=feat_ext,  gamma=0.99, plot_loss=False,
                        log_interval=10, max_ep_length=300, hidden_dims=args.policy_net_hidden_dims,
                        max_episodes=30, save_folder=save_folder)

    if not args.dont_save:

        experiment_logger.log_header('Details of the RL method :')
        experiment_logger.log_info(model.__dict__)

    #pdb.set_trace()

    # collect the policy file(s): a single .pt file, or every .pt file
    # in a directory sorted numerically by filename
    if args.policy_path is not None:
        policy_file_list =  []
        reward_across_models = []
        if os.path.isfile(args.policy_path):
            policy_file_list.append(args.policy_path)
        if os.path.isdir(args.policy_path):
            policy_names = glob.glob(os.path.join(args.policy_path, '*.pt'))
            policy_file_list = sorted(policy_names, key=numericalSort)

        xaxis = np.arange(len(policy_file_list))

    if not args.play and not args.play_user:
        # training mode
        if args.reward_path is None:
            model.train_mp(n_jobs=4)
        else:
            # train against a learned IRL reward network instead of the
            # environment reward
            from irlmethods.deep_maxent import RewardNet
            state_size = feat_ext.state_rep_size
            # NOTE(review): the reward net is sized with
            # policy_net_hidden_dims — confirm a separate
            # reward_net_hidden_dims argument is not intended here.
            reward_net = RewardNet(state_size, args.policy_net_hidden_dims)
            reward_net.load(args.reward_path)
            print(next(reward_net.parameters()).is_cuda)
            model.train_mp(reward_net = reward_net,n_jobs = 4)

        if not args.dont_save:
            model.policy.save(save_folder+'/policy/')

    if args.play:
        # evaluation mode: replay each stored policy and incrementally
        # plot mean +/- std of the rewards gathered so far
        xaxis = []
        counter = 1
        print(policy_file_list)
        for policy_file in policy_file_list:

            model.policy.load(policy_file)

            env.tickSpeed = 15
            assert args.policy_path is not None, 'pass a policy to play from!'

            reward_across_models.append(model.generate_trajectory(args.num_trajs, args.render))

        #plotting the 2d list

            xaxis.append(counter)
            counter += 1
            reward_across_models_np = np.array(reward_across_models)
            mean_rewards = np.mean(reward_across_models_np, axis=1)
            std_rewards = np.std(reward_across_models_np, axis=1)
            plt.plot(xaxis,mean_rewards,color = 'r',label='IRL trained agent')
            plt.fill_between(xaxis , mean_rewards-std_rewards , 
                        mean_rewards+std_rewards, alpha = 0.5, facecolor = 'r')
            plt.draw()
            plt.pause(0.001)
            '''
            print('RAM usage :')
            display_memory_usage(process.memory_info().rss)
            print('GPU usage :')
            display_memory_usage(torch.cuda.memory_allocated())
            torch.cuda.empty_cache()
            display_memory_usage(torch.cuda.memory_allocated())
            '''
            #plt.show()
        plt.show()
    if args.play_user:
        # let a human drive the agent and record the trajectories
        env.tickSpeed = 200

        model.generate_trajectory_user(args.num_trajs, './trajs/ac_gridworld_user/')
Exemple #14
0
def main():
    """Train or evaluate a multiprocess actor-critic agent on the
    pedestrian-dataset GridWorldDrone environment.

    Fixes: the feature extractor was assigned to ``feat_ext`` but later
    referenced as ``featExtract`` — a name never defined in this
    function, guaranteeing a NameError.  The name is now ``feat_ext``
    throughout, and an explicit error is raised when no valid extractor
    is selected.
    """

    args = parser.parse_args()
    # 'spawn' is required for the multiprocess trainer (train_mp)
    mp.set_start_method('spawn')

    from envs.gridworld_drone import GridWorldDrone

    # geometry shared by the feature extractors and the environment
    agent_width = 10
    step_size = 2
    obs_width = 10
    grid_size = 10

    # select the feature extractor; stays None on an unknown value
    feat_ext = None
    if args.feat_extractor == 'Onehot':
        feat_ext = OneHot(grid_rows=10, grid_cols=10)
    if args.feat_extractor == 'SocialNav':
        feat_ext = SocialNav(fieldList=['agent_state', 'goal_state'])
    if args.feat_extractor == 'FrontBackSideSimple':
        feat_ext = FrontBackSideSimple(
            thresh1=1,
            thresh2=2,
            thresh3=3,
            thresh4=4,
            step_size=step_size,
            agent_width=agent_width,
            obs_width=obs_width,
            fieldList=['agent_state', 'goal_state', 'obstacles'])

    if args.feat_extractor == 'LocalGlobal':
        feat_ext = LocalGlobal(
            window_size=3,
            grid_size=grid_size,
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            fieldList=['agent_state', 'goal_state', 'obstacles'])

    if feat_ext is None:
        print('Enter proper feature extractor value.')
        exit()

    # environment driven by the pedestrian annotation file
    env = GridWorldDrone(display=args.render,
                         is_onehot=False,
                         seed=999,
                         obstacles=None,
                         show_trail=False,
                         is_random=False,
                         annotation_file=args.annotation_file,
                         subject=None,
                         tick_speed=90,
                         obs_width=10,
                         step_size=step_size,
                         agent_width=agent_width,
                         show_comparison=True,
                         rows=576,
                         cols=720,
                         width=grid_size)

    model = ActorCritic(env,
                        feat_extractor=feat_ext,
                        gamma=0.99,
                        log_interval=50,
                        max_ep_length=500,
                        max_episodes=2000)

    if args.policy_path is not None:
        model.policy.load(args.policy_path)

    if not args.play and not args.play_user:
        # training mode
        if args.reward_path is None:
            model.train_mp(n_jobs=4)
        else:
            # train against a learned IRL reward network instead of the
            # environment reward
            from irlmethods.deep_maxent import RewardNet
            state_size = feat_ext.extract_features(env.reset()).shape[0]
            reward_net = RewardNet(state_size)
            reward_net.load(args.reward_path)
            print(next(reward_net.parameters()).is_cuda)
            model.train_mp(reward_net=reward_net, n_jobs=4)

        if not args.dont_save:
            model.policy.save('./saved-models/')

    if args.play:
        # evaluation mode: roll out the loaded policy and store trajectories
        #env.tickSpeed = 15
        assert args.policy_path is not None, 'pass a policy to play from!'

        model.generate_trajectory(
            args.num_trajs, './trajs/ac_loc_glob_rectified_win_3_static_map3/')

    if args.play_user:
        # let a human drive the agent and record the trajectories
        env.tickSpeed = 200

        model.generate_trajectory_user(args.num_trajs,
                                       './trajs/ac_gridworld_user/')
Exemple #15
0
def generate_agent_grid_visitation_map(policy_fname_list,
                                       feature_extractor=None,
                                       store=False):
    """Render per-cell visitation heatmaps for a list of saved policies.

    Each policy is loaded in turn, rolled out on a fixed 10x10 GridWorld,
    and the number of visits to every grid cell is drawn as an annotated
    matplotlib heatmap.  When *store* is True each figure is also pickled
    to disk as ``FigureObject<k>.fig.pickle``.
    """
    # trajectory points gathered for every policy in the list
    all_policy_points = []
    traj_to_plot = 2

    env = GridWorld(display=False,
                    is_onehot=False,
                    is_random=False,
                    rows=10,
                    cols=10,
                    seed=3,
                    obstacles=[np.asarray([5, 5])],
                    goal_state=np.asarray([1, 5]))

    max_ep_length = 15
    run_iterations = 50

    rl_method = ActorCritic(env,
                            feat_extractor=feature_extractor,
                            gamma=0.99,
                            max_ep_length=max_ep_length,
                            log_interval=50)

    labels = [str(digit) for digit in range(10)]

    for counter, name in enumerate(policy_fname_list, start=1):
        # remember the policy earmarked for (currently disabled)
        # trajectory annotation
        if counter == traj_to_plot:
            policy_name_to_plot = name
        # load this policy's weights into the shared actor-critic
        rl_method.policy.load(name)
        policy_points = []

        # fresh environment per policy (different seed than the one above)
        env = GridWorld(display=False,
                        is_onehot=False,
                        is_random=False,
                        rows=10,
                        cols=10,
                        seed=7,
                        obstacles=[np.asarray([5, 5])],
                        goal_state=np.asarray([1, 5]))

        visit_map = np.zeros((env.rows, env.cols))

        for _ in range(run_iterations):
            run_points = []
            state = env.reset()
            visit_map[state['agent_state'][0], state['agent_state'][1]] += 1
            run_points.append(
                (state['agent_state'][0] * env.cellWidth,
                 state['agent_state'][1] * env.cellWidth))
            state = feature_extractor.extract_features(state)
            for _step in range(max_ep_length):
                action = rl_method.select_action(state)
                state, reward, done, _ = env.step(action)
                visit_map[state['agent_state'][0], state['agent_state'][1]] += 1
                run_points.append(
                    (state['agent_state'][0] * env.cellWidth,
                     state['agent_state'][1] * env.cellWidth))
                state = feature_extractor.extract_features(state)

            policy_points.append(run_points)

        all_policy_points.append(policy_points)

        # draw the heatmap with a fixed colour scale across policies
        fig, ax = plt.subplots()
        ax.imshow(visit_map, vmin=0, vmax=40)
        ax.set_xticks(np.arange(10))
        ax.set_yticks(np.arange(10))
        ax.set_xticklabels(labels)
        ax.set_yticklabels(labels)
        ax.set_xlabel('Columns of the gridworld', fontsize='large')
        ax.set_ylabel('Rows of the gridworld', fontsize='large')

        # annotate every cell with its raw visit count
        for i in range(len(labels)):
            for j in range(len(labels)):
                ax.text(j,
                        i,
                        visit_map[i, j],
                        ha="center",
                        va="bottom",
                        color="black")
        ax.set_title("Grid location visitation frequency for a unbiased agent")

        plt.draw()
        if store:
            pickle.dump(fig, open(f'FigureObject{counter}.fig.pickle', 'wb'))
        plt.pause(.001)

    #annotate_trajectory(policy_name_to_plot, env, rl_method,
    #                    max_ep_length, ax, feature_extractor=feature_extractor)

    plt.show()
Exemple #16
0
def main():
    """Run the IRL experiment end to end.

    Pipeline: parse CLI arguments, seed RNGs, build a timestamped save
    directory, construct the feature extractor and GridWorld environment,
    instantiate the requested RL method, pre-train the reward network and
    the policy, run IRL training, then collect metrics and dump all
    logged data (metrics pickle, RL/IRL CSV tables) under the save folder.

    Raises:
        ValueError: if ``args.rl_method`` does not name a known RL method.
    """

    args = parser.parse_args()

    utils.seed_all(args.seed)

    # Timestamp the save folder so repeated runs never overwrite each other.
    # NOTE(review): the "%H:%M:%S" colons make this directory name invalid
    # on Windows — confirm this only runs on POSIX systems.
    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime("%Y-%m-%d_%H:%M:%S")

    to_save = pathlib.Path(args.save_dir)
    dir_name = args.save_folder + "_" + st
    to_save = to_save / dir_name
    to_save = str(to_save.resolve())

    log_file = "Experiment_info.txt"

    experiment_logger = Logger(to_save, log_file)
    experiment_logger.log_header("Arguments for the experiment :")
    experiment_logger.log_info(vars(args))

    # NOTE(review): obstacles share the agent's width here and in the env
    # below (obs_width=args.pedestrian_width) — confirm that is intended.
    feat_ext = fe_utils.load_feature_extractor(
        args.feat_extractor,
        obs_width=args.pedestrian_width,
        agent_width=args.pedestrian_width,
    )

    experiment_logger.log_header("Parameters of the feature extractor :")
    experiment_logger.log_info(feat_ext.__dict__)

    # 576x720 grid sized to the annotated pedestrian-dataset frames; the
    # subject pedestrian is replaced by the learning agent when
    # args.replace_subject is set.
    env = GridWorld(
        display=args.render,
        is_random=False,
        rows=576,
        cols=720,
        agent_width=args.pedestrian_width,
        step_size=2,
        obs_width=args.pedestrian_width,
        width=10,
        subject=args.subject,
        annotation_file=args.annotation_file,
        goal_state=None,
        step_wrapper=utils.step_wrapper,
        seed=args.seed,
        replace_subject=args.replace_subject,
        segment_size=args.segment_size,
        external_control=True,
        continuous_action=False,
        reset_wrapper=utils.reset_wrapper,
        consider_heading=True,
        is_onehot=False,
        show_orientation=True,
        show_comparison=True,
        show_trail=True,
    )

    experiment_logger.log_header("Environment details :")
    experiment_logger.log_info(env.__dict__)

    # Select the RL method. The branches are mutually exclusive, so chain
    # them with elif and fail loudly on an unknown name (previously an
    # unrecognized method fell through all the independent ifs and crashed
    # later with an opaque NameError on `rl_method`).
    if args.rl_method == "ActorCritic":
        rl_method = ActorCritic(
            env,
            feat_extractor=feat_ext,
            gamma=1,
            log_interval=args.rl_log_intervals,
            max_episode_length=args.rl_ep_length,
            hidden_dims=args.policy_net_hidden_dims,
            save_folder=to_save,
            lr=args.lr_rl,
            max_episodes=args.rl_episodes,
        )

    elif args.rl_method == "SAC":
        # Vanilla SAC acts in a continuous space; the env above is built
        # with continuous_action=False, so this branch guards against it.
        if not env.continuous_action:
            print("The action space needs to be continuous for SAC to work.")
            exit()

        replay_buffer = ReplayBuffer(args.replay_buffer_size)

        rl_method = SoftActorCritic(
            env,
            replay_buffer,
            feat_ext,
            play_interval=500,
            learning_rate=args.lr_rl,
            buffer_sample_size=args.replay_buffer_sample_size,
        )

    elif args.rl_method == "discrete_QSAC":
        if not isinstance(env.action_space, gym.spaces.Discrete):
            print("discrete SAC requires a discrete action space to work.")
            exit()

        replay_buffer = ReplayBuffer(args.replay_buffer_size)

        rl_method = QSAC(
            env,
            replay_buffer,
            feat_ext,
            args.replay_buffer_sample_size,
            learning_rate=args.lr_rl,
            entropy_tuning=True,
            entropy_target=args.entropy_target,
            play_interval=args.play_interval,
            tau=args.tau,
            gamma=args.gamma,
        )

    elif args.rl_method == "discrete_SAC":
        if not isinstance(env.action_space, gym.spaces.Discrete):
            print("discrete SAC requires a discrete action space to work.")
            exit()

        replay_buffer = ReplayBuffer(args.replay_buffer_size)

        rl_method = DiscreteSAC(
            env,
            replay_buffer,
            feat_ext,
            args.replay_buffer_sample_size,
            learning_rate=args.lr_rl,
            entropy_tuning=True,
            entropy_target=args.entropy_target,
            play_interval=args.play_interval,
            tau=args.tau,
            gamma=args.gamma,
        )

    else:
        raise ValueError("Unknown rl_method: %r" % args.rl_method)

    print("RL method initialized.")
    print(rl_method.policy)
    if args.policy_path is not None:
        # Warm-start the policy from a previously saved checkpoint.
        rl_method.policy.load(args.policy_path)

    experiment_logger.log_header("Details of the RL method :")
    experiment_logger.log_info(rl_method.__dict__)

    expert_trajectories = read_expert_trajectories(args.exp_trajectory_path)

    irl_method = PerTrajGCL(
        rl=rl_method,
        env=env,
        expert_trajectories=expert_trajectories,
        learning_rate=args.lr_irl,
        l2_regularization=args.regularizer,
        save_folder=to_save,
        saving_interval=args.saving_interval,
    )

    print("IRL method initialized.")
    print(irl_method.reward_net)

    experiment_logger.log_header("Details of the IRL method :")
    experiment_logger.log_info(irl_method.__dict__)

    # Pre-train the reward network on expert samples alone, then give the
    # policy a head start against that fixed reward before joint training.
    irl_method.pre_train(
        args.pre_train_iterations,
        args.num_expert_samples,
        account_for_terminal_state=args.account_for_terminal_state,
        gamma=args.gamma,
    )

    rl_method.train(
        args.pre_train_rl_iterations,
        args.rl_ep_length,
        reward_network=irl_method.reward_net,
    )

    # Save intermediate RL result so the pre-trained policy survives a
    # crash during the (long) IRL loop.
    rl_method.policy.save(to_save + "/policy")

    # NOTE(review): args.rl_ep_length is passed twice positionally here —
    # verify against PerTrajGCL.train's signature that both slots really
    # take the episode length.
    irl_method.train(
        args.irl_iterations,
        args.rl_episodes,
        args.rl_ep_length,
        args.rl_ep_length,
        reset_training=args.reset_training,
        account_for_terminal_state=args.account_for_terminal_state,
        gamma=args.gamma,
        stochastic_sampling=args.stochastic_sampling,
        num_expert_samples=args.num_expert_samples,
        num_policy_samples=args.num_policy_samples,
    )

    # Evaluate the learned policy with as many rollouts as there are
    # expert trajectories, and persist everything that was logged.
    metric_applicator = metric_utils.LTHMP2020()
    metric_results = metric_utils.collect_trajectories_and_metrics(
        env,
        feat_ext,
        rl_method.policy,
        len(expert_trajectories),
        args.rl_ep_length,
        metric_applicator,
        disregard_collisions=True,
    )

    pd_metrics = pd.DataFrame(metric_results).T
    # Each cell holds a one-element list; unwrap to the scalar.
    pd_metrics = pd_metrics.applymap(lambda x: x[0])
    pd_metrics.to_pickle(to_save + "/metrics.pkl")

    with open(to_save + "/rl_data.csv", "a") as f:
        rl_method.data_table.write_csv(f)

    with open(to_save + "/irl_data.csv", "a") as f:
        irl_method.data_table.write_csv(f)

    with open(to_save + "/pre_irl_data.csv", "a") as f:
        irl_method.pre_data_table.write_csv(f)
Example #17
0
                         width=grid_size)

    #load the feature extractor
    feat_ext = DroneFeatureRisk_speed(agent_width=agent_width,
                                      obs_width=obs_width,
                                      step_size=step_size,
                                      grid_size=grid_size,
                                      show_agent_persp=False,
                                      thresh1=10,
                                      thresh2=15)

    #load the actor critic module
    model = ActorCritic(env,
                        feat_extractor=feat_ext,
                        gamma=1,
                        log_interval=100,
                        max_episode_length=max_ep_length,
                        hidden_dims=policy_net_hidden_dims,
                        save_folder=None,
                        max_episodes=total_episodes)

    expert_trajectory_folder = '/home/abhisek/Study/Robotics/deepirl/envs/expert_datasets/university_students/annotation/traj_info/frame_skip_1/students003/DroneFeatureRisk_speed_smooth_state/'

    policy_folder = '/home/abhisek/Study/Robotics/deepirl/experiments/results/Alienware/Temp'
    policy_file_list = []
    #read the files in the folder
    if os.path.isdir(policy_folder):
        policy_names = glob.glob(os.path.join(policy_folder, '*.pt'))
        policy_file_list = sorted(policy_names, key=numericalSort)

    else:
        policy_file_list.append(policy_folder)