Example #1
class Restlin:
    def __init__(self):
        self.__CONFIG = "../config/config.config"
        self.__APP = FastAPI()
        self.__HOST = ''
        self.__PORT = ''
        self.__LOGIN = Login(username='', password='')
        self.__APP.add_middleware(CORSMiddleware,
                                  allow_origins=["*"],
                                  allow_methods=["*"],
                                  allow_headers=["*"])
        self.__get_config()
        self.__LOGGER = Logger('restlin')

    def __get_config(self):
        with open(self.__CONFIG, 'r') as stream:
            restlin = yaml.load(stream, Loader=yaml.FullLoader)['RESTLIN']
            self.__HOST = restlin['HOST']
            self.__PORT = restlin['PORT']

    def set_login(self, login: Login):
        self.__LOGIN.username = login.username
        self.__LOGIN.password = login.password

    def get_login(self):
        return self.__LOGIN

    def start_unicorn(self):
        uvicorn.run(self.__APP,
                    host=self.__HOST,
                    port=self.__PORT,
                    log_level='error')

    def get_app(self):
        return self.__APP

    def log_info(self, msg):
        self.__LOGGER.log_info(msg)

    def log_warning(self, msg):
        self.__LOGGER.log_warning(msg)

    def log_error(self, msg):
        self.__LOGGER.log_error(msg)
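A minimal usage sketch for the Restlin class above, assuming the surrounding project supplies the Login and Logger helpers and the config file referenced in __init__:

# hypothetical usage of the Restlin wrapper defined above
restlin = Restlin()
app = restlin.get_app()

@app.get("/health")
def health():
    restlin.log_info("health check requested")
    return {"status": "ok"}

restlin.start_unicorn()  # serves the FastAPI app with uvicorn (blocking call)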
Example #2
def main():
    """Runs experiment"""

    args = parser.parse_args()

    utils.seed_all(args.seed)

    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime("%Y-%m-%d_%H:%M:%S")

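    # build a timestamped output directory: <save_dir>/<save_folder>_<timestamp>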
    to_save = pathlib.Path(args.save_dir)
    dir_name = args.save_folder + "_" + st
    to_save = to_save / dir_name
    to_save = str(to_save.resolve())

    log_file = "Experiment_info.txt"

    experiment_logger = Logger(to_save, log_file)
    experiment_logger.log_header("Arguments for the experiment :")
    experiment_logger.log_info(vars(args))

    feat_ext = fe_utils.load_feature_extractor(
        args.feat_extractor,
        obs_width=args.pedestrian_width,
        agent_width=args.pedestrian_width,
    )

    experiment_logger.log_header("Parameters of the feature extractor :")
    experiment_logger.log_info(feat_ext.__dict__)

    env = GridWorld(
        display=args.render,
        is_random=False,
        rows=576,
        cols=720,
        agent_width=args.pedestrian_width,
        step_size=2,
        obs_width=args.pedestrian_width,
        width=10,
        subject=args.subject,
        annotation_file=args.annotation_file,
        goal_state=None,
        step_wrapper=utils.step_wrapper,
        seed=args.seed,
        replace_subject=args.replace_subject,
        segment_size=args.segment_size,
        external_control=True,
        continuous_action=False,
        reset_wrapper=utils.reset_wrapper,
        consider_heading=True,
        is_onehot=False,
        show_orientation=True,
        show_comparison=True,
        show_trail=True,
    )

    experiment_logger.log_header("Environment details :")
    experiment_logger.log_info(env.__dict__)

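    # pick the RL backend based on args.rl_method (ActorCritic, SAC, discrete_QSAC, or discrete_SAC)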
    if args.rl_method == "ActorCritic":
        rl_method = ActorCritic(
            env,
            feat_extractor=feat_ext,
            gamma=1,
            log_interval=args.rl_log_intervals,
            max_episode_length=args.rl_ep_length,
            hidden_dims=args.policy_net_hidden_dims,
            save_folder=to_save,
            lr=args.lr_rl,
            max_episodes=args.rl_episodes,
        )

    if args.rl_method == "SAC":
        if not env.continuous_action:
            print("The action space needs to be continuous for SAC to work.")
            exit()

        replay_buffer = ReplayBuffer(args.replay_buffer_size)

        rl_method = SoftActorCritic(
            env,
            replay_buffer,
            feat_ext,
            play_interval=500,
            learning_rate=args.lr_rl,
            buffer_sample_size=args.replay_buffer_sample_size,
        )

    if args.rl_method == "discrete_QSAC":
        if not isinstance(env.action_space, gym.spaces.Discrete):
            print("discrete SAC requires a discrete action space to work.")
            exit()

        replay_buffer = ReplayBuffer(args.replay_buffer_size)

        rl_method = QSAC(
            env,
            replay_buffer,
            feat_ext,
            args.replay_buffer_sample_size,
            learning_rate=args.lr_rl,
            entropy_tuning=True,
            entropy_target=args.entropy_target,
            play_interval=args.play_interval,
            tau=args.tau,
            gamma=args.gamma,
        )

    if args.rl_method == "discrete_SAC":
        if not isinstance(env.action_space, gym.spaces.Discrete):
            print("discrete SAC requires a discrete action space to work.")
            exit()

        replay_buffer = ReplayBuffer(args.replay_buffer_size)

        rl_method = DiscreteSAC(
            env,
            replay_buffer,
            feat_ext,
            args.replay_buffer_sample_size,
            learning_rate=args.lr_rl,
            entropy_tuning=True,
            entropy_target=args.entropy_target,
            play_interval=args.play_interval,
            tau=args.tau,
            gamma=args.gamma,
        )

    print("RL method initialized.")
    print(rl_method.policy)
    if args.policy_path is not None:
        rl_method.policy.load(args.policy_path)

    experiment_logger.log_header("Details of the RL method :")
    experiment_logger.log_info(rl_method.__dict__)

    expert_trajectories = read_expert_trajectories(args.exp_trajectory_path)

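    # initialize the IRL method (PerTrajGCL) with the expert demonstrations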
    irl_method = PerTrajGCL(
        rl=rl_method,
        env=env,
        expert_trajectories=expert_trajectories,
        learning_rate=args.lr_irl,
        l2_regularization=args.regularizer,
        save_folder=to_save,
        saving_interval=args.saving_interval,
    )

    print("IRL method initialized.")
    print(irl_method.reward_net)

    experiment_logger.log_header("Details of the IRL method :")
    experiment_logger.log_info(irl_method.__dict__)

    irl_method.pre_train(
        args.pre_train_iterations,
        args.num_expert_samples,
        account_for_terminal_state=args.account_for_terminal_state,
        gamma=args.gamma,
    )

    rl_method.train(
        args.pre_train_rl_iterations,
        args.rl_ep_length,
        reward_network=irl_method.reward_net,
    )

    # save intermediate RL result
    rl_method.policy.save(to_save + "/policy")

    irl_method.train(
        args.irl_iterations,
        args.rl_episodes,
        args.rl_ep_length,
        args.rl_ep_length,
        reset_training=args.reset_training,
        account_for_terminal_state=args.account_for_terminal_state,
        gamma=args.gamma,
        stochastic_sampling=args.stochastic_sampling,
        num_expert_samples=args.num_expert_samples,
        num_policy_samples=args.num_policy_samples,
    )

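    # evaluate the learned policy with the LTHMP2020 metric suite and pickle the results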
    metric_applicator = metric_utils.LTHMP2020()
    metric_results = metric_utils.collect_trajectories_and_metrics(
        env,
        feat_ext,
        rl_method.policy,
        len(expert_trajectories),
        args.rl_ep_length,
        metric_applicator,
        disregard_collisions=True,
    )

    pd_metrics = pd.DataFrame(metric_results).T
    pd_metrics = pd_metrics.applymap(lambda x: x[0])
    pd_metrics.to_pickle(to_save + "/metrics.pkl")

    with open(to_save + "/rl_data.csv", "a") as f:
        rl_method.data_table.write_csv(f)

    with open(to_save + "/irl_data.csv", "a") as f:
        irl_method.data_table.write_csv(f)

    with open(to_save + "/pre_irl_data.csv", "a") as f:
        irl_method.pre_data_table.write_csv(f)
Example #3
def main():
    args = parser.parse_args()

    if args.on_server:
        # matplotlib without monitor
        matplotlib.use('Agg')

        # pygame without monitor
        os.environ['SDL_VIDEODRIVER'] = 'dummy'

    ###
    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
    ###
    parent_dir = './results/' + str(args.save_folder) + st
    to_save = ('./results/' + str(args.save_folder) + st + '-reg-' +
               str(args.regularizer) + '-seed-' + str(args.seed) +
               '-lr-' + str(args.lr))
    log_file = 'Experiment_info.txt'
    experiment_logger = Logger(to_save, log_file)

    experiment_logger.log_header('Arguments for the experiment :')
    experiment_logger.log_info(vars(args))

    from rlmethods.rlutils import LossBasedTermination
    from rlmethods.b_actor_critic import ActorCritic
    from irlmethods.deep_maxent import DeepMaxEnt
    import irlmethods.irlUtils as irlUtils

    #check for the feature extractor being used
    #initialize feature extractor
    if args.feat_extractor == 'MCFeatures':
        feat_ext = MCFeatures(args.state_discretization[0],
                              args.state_discretization[1])

    if args.feat_extractor == 'MCFeaturesOnehot':
        feat_ext = MCFeaturesOnehot(args.state_discretization[0],
                                    args.state_discretization[1])

    experiment_logger.log_header('Parameters of the feature extractor :')
    experiment_logger.log_info(feat_ext.__dict__)

    #initialize the environment
    if not args.dont_save and args.save_folder is None:
        print('Specify folder to save the results.')
        exit()
    #**set is_onehot to false
    goal_state = np.asarray([1, 5])
    '''
    env = GridWorld(display=args.render, is_onehot= False,is_random=False,
                    rows =10,
                    cols =10,
                    seed = 7,
                    obstacles = [np.asarray([5,5])],
                                
                    goal_state = np.asarray([1,5]))

    '''
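    # use the unwrapped MountainCar environment in place of the GridWorld sketched above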
    env = gym.make('MountainCar-v0')
    env = env.unwrapped
    #pdb.set_trace()
    experiment_logger.log_header('Environment details :')
    experiment_logger.log_info(env.__dict__)

    # CHANGE HERE: initialize the RL method
    rlMethod = ActorCritic(env,
                           gamma=1,
                           log_interval=args.rl_log_intervals,
                           max_episodes=args.rl_episodes,
                           max_ep_length=args.rl_ep_length,
                           termination=None,
                           plot_loss=False,
                           save_folder=to_save,
                           hidden_dims=args.policy_net_hidden_dims,
                           feat_extractor=feat_ext)
    print("RL method initialized.")
    print(rlMethod.policy)
    if args.policy_path is not None:
        rlMethod.policy.load(args.policy_path)

    experiment_logger.log_header('Details of the RL method :')
    experiment_logger.log_info(rlMethod.__dict__)

    # initialize IRL method
    #CHANGE HERE
    trajectory_path = args.exp_trajectory_path

    irlMethod = DeepMaxEnt(trajectory_path,
                           rlmethod=rlMethod,
                           env=env,
                           iterations=args.irl_iterations,
                           log_intervals=5,
                           on_server=args.on_server,
                           regularizer=args.regularizer,
                           learning_rate=args.lr,
                           graft=False,
                           seed=args.seed,
                           scale_svf=args.scale_svf,
                           hidden_dims=args.reward_net_hidden_dims,
                           clipping_value=args.clipping_value,
                           save_folder=parent_dir)
    print("IRL method initialized.")
    experiment_logger.log_header('Details of the IRL method :')
    experiment_logger.log_info(irlMethod.__dict__)

    print(irlMethod.reward)
    rewardNetwork = irlMethod.train()

    if not args.dont_save:
        pass
Example #4
def main():

    args = parser.parse_args()

    experiment_logger = Logger('temp_save.txt')

    experiment_logger.log_header('Arguments for the experiment :')
    experiment_logger.log_info(vars(args))

    mp.set_start_method('spawn')

    if args.render:
        from envs.gridworld import GridWorld
    else:
        from envs.gridworld_clockless import GridWorldClockless as GridWorld

    agent_width = 10
    step_size = 10
    obs_width = 10
    grid_size = 10

    if args.feat_extractor == 'Onehot':
        feat_ext = OneHot(grid_rows=10, grid_cols=10)
    if args.feat_extractor == 'SocialNav':
        feat_ext = SocialNav(fieldList=['agent_state', 'goal_state'])
    if args.feat_extractor == 'FrontBackSideSimple':
        feat_ext = FrontBackSideSimple(
            thresh1=1,
            thresh2=2,
            thresh3=3,
            thresh4=4,
            step_size=step_size,
            agent_width=agent_width,
            obs_width=obs_width,
        )

    if args.feat_extractor == 'LocalGlobal':
        feat_ext = LocalGlobal(
            window_size=3,
            grid_size=grid_size,
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
        )

    experiment_logger.log_header('Parameters of the feature extractor :')
    experiment_logger.log_info(feat_ext.__dict__)
    '''
    np.asarray([2,2]),np.asarray([7,4]),np.asarray([3,5]),
                                np.asarray([5,2]),np.asarray([8,3]),np.asarray([7,5]),
                                np.asarray([3,3]),np.asarray([3,7]),np.asarray([5,7])
                                '''
    env = GridWorld(display=args.render,
                    is_onehot=False,
                    is_random=True,
                    rows=100,
                    agent_width=agent_width,
                    step_size=step_size,
                    obs_width=obs_width,
                    width=grid_size,
                    cols=100,
                    seed=7,
                    buffer_from_obs=0,
                    obstacles=3,
                    goal_state=np.asarray([5, 5]))

    experiment_logger.log_header('Environment details :')
    experiment_logger.log_info(env.__dict__)

    model = ActorCritic(env,
                        feat_extractor=feat_ext,
                        gamma=0.99,
                        log_interval=100,
                        max_ep_length=40,
                        hidden_dims=args.policy_net_hidden_dims,
                        max_episodes=4000)

    experiment_logger.log_header('Details of the RL method :')
    experiment_logger.log_info(model.__dict__)

    pdb.set_trace()

    if args.policy_path is not None:
        model.policy.load(args.policy_path)

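    # train unless running in playback mode; optionally train against a saved reward network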
    if not args.play and not args.play_user:
        if args.reward_path is None:
            model.train_mp(n_jobs=4)
        else:
            from irlmethods.deep_maxent import RewardNet
            state_size = feat_ext.extract_features(env.reset()).shape[0]
            reward_net = RewardNet(state_size)
            reward_net.load(args.reward_path)
            print(next(reward_net.parameters()).is_cuda)
            model.train_mp(reward_net=reward_net, n_jobs=4)

        if not args.dont_save:
            model.policy.save('./saved-models/')

    if args.play:
        env.tickSpeed = 15
        assert args.policy_path is not None, 'pass a policy to play from!'

        model.generate_trajectory(args.num_trajs,
                                  './trajs/ac_fbs_simple4_static_map7/')

    if args.play_user:
        env.tickSpeed = 200

        model.generate_trajectory_user(args.num_trajs,
                                       './trajs/ac_gridworld_user/')
Example #5
def main():
    #####for the logger
    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime("%Y-%m-%d %H:%M:%S")
    ###################

    args = parser.parse_args()

    from envs.gridworld_drone import GridWorldDrone

    from featureExtractor.drone_feature_extractor import (
        DroneFeatureRisk_speedv2, )

    save_folder = None

    if not args.dont_save and not args.play:

        if not args.save_folder:
            print("Provide save folder.")
            exit()

        policy_net_dims = "-policy_net-"
        for dim in args.policy_net_hidden_dims:
            policy_net_dims += str(dim)
            policy_net_dims += "-"

        reward_net_dims = "-reward_net-"
        for dim in args.reward_net_hidden_dims:
            reward_net_dims += str(dim)
            reward_net_dims += "-"
        save_folder = ("./results/" + args.save_folder + st +
                       args.feat_extractor + "-seed-" + str(args.seed) +
                       policy_net_dims + reward_net_dims + "-total-ep-" +
                       str(args.total_epochs) + "-max-ep-len-" +
                       str(args.max_ep_length))

        experiment_logger = Logger(save_folder, "experiment_info.txt")
        experiment_logger.log_header("Arguments for the experiment :")
        repo = git.Repo(search_parent_directories=True)
        experiment_logger.log_info({'From branch : ': repo.active_branch.name})
        experiment_logger.log_info(
            {'Commit number : ': repo.head.object.hexsha})
        experiment_logger.log_info(vars(args))

    window_size = 9
    step_size = 2
    agent_width = 10
    obs_width = 10
    grid_size = 10

    feat_ext = None

    # initialize the feature extractor to be used

    if args.feat_extractor == 'DroneFeatureRisk_speedv2':

        feat_ext = DroneFeatureRisk_speedv2(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            show_agent_persp=False,
            return_tensor=False,
            thresh1=18,
            thresh2=30,
        )

    if feat_ext is None:
        print("Please enter proper feature extractor!")
        sys.exit()

    #log feature extractor information
    if not args.dont_save and not args.play:
        experiment_logger.log_header("Parameters of the feature extractor :")
        experiment_logger.log_info(feat_ext.__dict__)

    #initialize the environment

    replace_subject = bool(args.replace_subject)
    continuous_action_flag = bool(args.continuous_control)

    env = GridWorldDrone(
        display=args.render,
        seed=args.seed,
        show_trail=False,
        is_random=False,
        annotation_file=args.annotation_file,
        subject=args.subject,
        tick_speed=60,
        obs_width=10,
        step_size=step_size,
        agent_width=agent_width,
        external_control=True,
        step_reward=0.001,
        show_comparison=True,
        replace_subject=replace_subject,
        continuous_action=continuous_action_flag,
        # rows=200, cols=200, width=grid_size)
        rows=576,
        cols=720,
        width=grid_size,
    )

    #log information about the environment

    if not args.dont_save and not args.play:
        experiment_logger.log_header("Environment details :")
        experiment_logger.log_info(env.__dict__)

    #initialize the controller

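    # categorical control picks one of 35 discrete actions; otherwise the controller regresses two continuous values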
    categorical_flag = False
    output_size = 2
    if args.is_categorical:
        categorical_flag = True
        output_size = 35

    controller = SupervisedPolicyController(
        80,
        output_size,
        categorical=categorical_flag,
        hidden_dims=args.policy_net_hidden_dims,
        policy_path=args.policy_path,
        mini_batch_size=args.batch_size,
        learning_rate=args.lr,
        save_folder=save_folder)

    if not args.dont_save and not args.play:
        experiment_logger.log_header("Environment details :")
        experiment_logger.log_info(controller.__dict__)

    base_data_path = '../envs/expert_datasets/university_students/annotation/traj_info/\
frame_skip_1/students003/'

    folder_name = args.training_data_folder
    data_folder = base_data_path + folder_name
    if not args.play:

        if categorical_flag:
            controller.train(args.total_epochs, data_folder)
        else:
            controller.train_regression(args.total_epochs, data_folder)

    if args.play:

        controller.play_policy(args.num_trajs, env, args.max_ep_length,
                               feat_ext)
Example #6
def main():
    args = parser.parse_args()

    if args.on_server:
        # matplotlib without monitor
        matplotlib.use('Agg')

        # pygame without monitor
        os.environ['SDL_VIDEODRIVER'] = 'dummy'

    #####for the logger
    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H:%M:%S')
    ###################

    if not args.save_folder:
        print('Provide save folder.')
        exit()

    policy_net_dims = '-policy_net-'
    for dim in args.policy_net_hidden_dims:
        policy_net_dims += str(dim)
        policy_net_dims += '-'

    reward_net_dims = '-reward_net-'
    for dim in args.reward_net_hidden_dims:
        reward_net_dims += str(dim)
        reward_net_dims += '-'

    parent_dir = './results/' + str(
        args.save_folder) + st + policy_net_dims + reward_net_dims
    to_save = ('./results/' + str(args.save_folder) + st + policy_net_dims +
               reward_net_dims + '-reg-' + str(args.regularizer) +
               '-seed-' + str(args.seed) + '-lr-' + str(args.lr_irl))

    log_file = 'Experiment_info.txt'

    experiment_logger = Logger(to_save, log_file)
    experiment_logger.log_header('Arguments for the experiment :')
    experiment_logger.log_info(vars(args))

    #from rlmethods.rlutils import LossBasedTermination
    #for rl
    from rlmethods.b_actor_critic import ActorCritic
    from rlmethods.soft_ac_pi import SoftActorCritic
    from rlmethods.rlutils import ReplayBuffer

    #for irl
    from irlmethods.deep_maxent import DeepMaxEnt
    import irlmethods.irlUtils as irlUtils
    from featureExtractor.gridworld_featureExtractor import OneHot, LocalGlobal, SocialNav, FrontBackSideSimple

    agent_width = 10
    step_size = 2
    obs_width = 10
    grid_size = 10

    if args.feat_extractor is None:

        print('Feature extractor missing.')
        exit()

    #check for the feature extractor being used
    #initialize feature extractor
    if args.feat_extractor == 'Onehot':
        feat_ext = OneHot(grid_rows=10, grid_cols=10)
    if args.feat_extractor == 'SocialNav':
        feat_ext = SocialNav()
    if args.feat_extractor == 'FrontBackSideSimple':
        feat_ext = FrontBackSideSimple(
            thresh1=1,
            thresh2=2,
            thresh3=3,
            thresh4=4,
            step_size=step_size,
            agent_width=agent_width,
            obs_width=obs_width,
        )

    if args.feat_extractor == 'LocalGlobal':
        feat_ext = LocalGlobal(
            window_size=5,
            grid_size=grid_size,
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
        )

    if args.feat_extractor == 'DroneFeatureSAM1':

        feat_ext = DroneFeatureSAM1(agent_width=agent_width,
                                    obs_width=obs_width,
                                    step_size=step_size,
                                    grid_size=grid_size,
                                    thresh1=5,
                                    thresh2=10)

    if args.feat_extractor == 'DroneFeatureRisk':

        feat_ext = DroneFeatureRisk(agent_width=agent_width,
                                    obs_width=obs_width,
                                    step_size=step_size,
                                    grid_size=grid_size,
                                    thresh1=15,
                                    thresh2=30)

    if args.feat_extractor == 'DroneFeatureRisk_v2':

        feat_ext = DroneFeatureRisk_v2(agent_width=agent_width,
                                       obs_width=obs_width,
                                       step_size=step_size,
                                       grid_size=grid_size,
                                       thresh1=15,
                                       thresh2=30)

    if args.feat_extractor == 'DroneFeatureRisk_speed':

        feat_ext = DroneFeatureRisk_speed(agent_width=agent_width,
                                          obs_width=obs_width,
                                          step_size=step_size,
                                          grid_size=grid_size,
                                          thresh1=10,
                                          thresh2=15)

    if args.feat_extractor == 'DroneFeatureRisk_speedv2':

        feat_ext = DroneFeatureRisk_speedv2(agent_width=agent_width,
                                            obs_width=obs_width,
                                            step_size=step_size,
                                            grid_size=grid_size,
                                            thresh1=18,
                                            thresh2=30)

    experiment_logger.log_header('Parameters of the feature extractor :')
    experiment_logger.log_info(feat_ext.__dict__)

    #initialize the environment
    if not args.dont_save and args.save_folder is None:
        print('Specify folder to save the results.')
        exit()
    '''
    environment can now initialize without an annotation file
    if args.annotation_file is None:
        print('Specify annotation file for the environment.')
        exit()
    '''
    if args.exp_trajectory_path is None:
        print('Specify expert trajectory folder.')
        exit()

    #**set is_onehot to false
    goal_state = np.asarray([1, 5])
    '''
    env = GridWorld(display=args.render, is_onehot= False,is_random=False,
                    rows =10,
                    cols =10,
                    seed = 7,
                    obstacles = [np.asarray([5,5])],
                                
                    goal_state = np.asarray([1,5]))

    '''

    env = GridWorld(display=args.render,
                    is_random=True,
                    rows=576,
                    cols=720,
                    agent_width=agent_width,
                    step_size=step_size,
                    obs_width=obs_width,
                    width=grid_size,
                    subject=args.subject,
                    annotation_file=args.annotation_file,
                    goal_state=goal_state,
                    step_wrapper=utils.step_wrapper,
                    seed=args.seed,
                    replace_subject=args.replace_subject,
                    segment_size=args.segment_size,
                    external_control=True,
                    continuous_action=True,
                    reset_wrapper=utils.reset_wrapper,
                    consider_heading=True,
                    is_onehot=False)

    experiment_logger.log_header('Environment details :')
    experiment_logger.log_info(env.__dict__)

    # CHANGE HERE: initialize the RL method

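    # Soft Actor-Critic with automatic entropy tuning, logging through the TensorBoard SummaryWriter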
    replay_buffer = ReplayBuffer(args.replay_buffer_size)
    tbx_writer = SummaryWriter(to_save)
    rl_method = SoftActorCritic(
        env,
        replay_buffer,
        feat_ext,
        buffer_sample_size=args.replay_buffer_sample_size,
        tbx_writer=tbx_writer,
        entropy_tuning=True,
        tau=0.005,
        log_alpha=args.log_alpha,
        entropy_target=args.entropy_target,
        render=args.render,
        checkpoint_interval=100000000,
        play_interval=args.play_interval,
    )

    print("RL method initialized.")
    print(rl_method.policy)

    experiment_logger.log_header('Details of the RL method :')
    experiment_logger.log_info(rl_method.__dict__)

    # initialize IRL method
    #CHANGE HERE
    trajectory_path = args.exp_trajectory_path

    # default to no scaling when args.scale_svf is unset or falsy
    scale = args.scale_svf if args.scale_svf else False
    irl_method = DeepMaxEnt(trajectory_path,
                            rlmethod=rl_method,
                            rl_episodes=args.rl_episodes,
                            env=env,
                            iterations=args.irl_iterations,
                            on_server=args.on_server,
                            l1regularizer=args.regularizer,
                            learning_rate=args.lr_irl,
                            seed=args.seed,
                            graft=False,
                            scale_svf=scale,
                            rl_max_ep_len=args.max_episode_length,
                            hidden_dims=args.reward_net_hidden_dims,
                            clipping_value=args.clipping_value,
                            enumerate_all=True,
                            save_folder=parent_dir)

    print("IRL method initialized.")
    print(irl_method.reward)

    experiment_logger.log_header('Details of the IRL method :')
    experiment_logger.log_info(irl_method.__dict__)
    irl_method.train()

    if not args.dont_save:
        pass
Example #7
def main():
    args = parser.parse_args()

    if args.on_server:
        # matplotlib without monitor
        matplotlib.use('Agg')

        # pygame without monitor
        os.environ['SDL_VIDEODRIVER'] = 'dummy'

    #####for the logger
    base_folder = './results/' + str(args.save_folder) + '-reg-' + str(
        args.regularizer) + '-seed-' + str(args.seed) + '-lr-' + str(args.lr)
    log_file = 'Experiment_info.txt'
    experiment_logger = Logger(base_folder, log_file)

    experiment_logger.log_header('Arguments for the experiment :')
    experiment_logger.log_info(vars(args))

    from rlmethods.rlutils import LossBasedTermination
    from rlmethods.b_actor_critic import ActorCritic
    from irlmethods.deep_maxent import DeepMaxEnt
    import irlmethods.irlUtils as irlUtils
    from featureExtractor.gridworld_featureExtractor import OneHot, LocalGlobal, SocialNav, FrontBackSideSimple

    agent_width = 10
    step_size = 10
    obs_width = 10
    grid_size = 10

    if args.feat_extractor is None:

        print('Feature extractor missing.')
        exit()

    #check for the feature extractor being used
    #initialize feature extractor
    if args.feat_extractor == 'Onehot':
        feat_ext = OneHot(grid_rows=10, grid_cols=10)
    if args.feat_extractor == 'SocialNav':
        feat_ext = SocialNav()
    if args.feat_extractor == 'FrontBackSideSimple':
        feat_ext = FrontBackSideSimple(
            thresh1=1,
            thresh2=2,
            thresh3=3,
            thresh4=4,
            step_size=step_size,
            agent_width=agent_width,
            obs_width=obs_width,
        )

    if args.feat_extractor == 'LocalGlobal':
        feat_ext = LocalGlobal(
            window_size=5,
            grid_size=grid_size,
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
        )

    experiment_logger.log_header('Parameters of the feature extractor :')
    experiment_logger.log_info(feat_ext.__dict__)

    #initialize the environment
    if not args.dont_save and args.save_folder is None:
        print('Specify folder to save the results.')
        exit()

    if args.annotation_file is None:
        print('Specify annotation file for the environment.')
        exit()

    if args.exp_trajectory_path is None:
        print('Specify expert trajectory folder.')
        exit()

    #**set is_onehot to false
    goal_state = np.asarray([1, 5])
    '''
    env = GridWorld(display=args.render, is_onehot= False,is_random=False,
                    rows =10,
                    cols =10,
                    seed = 7,
                    obstacles = [np.asarray([5,5])],
                                
                    goal_state = np.asarray([1,5]))

    '''

    env = GridWorld(display=args.render,
                    is_random=True,
                    rows=576,
                    cols=720,
                    agent_width=agent_width,
                    step_size=step_size,
                    obs_width=obs_width,
                    width=grid_size,
                    annotation_file=args.annotation_file,
                    goal_state=goal_state,
                    step_wrapper=utils.step_wrapper,
                    seed=args.seed,
                    reset_wrapper=utils.reset_wrapper,
                    is_onehot=False)

    experiment_logger.log_header('Environment details :')
    experiment_logger.log_info(env.__dict__)

    # CHANGE HERE: initialize the RL method
    rlMethod = ActorCritic(env,
                           gamma=0.99,
                           log_interval=args.rl_log_intervals,
                           max_episodes=args.rl_episodes,
                           max_ep_length=args.rl_ep_length,
                           termination=None,
                           hidden_dims=args.reward_net_hidden_dims,
                           feat_extractor=feat_ext)
    print("RL method initialized.")
    print(rlMethod.policy)
    if args.policy_path is not None:
        rlMethod.policy.load(args.policy_path)

    experiment_logger.log_header('Details of the RL method :')
    experiment_logger.log_info(rlMethod.__dict__)

    # initialize IRL method
    #CHANGE HERE
    trajectory_path = args.exp_trajectory_path

    folder_to_save = '/results/' + args.save_folder
    irlMethod = DeepMaxEnt(trajectory_path,
                           rlmethod=rlMethod,
                           env=env,
                           iterations=args.irl_iterations,
                           log_intervals=5,
                           on_server=args.on_server,
                           regularizer=args.regularizer,
                           learning_rate=args.lr,
                           graft=True,
                           hidden_dims=args.reward_net_hidden_dims,
                           save_folder=folder_to_save)
    print("IRL method initialized.")
    print(irlMethod.reward)

    experiment_logger.log_header('Details of the IRL method :')
    experiment_logger.log_info(irlMethod.__dict__)
    rewardNetwork = irlMethod.train()

    if not args.dont_save:
        pass
Example #8
class Database:
    def __init__(self, username, password):
        # READ CONFIGURATION FROM config FILE
        self.__CONFIG = "../config/config.config"

        # PARSE DATA FROM DICTIONARY
        self.__HOST = ''
        self.__DATABASE = ''
        self.__PORT = ''
        self.__get_config()

        # CREATE CONNECTION
        try:
            self.__CONNECTION = psycopg2.connect(host=self.__HOST,
                                                 port=self.__PORT,
                                                 database=self.__DATABASE,
                                                 user=username,
                                                 password=password)
        except Exception as e:
            print(f"Got exception trying to connect:\n {e}")

        # CREATE CURSOR
        self.__CURSOR = self.__CONNECTION.cursor()

        #LOGGER
        self.__LOGGER = Logger("database")
        self.__LOGGER.log_info(
            f"Established new connection to database by {username}.")

    def __get_config(self):
        with open(self.__CONFIG, 'r') as stream:
            db = yaml.load(stream, Loader=yaml.FullLoader)['DATABASE']
            self.__HOST = db['HOST']
            self.__PORT = db['PORT']
            self.__DATABASE = db['DATABASE']

    def __get_ids_for_update(self, clientname, clientnumber, servicename,
                             statusname):
        self.__CURSOR.execute(
            f"SELECT * FROM get_id_for_update('{clientname}', '{clientnumber}', '{servicename}', '{statusname}')"
        )
        return self.__CURSOR.fetchone()

    def update_client_live(self, clientname, clientnumber, servicename,
                           statusname):
        try:
            data = self.__get_ids_for_update(clientname, clientnumber,
                                             servicename, statusname)
            self.__CURSOR.execute(
                f"call update_live_status({data[0]}, {data[1]}, {data[2]})")
            self.__CONNECTION.commit()
        except Exception as e:
            self.__LOGGER.log_error(f"Update client live raised error: {e}")

    def is_admin(self):
        self.__CURSOR.execute("SELECT is_super()")
        return self.__CURSOR.fetchone()[0]

    def get_data_for_managers(self):
        try:
            self.__CURSOR.execute("SELECT * FROM managers_view")
            data = self.__CURSOR.fetchall()
            self.__CURSOR.execute("SELECT * FROM managers_live_view")
            live = self.__CURSOR.fetchall()
            self.__CURSOR.execute("SELECT service FROM services ORDER BY id")
            services = self.__CURSOR.fetchall()
            organised_data = Data()
            index = 0
            response = Response()
            response.data = []
            response.services = []
            for service in services:
                response.services.append(service[0])
            for d in data:
                organised_data.statuses = []
                organised_data.name = d[0]
                organised_data.phone = d[1]
                organised_data.adress = d[2]
                organised_data.salary = d[3]
                for s in services:
                    organised_data.statuses.append(live[index][0])
                    index += 1
                response.data.append(organised_data)
                organised_data = Data()
        except Exception as e:
            self.__LOGGER.log_error(f"Get data for managers raised error: {e}")
        return response

    def get_managers(self):
        try:
            self.__CURSOR.execute("SELECT * FROM administrator_user_view")
            response = []
            manager = Managers()
            managers = self.__CURSOR.fetchall()
            for m in managers:
                manager.username = m[0]
                response.append(manager)
                manager = Managers()
        except Exception as e:
            self.__LOGGER.log_error(f"Get managers raised error: {e}")
        return response

    def get_clients(self):
        try:
            self.__CURSOR.execute("SELECT * FROM administrator_client_view")
            clients = self.__CURSOR.fetchall()
            response = []
            client = Clients()
            for c in clients:
                client.id_ = c[0]
                client.fio = c[1]
                client.tel = c[2]
                client.job = c[3]
                client.homeadress = c[4]
                client.salary = c[5]
                client.call_back = c[6]
                response.append(client)
                client = Clients()
        except Exception as e:
            self.__LOGGER.log_error(f"Get clients raised error: {e}")

        return response

    def get_services(self):
        try:
            self.__CURSOR.execute("SELECT * FROM administrator_service_view")
            services = self.__CURSOR.fetchall()
            response = []
            service = Services()
            for s in services:
                service.id_ = s[0]
                service.service = s[1]
                service.service_description = s[2]
                response.append(service)
                service = Services()
        except Exception as e:
            self.__LOGGER.log_error(f"Get services raised error: {e}")
        return response

    def delete_client(self, id_):
        try:
            self.__CURSOR.execute(f"call delete_client({id_})")
            self.__CONNECTION.commit()
        except Exception as e:
            self.__LOGGER.log_error(f"Delete client raised error: {e}")
            return False
        return True

    def update_client(self, client: Clients):
        try:
            cli = client.dict()
            query = f"call update_client(client_id_p=>{client.id_}"
            for c in cli.keys():
                if (cli[c] and c != "id_" and c != "call_back"
                        and c != "salary"):
                    query += f",{c}_p=>'{cli[c]}'"
                elif (cli[c] and c == "salary" or c == "call_back"):
                    query += f",{c}_p=>{cli[c]}"
            query += ")"
            self.__CURSOR.execute(query)
            self.__CONNECTION.commit()
        except Exception as e:
            self.__LOGGER.log_error(f"Update client raised error: {e}")
            return False
        return True

    def insert_clients(self, client: Clients):
        try:
            self.__CURSOR.execute(
                f"call insert_client('{client.fio}','{client.tel}','{client.job}', '{client.homeadress}', '{client.salary}')"
            )
            self.__CONNECTION.commit()
        except Exception as e:
            self.__LOGGER.log_error(f"Insert client raised error: {e}")
            return False
        return True

    def delete_service(self, id_):
        try:
            self.__CURSOR.execute(f"call delete_service({id_})")
            self.__CONNECTION.commit()
        except Exception as e:
            self.__LOGGER.log_error(f"Delete service raised error: {e}")
            return False
        return True

    def insert_service(self, service: Services):
        try:
            self.__CURSOR.execute(
                f"select insert_service('{service.service}','{service.service_description}')"
            )
            self.__CONNECTION.commit()
        except Exception as e:
            self.__LOGGER.log_error(f"Insert service raised error: {e}")
            return False
        return True

    def delete_role(self, username):
        try:
            self.__CURSOR.execute(f"call delete_user('{username}')")
            self.__CONNECTION.commit()
        except Exception as e:
            self.__LOGGER.log_error(f"Delete role raised error: {e}")
            return False
        return True

    def insert_role(self, login: Login):
        try:
            self.__CURSOR.execute(
                f"call add_user('{login.username}', '{login.password}')")
            self.__CONNECTION.commit()
        except Exception as e:
            self.__LOGGER.log_error(f"Insert role raised error: {e}")
            return False
        return True
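A minimal usage sketch for the Database class above, assuming the surrounding project supplies the Logger, Login, Clients, Services, and related model classes plus the config file read in __get_config:

# hypothetical usage; credentials and ../config/config.config come from the surrounding project
db = Database(username="manager", password="secret")
if db.is_admin():
    for client in db.get_clients():
        print(client.fio, client.tel)

service = Services()
service.service = "consulting"
service.service_description = "initial consultation call"
db.insert_service(service)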
Example #9
def main():
    args = parser.parse_args()

    utils.seed_all(args.seed)

    if args.on_server:
        # matplotlib without monitor
        matplotlib.use("Agg")

        # pygame without monitor
        os.environ["SDL_VIDEODRIVER"] = "dummy"

    #####for the logger
    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime("%Y-%m-%d_%H:%M:%S")
    ###################

    if not args.save_folder:
        print("Provide save folder.")
        exit()

    policy_net_dims = "-policy_net-"
    for dim in args.policy_net_hidden_dims:
        policy_net_dims += str(dim)
        policy_net_dims += "-"

    reward_net_dims = "-reward_net-"
    for dim in args.reward_net_hidden_dims:
        reward_net_dims += str(dim)
        reward_net_dims += "-"

    parent_dir = ("./results/" + str(args.save_folder) + st + policy_net_dims +
                  reward_net_dims)
    to_save = ("./results/" + str(args.save_folder) + st + policy_net_dims +
               reward_net_dims + "-reg-" + str(args.regularizer) + "-seed-" +
               str(args.seed) + "-lr-" + str(args.lr_irl))

    log_file = "Experiment_info.txt"

    experiment_logger = Logger(to_save, log_file)
    experiment_logger.log_header("Arguments for the experiment :")
    repo = git.Repo(search_parent_directories=True)
    experiment_logger.log_info({'From branch : ': repo.active_branch.name})
    experiment_logger.log_info({'Commit number : ': repo.head.object.hexsha})
    experiment_logger.log_info(vars(args))

    # from rlmethods.rlutils import LossBasedTermination
    # for rl
    from rlmethods.b_actor_critic import ActorCritic
    from rlmethods.soft_ac_pi import SoftActorCritic
    from rlmethods.soft_ac import SoftActorCritic as QSAC
    from rlmethods.rlutils import ReplayBuffer

    # for irl
    from irlmethods.deep_maxent import DeepMaxEnt
    import irlmethods.irlUtils as irlUtils
    from featureExtractor.gridworld_featureExtractor import (
        OneHot,
        LocalGlobal,
        SocialNav,
        FrontBackSideSimple,
    )

    agent_width = 10
    step_size = 2
    obs_width = 10
    grid_size = 10

    if args.feat_extractor is None:

        print("Feature extractor missing.")
        exit()

    # check for the feature extractor being used
    # initialize feature extractor
    if args.feat_extractor == "Onehot":
        feat_ext = OneHot(grid_rows=10, grid_cols=10)
    if args.feat_extractor == "SocialNav":
        feat_ext = SocialNav()
    if args.feat_extractor == "FrontBackSideSimple":
        feat_ext = FrontBackSideSimple(
            thresh1=1,
            thresh2=2,
            thresh3=3,
            thresh4=4,
            step_size=step_size,
            agent_width=agent_width,
            obs_width=obs_width,
        )

    if args.feat_extractor == "LocalGlobal":
        feat_ext = LocalGlobal(
            window_size=5,
            grid_size=grid_size,
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
        )

    if args.feat_extractor == "DroneFeatureSAM1":

        feat_ext = DroneFeatureSAM1(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            thresh1=5,
            thresh2=10,
        )

    if args.feat_extractor == "DroneFeatureRisk":

        feat_ext = DroneFeatureRisk(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            thresh1=15,
            thresh2=30,
        )

    if args.feat_extractor == "DroneFeatureRisk_v2":

        feat_ext = DroneFeatureRisk_v2(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            thresh1=15,
            thresh2=30,
        )

    if args.feat_extractor == "DroneFeatureRisk_speed":

        feat_ext = DroneFeatureRisk_speed(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            thresh1=10,
            thresh2=15,
        )

    if args.feat_extractor == "DroneFeatureRisk_speedv2":

        feat_ext = DroneFeatureRisk_speedv2(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            thresh1=18,
            thresh2=30,
        )

    if args.feat_extractor == 'VasquezF1':
        feat_ext = VasquezF1(agent_width * 6, 0.5, 1.0)

    if args.feat_extractor == 'VasquezF2':
        feat_ext = VasquezF1(agent_width * 6, 0.5, 1.0)

    if args.feat_extractor == 'VasquezF3':
        feat_ext = VasquezF3(agent_width)

    if args.feat_extractor == "Fahad":
        feat_ext = Fahad(36, 60, 0.5, 1.0)

    if args.feat_extractor == "GoalConditionedFahad":
        feat_ext = GoalConditionedFahad(36, 60, 0.5, 1.0)

    experiment_logger.log_header("Parameters of the feature extractor :")
    experiment_logger.log_info(feat_ext.__dict__)

    # initialize the environment
    if not args.dont_save and args.save_folder is None:
        print("Specify folder to save the results.")
        exit()
    """
    environment can now initialize without an annotation file
    if args.annotation_file is None:
        print('Specify annotation file for the environment.')
        exit()
    """
    if args.exp_trajectory_path is None:
        print("Specify expert trajectory folder.")
        exit()
    """
    env = GridWorld(display=args.render, is_onehot= False,is_random=False,
                    rows =10,
                    cols =10,
                    seed = 7,
                    obstacles = [np.asarray([5,5])],
                                
                    goal_state = np.asarray([1,5]))

    """

    env = GridWorld(
        display=args.render,
        is_random=True,
        rows=576,
        cols=720,
        agent_width=agent_width,
        step_size=step_size,
        obs_width=obs_width,
        width=grid_size,
        subject=args.subject,
        annotation_file=args.annotation_file,
        goal_state=None,
        step_wrapper=utils.step_wrapper,
        seed=args.seed,
        replace_subject=args.replace_subject,
        segment_size=args.segment_size,
        external_control=True,
        continuous_action=False,
        reset_wrapper=utils.reset_wrapper,
        consider_heading=True,
        is_onehot=False,
    )

    experiment_logger.log_header("Environment details :")
    experiment_logger.log_info(env.__dict__)

    # CHANGE HERE: initialize the RL method

    if args.rl_method == "ActorCritic":
        rl_method = ActorCritic(
            env,
            feat_extractor=feat_ext,
            gamma=1,
            log_interval=args.rl_log_intervals,
            max_episode_length=args.rl_ep_length,
            hidden_dims=args.policy_net_hidden_dims,
            save_folder=to_save,
            lr=args.lr_rl,
            max_episodes=args.rl_episodes,
        )

    if args.rl_method == "SAC":
        if not env.continuous_action:
            print("The action space needs to be continuous for SAC to work.")
            exit()
        replay_buffer = ReplayBuffer(args.replay_buffer_size)

        rl_method = SoftActorCritic(
            env,
            replay_buffer,
            feat_ext,
            play_interval=500,
            learning_rate=args.lr_rl,
            buffer_sample_size=args.replay_buffer_sample_size,
        )

    if args.rl_method == "discrete_SAC":
        if not isinstance(env.action_space, gym.spaces.Discrete):
            print(
                "discrete SAC requires a discrete action space environment to work."
            )
            exit()

        replay_buffer = ReplayBuffer(args.replay_buffer_size)

        rl_method = QSAC(
            env,
            replay_buffer,
            feat_ext,
            args.replay_buffer_sample_size,
            learning_rate=args.lr_rl,
            entropy_tuning=True,
            entropy_target=0.3,
            play_interval=args.play_interval,
        )

    print("RL method initialized.")
    print(rl_method.policy)
    if args.policy_path is not None:
        rl_method.policy.load(args.policy_path)

    experiment_logger.log_header("Details of the RL method :")
    experiment_logger.log_info(rl_method.__dict__)

    # initialize IRL method
    # CHANGE HERE
    trajectory_path = args.exp_trajectory_path

    # default to no scaling when args.scale_svf is unset or falsy
    scale = args.scale_svf if args.scale_svf else False
    irl_method = DeepMaxEnt(
        trajectory_path,
        rlmethod=rl_method,
        env=env,
        iterations=args.irl_iterations,
        on_server=args.on_server,
        l1regularizer=args.regularizer,
        learning_rate=args.lr_irl,
        seed=args.seed,
        graft=False,
        scale_svf=scale,
        hidden_dims=args.reward_net_hidden_dims,
        clipping_value=args.clipping_value,
        enumerate_all=True,
        save_folder=parent_dir,
        rl_max_ep_len=args.rl_ep_length,
        rl_episodes=args.rl_episodes,
    )

    print("IRL method initialized.")
    print(irl_method.reward)

    experiment_logger.log_header("Details of the IRL method :")
    experiment_logger.log_info(irl_method.__dict__)

    smoothing_flag = bool(args.svf_smoothing)

    irl_method.train(smoothing=smoothing_flag)

    if not args.dont_save:
        pass
Example #10
def main():

    #####for the logger
    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime("%Y-%m-%d %H:%M:%S")
    ###################

    args = parser.parse_args()

    seed_all(args.seed)

    if args.on_server:

        matplotlib.use("Agg")
        # pygame without monitor
        os.environ["SDL_VIDEODRIVER"] = "dummy"

    from matplotlib import pyplot as plt

    mp.set_start_method("spawn")

    from rlmethods.b_actor_critic import ActorCritic
    from rlmethods.soft_ac import SoftActorCritic, QSoftActorCritic
    from rlmethods.rlutils import ReplayBuffer

    from envs.gridworld_drone import GridWorldDrone
    from featureExtractor.drone_feature_extractor import (
        DroneFeatureSAM1,
        DroneFeatureOccup,
        DroneFeatureRisk,
        DroneFeatureRisk_v2,
        VasquezF1,
        VasquezF2,
        VasquezF3,
        Fahad,
        GoalConditionedFahad,
    )
    from featureExtractor.gridworld_featureExtractor import (
        FrontBackSide,
        LocalGlobal,
        OneHot,
        SocialNav,
        FrontBackSideSimple,
    )
    from featureExtractor.drone_feature_extractor import (
        DroneFeatureRisk_speed,
        DroneFeatureRisk_speedv2,
    )

    save_folder = None

    if not args.dont_save and not args.play:

        if not args.save_folder:
            print("Provide save folder.")
            exit()

        policy_net_dims = "-policy_net-"
        for dim in args.policy_net_hidden_dims:
            policy_net_dims += str(dim)
            policy_net_dims += "-"

        reward_net_dims = "-reward_net-"
        for dim in args.reward_net_hidden_dims:
            reward_net_dims += str(dim)
            reward_net_dims += "-"

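        # encode feature extractor, seed, network dims, and episode settings in the results folder name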
        save_folder = (
            "./results/"
            + args.save_folder
            + st
            + args.feat_extractor
            + "-seed-"
            + str(args.seed)
            + policy_net_dims
            + reward_net_dims
            + "-total-ep-"
            + str(args.total_episodes)
            + "-max-ep-len-"
            + str(args.max_ep_length)
        )

        experiment_logger = Logger(save_folder, "experiment_info.txt")
        experiment_logger.log_header("Arguments for the experiment :")
        repo = git.Repo(search_parent_directories=True)
        experiment_logger.log_info({'From branch : ' : repo.active_branch.name})
        experiment_logger.log_info({'Commit number : ' : repo.head.object.hexsha})
        experiment_logger.log_info(vars(args))

    window_size = 9
    step_size = 2
    agent_width = 10
    obs_width = 10
    grid_size = 10

    feat_ext = None
    # initialize the feature extractor to be used
    if args.feat_extractor == "Onehot":
        feat_ext = OneHot(grid_rows=10, grid_cols=10)
    if args.feat_extractor == "SocialNav":
        feat_ext = SocialNav(fieldList=["agent_state", "goal_state"])
    if args.feat_extractor == "FrontBackSideSimple":
        feat_ext = FrontBackSideSimple(
            thresh1=1,
            thresh2=2,
            thresh3=3,
            thresh4=4,
            step_size=step_size,
            agent_width=agent_width,
            obs_width=obs_width,
        )

    if args.feat_extractor == "LocalGlobal":
        feat_ext = LocalGlobal(
            window_size=11,
            grid_size=grid_size,
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
        )

    if args.feat_extractor == "DroneFeatureSAM1":

        feat_ext = DroneFeatureSAM1(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            thresh1=15,
            thresh2=30,
        )

    if args.feat_extractor == "DroneFeatureOccup":

        feat_ext = DroneFeatureOccup(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            window_size=window_size,
        )

    if args.feat_extractor == "DroneFeatureRisk":

        feat_ext = DroneFeatureRisk(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            show_agent_persp=False,
            thresh1=15,
            thresh2=30,
        )

    if args.feat_extractor == "DroneFeatureRisk_v2":

        feat_ext = DroneFeatureRisk_v2(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            show_agent_persp=False,
            thresh1=15,
            thresh2=30,
        )

    if args.feat_extractor == "DroneFeatureRisk_speed":

        feat_ext = DroneFeatureRisk_speed(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            show_agent_persp=False,
            return_tensor=False,
            thresh1=10,
            thresh2=15,
        )

    if args.feat_extractor == "DroneFeatureRisk_speedv2":

        feat_ext = DroneFeatureRisk_speedv2(
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
            grid_size=grid_size,
            show_agent_persp=False,
            return_tensor=False,
            thresh1=18,
            thresh2=30,
        )

    if args.feat_extractor == "VasquezF1":
        feat_ext = VasquezF1(agent_width * 6, 0.5, 1.0)

    if args.feat_extractor == "VasquezF2":
        # assumes a VasquezF2 extractor analogous to VasquezF1/VasquezF3 is available
        feat_ext = VasquezF2(agent_width * 6, 0.5, 1.0)

    if args.feat_extractor == "VasquezF3":
        feat_ext = VasquezF3(agent_width)

    if args.feat_extractor == "Fahad":
        feat_ext = Fahad(36, 60, 0.5, 1.0)

    if args.feat_extractor == "GoalConditionedFahad":
        feat_ext = GoalConditionedFahad(36, 60, 0.5, 1.0)

    if feat_ext is None:
        print("Please enter proper feature extractor!")
        exit()
    # log feature extractor info

    if not args.dont_save and not args.play:

        experiment_logger.log_header("Parameters of the feature extractor :")
        experiment_logger.log_info(feat_ext.__dict__)

    # initialize the environment
    replace_subject = bool(args.replace_subject)

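    # the drone gridworld replays the pedestrian annotation file; with
    # replace_subject the learning agent presumably stands in for the chosen
    # subject's trajectory segment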
    env = GridWorldDrone(
        display=args.render,
        is_onehot=False,
        seed=args.seed,
        obstacles=None,
        show_trail=False,
        is_random=True,
        annotation_file=args.annotation_file,
        subject=args.subject,
        tick_speed=60,
        obs_width=10,
        step_size=step_size,
        agent_width=agent_width,
        replace_subject=replace_subject,
        segment_size=args.segment_size,
        external_control=True,
        step_reward=0.001,
        show_comparison=True,
        consider_heading=True,
        show_orientation=True,
        # rows=200, cols=200, width=grid_size)
        rows=576,
        cols=720,
        width=grid_size,
    )

    # env = gym.make('Acrobot-v1')
    # log environment info
    if not args.dont_save and not args.play:

        experiment_logger.log_header("Environment details :")
        experiment_logger.log_info(env.__dict__)

    # initialize RL

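    # ActorCritic trains on-policy, while both SAC variants are off-policy and
    # sample transitions from a replay buffer of size args.replay_buffer_size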
    if args.rl_method == "ActorCritic":
        model = ActorCritic(
            env,
            feat_extractor=feat_ext,
            gamma=1,
            log_interval=100,
            max_episode_length=args.max_ep_length,
            hidden_dims=args.policy_net_hidden_dims,
            save_folder=save_folder,
            lr=args.lr,
            entropy_coeff=args.entropy_coeff,
            max_episodes=args.total_episodes,
        )

    if args.rl_method == "SAC":

        replay_buffer = ReplayBuffer(args.replay_buffer_size)

        model = SoftActorCritic(
            env,
            replay_buffer,
            feat_ext,
            buffer_sample_size=args.replay_buffer_sample_size,
            entropy_tuning=True,
            play_interval=args.play_interval,
            entropy_target=args.entropy_target,
            gamma=args.gamma,
            learning_rate=args.lr,
        )

    if args.rl_method == "discrete_QSAC":

        replay_buffer = ReplayBuffer(args.replay_buffer_size)

        model = QSoftActorCritic(
            env,
            replay_buffer,
            feat_ext,
            buffer_sample_size=args.replay_buffer_sample_size,
            entropy_tuning=True,
            play_interval=args.play_interval,
            entropy_target=args.entropy_target,
            gamma=args.gamma,
            learning_rate=args.lr,
        )
    # log RL info
    if not args.dont_save and not args.play:

        experiment_logger.log_header("Details of the RL method :")
        experiment_logger.log_info(model.__dict__)

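    # args.policy_path may point to a single .pt checkpoint or to a directory of
    # checkpoints, which are evaluated in numerical order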
    if args.policy_path is not None:

        from debugtools import numericalSort

        policy_file_list = []
        reward_across_models = []
        # print(args.policy_path)
        if os.path.isfile(args.policy_path):
            policy_file_list.append(args.policy_path)
        if os.path.isdir(args.policy_path):
            policy_names = glob.glob(os.path.join(args.policy_path, "*.pt"))
            policy_file_list = sorted(policy_names, key=numericalSort)

        xaxis = np.arange(len(policy_file_list))

    if not args.play and not args.play_user:
        # no playing of any kind, so training

        if args.reward_path is None:

            if args.policy_path:
                model.policy.load(args.policy_path)

            if args.rl_method == "SAC" or args.rl_method == "discrete_QSAC":
                model.train(args.total_episodes, args.max_ep_length)

            else:
                model.train()

        else:
            from irlmethods.deep_maxent import RewardNet

            state_size = feat_ext.extract_features(env.reset()).shape[0]
            reward_net = RewardNet(state_size, args.reward_net_hidden_dims)
            reward_net.load(args.reward_path)
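            # quick sanity check of whether the loaded reward network sits on the GPU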
            print(next(reward_net.parameters()).is_cuda)
            model.train(reward_net=reward_net)

        if not args.dont_save:
            model.policy.save(save_folder + "/policy-models/")

    if args.play:
        # env.tickSpeed = 15
        from debugtools import compile_results

        xaxis = []
        counter = 1
        plt.figure(0)
        avg_reward_list = []
        frac_good_run_list = []
        print(policy_file_list)
        for policy_file in policy_file_list:

            print("Playing for policy :", policy_file)
            model.policy.load(policy_file)
            policy_folder = policy_file.strip().split("/")[0:-2]
            save_folder = ""
            for p in policy_folder:
                save_folder = save_folder + p + "/"

            print("The final save folder ", save_folder)
            # env.tickSpeed = 10
            assert args.policy_path is not None, "pass a policy to play from!"
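            # if expert trajectories are supplied, pre-compute their state visitation
            # frequencies, presumably so generated rollouts can be scored against them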
            if args.exp_trajectory_path is not None:
                from irlmethods.irlUtils import calculate_expert_svf

                expert_svf = calculate_expert_svf(
                    args.exp_trajectory_path,
                    max_time_steps=args.max_ep_length,
                    feature_extractor=feat_ext,
                    gamma=1,
                )
            # reward_across_models.append(model.generate_trajectory(args.num_trajs, args.render))
            if args.exp_trajectory_path is None:

                if args.dont_save:
                    rewards, state_info, sub_info = model.generate_trajectory(
                        args.num_trajs, args.render
                    )
                else:
                    rewards, state_info, sub_info = model.generate_trajectory(
                        args.num_trajs,
                        args.render,
                        store_raw=args.store_raw_states,
                        path=save_folder + "/agent_generated_trajectories/",
                    )
            else:

                if args.dont_save:
                    rewards, state_info, sub_info = model.generate_trajectory(
                        args.num_trajs, args.render, expert_svf=expert_svf
                    )
                else:
                    rewards, state_info, sub_info = model.generate_trajectory(
                        args.num_trajs,
                        args.render,
                        path=save_folder + "/agent_generated_trajectories/",
                        expert_svf=expert_svf,
                    )

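            # compile_results condenses the rollouts into an average reward and the
            # fraction of runs counted as successful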
            avg_reward, good_run_frac = compile_results(
                rewards, state_info, sub_info
            )

            avg_reward_list.append(avg_reward)
            frac_good_run_list.append(good_run_frac)
            plt.plot(avg_reward_list, c="r")
            plt.plot(frac_good_run_list, c="g")
            plt.draw()
        plt.show()

    if args.play_user:
        env.tickSpeed = 200

        model.generate_trajectory_user(
            args.num_trajs, args.render, path="./user_generated_trajectories/"
        )
def main():

    # timestamp used to tag the results/logging folder
    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')

    args = parser.parse_args()

    if args.on_server:

        matplotlib.use('Agg')
        # pygame without monitor
        os.environ['SDL_VIDEODRIVER'] = 'dummy'

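    # pyplot is imported only after the backend is chosen, since matplotlib.use()
    # should run before the first pyplot import to reliably take effect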
    from matplotlib import pyplot as plt
    mp.set_start_method('spawn')

    from rlmethods.scott_SAC.SAC import SAC
    from envs.gridworld_drone import GridWorldDrone
    from featureExtractor.drone_feature_extractor import DroneFeatureSAM1, DroneFeatureOccup, DroneFeatureRisk, DroneFeatureRisk_v2
    from featureExtractor.gridworld_featureExtractor import FrontBackSide, LocalGlobal, OneHot, SocialNav, FrontBackSideSimple
    from featureExtractor.drone_feature_extractor import DroneFeatureRisk_speed

    save_folder = None

    if not args.dont_save and not args.play:

        if not args.save_folder:
            print('Provide save folder.')
            exit()

        policy_net_dims = '-policy_net-'
        for dim in args.policy_net_hidden_dims:
            policy_net_dims += str(dim)
            policy_net_dims += '-'

        reward_net_dims = '-reward_net-'
        for dim in args.reward_net_hidden_dims:
            reward_net_dims += str(dim)
            reward_net_dims += '-'

        save_folder = ('./results/' + args.save_folder + st + args.feat_extractor
                       + '-seed-' + str(args.seed) + policy_net_dims + reward_net_dims
                       + '-total-ep-' + str(args.total_episodes)
                       + '-max-ep-len-' + str(args.max_ep_length))

        experiment_logger = Logger(save_folder, 'experiment_info.txt')
        experiment_logger.log_header('Arguments for the experiment :')
        experiment_logger.log_info(vars(args))

    window_size = 9
    step_size = 2
    agent_width = 10
    obs_width = 10
    grid_size = 10

    feat_ext = None
    #initialize the feature extractor to be used
    if args.feat_extractor == 'Onehot':
        feat_ext = OneHot(grid_rows=10, grid_cols=10)
    if args.feat_extractor == 'SocialNav':
        feat_ext = SocialNav(fieldList=['agent_state', 'goal_state'])
    if args.feat_extractor == 'FrontBackSideSimple':
        feat_ext = FrontBackSideSimple(
            thresh1=1,
            thresh2=2,
            thresh3=3,
            thresh4=4,
            step_size=step_size,
            agent_width=agent_width,
            obs_width=obs_width,
        )

    if args.feat_extractor == 'LocalGlobal':
        feat_ext = LocalGlobal(
            window_size=11,
            grid_size=grid_size,
            agent_width=agent_width,
            obs_width=obs_width,
            step_size=step_size,
        )

    if args.feat_extractor == 'DroneFeatureSAM1':

        feat_ext = DroneFeatureSAM1(agent_width=agent_width,
                                    obs_width=obs_width,
                                    step_size=step_size,
                                    grid_size=grid_size,
                                    thresh1=15,
                                    thresh2=30)

    if args.feat_extractor == 'DroneFeatureOccup':

        feat_ext = DroneFeatureOccup(agent_width=agent_width,
                                     obs_width=obs_width,
                                     step_size=step_size,
                                     grid_size=grid_size,
                                     window_size=window_size)

    if args.feat_extractor == 'DroneFeatureRisk':

        feat_ext = DroneFeatureRisk(agent_width=agent_width,
                                    obs_width=obs_width,
                                    step_size=step_size,
                                    grid_size=grid_size,
                                    show_agent_persp=True,
                                    thresh1=15,
                                    thresh2=30)

    if args.feat_extractor == 'DroneFeatureRisk_v2':

        feat_ext = DroneFeatureRisk_v2(agent_width=agent_width,
                                       obs_width=obs_width,
                                       step_size=step_size,
                                       grid_size=grid_size,
                                       show_agent_persp=True,
                                       thresh1=15,
                                       thresh2=30)

    if args.feat_extractor == 'DroneFeatureRisk_speed':

        feat_ext = DroneFeatureRisk_speed(agent_width=agent_width,
                                          obs_width=obs_width,
                                          step_size=step_size,
                                          grid_size=grid_size,
                                          show_agent_persp=False,
                                          thresh1=10,
                                          thresh2=15)

    if feat_ext is None:
        print('Please enter proper feature extractor!')
        exit()
    #log feature extractor info

    if not args.dont_save and not args.play:

        experiment_logger.log_header('Parameters of the feature extractor :')
        experiment_logger.log_info(feat_ext.__dict__)

    #initialize the environment
    replace_subject = bool(args.replace_subject)

    env = GridWorldDrone(
        display=args.render,
        is_onehot=False,
        seed=args.seed,
        obstacles=None,
        show_trail=False,
        is_random=True,
        annotation_file=args.annotation_file,
        subject=args.subject,
        tick_speed=60,
        obs_width=10,
        step_size=step_size,
        agent_width=agent_width,
        replace_subject=replace_subject,
        segment_size=args.segment_size,
        external_control=True,
        step_reward=0.001,
        show_comparison=True,
        consider_heading=True,
        show_orientation=True,
        # rows=200, cols=300, width=grid_size
        rows=576,
        cols=720,
        width=grid_size)

    #log environment info
    if not args.dont_save and not args.play:

        experiment_logger.log_header('Environment details :')
        experiment_logger.log_info(env.__dict__)

    #initialize RL
    model = SAC(env,
                feat_extractor=feat_ext,
                log_interval=100,
                max_ep_length=args.max_ep_length,
                hidden_dims=args.policy_net_hidden_dims,
                save_folder=save_folder,
                max_episodes=args.total_episodes)

    #log RL info
    if not args.dont_save and not args.play:

        experiment_logger.log_header('Details of the RL method :')
        experiment_logger.log_info(model.__dict__)

    if args.policy_path is not None:

        from debugtools import numericalSort
        policy_file_list = []
        reward_across_models = []
        if os.path.isfile(args.policy_path):
            policy_file_list.append(args.policy_path)
        if os.path.isdir(args.policy_path):
            policy_names = glob.glob(os.path.join(args.policy_path, '*.pt'))
            policy_file_list = sorted(policy_names, key=numericalSort)

        xaxis = np.arange(len(policy_file_list))

    if not args.play and not args.play_user:
        #no playing of any kind, so training

        if args.reward_path is None:
            if args.policy_path:
                model.policy.load(args.policy_path)
            model.train()
        else:
            from irlmethods.deep_maxent import RewardNet
            state_size = feat_ext.extract_features(env.reset()).shape[0]
            reward_net = RewardNet(state_size, args.reward_net_hidden_dims)
            reward_net.load(args.reward_path)
            print(next(reward_net.parameters()).is_cuda)
            model.train(reward_net=reward_net)

        if not args.dont_save:
            model.policy.save(save_folder + '/policy-models/')

    if args.play:
        #env.tickSpeed = 15
        from debugtools import compile_results
        xaxis = []
        counter = 1
        plt.figure(0)
        avg_reward_list = []
        frac_good_run_list = []
        for policy_file in policy_file_list:

            print('Playing for policy :', policy_file)
            model.policy.load(policy_file)
            policy_folder = policy_file.strip().split('/')[0:-2]
            save_folder = ''
            for p in policy_folder:
                save_folder = save_folder + p + '/'

            print('The final save folder ', save_folder)
            #env.tickSpeed = 10
            assert args.policy_path is not None, 'pass a policy to play from!'
            if args.exp_trajectory_path is not None:
                from irlmethods.irlUtils import calculate_expert_svf
                expert_svf = calculate_expert_svf(
                    args.exp_trajectory_path,
                    max_time_steps=args.max_ep_length,
                    feature_extractor=feat_ext,
                    gamma=1)
            #reward_across_models.append(model.generate_trajectory(args.num_trajs, args.render))
            if args.exp_trajectory_path is None:

                if args.dont_save:
                    rewards, state_info, sub_info = model.generate_trajectory(
                        args.num_trajs, args.render)
                else:
                    rewards, state_info, sub_info = model.generate_trajectory(
                        args.num_trajs,
                        args.render,
                        path=save_folder + '/agent_generated_trajectories/')
            else:

                if args.dont_save:
                    rewards, state_info, sub_info = model.generate_trajectory(
                        args.num_trajs, args.render, expert_svf=expert_svf)
                else:
                    rewards, state_info, sub_info = model.generate_trajectory(
                        args.num_trajs,
                        args.render,
                        path=save_folder + '/agent_generated_trajectories/',
                        expert_svf=expert_svf)

            avg_reward, good_run_frac = compile_results(
                rewards, state_info, sub_info)
            #pdb.set_trace()
            avg_reward_list.append(avg_reward)
            frac_good_run_list.append(good_run_frac)
            plt.plot(avg_reward_list, c='r')
            plt.plot(frac_good_run_list, c='g')
            plt.draw()
        plt.show()

    if args.play_user:
        env.tickSpeed = 200

        model.generate_trajectory_user(args.num_trajs,
                                       args.render,
                                       path='./user_generated_trajectories/')
Example #12
0
def main():

    args = parser.parse_args()

    utils.seed_all(args.seed)

    if args.on_server:
        # matplotlib without monitor
        matplotlib.use('Agg')

        # pygame without monitor
        os.environ['SDL_VIDEODRIVER'] = 'dummy'
    from matplotlib import pyplot as plt

    save_folder = None
    if not args.dont_save:
        save_folder = './results/' + args.save_folder
        experiment_logger = Logger(save_folder, 'experiment_info.txt')

        experiment_logger.log_header('Arguments for the experiment :')
        experiment_logger.log_info(vars(args))
    

    mp.set_start_method('spawn')

    if args.render:
        from envs.gridworld import GridWorld
    else:
        from envs.gridworld_clockless import GridWorldClockless as GridWorld
        

    # both MountainCar feature extractors presumably discretise the 2-D
    # (position, velocity) state into the bin counts given by args.state_discretization
    if args.feat_extractor == 'MCFeatures':
        feat_ext = MCFeatures(args.state_discretization[0], args.state_discretization[1])

    elif args.feat_extractor == 'MCFeaturesOnehot':
        feat_ext = MCFeaturesOnehot(args.state_discretization[0], args.state_discretization[1])

    else:
        print('Enter a valid feature extractor value.')
        exit()

    if not args.dont_save:
        experiment_logger.log_header('Parameters of the feature extractor :')
        experiment_logger.log_info(feat_ext.__dict__)

    '''
    np.asarray([2,2]),np.asarray([7,4]),np.asarray([3,5]),
                                np.asarray([5,2]),np.asarray([8,3]),np.asarray([7,5]),
                                np.asarray([3,3]),np.asarray([3,7]),np.asarray([5,7])
                                
    env = GridWorld(display=args.render, is_onehot= False,is_random=True,
                    rows=100, agent_width=agent_width,step_size=step_size,
                    obs_width=obs_width,width=grid_size,
                    cols=100,
                    seed=7,
                    buffer_from_obs=0,
                    obstacles=3,
                                
                    goal_state=np.asarray([5,5]))
    '''
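    # use the unwrapped MountainCar so gym's 200-step TimeLimit does not cut
    # episodes short; the agent's own max_ep_length applies instead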
    env = gym.make('MountainCar-v0')
    env = env.unwrapped

    if not args.dont_save:

        experiment_logger.log_header('Environment details :')
        experiment_logger.log_info(env.__dict__)


    model = ActorCritic(env, feat_extractor=feat_ext,  gamma=0.99, plot_loss=False,
                        log_interval=10, max_ep_length=300, hidden_dims=args.policy_net_hidden_dims,
                        max_episodes=30, save_folder=save_folder)

    if not args.dont_save:

        experiment_logger.log_header('Details of the RL method :')
        experiment_logger.log_info(model.__dict__)
    
    #pdb.set_trace()

    if args.policy_path is not None:
        policy_file_list =  []
        reward_across_models = []
        if os.path.isfile(args.policy_path):
            policy_file_list.append(args.policy_path)
        if os.path.isdir(args.policy_path):
            policy_names = glob.glob(os.path.join(args.policy_path, '*.pt'))
            policy_file_list = sorted(policy_names, key=numericalSort)
        
        xaxis = np.arange(len(policy_file_list))

    if not args.play and not args.play_user:
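        # training: train_mp runs the updates with 4 parallel workers, optionally
        # under a learned reward network restored from args.reward_path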
        if args.reward_path is None:
            model.train_mp(n_jobs=4)
        else:

            from irlmethods.deep_maxent import RewardNet
            state_size = feat_ext.state_rep_size
            reward_net = RewardNet(state_size, args.policy_net_hidden_dims)
            reward_net.load(args.reward_path)
            print(next(reward_net.parameters()).is_cuda)
            model.train_mp(reward_net=reward_net, n_jobs=4)

        if not args.dont_save:
            model.policy.save(save_folder + '/policy/')

    if args.play:
        xaxis = []
        counter = 1
        print(policy_file_list)
        for policy_file in policy_file_list:

            model.policy.load(policy_file)

            env.tickSpeed = 15
            assert args.policy_path is not None, 'pass a policy to play from!'

            reward_across_models.append(model.generate_trajectory(args.num_trajs, args.render))

            # plot the running mean reward across policy checkpoints with a
            # one-standard-deviation band
            xaxis.append(counter)
            counter += 1
            reward_across_models_np = np.array(reward_across_models)
            mean_rewards = np.mean(reward_across_models_np, axis=1)
            std_rewards = np.std(reward_across_models_np, axis=1)
            plt.plot(xaxis, mean_rewards, color='r', label='IRL trained agent')
            plt.fill_between(xaxis, mean_rewards - std_rewards,
                             mean_rewards + std_rewards, alpha=0.5, facecolor='r')
            plt.draw()
            plt.pause(0.001)
            '''
            print('RAM usage :')
            display_memory_usage(process.memory_info().rss)
            print('GPU usage :')
            display_memory_usage(torch.cuda.memory_allocated())
            torch.cuda.empty_cache()
            display_memory_usage(torch.cuda.memory_allocated())
            '''
            #plt.show()
        plt.show()
    if args.play_user:
        env.tickSpeed = 200

        model.generate_trajectory_user(args.num_trajs, './trajs/ac_gridworld_user/')