def setup_with_config_file(self, config_file, **kwargs):
        """
        :param config_file: address to config file which contains scenario parameters
        :param kwargs: you can override the parameters in the config file , by passing them directly
        :return:
        """
        with open(config_file) as stream:
            config = yaml.load(stream, Loader=yaml.FullLoader)
            biped_mode = config['General']['biped_mode']
            # opentraj_root = config['Dataset']['OpenTrajRoot']
            robot_replacement_id = config['Dataset']['RobotId']
            obstacles = config['Dataset']['Obstacles']
            fps = config['Dataset']['fps']
            annotation_file = config['Dataset']['Annotation']
            title = config['Dataset'].get('Title', '')
            parser_type = config['Dataset']['Parser']
            if parser_type == "ParserETH":
                dataset = load_eth(annotation_file,
                                   title=title,
                                   use_kalman=False)
                dataset.interpolate_frames(inplace=True)
            elif parser_type == "ParserHermes":
                dataset = load_bottleneck(annotation_file, title=title)
            self.video_files = config['Dataset']['Video']
            world_boundary = []
            if 'WorldBoundary' in config['Dataset']:
                world_x_min = config['Dataset']['WorldBoundary']['x_min']
                world_x_max = config['Dataset']['WorldBoundary']['x_max']
                world_y_min = config['Dataset']['WorldBoundary']['y_min']
                world_y_max = config['Dataset']['WorldBoundary']['y_max']
                world_boundary = [[world_x_min, world_x_max],
                                  [world_y_min, world_y_max]]

        # override parameters by direct arguments
        self.title = kwargs.get("title", title)
        dataset = kwargs.get("dataset", dataset)
        fps = kwargs.get("fps", fps)
        robot_replacement_id = kwargs.get("robot_id", robot_replacement_id)
        biped_mode = kwargs.get("biped_mode", biped_mode)

        self.setup(dataset=dataset,
                   fps=fps,
                   robot_id=robot_replacement_id,
                   biped_mode=biped_mode,
                   obstacles=obstacles,
                   world_boundary=world_boundary)
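
For reference, a minimal config sketch matching the keys this loader reads; the key names come from the code above, while every value and path below is only an illustrative assumption:

General:
  biped_mode: false
Dataset:
  Parser: ParserETH            # or ParserHermes
  Annotation: datasets/ETH/seq_eth/obsmat.txt
  Title: ETH-Univ
  RobotId: 1
  Obstacles: []
  fps: 16
  Video: []
  WorldBoundary:
    x_min: -10.0
    x_max: 10.0
    y_min: -10.0
    y_max: 10.0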
Example #2
def run(module_directory, args):

    print("\n\n-----------------------------\nRunning test metrics\n-----------------------------")

    eth_dataset = load_eth(module_directory + '/tests/toy trajectories/ETH/seq_eth/obsmat.txt',
                           args.separator)
    all_trajs = eth_dataset.get_trajectories()

    # per-trajectory metrics; note these are recomputed each iteration and not
    # accumulated, so only the last trajectory's values survive the loop
    for traj in all_trajs:
        speed = motion.speed(traj)
        p_len = path_length.path_length(traj)
        p_eff = path_length.path_efficiency(traj)

    all_frames = eth_dataset.get_frames()

    # calc pcf on a dataset
    pcf_accum = []
    pcf_range = np.arange(0.2,  # starting radius
                          8,    # end radius
                          0.2)  # step size

    for frame in all_frames:
        pcf_values_t = pcf.pcf(frame[['pos_x', 'pos_y']],
                               list(pcf_range), sigma=0.25)
        if not len(pcf_accum):
            pcf_accum = pcf_values_t
        else:
            pcf_accum += pcf_values_t
    avg_pcf = pcf_accum / len(all_frames)
    print('average pcf = ', avg_pcf)

    # social spaces
    social_spaces = distance.social_space(eth_dataset.data)
    print(social_spaces.values())

    plt.hist(list(social_spaces.values()), bins=np.arange(0, 5, 0.2), density=True)
    plt.show()

    # traj.plot(y='acc x')
    # ax = plt.gca()
    # for data in np.unique(traj.index.get_level_values('agent id')):
    #     random_color = np.random.rand(3,)
    #     traj[traj.index.get_level_values('agent id') == data].plot(y='acc x', c = random_color, ax=ax)
    #
    # plt.show()
    # print(np.unique(traj.index.get_level_values('agent id')))
    print("\n\n-----------------------------\nTest metrics done\n-----------------------------")
Example #3
    def __init__(self, mode, trajectory_interval, context_length,
                 agent_buffer_size):
        """
        mode:                       str         ['train', 'val', 'test']
        trajectory_interval:        int         time window for a batch data, each frame is 0.4 sec
        context_length:             int         known past trajectories
        agent_buffer_size:          int         max number of agents allowed in one data item
        """
        self.mode = mode
        assert trajectory_interval > context_length
        self.trajectory_interval = trajectory_interval
        self.agent_buffer_size = agent_buffer_size
        self.context_length = context_length
        annot_file = os.path.join('datasets', 'ETH/seq_eth/obsmat.txt')
        traj_dataset = load_eth(annot_file)
        last_idx = traj_dataset.data['timestamp'].last_valid_index()
        total_time = (traj_dataset.data['timestamp'][last_idx]
                      - traj_dataset.data['timestamp'][0])
        # split datasets based on ratio of total time
        # checked labels for the dataset. all are pedestrians.
        if mode == 'train':
            start_time = traj_dataset.data['timestamp'][0]
            end_time = traj_dataset.data['timestamp'][0] + total_time * 0.8
        elif mode == 'val':
            start_time = traj_dataset.data['timestamp'][0] + total_time * 0.8
            end_time = traj_dataset.data['timestamp'][0] + total_time * 0.9
        else:  # 'test' split: the remaining 10%
            start_time = traj_dataset.data['timestamp'][0] + total_time * 0.9
            end_time = traj_dataset.data['timestamp'][0] + total_time
        self.traj_dataset = self.extract_dataset(traj_dataset, start_time,
                                                 end_time)
        self.traj_dataset.data.reset_index(inplace=True)

        if mode != 'train':
            self.start_indices = np.arange(
                0,
                len(self.traj_dataset.data) - self.trajectory_interval,
                self.trajectory_interval)
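
A minimal usage sketch; the class name is not visible in this snippet, so `ETHTrajectoryDataset` is a placeholder, and the argument values are illustrative (trajectory_interval must exceed context_length):

train_set = ETHTrajectoryDataset(mode='train', trajectory_interval=20,
                                 context_length=8, agent_buffer_size=64)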
Example #4
def get_datasets(opentraj_root, dataset_names):
    datasets = {}

    # Make a temp dir to store and load trajdatasets (no postprocess anymore)
    trajdataset_dir = os.path.join(opentraj_root, 'trajdatasets__temp')
    os.makedirs(trajdataset_dir, exist_ok=True)

    for dataset_name in dataset_names:
        dataset_h5_file = os.path.join(trajdataset_dir, dataset_name + '.h5')
        if os.path.exists(dataset_h5_file):
            datasets[dataset_name] = TrajDataset()
            datasets[dataset_name].data = pd.read_pickle(dataset_h5_file)
            datasets[dataset_name].title = dataset_name
            print("loading dataset from pre-processed file: ", dataset_h5_file)
            continue

        print("Loading dataset:", dataset_name)

        # ========== ETH ==============
        if 'eth-univ' == dataset_name.lower():
            eth_univ_root = os.path.join(opentraj_root, 'datasets/ETH/seq_eth/obsmat.txt')
            datasets[dataset_name] = load_eth(eth_univ_root, title=dataset_name, scene_id='Univ',
                                              use_kalman=True)

        elif 'eth-hotel' == dataset_name.lower():
            eth_hotel_root = os.path.join(opentraj_root, 'datasets/ETH/seq_hotel/obsmat.txt')
            datasets[dataset_name] = load_eth(eth_hotel_root, title=dataset_name, scene_id='Hotel')
        # ******************************

        # ========== UCY ==============
        elif 'ucy-zara' == dataset_name.lower():  # all 3 zara sequences
            zara01_dir = os.path.join(opentraj_root, 'datasets/UCY/zara01')
            zara02_dir = os.path.join(opentraj_root, 'datasets/UCY/zara02')
            zara03_dir = os.path.join(opentraj_root, 'datasets/UCY/zara03')
            zara_01_ds = load_crowds(zara01_dir + '/annotation.vsp',
                                     homog_file=zara01_dir + '/H.txt',
                                     scene_id='1', use_kalman=True)
            zara_02_ds = load_crowds(zara02_dir + '/annotation.vsp',
                                     homog_file=zara02_dir + '/H.txt',
                                     scene_id='2', use_kalman=True)
            zara_03_ds = load_crowds(zara03_dir + '/annotation.vsp',
                                     homog_file=zara03_dir + '/H.txt',
                                     scene_id='3', use_kalman=True)
            datasets[dataset_name] = merge_datasets([zara_01_ds, zara_02_ds, zara_03_ds], dataset_name)

        elif 'ucy-univ' == dataset_name.lower():  # all 3 sequences
            st001_dir = os.path.join(opentraj_root, 'datasets/UCY/students01')
            st003_dir = os.path.join(opentraj_root, 'datasets/UCY/students03')
            uni_ex_dir = os.path.join(opentraj_root, 'datasets/UCY/uni_examples')
            #st001_ds = load_Crowds(st001_dir + '/students001.txt',homog_file=st001_dir + '/H.txt',scene_id='1',use_kalman=True)

            st001_ds = load_crowds(st001_dir + '/annotation.vsp',
                                   homog_file=st003_dir + '/H.txt',
                                   scene_id='1', use_kalman=True) 

            st003_ds = load_crowds(st003_dir + '/annotation.vsp',
                                   homog_file=st003_dir + '/H.txt',
                                   scene_id='3', use_kalman=True)
            uni_ex_ds = load_crowds(uni_ex_dir + '/annotation.vsp',
                                    homog_file=st003_dir + '/H.txt',
                                    scene_id='ex', use_kalman=True)
            datasets[dataset_name] = merge_datasets([st001_ds, st003_ds, uni_ex_ds], dataset_name)

        elif 'ucy-zara1' == dataset_name.lower():
            zara01_root = os.path.join(opentraj_root, 'datasets/UCY/zara01/obsmat.txt')
            datasets[dataset_name] = load_eth(zara01_root, title=dataset_name)

        elif 'ucy-zara2' == dataset_name.lower():
            zara02_root = os.path.join(opentraj_root, 'datasets/UCY/zara02/obsmat.txt')
            datasets[dataset_name] = load_eth(zara02_root, title=dataset_name)

        elif 'ucy-univ3' == dataset_name.lower():
            students03_root = os.path.join(opentraj_root, 'datasets/UCY/students03/obsmat.txt')
            datasets[dataset_name] = load_eth(students03_root, title=dataset_name)
        # ******************************

        # ========== HERMES ==============
        elif 'bn' in dataset_name.lower().split('-'):
            [_, exp_flow, cor_size] = dataset_name.split('-')
            if exp_flow == '1d' and cor_size == 'w180':   # 'Bottleneck-udf-180'
                bottleneck_path = os.path.join(opentraj_root, 'datasets/HERMES/Corridor-1D/uo-180-180-120.txt')
            elif exp_flow == '2d' and cor_size == 'w160':  # 'Bottleneck-bdf-160'
                bottleneck_path = os.path.join(opentraj_root, "datasets/HERMES/Corridor-2D/bo-360-160-160.txt")
            else:
                print("Unknown Bottleneck dataset:", dataset_name)
                continue
            datasets[dataset_name] = load_bottleneck(bottleneck_path, sampling_rate=6,
                                                     use_kalman=True,
                                                     title=dataset_name)
        # ******************************

        # ========== PETS ==============
        elif 'pets-s2l1' == dataset_name.lower():
            pets_root = os.path.join(opentraj_root, 'datasets/PETS-2009/data')
            datasets[dataset_name] = load_pets(os.path.join(pets_root, 'annotations/PETS2009-S2L1.xml'),  # was PETS2009-S2L2
                                               calib_path=os.path.join(pets_root, 'calibration/View_001.xml'),
                                               sampling_rate=2,
                                               title=dataset_name)
        # ******************************

        # ========== GC ==============
        elif 'gc' == dataset_name.lower():
            gc_root = os.path.join(opentraj_root, 'datasets/GC/Annotation')
            datasets[dataset_name] = load_gcs(gc_root, world_coord=True, title=dataset_name,
                                              use_kalman=True
                                              )
        # ******************************

        # ========== InD ==============
        elif 'ind-1' == dataset_name.lower():
            ind_root = os.path.join(opentraj_root, 'datasets/InD/inD-dataset-v1.0/data')
            file_ids = range(7, 17 + 1)  # location_id = 1
            ind_1_datasets = []
            for file_id in file_ids:
                dataset_i = load_ind(os.path.join(ind_root, '%02d_tracks.csv' % file_id),
                                     scene_id='1-%02d' % file_id,
                                     sampling_rate=10,
                                     use_kalman=True)
                ind_1_datasets.append(dataset_i)
            datasets[dataset_name] = merge_datasets(ind_1_datasets, new_title=dataset_name)

        elif 'ind-2' == dataset_name.lower():
            ind_root = os.path.join(opentraj_root, 'datasets/InD/inD-dataset-v1.0/data')
            file_ids = range(18, 29 + 1)  # location_id = 2
            ind_2_datasets = []
            for file_id in file_ids:
                dataset_i = load_ind(os.path.join(ind_root, '%02d_tracks.csv' % file_id),
                                     scene_id='2-%02d' % file_id,
                                     sampling_rate=10,
                                     use_kalman=True)
                ind_2_datasets.append(dataset_i)
            datasets[dataset_name] = merge_datasets(ind_2_datasets, new_title=dataset_name)

        elif 'ind-3' == dataset_name.lower():
            ind_root = os.path.join(opentraj_root, 'datasets/InD/inD-dataset-v1.0/data')
            file_ids = range(30, 32 + 1)  # location_id = 3
            ind_3_datasets = []
            for file_id in file_ids:
                dataset_i = load_ind(os.path.join(ind_root, '%02d_tracks.csv' % file_id),
                                     scene_id='3-%02d' % file_id,
                                     sampling_rate=10,
                                     use_kalman=True)
                ind_3_datasets.append(dataset_i)
            datasets[dataset_name] = merge_datasets(ind_3_datasets, new_title=dataset_name)

        elif 'ind-4' == dataset_name.lower():
            ind_root = os.path.join(opentraj_root, 'datasets/InD/inD-dataset-v1.0/data')
            file_ids = range(0, 6 + 1)  # location_id = 4
            ind_4_datasets = []
            for file_id in file_ids:
                dataset_i = load_ind(os.path.join(ind_root, '%02d_tracks.csv' % file_id),
                                     scene_id='4-%02d' % file_id,
                                     sampling_rate=10,
                                     use_kalman=True)
                ind_4_datasets.append(dataset_i)
            datasets[dataset_name] = merge_datasets(ind_4_datasets, new_title=dataset_name)
        # ******************************

        # ========== KITTI ==============
        elif 'kitti' == dataset_name.lower():
            kitti_root = os.path.join(opentraj_root, 'datasets/KITTI/data')
            datasets[dataset_name] = load_kitti(kitti_root, title=dataset_name,
                                                use_kalman=True,
                                                sampling_rate=1)  # FixMe: apparently original_fps = 2.5
        # ******************************

        # ========== L-CAS ==============
        elif 'lcas-minerva' == dataset_name.lower():
            lcas_root = os.path.join(opentraj_root, 'datasets/L-CAS/data')
            datasets[dataset_name] = load_lcas(lcas_root, title=dataset_name,
                                               use_kalman=True,
                                               sampling_rate=1)  # FixMe: apparently original_fps = 2.5
        # ******************************

        # ========== Wild-Track ==============
        elif 'wildtrack' == dataset_name.lower():
            wildtrack_root = os.path.join(opentraj_root, 'datasets/Wild-Track/annotations_positions')
            datasets[dataset_name] = load_wildtrack(wildtrack_root, title=dataset_name,
                                                    use_kalman=True,
                                                    sampling_rate=1)  # original_annot_framerate=2
        # ******************************

        # ========== Edinburgh ==============
        elif 'edinburgh' in dataset_name.lower():
            edinburgh_dir = os.path.join(opentraj_root, 'datasets/Edinburgh/annotations')
            if 'edinburgh' == dataset_name.lower():   # all files
                # select days 1-10 Sep
                Ed_selected_days = ['01Sep', '02Sep', '04Sep', '05Sep', '06Sep', '10Sep']
                partial_ds = []
                for selected_day in Ed_selected_days:
                    edinburgh_path = os.path.join(edinburgh_dir, 'tracks.%s.txt' % selected_day)
                    partial_ds.append(load_edinburgh(edinburgh_path, title=dataset_name,
                                                     use_kalman=True, scene_id=selected_day,
                                                     sampling_rate=4)  # original_framerate=9
                                      )
                datasets[dataset_name] = merge_datasets(partial_ds, new_title=dataset_name)

            else:
                seq_date = dataset_name.split('-')[1]
                edinburgh_path = os.path.join(edinburgh_dir, 'tracks.%s.txt' % seq_date)
                datasets[dataset_name] = load_edinburgh(edinburgh_path, title=dataset_name,
                                                        use_kalman=True,
                                                        sampling_rate=4)  # original_framerate=9
        # ******************************

        # ========== Town-Center ==============
        elif 'towncenter' == dataset_name.lower():
            towncenter_root = os.path.join(opentraj_root, 'datasets/Town-Center')
            # FixMe: might need Kalman Smoother
            datasets[dataset_name] = load_town_center(towncenter_root + '/TownCentre-groundtruth-top.txt',
                                                      calib_path=towncenter_root + '/TownCentre-calibration-ci.txt',
                                                      title=dataset_name,
                                                      use_kalman=True,
                                                      sampling_rate=10)  # original_framerate=25
        # ******************************

        # ========== SDD ==============
        elif 'sdd-' in dataset_name.lower():
            scene_name = dataset_name.split('-')[1]
            sdd_root = os.path.join(opentraj_root, 'datasets', 'SDD')
            annot_files_sdd = sorted(glob.glob(sdd_root + '/' + scene_name + "/**/annotations.txt", recursive=True))

            sdd_scales_yaml_file = os.path.join(sdd_root, 'estimated_scales.yaml')
            with open(sdd_scales_yaml_file, 'r') as f:
                scales_yaml_content = yaml.load(f, Loader=yaml.FullLoader)

            scene_datasets = []
            for file_name in annot_files_sdd:
                filename_parts = file_name.split('/')
                scene_name = filename_parts[-3]
                scene_video_id = filename_parts[-2]
                scale = scales_yaml_content[scene_name][scene_video_id]['scale']
                sdd_dataset_i = load_sdd(file_name, scale=scale,
                                         scene_id=scene_name + scene_video_id.replace('video', ''),
                                         drop_lost_frames=False,
                                         use_kalman=True,
                                         sampling_rate=12)  # original_framerate=30
                scene_datasets.append(sdd_dataset_i)
            scene_dataset = merge_datasets(scene_datasets, dataset_name)
            datasets[dataset_name] = scene_dataset
        # ******************************

        else:
            print("Error! invalid dataset name:", dataset_name)
            continue

        # save to h5 file
        datasets[dataset_name].data.to_pickle(dataset_h5_file)
        print("saving dataset into pre-processed file: ", dataset_h5_file)

    return datasets
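
A short usage sketch, assuming `opentraj_root` points at a local OpenTraj checkout with the datasets/ tree in place; the path below is a placeholder:

datasets = get_datasets('/path/to/OpenTraj', ['eth-univ', 'ucy-zara1'])
for name, ds in datasets.items():
    print(name, '-> %d trajectories' % len(ds.get_trajectories()))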
Example #5
        '--background',
        '-b',
        default='image',
        choices=['image', 'video'],
        help='select background type. video does not exist for all datasets, '
        'you might need to download it first. '
        '(default: "image")')

    args = argparser.parse_args()
    opentraj_root = args.data_root
    traj_dataset = None

    # #============================ ETH =================================
    if args.dataset == 'eth':
        annot_file = os.path.join(opentraj_root, 'ETH/seq_eth/obsmat.txt')
        traj_dataset = load_eth(annot_file)
        homog_file = os.path.join(opentraj_root, 'ETH/seq_eth/H.txt')
        if args.background == 'image':
            media_file = os.path.join(opentraj_root,
                                      'ETH/seq_eth/reference.png')
        elif args.background == 'video':
            media_file = os.path.join(opentraj_root, 'ETH/seq_eth/video.avi')
        else:
            error_msg('background type is invalid')

    elif args.dataset == 'hotel':
        annot_file = os.path.join(opentraj_root, 'ETH/seq_hotel/obsmat.txt')
        traj_dataset = load_eth(annot_file)
        homog_file = os.path.join(opentraj_root, 'ETH/seq_hotel/H.txt')
        # media_file = os.path.join(opentraj_root, 'ETH/seq_hotel/reference.png')
        media_file = os.path.join(opentraj_root, 'ETH/seq_hotel/video.avi')
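
The snippet is cut off before the remaining dataset branches, but given the attributes read above (data_root, dataset, background), a typical invocation would look something like the line below; the script name and flag spellings are assumptions inferred from those attribute names:

python play_trajectories.py --data-root /path/to/OpenTraj/datasets --dataset eth --background image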
Example #6
    # p(Xp|Xn)  = p(Xp, Xn) / p(Xn) = p(X) / p(Xn)

    euclidean_distances(trajlets)  # note: the returned distance matrix is currently unused

    cE = 0
    for traj_i in trajlets:
        xi = traj_i.whole
        xoi = traj_i.obsv
        xpi = traj_i.pred
        for traj_j in trajlets:
            xj = traj_j.whole
            xoj = traj_j.obsv
            xpj = traj_j.pred
            # K_xi_xj = np.exp(-norm(x1 - x2) ** 2 / 2)
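            # NOTE: the pairwise kernel accumulation is left unimplemented in
            # this snippet, so cE is returned unchanged (0)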

    return cE


if __name__ == "__main__":
    from toolkit.loaders.loader_eth import load_eth
    from toolkit.core.trajlet import split_trajectories, to_numpy

    eth_dataset = load_eth(
        "/home/cyrus/workspace2/OpenTraj/datasets/ETH/seq_eth/obsmat.txt")
    eth_trajs = eth_dataset.get_trajectories("pedestrian")
    eth_trajlets = split_trajectories(eth_trajs)
    to_numpy(eth_trajlets)

    eth_trajlets = np.stack(eth_trajlets)
    conditional_entropy(eth_trajlets)
Example #7
                "pos_x", "pos_y", "vel_x", "vel_y", "timestamp"
            ]].to_numpy()
            trl_pred_np = trl_pred[[
                "pos_x", "pos_y", "vel_x", "vel_y", "timestamp"
            ]].to_numpy()
            trl_np_list.append([trl_obsv_np, trl_pred_np])
        trajlets = np.stack(trl_np_list)

    return trajlets


# test
if __name__ == "__main__":
    from toolkit.loaders.loader_eth import load_eth
    import sys, os
    opentraj_root = sys.argv[1]
    # test_dataset = loadETH(os.path.join(opentraj_root, "datasets/ETH/seq_eth/obsmat.txt"))
    test_dataset = load_eth(
        os.path.join(opentraj_root, "datasets/ETH/seq_hotel/obsmat.txt"))
    trajs = test_dataset.get_trajectories()
    trajlets_4_8s = split_trajectories(trajs, length=4.8, to_numpy=True)
    trajlets_8s = split_trajectories(trajs, length=8, to_numpy=True)
    paired_trajlets_4_8s = split_trajectories_paired(trajs,
                                                     length=4.8,
                                                     to_numpy=True)

    print("Test hotel dataset\n******************")
    print("trajlets_4_8s.shape =", trajlets_4_8s.shape)
    print("trajlets_8s.shape =", trajlets_8s.shape)
    print("paired_trajlets_4_8s.shape =", paired_trajlets_4_8s.shape)
Example #8
def run(path, args):

    print("\n-----------------------------\nRunning test load\n-----------------------------")
    if 'eth/' in path.lower():
        print("[Javad]: Directly reading ETH Dataset (seq_eth):")
        traj_dataset = load_eth(path)
        all_trajs = traj_dataset.get_trajectories()
        all_frames = traj_dataset.get_frames()

    if '/sdd' in path.lower():
        if os.path.isdir(path):
            traj_dataset = load_sdd_dir(path)
        else:
            traj_dataset = load_sdd(path)
        trajs = traj_dataset.get_trajectories()
        print("total number of trajectories = ", len(trajs))

    if 'gc/' in path.lower():
        kwargs = {}
        for arg in args:
            if 'homog_file=' in arg:
                kwargs['homog_file'] = arg.replace("homog_file=", "")
        gc_dataset = load_gcs(path, **kwargs)
        trajs = gc_dataset.get_trajectories()
        print("GC: number of trajs = ", len(trajs))

    if 'pets-2009/' in path.lower():
        kwargs = {}
        for arg in args:
            if 'calib_path=' in arg:
                kwargs['calib_path'] = arg.replace("calib_path=", "")
        load_pets(path, **kwargs)

    if 'ind/' in path.lower():
        # Test the InD Dataset
        traj_dataset = load_ind(path)
        all_trajs = traj_dataset.get_trajectories()
        print('------------------------')
        print('First trajectory (InD)')
        print('------------------------')
        print(all_trajs[0])
        all_frames = traj_dataset.get_frames()

    if 'wild-track/' in path.lower():
        traj_dataset = load_wildtrack(path)

    if 'town' in path.lower():
        # Construct arguments dictionary
        kwargs = {}
        for arg in args:
            if 'calib_path=' in arg:
                kwargs['calib_path'] = arg.replace("calib_path=", "")

        # Test the Town Center Dataset
        traj_dataset = load_town_center(path, **kwargs)
        all_trajs = traj_dataset.get_trajectories()
        print('------------------------')
        print('First trajectory (Town Center)')
        print('------------------------')
        print(all_trajs[0])
        all_frames = traj_dataset.get_frames()

    if 'chaos' in path.lower():
        print("\n")
        print("ChAOS Style :")
        print(loaders.loadChAOS(path, args.separator))

    print("\n\n-----------------------------\nTest load done\n-----------------------------")
Example #9
        # convert predicted displacements into absolute positions, anchored at
        # each agent's last observed location
        pred_poss = pred_dps.copy()
        for i, agent_id in enumerate(self.cur_agent_ids):
            pred_poss[:, i] += self.obsv_history[agent_id][-1]
        return pred_poss


# test
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    import matplotlib
    matplotlib.use("TkAgg")
    np.set_printoptions(precision=3)

    opentraj_root = "/home/cyrus/workspace2/OpenTraj"
    annot_file = os.path.join(opentraj_root, 'datasets/ETH/seq_eth/obsmat.txt')
    dataset = load_eth(annot_file, title="ETH-Univ")
    obstacles = [[[0, 0], [8, 0]]]
    frames = dataset.get_frames()
    all_agent_ids = dataset.data["agent_id"].unique()
    min_x, max_x = dataset.data["pos_x"].min() * 1.2, dataset.data["pos_x"].max() * 1.2
    min_y, max_y = dataset.data["pos_y"].min() * 1.2, dataset.data["pos_y"].max() * 1.2

    def last_loc(df: pd.DataFrame):
        return df[["pos_x", "pos_y"]].iloc[-1].to_numpy()

    agent_goals = dataset.data.groupby("agent_id").apply(last_loc)
    max_num_concurrent_agents = max([len(fr) for fr in frames])

    predictor_A = TrajPredictor(16, obstacles, "rvo2", "crowdbag")
    predictor_B = TrajPredictor(16, obstacles, "PowerLaw", "umans")