Example No. 1
def load_sdd_dir(path: str, **kwargs):
    # glob every annotations.txt file under the SDD root (one per scene video)
    search_filter_str = "**/annotations.txt"
    if not path.endswith("/"):
        search_filter_str = "/" + search_filter_str
    files_list = sorted(glob.glob(path + search_filter_str, recursive=True))
    scales_yaml_file = os.path.join(path, 'estimated_scales.yaml')
    with open(scales_yaml_file, 'r') as f:
        scales_yaml_content = yaml.load(f, Loader=yaml.FullLoader)

    partial_datasets = []
    for file in files_list:
        dir_names = file.split('/')
        scene_name = dir_names[-3]
        scene_video_id = dir_names[-2]
        scale = scales_yaml_content[scene_name][scene_video_id]['scale']

        partial_dataset = load_sdd(file,
                                   scale=scale,
                                   scene_id=scene_name +
                                   scene_video_id.replace('video', ''))
        partial_datasets.append(partial_dataset.data)

    traj_dataset = TrajDataset()
    traj_dataset.data = pd.concat(partial_datasets)

    fps = 30
    sampling_rate = kwargs.get('sampling_rate', 1)
    use_kalman = kwargs.get('use_kalman', False)
    traj_dataset.postprocess(fps=fps,
                             sampling_rate=sampling_rate,
                             use_kalman=use_kalman)
    return traj_dataset
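A minimal usage sketch for this loader (the SDD root path and the keyword arguments are assumptions; the directory must contain one annotations.txt per video plus estimated_scales.yaml, and load_sdd/TrajDataset come from the same toolkit):

sdd_root = 'datasets/SDD/'  # hypothetical path to a Stanford Drone Dataset checkout
sdd_dataset = load_sdd_dir(sdd_root, sampling_rate=12, use_kalman=False)
print(sdd_dataset.data.head())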
Example No. 2
def ttc(all_frames, name, trajlets):
    all_ttc = []
    Rp = 0.33  # assumed pedestrian radius
    new_frames = []
    for frame in all_frames:
        frame.reset_index(inplace=True)
        # skip frames with a single pedestrian or with invalid velocity values
        if (len(frame.index) < 2 or frame['vel_x'].isnull().values.any()
                or frame['vel_y'].isnull().values.any()):
            continue

        #calculate ttc for each pair
        x_4d = np.stack((frame.pos_x.values, frame.pos_y.values,
                         frame.vel_x.values, frame.vel_y.values),
                        axis=1)
        DCA, TTCA = DCA_MTX(x_4d)

        for i in range(len(TTCA)):
            # TTC of agent i w.r.t. every agent on a collision course (DCA < 2 * Rp)
            ttc = [
                TTCA[i][j] for j in range(len(TTCA[i]))
                if DCA[i][j] < 2 * Rp and TTCA[i][j] > 0
            ]
            # minimum ttc for agent i
            if len(ttc) > 0:
                min_ttc = np.min(ttc)
                frame.loc[i, 'ttc'] = min_ttc

            # minimum distance of closest approach to any other agent
            min_dca = np.min([j for j in DCA[i] if j > 0])
            frame.loc[i, 'dca'] = min_dca

        new_frames.append(frame)
    new_frames = pd.concat(new_frames)
    new_traj = TrajDataset()
    new_traj.data = new_frames
    trajs = new_traj.get_trajectories(label="pedestrian")
    trajlets[name] = split_trajectories(trajs, to_numpy=False)

    # minimum ttc and dca over each trajlet
    avg_traj_ttc = []
    avg_traj_dca = []
    for trajlet in trajlets[name]:
        avg_traj_ttc.append(np.min(trajlet['ttc'].dropna()))  #min of min
        avg_traj_dca.append(np.min(trajlet['dca'].dropna()))  #min of min

    return avg_traj_ttc, avg_traj_dca
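DCA_MTX is not defined in this example. A minimal sketch of what it is expected to compute, assuming constant-velocity motion, is given below; dca_mtx_sketch is a hypothetical stand-in, not the toolkit's implementation:

import numpy as np

def dca_mtx_sketch(x_4d):
    # x_4d: (N, 4) array of [pos_x, pos_y, vel_x, vel_y], one row per agent
    pos, vel = x_4d[:, :2], x_4d[:, 2:]
    dp = pos[:, None, :] - pos[None, :, :]                # pairwise relative positions
    dv = vel[:, None, :] - vel[None, :, :]                # pairwise relative velocities
    dv_sq = np.maximum(np.sum(dv ** 2, axis=-1), 1e-12)   # guard against zero relative speed
    ttca = np.maximum(-np.sum(dp * dv, axis=-1) / dv_sq, 0.0)  # time to closest approach
    dca = np.linalg.norm(dp + ttca[..., None] * dv, axis=-1)   # distance at that time
    return dca, ttca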
Example No. 3
def local_density(all_frames, trajlets, name):
    # local density estimate:
    # for every pedestrian in a frame, find its distance to the nearest neighbour
    distNN = []
    dens_t = []
    a = 1  # smoothing (bandwidth) factor of the Gaussian kernel
    new_frames = []
    for frame in all_frames:
        if len(frame) > 1:
            # find pairwise distances and each agent's nearest-neighbour distance
            distNN.append([])
            dens_t.append([])
            dist = squareform(pdist(frame[['pos_x', 'pos_y']].values))
            pair_dist = []
            for pi in dist:
                pair_dist.append(np.array(pi))
                # nearest-neighbour distance, ignoring the (near-)zero self-distance
                min_pi = [j for j in pi if j > 0.01]
                if len(min_pi) == 0:
                    min_dist = 0.01
                else:
                    min_dist = np.min(min_pi)
                distNN[-1].append(min_dist)

            # local density around agent pj: Gaussian kernel scaled by nearest-neighbour distances
            for pj in range(len(dist)):
                bw = a * np.array(distNN[-1])  # per-agent kernel bandwidth
                dens_t_i = 1 / (2 * np.pi) * np.sum(
                    1 / bw ** 2 * np.exp(-pair_dist[pj] ** 2 / (2 * bw ** 2)))
                dens_t[-1].append(dens_t_i)
                frame.loc[frame.index[pj], 'p_local'] = dens_t_i
        new_frames.append(frame)
    new_frames = pd.concat(new_frames)
    new_traj = TrajDataset()
    new_traj.data = new_frames

    trajs = new_traj.get_trajectories(label="pedestrian")
    trajlets[name] = split_trajectories(trajs, to_numpy=False)

    # maximum local density over each trajlet
    avg_traj_plocal = []
    for trajlet in trajlets[name]:
        avg_traj_plocal.append(np.max(trajlet['p_local']))

    return avg_traj_plocal
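A minimal usage sketch (grouping the dataset into per-frame DataFrames and the dataset name are assumptions; frame_id is a column of a loaded TrajDataset and split_trajectories comes from the same toolkit):

all_frames = [g for _, g in dataset.data.groupby('frame_id')]  # dataset: a loaded TrajDataset
trajlets = {}
max_plocal_per_trajlet = local_density(all_frames, trajlets, 'eth-univ')
print(np.mean(max_plocal_per_trajlet))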
Example No. 4
def get_datasets(opentraj_root, dataset_names):
    datasets = {}

    # Make a temp dir to store and load trajdatasets (no postprocess anymore)
    trajdataset_dir = os.path.join(opentraj_root, 'trajdatasets__temp')
    os.makedirs(trajdataset_dir, exist_ok=True)

    for dataset_name in dataset_names:
        dataset_h5_file = os.path.join(trajdataset_dir, dataset_name + '.h5')
        if os.path.exists(dataset_h5_file):
            datasets[dataset_name] = TrajDataset()
            datasets[dataset_name].data = pd.read_pickle(dataset_h5_file)
            datasets[dataset_name].title = dataset_name
            print("loading dataset from pre-processed file: ", dataset_h5_file)
            continue

        print("Loading dataset:", dataset_name)

        # ========== ETH ==============
        if 'eth-univ' == dataset_name.lower():
            eth_univ_root = os.path.join(opentraj_root, 'datasets/ETH/seq_eth/obsmat.txt')
            datasets[dataset_name] = load_eth(eth_univ_root, title=dataset_name, scene_id='Univ',
                                              use_kalman=True)

        elif 'eth-hotel' == dataset_name.lower():
            eth_hotel_root = os.path.join(opentraj_root, 'datasets/ETH/seq_hotel/obsmat.txt')
            datasets[dataset_name] = load_eth(eth_hotel_root, title=dataset_name, scene_id='Hotel')
        # ******************************

        # ========== UCY ==============
        elif 'ucy-zara' == dataset_name.lower():  # all 3 zara sequences
            zara01_dir = os.path.join(opentraj_root, 'datasets/UCY/zara01')
            zara02_dir = os.path.join(opentraj_root, 'datasets/UCY/zara02')
            zara03_dir = os.path.join(opentraj_root, 'datasets/UCY/zara03')
            zara_01_ds = load_crowds(zara01_dir + '/annotation.vsp',
                                     homog_file=zara01_dir + '/H.txt',
                                     scene_id='1', use_kalman=True)
            zara_02_ds = load_crowds(zara02_dir + '/annotation.vsp',
                                     homog_file=zara02_dir + '/H.txt',
                                     scene_id='2', use_kalman=True)
            zara_03_ds = load_crowds(zara03_dir + '/annotation.vsp',
                                     homog_file=zara03_dir + '/H.txt',
                                     scene_id='3', use_kalman=True)
            datasets[dataset_name] = merge_datasets([zara_01_ds, zara_02_ds, zara_03_ds], dataset_name)

        elif 'ucy-univ' == dataset_name.lower():  # all 3 sequences
            st001_dir = os.path.join(opentraj_root, 'datasets/UCY/students01')
            st003_dir = os.path.join(opentraj_root, 'datasets/UCY/students03')
            uni_ex_dir = os.path.join(opentraj_root, 'datasets/UCY/uni_examples')
            #st001_ds = load_Crowds(st001_dir + '/students001.txt',homog_file=st001_dir + '/H.txt',scene_id='1',use_kalman=True)

            st001_ds = load_crowds(st001_dir + '/annotation.vsp',
                                   homog_file=st003_dir + '/H.txt',
                                   scene_id='1', use_kalman=True) 

            st003_ds = load_crowds(st003_dir + '/annotation.vsp',
                                   homog_file=st003_dir + '/H.txt',
                                   scene_id='3', use_kalman=True)
            uni_ex_ds = load_crowds(uni_ex_dir + '/annotation.vsp',
                                    homog_file=st003_dir + '/H.txt',
                                    scene_id='ex', use_kalman=True)
            datasets[dataset_name] = merge_datasets([st001_ds, st003_ds, uni_ex_ds], dataset_name)

        elif 'ucy-zara1' == dataset_name.lower():
            zara01_root = os.path.join(opentraj_root, 'datasets/UCY/zara01/obsmat.txt')
            datasets[dataset_name] = load_eth(zara01_root, title=dataset_name)

        elif 'ucy-zara2' == dataset_name.lower():
            zara02_root = os.path.join(opentraj_root, 'datasets/UCY/zara02/obsmat.txt')
            datasets[dataset_name] = load_eth(zara02_root, title=dataset_name)

        elif 'ucy-univ3' == dataset_name.lower():
            students03_root = os.path.join(opentraj_root, 'datasets/UCY/students03/obsmat.txt')
            datasets[dataset_name] = load_eth(students03_root, title=dataset_name)
        # ******************************

        # ========== HERMES ==============
        elif 'bn' in dataset_name.lower().split('-'):
            [_, exp_flow, cor_size] = dataset_name.split('-')
            if exp_flow == '1d' and cor_size == 'w180':   # 'Bottleneck-udf-180'
                bottleneck_path = os.path.join(opentraj_root, 'datasets/HERMES/Corridor-1D/uo-180-180-120.txt')
            elif exp_flow == '2d' and cor_size == 'w160':  # 'Bottleneck-bdf-160'
                bottleneck_path = os.path.join(opentraj_root, "datasets/HERMES/Corridor-2D/bo-360-160-160.txt")
            else:
                print("Unknown Bottleneck dataset!")
                continue
            datasets[dataset_name] = load_bottleneck(bottleneck_path, sampling_rate=6,
                                                     use_kalman=True,
                                                     title=dataset_name)
        # ******************************

        # ========== PETS ==============
        elif 'pets-s2l1' == dataset_name.lower():
            pets_root = os.path.join(opentraj_root, 'datasets/PETS-2009/data')
            datasets[dataset_name] = load_pets(os.path.join(pets_root, 'annotations/PETS2009-S2L1.xml'),  #Pat:was PETS2009-S2L2
                                               calib_path=os.path.join(pets_root, 'calibration/View_001.xml'),
                                               sampling_rate=2,
                                               title=dataset_name)
        # ******************************

        # ========== GC ==============
        elif 'gc' == dataset_name.lower():
            gc_root = os.path.join(opentraj_root, 'datasets/GC/Annotation')
            datasets[dataset_name] = load_gcs(gc_root, world_coord=True, title=dataset_name,
                                              use_kalman=True
                                              )
        # ******************************

        # ========== InD ==============
        elif 'ind-1' == dataset_name.lower():
            ind_root = os.path.join(opentraj_root, 'datasets/InD/inD-dataset-v1.0/data')
            file_ids = range(7, 17 + 1)  # location_id = 1
            ind_1_datasets = []
            for id in file_ids:
                dataset_i = load_ind(os.path.join(ind_root, '%02d_tracks.csv' % id),
                                     scene_id='1-%02d' %id,
                                     sampling_rate=10,
                                     use_kalman=True)
                ind_1_datasets.append(dataset_i)
            datasets[dataset_name] = merge_datasets(ind_1_datasets, new_title=dataset_name)

        elif 'ind-2' == dataset_name.lower():
            ind_root = os.path.join(opentraj_root, 'datasets/InD/inD-dataset-v1.0/data')
            file_ids = range(18, 29 + 1)  # location_id = 2
            ind_2_datasets = []
            for id in file_ids:
                dataset_i = load_ind(os.path.join(ind_root, '%02d_tracks.csv' % id),
                                     scene_id='2-%02d' % id,
                                     sampling_rate=10,
                                     use_kalman=True)
                ind_2_datasets.append(dataset_i)
            datasets[dataset_name] = merge_datasets(ind_2_datasets, new_title=dataset_name)

        elif 'ind-3' == dataset_name.lower():
            ind_root = os.path.join(opentraj_root, 'datasets/InD/inD-dataset-v1.0/data')
            file_ids = range(30, 32 + 1)  # location_id = 3
            ind_3_datasets = []
            for id in file_ids:
                dataset_i = load_ind(os.path.join(ind_root, '%02d_tracks.csv' % id),
                                     scene_id='3-%02d' % id,
                                     sampling_rate=10,
                                     use_kalman=True)
                ind_3_datasets.append(dataset_i)
            datasets[dataset_name] = merge_datasets(ind_3_datasets, new_title=dataset_name)

        elif 'ind-4' == dataset_name.lower():
            ind_root = os.path.join(opentraj_root, 'datasets/InD/inD-dataset-v1.0/data')
            file_ids = range(0, 6 + 1)  # location_id = 4
            ind_4_datasets = []
            for id in file_ids:
                dataset_i = load_ind(os.path.join(ind_root, '%02d_tracks.csv' % id),
                                     scene_id='4-%02d' % id,
                                     sampling_rate=10,
                                     use_kalman=True)
                ind_4_datasets.append(dataset_i)
            datasets[dataset_name] = merge_datasets(ind_4_datasets, new_title=dataset_name)
        # ******************************

        # ========== KITTI ==============
        elif 'kitti' == dataset_name.lower():
            kitti_root = os.path.join(opentraj_root, 'datasets/KITTI/data')
            datasets[dataset_name] = load_kitti(kitti_root, title=dataset_name,
                                                use_kalman=True,
                                                sampling_rate=1)  # FixMe: apparently original_fps = 2.5
        # ******************************

        # ========== L-CAS ==============
        elif 'lcas-minerva' == dataset_name.lower():
            lcas_root = os.path.join(opentraj_root, 'datasets/L-CAS/data')
            datasets[dataset_name] = load_lcas(lcas_root, title=dataset_name,
                                               use_kalman=True,
                                               sampling_rate=1)  # FixMe: apparently original_fps = 2.5
        # ******************************

        # ========== Wild-Track ==============
        elif 'wildtrack' == dataset_name.lower():
            wildtrack_root = os.path.join(opentraj_root, 'datasets/Wild-Track/annotations_positions')
            datasets[dataset_name] = load_wildtrack(wildtrack_root, title=dataset_name,
                                                    use_kalman=True,
                                                    sampling_rate=1)  # original_annot_framerate=2
        # ******************************

        # ========== Edinburgh ==============
        elif 'edinburgh' in dataset_name.lower():
            edinburgh_dir = os.path.join(opentraj_root, 'datasets/Edinburgh/annotations')
            if 'edinburgh' == dataset_name.lower():   # all files
                # edinburgh_path = edinburgh_dir
                # select 1-10 Sep
                Ed_selected_days = ['01Sep', '02Sep', '04Sep', '05Sep', '06Sep', '10Sep']
                partial_ds = []
                for selected_day in Ed_selected_days:
                    edinburgh_path = os.path.join(edinburgh_dir, 'tracks.%s.txt' % selected_day)
                    partial_ds.append(load_edinburgh(edinburgh_path, title=dataset_name,
                                                     use_kalman=True, scene_id=selected_day,
                                                     sampling_rate=4)  # original_framerate=9
                                      )
                datasets[dataset_name] = merge_datasets(partial_ds, new_title=dataset_name)

            else:
                seq_date = dataset_name.split('-')[1]
                edinburgh_path = os.path.join(edinburgh_dir, 'tracks.%s.txt' % seq_date)
                datasets[dataset_name] = load_edinburgh(edinburgh_path, title=dataset_name,
                                                        use_kalman=True,
                                                        sampling_rate=4)  # original_framerate=9
        # ******************************

        # ========== Town-Center ==============
        elif 'towncenter' == dataset_name.lower():
            towncenter_root = os.path.join(opentraj_root, 'datasets/Town-Center')
            # FixMe: might need Kalman Smoother
            datasets[dataset_name] = load_town_center(towncenter_root + '/TownCentre-groundtruth-top.txt',
                                                      calib_path=towncenter_root + '/TownCentre-calibration-ci.txt',
                                                      title=dataset_name,
                                                      use_kalman=True,
                                                      sampling_rate=10)  # original_framerate=25
        # ******************************

        # ========== SDD ==============
        elif 'sdd-' in dataset_name.lower():
            scene_name = dataset_name.split('-')[1]
            sdd_root = os.path.join(opentraj_root, 'datasets', 'SDD')
            annot_files_sdd = sorted(glob.glob(sdd_root + '/' + scene_name + "/**/annotations.txt", recursive=True))

            sdd_scales_yaml_file = os.path.join(sdd_root, 'estimated_scales.yaml')
            with open(sdd_scales_yaml_file, 'r') as f:
                scales_yaml_content = yaml.load(f, Loader=yaml.FullLoader)

            scene_datasets = []
            for file_name in annot_files_sdd:
                filename_parts = file_name.split('/')
                scene_name = filename_parts[-3]
                scene_video_id = filename_parts[-2]
                scale = scales_yaml_content[scene_name][scene_video_id]['scale']
                sdd_dataset_i = load_sdd(file_name, scale=scale,
                                         scene_id=scene_name + scene_video_id.replace('video', ''),
                                         drop_lost_frames=False,
                                         use_kalman=True,
                                         sampling_rate=12)  # original_framerate=30
                scene_datasets.append(sdd_dataset_i)
            scene_dataset = merge_datasets(scene_datasets, dataset_name)
            datasets[dataset_name] = scene_dataset
        # ******************************

        else:
            print("Error! invalid dataset name:", dataset_name)
            continue

        # cache the pre-processed data (pickled DataFrame) for the next run
        datasets[dataset_name].data.to_pickle(dataset_h5_file)
        print("saving dataset into pre-processed file: ", dataset_h5_file)

    return datasets
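A minimal usage sketch (the root path and the dataset names are assumptions; each value in the returned dict is a TrajDataset whose data attribute is a pandas DataFrame):

opentraj_root = './OpenTraj'  # hypothetical checkout containing the datasets/ folder
datasets = get_datasets(opentraj_root, ['eth-univ', 'ucy-zara1'])
for name, ds in datasets.items():
    print(name, len(ds.data), 'rows')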