Example No. 1
def load_sdd_dir(path: str, **kwargs):
    search_filter_str = "**/annotations.txt"
    if not path.endswith("/"):
        search_filter_str = "/" + search_filter_str
    files_list = sorted(glob.glob(path + search_filter_str, recursive=True))
    scales_yaml_file = os.path.join(path, 'estimated_scales.yaml')
    with open(scales_yaml_file, 'r') as f:
        scales_yaml_content = yaml.load(f, Loader=yaml.FullLoader)

    partial_datasets = []
    for file in files_list:
        dir_names = file.split('/')
        scene_name = dir_names[-3]
        scene_video_id = dir_names[-2]
        scale = scales_yaml_content[scene_name][scene_video_id]['scale']

        partial_dataset = load_sdd(file,
                                   scale=scale,
                                   scene_id=scene_name +
                                   scene_video_id.replace('video', ''))
        partial_datasets.append(partial_dataset.data)

    traj_dataset = TrajDataset()
    traj_dataset.data = pd.concat(partial_datasets)

    fps = 30
    sampling_rate = kwargs.get('sampling_rate', 1)
    use_kalman = kwargs.get('use_kalman', False)
    traj_dataset.postprocess(fps=fps,
                             sampling_rate=sampling_rate,
                             use_kalman=use_kalman)
    return traj_dataset
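A minimal usage sketch for the loader above (the local dataset path and keyword values are assumptions, not part of the original code):

# hypothetical path to a local copy of the Stanford Drone Dataset
sdd_all = load_sdd_dir('datasets/SDD/', sampling_rate=12, use_kalman=True)
print(sdd_all.data[['scene_id', 'agent_id', 'pos_x', 'pos_y']].head())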
Example No. 2
def load_bottleneck(path, **kwargs):
    traj_dataset = TrajDataset()

    csv_columns = ["agent_id", "frame_id", "pos_x", "pos_y", "pos_z"]
    # read from csv => fill traj table
    raw_dataset = pd.read_csv(path, sep=r"\s+", header=None, names=csv_columns)

    # convert from cm => meter
    raw_dataset["pos_x"] = raw_dataset["pos_x"] / 100.
    raw_dataset["pos_y"] = raw_dataset["pos_y"] / 100.

    traj_dataset.title = kwargs.get('title', "no_title")

    # copy columns
    traj_dataset.data[["frame_id", "agent_id", "pos_x", "pos_y"]] = \
        raw_dataset[["frame_id", "agent_id", "pos_x", "pos_y"]]

    traj_dataset.data["scene_id"] = kwargs.get('scene_id', 0)
    traj_dataset.data["label"] = "pedestrian"

    # post-process
    fps = kwargs.get('fps', 16)
    sampling_rate = kwargs.get('sampling_rate', 1)
    use_kalman = kwargs.get('use_kalman', False)
    traj_dataset.postprocess(fps=fps,
                             sampling_rate=sampling_rate,
                             use_kalman=use_kalman)

    transform = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
    traj_dataset.apply_transformation(transform, inplace=True)

    return traj_dataset
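The final transformation is a plain axis swap in homogeneous coordinates; a quick standalone check of what it does to one point:

import numpy as np

transform = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
p = np.array([2.0, 5.0, 1.0])  # homogeneous point with x=2, y=5
print(transform @ p)           # -> [5. 2. 1.]: x and y are exchanged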
Example No. 3
def load_lcas(path, **kwargs):
    traj_dataset = TrajDataset()
    traj_dataset.title = "LCAS"
    minerva_files_list = glob.glob(path + "/minerva/**/data.csv", recursive=True)
    minerva_columns = [
        'frame_id', 'person_id', 'pos_x', 'pos_y', 'rot_z', 'rot_w', 'scene_id'
    ]

    # read from minerva data.csv
    minerva_raw_dataset = []
    # load data from all files
    for file in minerva_files_list:
        data = pd.read_csv(file, sep=",", header=None, names=minerva_columns)
        minerva_raw_dataset.append(data)
    minerva_raw_dataset = pd.concat(minerva_raw_dataset)
    minerva_raw_dataset['scene_id'] = 'minerva'

    minerva_raw_dataset.reset_index(inplace=True, drop=True)

    traj_dataset.title = kwargs.get('title', "LCAS")
    traj_dataset.data[["frame_id", "agent_id","pos_x", "pos_y","scene_id"]] = \
        minerva_raw_dataset[["frame_id", "person_id","pos_x","pos_y","scene_id"]]

    traj_dataset.data["label"] = "pedestrian"

    # post-process. The LCAS raw data does not include velocities; they are computed in postprocessing
    fps = kwargs.get('fps', 2.5)
    sampling_rate = kwargs.get('sampling_rate', 1)
    use_kalman = kwargs.get('use_kalman', False)
    traj_dataset.postprocess(fps=fps,
                             sampling_rate=sampling_rate,
                             use_kalman=use_kalman)
    return traj_dataset
Example No. 4
def load_crowds(path, **kwargs):
    """:param path: string, path to folder"""
    # pass the homography matrix as well

    homog_file = kwargs.get("homog_file", "")
    Homog = np.loadtxt(homog_file) if os.path.exists(homog_file) else np.eye(3)
    raw_dataset = pd.DataFrame()

    data = CrowdLoader(Homog).load(path)
    raw_dataset["frame_id"] = [data[i].frame for i in range(len(data))]
    raw_dataset["agent_id"] = [data[i].pedestrian for i in range(len(data))]
    raw_dataset["pos_x"] = [data[i].x for i in range(len(data))]
    raw_dataset["pos_y"] = [data[i].y for i in range(len(data))]

    traj_dataset = TrajDataset()

    traj_dataset.title = kwargs.get('title', "Crowds")
    # copy columns
    traj_dataset.data[["frame_id", "agent_id",  "pos_x", "pos_y"]] = \
        raw_dataset[["frame_id", "agent_id", "pos_x", "pos_y"]]

    traj_dataset.data["scene_id"] = kwargs.get('scene_id', 0)
    traj_dataset.data["label"] = "pedestrian"

    # post-process
    fps = kwargs.get('fps', 25)

    sampling_rate = kwargs.get('sampling_rate', 1)
    use_kalman = kwargs.get('use_kalman', False)
    traj_dataset.postprocess(fps=fps,
                             sampling_rate=sampling_rate,
                             use_kalman=use_kalman)

    return traj_dataset
Example No. 5
def min_dist_plot(dataset: TrajDataset):
    frames = dataset.get_frames()
    min_dists = np.ones(len(frames)) * 1000  # a big number
    for ii, frame in enumerate(frames):
        N_t = len(frame)
        if N_t < 2: continue
        X_t = frame[["pos_x", "pos_y"]].to_numpy()
        # compute distance matrix between all pairs of agents
        DD_t = euclidean_distances(X_t)
        DD_t = DD_t[~np.eye(N_t, dtype=bool)].reshape(N_t, N_t - 1)
        min_DD_t = np.amin(DD_t)
        min_dists[ii] = min_DD_t

    bins = np.linspace(0, 4, 40)
    hist, bins, patches = plt.hist(min_dists,
                                   bins,
                                   color='green',
                                   density=True,
                                   alpha=0.7)

    plt.title(dataset.title)
    plt.ylabel('histogram of min distances')
    plt.xlabel('meter')
    plt.xlim([bins[0], bins[-1]])
    plt.ylim([0, 2])

    return hist
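The reshape trick above drops the zero diagonal (self-distances) before taking the minimum; a standalone illustration:

import numpy as np
from sklearn.metrics.pairwise import euclidean_distances

X = np.array([[0., 0.], [3., 4.], [6., 8.]])
D = euclidean_distances(X)                        # 3x3, zeros on the diagonal
D_off = D[~np.eye(3, dtype=bool)].reshape(3, 2)   # per row: distances to the other agents
print(D_off.min())                                # 5.0: the smallest inter-agent distance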
Example No. 6
def path_efficiency_plot(dataset: TrajDataset):
    ped_trajectories = dataset.get_trajectories()
    efficiencies = []
    for traj in ped_trajectories:
        if len(traj) < 2: continue

        try:
            p_eff = path_efficiency(traj)
            efficiencies.append(p_eff * 100)
        except Exception as e:
            print('Error in path efficiency metric:', e)

    bins = np.linspace(50, 100, 25)
    hist, bins, patches = plt.hist(efficiencies,
                                   bins,
                                   color='pink',
                                   density=True,
                                   alpha=0.7)
    # hist, bin_edges = np.histogram(efficiencies, bins, density=True)

    plt.title(dataset.title)
    plt.ylabel('histogram of path efficiencies')
    plt.xlabel('percent')
    plt.xlim([bins[0], bins[-1]])
    plt.ylim([0, 0.5])

    return hist
Example No. 7
def load_forking_path(path, **kwargs):
    traj_dataset = TrajDataset()
    raw_data = []
    with open(path, 'r') as json_file:
        annots_list = json.loads(json_file.read())
        for annot_dict in annots_list:
            scene_name = annot_dict['scenename']
            ped_controls = annot_dict['ped_controls']

            for frame_id, control_data in ped_controls.items():
                # each control entry: (person_id, _, xyz, direction_vector,
                #                      speed, time_elapsed, is_static)
                for person_id, _, xyz, _, _, _, _ in sorted(control_data):
                    raw_data.append(
                        [int(frame_id), int(person_id), *xyz[:2], scene_name])

    raw_dataset = pd.DataFrame(
        raw_data, columns=["frame_id", "agent_id", "pos_x", "pos_y", "scene_id"])
    raw_dataset = raw_dataset.sort_values(by='frame_id',
                                          ascending=True).reset_index(drop=True)

    # copy columns
    traj_dataset.data[["frame_id", "agent_id", "pos_x", "pos_y", "scene_id"]] = \
        raw_dataset[["frame_id", "agent_id", "pos_x", "pos_y", "scene_id"]]
    traj_dataset.data["label"] = "pedestrian"
    return traj_dataset
Example No. 8
def load_town_center(path, **kwargs):
    # Construct dataset
    traj_dataset = TrajDataset()

    # Note: we assume here that the path that is passed is the one to the tracks CSV.
    # Read the tracks
    raw_dataset = pd.read_csv(path, sep=",", header=0,
                              names=["personNumber", "frameNumber", "headValid", "bodyValid", "headLeft", "headTop",
                                     "headRight", "headBottom", "bodyLeft", "bodyTop", "bodyRight", "bodyBottom"])

    # Get bottom (feet) of bounding boxes
    raw_dataset["body_x"] = (raw_dataset["bodyLeft"] + raw_dataset["bodyRight"]) / 2.0
    raw_dataset["body_y"] = raw_dataset["bodyBottom"]

    raw_dataset["head_x"] = (raw_dataset["headLeft"] + raw_dataset["headRight"]) / 2.0
    raw_dataset["head_y"] = (raw_dataset["headTop"] + raw_dataset["headBottom"]) / 2.0

    # Required information
    raw_dataset["label"] = "pedestrian"

    # Read camera calibration
    calibration_path = kwargs.get('calib_path', 'none')
    rvec, tvec, cameraMatrix, distCoeffs =\
        read_projection_parameters(calibration_path)

    # Obtain real world coordinates from image
    pts = np.array([raw_dataset["body_x"], raw_dataset["body_y"]]).T
    objPts = obtainObjectPoints(pts, rvec, tvec,
                                cameraMatrix, distCoeffs)

    # Add object points to raw dataset
    raw_dataset['pos_x'] = objPts[:, 0]
    raw_dataset['pos_y'] = objPts[:, 1]
    raw_dataset['pos_z'] = objPts[:, 2]

    # Remove invalid body bounding boxes
    raw_dataset = raw_dataset[raw_dataset.bodyValid == 1]

    # Copy columns
    traj_dataset.data[["frame_id", "agent_id", "pos_x", "pos_y"]] = \
        raw_dataset[["frameNumber", "personNumber", "pos_x", "pos_y"]]

    # FixMe: for debug
    traj_dataset.data[["body_x", "body_y"]] = \
        raw_dataset[["body_x", "body_y"]].astype(int)

    # Recording information
    traj_dataset.title = kwargs.get('title', "Town-Center")
    traj_dataset.data["label"] = "pedestrian"

    # post-process
    fps = kwargs.get('fps', 25)
    sampling_rate = kwargs.get('sampling_rate', 1)
    use_kalman = kwargs.get('use_kalman', False)
    traj_dataset.postprocess(fps=fps, sampling_rate=sampling_rate, use_kalman=use_kalman)

    return traj_dataset
Example No. 9
def local_density(all_frames, trajlets, name):
    # local density: for every pedestrian in a frame, use the distance to its
    # nearest neighbour as a per-agent Gaussian bandwidth (scaled by a)
    distNN = []
    dens_t = []
    a = 1  # bandwidth scale factor
    new_frames = []
    for frame in all_frames:
        if len(frame) > 1:
            # pairwise distances and per-agent nearest-neighbour distance
            distNN.append([])
            dens_t.append([])
            dist = squareform(pdist(frame[['pos_x', 'pos_y']].values))
            pair_dist = []
            for pi in dist:
                pair_dist.append(np.array(pi))
                min_pi = [j for j in pi if j > 0.01]  # ignore self-distance
                min_dist = np.min(min_pi) if len(min_pi) else 0.01
                distNN[-1].append(min_dist)

            # local density for agent pj: Gaussian kernel sum, where each
            # contributing agent i has bandwidth a * distNN[i]
            bw = (a * np.array(distNN[-1])) ** 2
            for pj in range(len(dist)):
                dens_t_i = 1 / (2 * np.pi) * np.sum(
                    1 / bw * np.exp(-pair_dist[pj] ** 2 / (2 * bw)))
                dens_t[-1].append(dens_t_i)
                frame.loc[frame.index[pj], 'p_local'] = dens_t_i
        new_frames.append(frame)
    new_frames = pd.concat(new_frames)
    new_traj = TrajDataset()
    new_traj.data = new_frames

    trajs = new_traj.get_trajectories(label="pedestrian")
    trajlets[name] = split_trajectories(trajs, to_numpy=False)

    # peak (max) local density over each trajlet
    avg_traj_plocal = []
    for trajlet in trajlets[name]:
        avg_traj_plocal.append(np.max(trajlet['p_local']))

    return avg_traj_plocal
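The kernel sum above is a Gaussian density estimate in which each contributing agent i has its own bandwidth a * distNN[i], i.e. its nearest-neighbour distance. A compact standalone sketch of the same estimator (the helper name and toy positions are mine):

import numpy as np
from scipy.spatial.distance import pdist, squareform

def local_density_frame(positions, a=1.0):
    # positions: (N, 2) array of pedestrian coordinates in one frame
    dist = squareform(pdist(positions))
    # nearest-neighbour distance per agent (the > 0.01 guard skips self-distances)
    nn = np.array([min((d for d in row if d > 0.01), default=0.01) for row in dist])
    bw = (a * nn) ** 2
    return np.array([np.sum(np.exp(-dist[j] ** 2 / (2 * bw)) / bw) / (2 * np.pi)
                     for j in range(len(positions))])

print(local_density_frame(np.array([[0., 0.], [1., 0.], [0., 1.]])))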
Example No. 10
def ttc(all_frames, name, trajlets):
    all_ttc = []
    Rp = 0.33  # assumed pedestrian radius (m)
    new_frames = []
    for frame in all_frames:
        frame.reset_index(inplace=True)
        # skip frames with a single pedestrian or invalid velocity values
        if (len(frame.index) < 2 or frame['vel_x'].isnull().values.any()
                or frame['vel_y'].isnull().values.any()):
            continue

        #calculate ttc for each pair
        x_4d = np.stack((frame.pos_x.values, frame.pos_y.values,
                         frame.vel_x.values, frame.vel_y.values),
                        axis=1)
        DCA, TTCA = DCA_MTX(x_4d)

        for i in range(len(TTCA)):
            #find out ttc of one agent
            ttc = [
                TTCA[i][j] for j in range(len(TTCA[i]))
                if DCA[i][j] < 2 * Rp and TTCA[i][j] > 0
            ]
            #find out min ttc for one agent
            if len(ttc) > 0:
                min_ttc = np.min(ttc)
                frame.loc[i, 'ttc'] = min_ttc

            min_dca = np.min([j for j in DCA[i] if j > 0])
            frame.loc[i, 'dca'] = min_dca

        new_frames.append(frame)
    new_frames = pd.concat(new_frames)
    new_traj = TrajDataset()
    new_traj.data = new_frames
    trajs = new_traj.get_trajectories(label="pedestrian")
    trajlets[name] = split_trajectories(trajs, to_numpy=False)

    # min TTC and min DCA over each trajlet (min of per-frame minima)
    avg_traj_ttc = []
    avg_traj_dca = []
    for trajlet in trajlets[name]:
        avg_traj_ttc.append(np.min(trajlet['ttc'].dropna()))  #min of min
        avg_traj_dca.append(np.min(trajlet['dca'].dropna()))  #min of min

    return avg_traj_ttc, avg_traj_dca
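DCA_MTX is external to this snippet, but for a single pair of agents the two quantities have a simple closed form: with relative position dp and relative velocity dv, the time to closest approach is t* = -dp.dv / |dv|^2 and the distance at closest approach is |dp + t* dv|. A hedged standalone sketch (my own helper, not the library function):

import numpy as np

def pair_dca_ttca(p1, v1, p2, v2):
    dp = np.asarray(p2, float) - np.asarray(p1, float)
    dv = np.asarray(v2, float) - np.asarray(v1, float)
    if not dv.any():                       # equal velocities: the gap never changes
        return np.linalg.norm(dp), 0.0
    t_star = -np.dot(dp, dv) / np.dot(dv, dv)
    return np.linalg.norm(dp + t_star * dv), t_star

# two pedestrians walking toward each other along x, offset 0.5 m in y
print(pair_dca_ttca([0, 0], [1.2, 0], [10, 0.5], [-1.2, 0]))  # (0.5, ~4.17 s)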
Example No. 11
def load_kitti(path, **kwargs):
    traj_dataset = TrajDataset()
    traj_dataset.title = "KITTI"
    track_files_list = sorted(glob.glob(path + "/label/*.txt"))
    calib_files_list = sorted(glob.glob(path + "/calib/*.txt"))
    imu_files_list = sorted(glob.glob(path + "/oxts/*.txt"))

    #load track data, calibration data, IMU data from all scenes
    track_rawData = loadTrack(track_files_list)  #(left camera coordinate)
    calib_rawData = loadCalib(calib_files_list)
    imu_rawData = loadIMU(imu_files_list)

    #convert track data to world coordinate (imu coordinate in the first frame of that scene)
    track_world_pos = track_camToworld(track_rawData, calib_rawData,
                                       imu_rawData)

    track_rawData = pd.concat(track_rawData)
    track_rawData.reset_index(inplace=True, drop=True)

    traj_dataset.data[["frame_id", "agent_id", "label",
                       "scene_id"]] = track_rawData[[
                           "frame", "agent_id", "type", "scene"
                       ]]
    traj_dataset.data[["pos_x", "pos_y",
                       "pos_z"]] = track_world_pos[["pos_x", "pos_y", "pos_z"]]

    # post-process. The KITTI raw data does not include velocities; they are computed in postprocessing
    fps = kwargs.get('fps', 10)
    sampling_rate = kwargs.get('sampling_rate', 1)
    use_kalman = kwargs.get('use_kalman', False)
    traj_dataset.postprocess(fps=fps,
                             sampling_rate=sampling_rate,
                             use_kalman=use_kalman)

    return traj_dataset
Example No. 12
def load_cff(path, **kwargs):
    traj_dataset = TrajDataset()

    # read from csv => python str
    # sample line:  2012-09-18T06:25:00:036;PIE;17144;50515;1
    # columns = ["Year", "Month", "Day", "Hour", "min", "sec", "msec", "place", "x_mm", "y_mm", "agent_id"]
    with open(path, 'r') as inp_file:
        file_content = inp_file.read()
        file_content = file_content.replace('T', '-').replace(':', '-') \
                                    .replace(';', '-').replace('\n', '-')
    segments = file_content.split('-')
    segments.remove('')  # drop the empty segment left by the trailing newline
    year = np.array(segments[0::11], dtype=int)
    month = np.array(segments[1::11], dtype=int)
    day = np.array(segments[2::11], dtype=int)
    hour = np.array(segments[3::11], dtype=int)
    minute = np.array(segments[4::11], dtype=int)
    second = np.array(segments[5::11], dtype=int)
    milli_sec = np.array(segments[6::11], dtype=int)
    place = np.array(segments[7::11], dtype=str)
    x_mm = np.array(segments[8::11], dtype=float)
    y_mm = np.array(segments[9::11], dtype=float)
    agent_id = np.array(segments[10::11], dtype=int)
    # skip year and month
    timestamp = (
        (day * 24 + hour) * 60 + minute) * 60 + second + milli_sec / 1000.
    fps = 10

    traj_dataset.title = kwargs.get('title', "Train Terminal")

    raw_dataset = pd.DataFrame({
        "timestamp": timestamp,
        "frame_id": (timestamp * fps).astype(int),
        "agent_id": agent_id,
        "pos_x": x_mm / 1000.,
        "pos_y": y_mm / 1000.,
    })

    # raw_dataset["scene_id"] = place
    scene_id = kwargs.get('scene_id', 0)
    raw_dataset["scene_id"] = scene_id

    # copy columns
    traj_dataset.data[["scene_id", "timestamp", "frame_id", "agent_id",
                       "pos_x", "pos_y"]] = \
        raw_dataset[["scene_id", "timestamp", "frame_id", "agent_id",
                     "pos_x", "pos_y"]]

    traj_dataset.data["label"] = "pedestrian"

    # post-process
    sampling_rate = kwargs.get('sampling_rate', 1)
    use_kalman = kwargs.get('use_kalman',
                            True)  # use kalman smoother by default
    traj_dataset.postprocess(fps=fps,
                             sampling_rate=sampling_rate,
                             use_kalman=use_kalman)

    return traj_dataset
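The stride-11 slicing works because every record flattens to exactly 11 dash-separated segments; a quick check on the sample line quoted in the comment above:

line = "2012-09-18T06:25:00:036;PIE;17144;50515;1"
segs = line.replace('T', '-').replace(':', '-').replace(';', '-').split('-')
print(len(segs))  # -> 11: year, month, day, hour, min, sec, msec, place, x_mm, y_mm, agent_id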
Example No. 13
def load_trajnet(path, **kwargs):
    traj_dataset = TrajDataset()
    traj_dataset.title = kwargs.get('title', "TrajNet")

    csv_columns = ["frame_id", "agent_id", "pos_x", "pos_y"]

    # read from csv => fill traj
    raw_dataset = pd.read_csv(path, sep=" ", header=None, names=csv_columns)
    raw_dataset.replace('?', np.nan, inplace=True)
    raw_dataset.dropna(inplace=True)

    # FIXME: handle the case where more than one file is loaded into a single TrajDataset object

    # rearrange columns
    traj_dataset.data[["frame_id", "agent_id", "pos_x", "pos_y"]] = \
        raw_dataset[["frame_id", "agent_id", "pos_x", "pos_y"]]

    traj_dataset.data["scene_id"] = kwargs.get("scene_id", 0)

    # calculate velocities + perform some checks
    if 'stanford' in path:
        fps = 30
    elif 'crowd' in path or 'biwi' in path:
        fps = 16
    else:
        fps = 7
    sampling_rate = kwargs.get('sampling_rate', 1)
    use_kalman = kwargs.get('use_kalman', False)
    traj_dataset.postprocess(fps=fps,
                             sampling_rate=sampling_rate,
                             use_kalman=use_kalman)

    return traj_dataset
Example No. 14
def run():

    traj_dataset = TrajDataset()
    traj_dataset.add_agent(12, 10, 1., 0., 125.)

    print("\n\n-----------------------------\nRunning test crowd\n-----------------------------")

    # f0 = frame(0)
    #
    # print("\n1:\n",f0.get_frame())
    #
    # f0.add_agent(12,1,0,125)
    # f0.add_agent(15,10,52,15)
    #
    # print("\n2:\n",f0.get_frame())
    #
    # f0.set_agents_list_size(1)
    # f0.add_agent(132,15,25,1225)
    #
    # print("\n3:\n",f0.get_frame())
    #
    # f0.reset_frame()
    #
    # print("\n4:\n",f0.get_frame())
    #
    # t = TrajectoryDataset()
    #
    # for i in range(0,3):
    #     f = frame(i+5)
    #     f.add_agent(15,random(),random(),random())
    #     f.add_agent(17,random(),random(),random())
    #     t.add_frame(f)
    #
    # traj = t.get_trajectories()
    # print("Trajectory:\n1:\n",traj)

    # t.reset_trajectory()
    # traj = t.get_trajectories()
    # print("\n3: reset \n",traj)


    print("\n\n-----------------------------\nTest crowd done\n-----------------------------")
Example No. 15
def num_trajlets(dataset: TrajDataset, length=4.8, overlap=2):
    trajs = dataset.get_trajectories(label="pedestrian")
    trajlets = split_trajectories(trajs,
                                  length,
                                  overlap,
                                  static_filter_thresh=0.)
    non_static_trajlets = split_trajectories(trajs,
                                             length,
                                             overlap,
                                             static_filter_thresh=1.)
    return len(trajlets), len(non_static_trajlets)
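A usage sketch, assuming a dataset loaded with one of the loaders above (the file path is hypothetical); the second count presumably excludes near-static trajlets via the non-zero static_filter_thresh:

ds = load_eth('datasets/ETH/seq_eth/obsmat.txt')  # hypothetical local path
n_all, n_non_static = num_trajlets(ds)
print(n_all, n_non_static)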
Example No. 16
def load_wildtrack(path: str, **kwargs):
    """
    :param path: path to annotations dir
    :param kwargs:
    :return:
    """
    traj_dataset = TrajDataset()

    files_list = sorted(glob.glob(path + "/*.json"))
    raw_data = []
    for file_name in files_list:
        frame_id = int(os.path.basename(file_name).replace('.json', ''))

        with open(file_name, 'r') as json_file:
            json_content = json_file.read()
            annots_list = json.loads(json_content)
            for annot in annots_list:
                person_id = annot["personID"]
                position_id = annot["positionID"]

                X = -3.0 + 0.025 * (position_id % 480)
                Y = -9.0 + 0.025 * (position_id // 480)  # integer row index ('/' would shift Y)
                raw_data.append([frame_id, person_id, X, Y])

    csv_columns = ["frame_id", "agent_id", "pos_x", "pos_y"]
    raw_dataset = pd.DataFrame(np.array(raw_data), columns=csv_columns)

    traj_dataset.title = kwargs.get('title', "Grand Central")

    # copy columns
    traj_dataset.data[["frame_id", "agent_id", "pos_x", "pos_y"]] = \
        raw_dataset[["frame_id", "agent_id", "pos_x", "pos_y"]]

    traj_dataset.data["scene_id"] = kwargs.get('scene_id', 0)
    traj_dataset.data["label"] = "pedestrian"

    # post-process
    fps = kwargs.get('fps', 10)
    sampling_rate = kwargs.get('sampling_rate', 1)
    use_kalman = kwargs.get('use_kalman', False)
    traj_dataset.postprocess(fps=fps,
                             sampling_rate=sampling_rate,
                             use_kalman=use_kalman)

    return traj_dataset
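positionID indexes a 480-column ground-plane grid with 2.5 cm cells, so decoding it is a divmod; a standalone check with the constants from the loader above:

row, col = divmod(76800, 480)   # an arbitrary positionID (= 160 * 480)
X = -3.0 + 0.025 * col          # -> -3.0
Y = -9.0 + 0.025 * row          # -> -5.0
print(X, Y)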
Example No. 17
def load_eth(path, **kwargs):
    traj_dataset = TrajDataset()

    csv_columns = [
        "frame_id", "agent_id", "pos_x", "pos_z", "pos_y", "vel_x", "vel_z",
        "vel_y"
    ]
    # read from csv => fill traj table
    raw_dataset = pd.read_csv(path, sep=r"\s+", header=None, names=csv_columns)

    traj_dataset.title = kwargs.get('title', "no_title")

    # copy columns
    traj_dataset.data[["frame_id", "agent_id",
                       "pos_x", "pos_y",
                       "vel_x", "vel_y"
                       ]] = \
        raw_dataset[["frame_id", "agent_id",
                     "pos_x", "pos_y",
                     "vel_x", "vel_y"
                     ]]

    traj_dataset.data["scene_id"] = kwargs.get('scene_id', 0)
    traj_dataset.data["label"] = "pedestrian"

    # post-process
    fps = kwargs.get('fps', -1)
    if fps < 0:
        d_frame = np.diff(pd.unique(raw_dataset["frame_id"]))
        # 2.5 Hz is the common annotation rate of the ETH/UCY datasets
        fps = d_frame[0] * 2.5

    sampling_rate = kwargs.get('sampling_rate', 1)
    use_kalman = kwargs.get('use_kalman', False)
    traj_dataset.postprocess(fps=fps,
                             sampling_rate=sampling_rate,
                             use_kalman=use_kalman)

    return traj_dataset
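With the default fps=-1 the frame rate is inferred from the frame-id stride: annotations run at 2.5 Hz, so a stride of d frames between consecutive annotated frames implies a video rate of d * 2.5. For instance (the stride value here is illustrative):

d_frame0 = 6            # e.g. obsmat frame ids spaced 6 apart
print(d_frame0 * 2.5)   # -> 15.0 fps implied by 2.5 Hz annotations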
Example No. 18
def density_vanilla_plot(dataset: TrajDataset):
    # density over the observed space of the dataset ([minx, maxx] x [miny, maxy])
    space = (dataset.bbox['x']['max'] - dataset.bbox['x']['min']) * \
            (dataset.bbox['y']['max'] - dataset.bbox['y']['min'])
    density_t = []

    frames = dataset.get_frames()
    for frame in frames:
        density_t.append(len(frame) / space)

    bins = np.linspace(0, 1, 40)
    hist, bins, patches = plt.hist(density_t,
                                   bins=bins,
                                   color='red',
                                   density=True,
                                   alpha=0.7)

    plt.title(dataset.title)
    plt.ylabel('distribution of density')
    plt.xlabel('Person per m^2')
Example No. 19
def speed_plot(dataset: TrajDataset):
    trajectories = dataset.get_trajectories()
    speeds = []
    for traj in trajectories:
        speeds_i = speed(traj)
        speeds.extend(speeds_i)

    bins = np.linspace(0, 2.5, 25)
    hist, bins, patches = plt.hist(speeds,
                                   bins,
                                   color='blue',
                                   density=True,
                                   alpha=0.7)
    # hist, bin_edges = np.histogram(ped_speeds, bins, density=True)

    plt.suptitle(dataset.title)
    plt.ylabel('histogram of speeds')
    plt.xlabel('m/s')
    plt.xlim([bins[0], bins[-1]])
    plt.ylim([0, 6])

    return hist
Example No. 20
def load_sdd(path, **kwargs):
    sdd_dataset = TrajDataset()
    sdd_dataset.title = "SDD"

    csv_columns = [
        "agent_id", "x_min", "y_min", "x_max", "y_max", "frame_id", "lost",
        "occluded", "generated", "label"
    ]
    scale = kwargs.get("scale", 1)

    # read from csv => fill traj table
    raw_dataset = pd.read_csv(path, sep=" ", header=None, names=csv_columns)
    raw_dataset["pos_x"] = scale * (raw_dataset["x_min"] +
                                    raw_dataset["x_max"]) / 2
    raw_dataset["pos_y"] = scale * (raw_dataset["y_min"] +
                                    raw_dataset["y_max"]) / 2

    drop_lost_frames = kwargs.get('drop_lost_frames', False)
    if drop_lost_frames:
        raw_dataset = raw_dataset.loc[raw_dataset["lost"] != 1]

    # copy columns
    sdd_dataset.data[["frame_id", "agent_id",
                      "pos_x", "pos_y",
                      # "x_min", "y_min", "x_max", "y_max",
                      "label", "lost", "occluded", "generated"]] = \
        raw_dataset[["frame_id", "agent_id",
                     "pos_x", "pos_y",
                     # "x_min", "y_min", "x_max", "y_max",
                     "label", "lost", "occluded", "generated"]]
    sdd_dataset.data["scene_id"] = kwargs.get("scene_id", 0)

    # calculate velocities + perform some checks
    fps = 30
    sampling_rate = kwargs.get('sampling_rate', 1)
    use_kalman = kwargs.get('use_kalman', False)
    sdd_dataset.postprocess(fps=fps,
                            sampling_rate=sampling_rate,
                            use_kalman=use_kalman)

    return sdd_dataset
Example No. 21
def pcf_plot(dataset: TrajDataset):
    frames = dataset.get_frames()
    # calc pcf on a dataset
    pcf_accum = []
    pcf_range = np.arange(
        0.2,  # starting radius
        8,  # end radius
        0.2)  # step size

    for frame in frames:
        pcf_values_t = pcf(frame[['pos_x', 'pos_y', 'pos_z']],
                           list(pcf_range),
                           sigma=0.25)
        if not len(pcf_accum):
            pcf_accum = pcf_values_t
        else:
            pcf_accum += pcf_values_t
    avg_pcf = pcf_accum / len(frames)

    plt.title(dataset.title)
    plt.ylabel('PCF')
    plt.xlabel('meter')
    plt.plot(pcf_range, avg_pcf, color='purple')  # plot against radius, not sample index
Example No. 22
def acceleration_plot(dataset: TrajDataset):
    trajectories = dataset.get_trajectories()
    accelerations = []
    for traj in trajectories:
        if len(traj) < 2: continue
        accelerations_i = acceleration(traj)
        accelerations.extend(accelerations_i)

    bins = np.linspace(-2.5, 2.5, 100)
    # hist, bin_edges = np.histogram(ped_accelerations, bins, density=True)

    hist, bins, patches = plt.hist(accelerations,
                                   bins,
                                   color='red',
                                   density=True,
                                   alpha=0.7)

    plt.title(dataset.title)
    plt.ylabel('histogram of accelerations')
    plt.xlabel('m/s^2')
    plt.xlim([-2.5, 2.5])
    plt.ylim([0, 5])

    return hist
Example No. 23
def load_edinburgh(path, **kwargs):
    traj_dataset = TrajDataset()
    traj_dataset.title = "Edinburgh"

    if os.path.isdir(path):
        files_list = sorted(glob.glob(path + "/*.txt"))
    elif os.path.exists(path):
        files_list = [path]
    else:
        raise ValueError("loadEdinburgh: input file is invalid")

    csv_columns = ['centre_x', 'centre_y', 'frame', 'agent_id', 'length']

    # read from csv => fill traj table
    raw_dataset = []
    scene = []
    last_scene_frame = 0
    new_id = 0
    scale = 0.0247
    # load data from all files
    for file in files_list:
        data = pd.read_csv(file, sep=r"\n|=", header=None, index_col=None,
                           engine='python')  # multi-char regex sep needs the python engine
        data.reset_index(inplace=True)
        properties = data[data['index'].str.startswith('Properties')]
        data = data[data['index'].str.startswith('TRACK')]

        #reconstruct the data in arrays
        track_data = []
        print("reading:" + str(file))
        for row in range(len(data)):
            one_prop = properties.iloc[row, 1].split(";")
            one_prop.pop()
            one_prop = [
                ast.literal_eval(i.replace(' ', ',')) for i in one_prop
            ]
            track_length = one_prop[0][0]

            one_track = data.iloc[row, 1].split(";")
            one_track.pop()
            one_track[0] = one_track[0].replace('[[', '[')
            one_track[-1] = one_track[-1].replace(']]', ']')
            one_track = np.array([
                ast.literal_eval(i.replace(' [', '[').replace(' ', ','))
                for i in one_track
            ])
            one_track = np.c_[one_track,
                              np.ones(one_track.shape[0], dtype=int) * row,
                              track_length *
                              np.ones(one_track.shape[0], dtype=int)]
            track_data.extend(one_track)

        #clear repeated trajectories
        track_data_pd = pd.DataFrame(data=np.array(track_data),
                                     columns=csv_columns)

        clean_track = []
        for i in tqdm(track_data_pd.groupby('agent_id')):
            i[1].drop_duplicates(subset="frame", keep='first', inplace=True)
            # clean repeated trajectory for the same agent

            for j in i[1].groupby(['frame', 'centre_x', 'centre_y']):
                j[1].drop_duplicates(subset="frame",
                                     keep='first',
                                     inplace=True)
                clean_track.append(j[1])
        clean_track = np.concatenate(clean_track)

        # re-id: relabel agents with globally unique incremental ids
        uid = np.unique(clean_track[:, 3])
        copy_id = deepcopy(clean_track[:, 3])  # snapshot of the original ids

        for oneid in uid:
            oneid_idx = [idx for idx, x in enumerate(copy_id) if x == oneid]
            for j in oneid_idx:
                clean_track[j, 3] = new_id
            new_id += 1

        scene.extend([files_list.index(file)] * len(clean_track))

        raw_dataset.extend(clean_track.tolist())
    raw_dataset = pd.DataFrame(np.array(raw_dataset), columns=csv_columns)
    raw_dataset.reset_index(inplace=True, drop=True)

    #find homog matrix
    H = get_homog()
    #apply H matrix to the image point
    img_data = raw_dataset[["centre_x", "centre_y"]].values
    world_data = []
    for row in img_data:
        augImg_data = np.c_[[row], np.array([1])]
        world_data.append(np.matmul(H, augImg_data.reshape(3, 1)).tolist()[:2])

    raw_dataset["centre_x"] = np.array(world_data)[:, 0]
    raw_dataset["centre_y"] = np.array(world_data)[:, 1]

    traj_dataset.data[["frame_id", "agent_id", "pos_x",
                       "pos_y"]] = raw_dataset[[
                           "frame", "agent_id", "centre_x", "centre_y"
                       ]]
    traj_dataset.data["scene_id"] = kwargs.get("scene_id", scene)

    traj_dataset.data["label"] = "pedestrian"

    traj_dataset.title = kwargs.get('title', "Edinburgh")

    # post-process. The Edinburgh raw data does not include velocities; they are computed in postprocessing
    fps = kwargs.get('fps', 9)
    sampling_rate = kwargs.get('sampling_rate', 1)
    use_kalman = kwargs.get('use_kalman', False)
    traj_dataset.postprocess(fps=fps,
                             sampling_rate=sampling_rate,
                             use_kalman=use_kalman)
    print("finish")
    return traj_dataset
Example No. 24
def load_chaos(path, separator, **kwargs):
    traj_dataset = TrajDataset()
    # TODO
    #  ChAOS Style: 1 file per agent, pos_x, pos_y
    print("\nLoad Chaos style: not implemented yet\n")
    return traj_dataset
Example No. 25
def load_ind(path, **kwargs):
    traj_dataset = TrajDataset()
    # Note: we assume here that the path that is passed is the one to the tracks CSV.

    # Read the tracks
    raw_dataset = pd.read_csv(
        path,
        sep=",",
        header=0,
        names=[
            "recordingId", "trackId", "frame", "trackLifetime", "xCenter",
            "yCenter", "heading", "width", "length", "xVelocity", "yVelocity",
            "xAcceleration", "yAcceleration", "lonVelocity", "latVelocity",
            "lonAcceleration", "latAcceleration"
        ])

    # Read the recording data
    data_path = pathlib.Path(path)
    datadir_path = data_path.parent
    recording_path = str(datadir_path) + '/{:02d}_recordingMeta.csv'.format(
        raw_dataset['recordingId'][0])
    recording_data = pd.read_csv(recording_path,
                                 sep=",",
                                 header=0,
                                 names=[
                                     "recordingId", "locationId", "frameRate",
                                     "speedLimit", "weekday", "startTime",
                                     "duration", "numTracks", "numVehicles",
                                     "numVRUs", "latLocation", "lonLocation",
                                     "xUtmOrigin", "yUtmOrigin",
                                     "orthoPxToMeter"
                                 ])
    traj_dataset.title = kwargs.get('title', "inD")

    # Read the meta-tracks data
    tracks_path = str(datadir_path) + '/{:02d}_tracksMeta.csv'.format(
        raw_dataset['recordingId'][0])
    tracks_data = pd.read_csv(tracks_path,
                              sep=",",
                              header=0,
                              names=[
                                  "recordingId", "trackId", "initialFrame",
                                  "finalFrame", "numFrames", "width", "length",
                                  "class"
                              ])
    # Get the ids of pedestrians only
    ped_ids = tracks_data[tracks_data["class"] ==
                          "pedestrian"]["trackId"].values
    raw_dataset = raw_dataset[raw_dataset['trackId'].isin(ped_ids)]

    # Copy columns
    traj_dataset.data[["frame_id", "agent_id",
                      "pos_x", "pos_y", "vel_x", "vel_y"]] = \
        raw_dataset[["frame", "trackId",
                     "xCenter", "yCenter", "xVelocity", "yVelocity"]]

    traj_dataset.data["label"] = "pedestrian"
    scene_id = kwargs.get("scene_id", recording_data["locationId"][0])
    traj_dataset.data["scene_id"] = scene_id
    # print("location_id = ", recording_data["locationId"][0])

    # post-process
    fps = int(recording_data["frameRate"][0])  # fps = 25
    sampling_rate = kwargs.get('sampling_rate', 1)
    use_kalman = kwargs.get('use_kalman', False)
    traj_dataset.postprocess(fps=fps,
                             sampling_rate=sampling_rate,
                             use_kalman=use_kalman)
    return traj_dataset
Example No. 26
def grouping(dataset: TrajDataset):
    trajs = dataset.get_trajectories("pedestrian")
    trajlets = split_trajectories(trajs)
    for trajlet in trajlets:
        pass
    return
Example No. 27
def load_pets(path, **kwargs):
    """
    :param path: address of annotation file
    :param kwargs:
    :param calib_path: address of calibration file
    :return: TrajDataset object
    """
    traj_dataset = TrajDataset()

    annot_xtree = et.parse(path)
    annot_xroot = annot_xtree.getroot()  # dataset

    cp, cc = None, None  # calibration parameters

    # load calibration
    calib_path = kwargs.get('calib_path', "")
    if calib_path:
        cp = CameraParameters()
        cc = CalibrationConstants()

        calib_xtree = et.parse(calib_path)
        calib_xroot = calib_xtree.getroot()  # Camera

        geometry_node = calib_xroot.find("Geometry")
        width = int(geometry_node.attrib["width"])
        height = int(geometry_node.attrib["height"])

        cp.Ncx = float(geometry_node.attrib["ncx"])
        cp.Nfx = float(geometry_node.attrib["nfx"])
        cp.dx = float(geometry_node.attrib["dx"])
        cp.dy = float(geometry_node.attrib["dy"])
        cp.dpx = float(geometry_node.attrib["dpx"])
        cp.dpy = float(geometry_node.attrib["dpy"])

        intrinsic_node = calib_xroot.find("Intrinsic")
        cc.f = float(intrinsic_node.attrib["focal"])
        cc.kappa1 = float(
            intrinsic_node.attrib["kappa1"])  # 1st order radial distortion

        cp.Cx = float(intrinsic_node.attrib["cx"])
        cp.Cy = float(intrinsic_node.attrib["cy"])
        cp.sx = float(intrinsic_node.attrib["sx"])

        extrinsic_node = calib_xroot.find("Extrinsic")
        cc.Tx = float(extrinsic_node.attrib["tx"])
        cc.Ty = float(extrinsic_node.attrib["ty"])
        cc.Tz = float(extrinsic_node.attrib["tz"])
        cc.Rx = float(extrinsic_node.attrib["rx"])
        cc.Ry = float(extrinsic_node.attrib["ry"])
        cc.Rz = float(extrinsic_node.attrib["rz"])

        cc.calc_rr()  # Calculate Rotation Matrix

    loaded_data = []  # frame_id, agent_id, pos_x, pos_y, xc, yc, h, w
    for frame_node in annot_xroot:
        objectlist_node = frame_node.find("objectlist")  # .text
        object_nodes = objectlist_node.findall("object")
        frame_id = int(frame_node.attrib.get("number"))

        for obj_node in object_nodes:
            agent_id = obj_node.attrib["id"]

            box_node = obj_node.find("box")
            xc = float(box_node.attrib["xc"])
            yc = float(box_node.attrib["yc"])
            h = float(box_node.attrib["h"])
            w = float(box_node.attrib["w"])

            x_ground = xc
            y_ground = yc + h / 2

            if cp:
                pos_x, pos_y = image_coord_to_world_coord(
                    x_ground, y_ground, 0, cp, cc)
            else:
                pos_x, pos_y = np.nan, np.nan

            loaded_data.append([
                frame_id, agent_id, pos_x / 1000., pos_y / 1000., xc, yc, h, w
            ])

    data_columns = [
        "frame_id", "agent_id", "pos_x", "pos_y", "xc", "yc", "h", "w"
    ]
    raw_dataset = pd.DataFrame(np.array(loaded_data), columns=data_columns)

    traj_dataset.title = kwargs.get('title', "PETS")

    # copy columns
    traj_dataset.data[["frame_id", "agent_id",
                       "pos_x", "pos_y"]] = \
        raw_dataset[["frame_id", "agent_id",
                     "pos_x", "pos_y"]]
    traj_dataset.data["scene_id"] = kwargs.get('scene_id', 0)
    traj_dataset.data["label"] = "pedestrian"

    # post-process
    fps = kwargs.get('fps', 7)
    sampling_rate = kwargs.get('sampling_rate', 1)
    use_kalman = kwargs.get('use_kalman', False)
    traj_dataset.postprocess(fps=fps,
                             sampling_rate=sampling_rate,
                             use_kalman=use_kalman)

    return traj_dataset
Example No. 28
def get_datasets(opentraj_root, dataset_names):
    datasets = {}

    # Make a temp dir to cache preprocessed TrajDatasets (so postprocessing is not repeated)
    trajdataset_dir = os.path.join(opentraj_root, 'trajdatasets__temp')
    if not os.path.exists(trajdataset_dir): os.makedirs(trajdataset_dir)

    for dataset_name in dataset_names:
        dataset_h5_file = os.path.join(trajdataset_dir, dataset_name + '.h5')
        if os.path.exists(dataset_h5_file):
            datasets[dataset_name] = TrajDataset()
            datasets[dataset_name].data = pd.read_pickle(dataset_h5_file)
            datasets[dataset_name].title = dataset_name
            print("loading dataset from pre-processed file: ", dataset_h5_file)
            continue

        print("Loading dataset:", dataset_name)

        # ========== ETH ==============
        if 'eth-univ' == dataset_name.lower():
            eth_univ_root = os.path.join(opentraj_root, 'datasets/ETH/seq_eth/obsmat.txt')
            datasets[dataset_name] = load_eth(eth_univ_root, title=dataset_name, scene_id='Univ',
                                              use_kalman=True)

        elif 'eth-hotel' == dataset_name.lower():
            eth_hotel_root = os.path.join(opentraj_root, 'datasets/ETH/seq_hotel/obsmat.txt')
            datasets[dataset_name] = load_eth(eth_hotel_root, title=dataset_name, scene_id='Hotel')
        # ******************************

        # ========== UCY ==============
        elif 'ucy-zara' == dataset_name.lower():  # all 3 zara sequences
            zara01_dir = os.path.join(opentraj_root, 'datasets/UCY/zara01')
            zara02_dir = os.path.join(opentraj_root, 'datasets/UCY/zara02')
            zara03_dir = os.path.join(opentraj_root, 'datasets/UCY/zara03')
            zara_01_ds = load_crowds(zara01_dir + '/annotation.vsp',
                                     homog_file=zara01_dir + '/H.txt',
                                     scene_id='1', use_kalman=True)
            zara_02_ds = load_crowds(zara02_dir + '/annotation.vsp',
                                     homog_file=zara02_dir + '/H.txt',
                                     scene_id='2', use_kalman=True)
            zara_03_ds = load_crowds(zara03_dir + '/annotation.vsp',
                                     homog_file=zara03_dir + '/H.txt',
                                     scene_id='3', use_kalman=True)
            datasets[dataset_name] = merge_datasets([zara_01_ds, zara_02_ds, zara_03_ds], dataset_name)

        elif 'ucy-univ' == dataset_name.lower():  # all 3 sequences
            st001_dir = os.path.join(opentraj_root, 'datasets/UCY/students01')
            st003_dir = os.path.join(opentraj_root, 'datasets/UCY/students03')
            uni_ex_dir = os.path.join(opentraj_root, 'datasets/UCY/uni_examples')
            #st001_ds = load_Crowds(st001_dir + '/students001.txt',homog_file=st001_dir + '/H.txt',scene_id='1',use_kalman=True)

            st001_ds = load_crowds(st001_dir + '/annotation.vsp',
                                   homog_file=st003_dir + '/H.txt',
                                   scene_id='1', use_kalman=True) 

            st003_ds = load_crowds(st003_dir + '/annotation.vsp',
                                   homog_file=st003_dir + '/H.txt',
                                   scene_id='3', use_kalman=True)
            uni_ex_ds = load_crowds(uni_ex_dir + '/annotation.vsp',
                                    homog_file=st003_dir + '/H.txt',
                                    scene_id='ex', use_kalman=True)
            datasets[dataset_name] = merge_datasets([st001_ds, st003_ds, uni_ex_ds], dataset_name)

        elif 'ucy-zara1' == dataset_name.lower():
            zara01_root = os.path.join(opentraj_root, 'datasets/UCY/zara01/obsmat.txt')
            datasets[dataset_name] = load_eth(zara01_root, title=dataset_name)

        elif 'ucy-zara2' == dataset_name.lower():
            zara02_root = os.path.join(opentraj_root, 'datasets/UCY/zara02/obsmat.txt')
            datasets[dataset_name] = load_eth(zara02_root, title=dataset_name)

        elif 'ucy-univ3' == dataset_name.lower():
            students03_root = os.path.join(opentraj_root, 'datasets/UCY/students03/obsmat.txt')
            datasets[dataset_name] = load_eth(students03_root, title=dataset_name)
        # ******************************

        # ========== HERMES ==============
        elif 'bn' in dataset_name.lower().split('-'):
            [_, exp_flow, cor_size] = dataset_name.split('-')
            if exp_flow == '1d' and cor_size == 'w180':   # 'Bottleneck-udf-180'
                bottleneck_path = os.path.join(opentraj_root, 'datasets/HERMES/Corridor-1D/uo-180-180-120.txt')
            elif exp_flow == '2d' and cor_size == 'w160':  # 'Bottleneck-bdf-160'
                bottleneck_path = os.path.join(opentraj_root, "datasets/HERMES/Corridor-2D/bo-360-160-160.txt")
            else:
                print("Unknown Bottleneck dataset!")
                continue
            datasets[dataset_name] = load_bottleneck(bottleneck_path, sampling_rate=6,
                                                     use_kalman=True,
                                                     title=dataset_name)
        # ******************************

        # ========== PETS ==============
        elif 'pets-s2l1' == dataset_name.lower():
            pets_root = os.path.join(opentraj_root, 'datasets/PETS-2009/data')
            datasets[dataset_name] = load_pets(os.path.join(pets_root, 'annotations/PETS2009-S2L1.xml'),  #Pat:was PETS2009-S2L2
                                               calib_path=os.path.join(pets_root, 'calibration/View_001.xml'),
                                               sampling_rate=2,
                                               title=dataset_name)
        # ******************************

        # ========== GC ==============
        elif 'gc' == dataset_name.lower():
            gc_root = os.path.join(opentraj_root, 'datasets/GC/Annotation')
            datasets[dataset_name] = load_gcs(gc_root, world_coord=True, title=dataset_name,
                                              use_kalman=True
                                              )
        # ******************************

        # ========== InD ==============
        elif 'ind-1' == dataset_name.lower():
            ind_root = os.path.join(opentraj_root, 'datasets/InD/inD-dataset-v1.0/data')
            file_ids = range(7, 17 + 1)  # location_id = 1
            ind_1_datasets = []
            for id in file_ids:
                dataset_i = load_ind(os.path.join(ind_root, '%02d_tracks.csv' % id),
                                     scene_id='1-%02d' %id,
                                     sampling_rate=10,
                                     use_kalman=True)
                ind_1_datasets.append(dataset_i)
            datasets[dataset_name] = merge_datasets(ind_1_datasets, new_title=dataset_name)

        elif 'ind-2' == dataset_name.lower():
            ind_root = os.path.join(opentraj_root, 'datasets/InD/inD-dataset-v1.0/data')
            file_ids = range(18, 29 + 1)  # location_id = 2
            ind_2_datasets = []
            for id in file_ids:
                dataset_i = load_ind(os.path.join(ind_root, '%02d_tracks.csv' % id),
                                     scene_id='1-%02d' % id,
                                     sampling_rate=10,
                                     use_kalman=True)
                ind_2_datasets.append(dataset_i)
            datasets[dataset_name] = merge_datasets(ind_2_datasets, new_title=dataset_name)

        elif 'ind-3' == dataset_name.lower():
            ind_root = os.path.join(opentraj_root, 'datasets/InD/inD-dataset-v1.0/data')
            file_ids = range(30, 32 + 1)  # location_id = 3
            ind_3_datasets = []
            for id in file_ids:
                dataset_i = load_ind(os.path.join(ind_root, '%02d_tracks.csv' % id),
                                     scene_id='1-%02d' % id,
                                     sampling_rate=10,
                                     use_kalman=True)
                ind_3_datasets.append(dataset_i)
            datasets[dataset_name] = merge_datasets(ind_3_datasets, new_title=dataset_name)

        elif 'ind-4' == dataset_name.lower():
            ind_root = os.path.join(opentraj_root, 'datasets/InD/inD-dataset-v1.0/data')
            file_ids = range(0, 6 + 1)  # location_id = 4
            ind_4_datasets = []
            for id in file_ids:
                dataset_i = load_ind(os.path.join(ind_root, '%02d_tracks.csv' % id),
                                     scene_id='1-%02d' % id,
                                     sampling_rate=10,
                                     use_kalman=True)
                ind_4_datasets.append(dataset_i)
            datasets[dataset_name] = merge_datasets(ind_4_datasets, new_title=dataset_name)
        # ******************************

        # ========== KITTI ==============
        elif 'kitti' == dataset_name.lower():
            kitti_root = os.path.join(opentraj_root, 'datasets/KITTI/data')
            datasets[dataset_name] = load_kitti(kitti_root, title=dataset_name,
                                                use_kalman=True,
                                                sampling_rate=1)  # FixMe: apparently original_fps = 2.5
        # ******************************

        # ========== L-CAS ==============
        elif 'lcas-minerva' == dataset_name.lower():
            lcas_root = os.path.join(opentraj_root, 'datasets/L-CAS/data')
            datasets[dataset_name] = load_lcas(lcas_root, title=dataset_name,
                                               use_kalman=True,
                                               sampling_rate=1)  # FixMe: apparently original_fps = 2.5
        # ******************************

        # ========== Wild-Track ==============
        elif 'wildtrack' == dataset_name.lower():
            wildtrack_root = os.path.join(opentraj_root, 'datasets/Wild-Track/annotations_positions')
            datasets[dataset_name] = load_wildtrack(wildtrack_root, title=dataset_name,
                                                    use_kalman=True,
                                                    sampling_rate=1)  # original_annot_framerate=2
        # ******************************

        # ========== Edinburgh ==============
        elif 'edinburgh' in dataset_name.lower():
            edinburgh_dir = os.path.join(opentraj_root, 'datasets/Edinburgh/annotations')
            if 'edinburgh' == dataset_name.lower():   # all files
                # select 1-10 Sep
                Ed_selected_days = ['01Sep', '02Sep', '04Sep', '05Sep', '06Sep', '10Sep']
                partial_ds = []
                for selected_day in Ed_selected_days:
                    edinburgh_path = os.path.join(edinburgh_dir, 'tracks.%s.txt' % selected_day)
                    partial_ds.append(load_edinburgh(edinburgh_path, title=dataset_name,
                                                     use_kalman=True, scene_id=selected_day,
                                                     sampling_rate=4)  # original_framerate=9
                                      )
                datasets[dataset_name] = merge_datasets(partial_ds, dataset_name)
            else:
                seq_date = dataset_name.split('-')[1]
                edinburgh_path = os.path.join(edinburgh_dir, 'tracks.%s.txt' % seq_date)
                datasets[dataset_name] = load_edinburgh(edinburgh_path, title=dataset_name,
                                                        use_kalman=True,
                                                        sampling_rate=4)  # original_framerate=9
        # ******************************

        # ========== Town-Center ==============
        elif 'towncenter' == dataset_name.lower():
            towncenter_root = os.path.join(opentraj_root, 'datasets/Town-Center')
            # FixMe: might need Kalman Smoother
            datasets[dataset_name] = load_town_center(towncenter_root + '/TownCentre-groundtruth-top.txt',
                                                      calib_path=towncenter_root + '/TownCentre-calibration-ci.txt',
                                                      title=dataset_name,
                                                      use_kalman=True,
                                                      sampling_rate=10)  # original_framerate=25
            # ******************************

        # ========== SDD ==============
        elif 'sdd-' in dataset_name.lower():
            scene_name = dataset_name.split('-')[1]
            sdd_root = os.path.join(opentraj_root, 'datasets', 'SDD')
            annot_files_sdd = sorted(glob.glob(sdd_root + '/' + scene_name + "/**/annotations.txt", recursive=True))

            sdd_scales_yaml_file = os.path.join(sdd_root, 'estimated_scales.yaml')
            with open(sdd_scales_yaml_file, 'r') as f:
                scales_yaml_content = yaml.load(f, Loader=yaml.FullLoader)

            scene_datasets = []
            for file_name in annot_files_sdd:
                filename_parts = file_name.split('/')
                scene_name = filename_parts[-3]
                scene_video_id = filename_parts[-2]
                scale = scales_yaml_content[scene_name][scene_video_id]['scale']
                sdd_dataset_i = load_sdd(file_name, scale=scale,
                                         scene_id=scene_name + scene_video_id.replace('video', ''),
                                         drop_lost_frames=False,
                                         use_kalman=True,
                                         sampling_rate=12)  # original_framerate=30
                scene_datasets.append(sdd_dataset_i)
            scene_dataset = merge_datasets(scene_datasets, dataset_name)
            datasets[dataset_name] = scene_dataset
        # ******************************

        else:
            print("Error! invalid dataset name:", dataset_name)
            continue

        # cache to file (a pickled DataFrame, despite the .h5 extension)
        datasets[dataset_name].data.to_pickle(dataset_h5_file)
        print("saving dataset into pre-processed file: ", dataset_h5_file)

    return datasets
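A hedged usage sketch for the cache-aware loader above (the OpenTraj root path is a placeholder):

datasets = get_datasets('/path/to/OpenTraj', ['eth-univ', 'ucy-zara'])
for name, ds in datasets.items():
    print(name, len(ds.data), 'rows')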
Example No. 29
def load_gcs(path, **kwargs):
    traj_dataset = TrajDataset()
    raw_dataset = pd.DataFrame()

    file_list = sorted(os.listdir(path))
    raw_data_list = []  # the data to be converted into Pandas DataFrame

    selected_frames = kwargs.get("frames", range(0, 120001))
    agent_id_incremental = 0

    for annot_file in file_list:
        annot_file_full_path = os.path.join(path, annot_file)
        with open(annot_file_full_path, 'r') as f:
            annot_contents = f.read().split()

        agent_id = int(annot_file.replace('.txt', ''))
        agent_id_incremental += 1
        last_frame_id = -1

        for i in range(len(annot_contents) // 3):
            py = float(annot_contents[3 * i])
            px = float(annot_contents[3 * i + 1])
            frame_id = int(annot_contents[3 * i + 2])

            # there are trajectory files with non-continuous timestamps
            # they need to be counted as different agents
            if last_frame_id > 0 and (frame_id - last_frame_id) > 20:
                agent_id_incremental += 1
            last_frame_id = frame_id

            if selected_frames.start <= frame_id < selected_frames.stop:
                raw_data_list.append([frame_id, agent_id_incremental, px, py])

    csv_columns = ["frame_id", "agent_id", "pos_x", "pos_y"]
    raw_data_df = pd.DataFrame(np.stack(raw_data_list), columns=csv_columns)

    raw_data_df_groupby = raw_data_df.groupby("agent_id")
    trajs = [g for _, g in raw_data_df_groupby]

    interp_dfs = []
    for ii, tr in enumerate(trajs):
        if len(tr) < 2: continue
        # interpolate frames (2x up-sampling)
        interp_F = np.arange(tr["frame_id"].iloc[0], tr["frame_id"].iloc[-1],
                             10).astype(int)
        interp_X = interp1d(tr["frame_id"], tr["pos_x"], kind='linear')
        interp_Y = interp1d(tr["frame_id"], tr["pos_y"], kind='linear')
        agent_id = tr["agent_id"].iloc[0]
        interp_dfs.append(
            pd.DataFrame({
                "frame_id": interp_F,
                "agent_id": agent_id,
                "pos_x": interp_X(interp_F),
                "pos_y": interp_Y(interp_F)
            }))
    # DataFrame.append was removed in pandas 2.0; build a list and concat instead
    raw_dataset = pd.concat(interp_dfs)
    raw_dataset = raw_dataset.reset_index()
    # homog = []
    # homog_file = kwargs.get("homog_file", "")
    # if os.path.exists(homog_file):
    #     with open(homog_file) as f:
    #         homog_str = f.read()
    #         homog = np.array(json.loads(homog_str)['homog'])
    # else:
    homog = [[4.97412897e-02, -4.24730883e-02, 7.25543911e+01],
             [1.45017874e-01, -3.35678711e-03, 7.97920970e+00],
             [1.36068797e-03, -4.98339188e-05, 1.00000000e+00]]
    # homog = np.eye(3)

    world_coords = image_to_world(raw_dataset[["pos_x", "pos_y"]].to_numpy(),
                                  homog)
    raw_dataset[["pos_x", "pos_y"]] = pd.DataFrame(world_coords * 0.8)

    # copy columns
    traj_dataset.data[["frame_id", "agent_id", "pos_x", "pos_y"]] = \
        raw_dataset[["frame_id", "agent_id", "pos_x", "pos_y"]]

    traj_dataset.title = kwargs.get('title', "Grand Central")
    traj_dataset.data["scene_id"] = kwargs.get('scene_id', 0)
    traj_dataset.data["label"] = "pedestrian"
    fps = kwargs.get('fps', 25)

    # post-process
    sampling_rate = kwargs.get('sampling_rate', 1)
    use_kalman = kwargs.get('use_kalman', False)
    traj_dataset.postprocess(fps=fps,
                             sampling_rate=sampling_rate,
                             use_kalman=use_kalman)

    # interpolate = kwargs.get('interpolate', False)
    # if interpolate:
    #     traj_dataset.interpolate_frames()

    return traj_dataset
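image_to_world applies the 3x3 homography with the usual homogeneous normalization; a standalone sketch of that mapping for a single pixel (my own helper, equivalent in spirit to the library call):

import numpy as np

def apply_homography(H, uv):
    x, y, w = np.asarray(H) @ np.array([uv[0], uv[1], 1.0])
    return x / w, y / w      # divide out the homogeneous coordinate

H = [[4.97412897e-02, -4.24730883e-02, 7.25543911e+01],
     [1.45017874e-01, -3.35678711e-03, 7.97920970e+00],
     [1.36068797e-03, -4.98339188e-05, 1.00000000e+00]]
print(apply_homography(H, (100, 200)))  # pixel -> world (metres, before the 0.8 rescale)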