예제 #1
0
def load_cff(path, **kwargs):
    """Load a CFF train-terminal trajectory file into a TrajDataset.

    :param path: path to the raw csv file.
        sample line: 2012-09-18T06:25:00:036;PIE;17144;50515;1
        fields: date 'T' time ; place ; x_mm ; y_mm ; agent_id
    :param kwargs: title, scene_id, sampling_rate, use_kalman
    :return: TrajDataset object
    """
    traj_dataset = TrajDataset()

    # Normalize every field separator ('T', ':', ';', newline) to '-' so the
    # whole file becomes one flat '-'-separated token stream with 11 tokens
    # per line: year month day hour min sec msec place x_mm y_mm agent_id.
    with open(path, 'r') as inp_file:
        file_content = inp_file.read()
        file_content = file_content.replace('T',
                                            '-').replace(':', '-').replace(
                                                ';', '-').replace('\n', '-')
    # FIX: the original `segments.remove('')` dropped only the FIRST empty
    # token (and raised ValueError when none existed); trailing newlines or
    # blank lines produce several empty tokens, so filter them all out.
    segments = [seg for seg in file_content.split('-') if seg != '']
    year = np.array(segments[0::11], dtype=int)
    month = np.array(segments[1::11], dtype=int)
    day = np.array(segments[2::11], dtype=int)
    hour = np.array(segments[3::11], dtype=int)
    minute = np.array(segments[4::11], dtype=int)
    second = np.array(segments[5::11], dtype=int)
    milli_sec = np.array(segments[6::11], dtype=int)
    place = np.array(segments[7::11], dtype=str)
    x_mm = np.array(segments[8::11], dtype=float)
    y_mm = np.array(segments[9::11], dtype=float)
    agent_id = np.array(segments[10::11], dtype=int)
    # timestamp in seconds; year and month are deliberately skipped
    # (recordings do not span month boundaries -- TODO confirm)
    timestamp = (
        (day * 24 + hour) * 60 + minute) * 60 + second + milli_sec / 1000.
    fps = 10

    traj_dataset.title = kwargs.get('title', "Train Terminal")

    raw_dataset = pd.DataFrame({
        "timestamp": timestamp,
        "frame_id": (timestamp * fps).astype(int),
        "agent_id": agent_id,
        "pos_x": x_mm / 1000.,   # mm -> m
        "pos_y": y_mm / 1000.,   # mm -> m
    })

    # raw_dataset["scene_id"] = place
    scene_id = kwargs.get('scene_id', 0)
    raw_dataset["scene_id"] = scene_id

    # copy columns
    traj_dataset.data[["scene_id", "timestamp", "frame_id", "agent_id",
                       "pos_x", "pos_y"]] = \
        raw_dataset[["scene_id", "timestamp", "frame_id", "agent_id",
                     "pos_x", "pos_y"]]

    traj_dataset.data["label"] = "pedestrian"

    # post-process
    sampling_rate = kwargs.get('sampling_rate', 1)
    use_kalman = kwargs.get('use_kalman',
                            True)  # use kalman smoother by default
    traj_dataset.postprocess(fps=fps,
                             sampling_rate=sampling_rate,
                             use_kalman=use_kalman)

    return traj_dataset
예제 #2
0
def load_hermes(path, **kwargs):
    """Load a HERMES bottleneck-experiment trajectory file.

    :param path: whitespace-separated file with columns
                 agent_id frame_id pos_x pos_y pos_z (positions in cm)
    :param kwargs: title, scene_id, fps, sampling_rate, use_kalman
    :return: TrajDataset object
    """
    dataset = TrajDataset()
    dataset.title = kwargs.get('title', "no_title")

    columns = ["agent_id", "frame_id", "pos_x", "pos_y", "pos_z"]
    raw_df = pd.read_csv(path, sep=r"\s+", header=None, names=columns)

    # convert positions from cm to meters
    for axis in ("pos_x", "pos_y"):
        raw_df[axis] = raw_df[axis] / 100.

    # copy the columns the trajectory table needs
    kept = ["frame_id", "agent_id", "pos_x", "pos_y"]
    dataset.data[kept] = raw_df[kept]
    dataset.data["scene_id"] = kwargs.get('scene_id', 0)
    dataset.data["label"] = "pedestrian"

    # derive velocities etc.
    dataset.postprocess(fps=kwargs.get('fps', 16),
                        sampling_rate=kwargs.get('sampling_rate', 1),
                        use_kalman=kwargs.get('use_kalman', False))

    return dataset
예제 #3
0
def load_trajnet(path, **kwargs):
    """Load a TrajNet-format annotation file.

    :param path: space-separated file with columns
                 frame_id agent_id pos_x pos_y ('?' marks missing values)
    :param kwargs: title, scene_id, sampling_rate, use_kalman
    :return: TrajDataset object
    """
    traj_dataset = TrajDataset()
    traj_dataset.title = kwargs.get('title', "TrajNet")

    csv_columns = ["frame_id", "agent_id", "pos_x", "pos_y"]

    # read from csv => fill traj
    raw_dataset = pd.read_csv(path, sep=" ", header=None, names=csv_columns)
    raw_dataset.replace('?', np.nan, inplace=True)
    raw_dataset.dropna(inplace=True)
    # FIX: when '?' placeholders are present, pandas parses the affected
    # columns as strings; convert everything back to numbers after dropping
    # the missing rows (no-op for files without '?').
    raw_dataset = raw_dataset.apply(pd.to_numeric)

    # FIXME: in the cases you load more than one file into a TrajDataset Object

    # rearrange columns
    traj_dataset.data[["frame_id", "agent_id", "pos_x", "pos_y"]] = \
        raw_dataset[["frame_id", "agent_id", "pos_x", "pos_y"]]

    traj_dataset.data["scene_id"] = kwargs.get("scene_id", 0)

    # calculate velocities + perform some checks;
    # guess the annotation fps from the source dataset named in the path
    if 'stanford' in path:
        fps = 30
    elif 'crowd' in path or 'biwi' in path:
        fps = 16
    else:
        fps = 7
    sampling_rate = kwargs.get('sampling_rate', 1)
    use_kalman = kwargs.get('use_kalman', False)
    traj_dataset.postprocess(fps=fps,
                             sampling_rate=sampling_rate,
                             use_kalman=use_kalman)

    return traj_dataset
예제 #4
0
def load_crowds(path, **kwargs):
    """Load a UCY 'crowds' annotation file via CrowdLoader.

    :param path: string, path to folder
    :param kwargs: homog_file, title, scene_id, fps, sampling_rate, use_kalman
    :return: TrajDataset object
    """
    # use the provided homography if the file exists, identity otherwise
    homog_file = kwargs.get("homog_file", "")
    if os.path.exists(homog_file):
        Homog = np.loadtxt(homog_file)
    else:
        Homog = np.eye(3)

    entries = CrowdLoader(Homog).load(path)
    raw_dataset = pd.DataFrame()
    raw_dataset["frame_id"] = [e.frame for e in entries]
    raw_dataset["agent_id"] = [e.pedestrian for e in entries]
    raw_dataset["pos_x"] = [e.x for e in entries]
    raw_dataset["pos_y"] = [e.y for e in entries]

    traj_dataset = TrajDataset()
    traj_dataset.title = kwargs.get('title', "Crowds")

    # copy columns
    kept = ["frame_id", "agent_id", "pos_x", "pos_y"]
    traj_dataset.data[kept] = raw_dataset[kept]
    traj_dataset.data["scene_id"] = kwargs.get('scene_id', 0)
    traj_dataset.data["label"] = "pedestrian"

    # post-process: derive velocities etc.
    traj_dataset.postprocess(fps=kwargs.get('fps', 25),
                             sampling_rate=kwargs.get('sampling_rate', 1),
                             use_kalman=kwargs.get('use_kalman', False))

    return traj_dataset
예제 #5
0
def load_sdd_dir(path: str, **kwargs):
    """Load every SDD annotation file found below `path` into one dataset.

    Expects a layout of <path>/<scene_name>/<videoN>/annotations.txt plus an
    'estimated_scales.yaml' file at the root giving pixel-to-meter scales.

    :param path: root directory of the Stanford Drone Dataset annotations
    :param kwargs: sampling_rate, use_kalman
    :return: TrajDataset object with the concatenated scenes
    """
    search_filter_str = "**/annotations.txt"
    if not path.endswith("/"):
        search_filter_str = "/" + search_filter_str
    files_list = sorted(glob.glob(path + search_filter_str, recursive=True))

    scales_yaml_file = os.path.join(path, 'estimated_scales.yaml')
    with open(scales_yaml_file, 'r') as f:
        scales_yaml_content = yaml.load(f, Loader=yaml.FullLoader)

    partial_datasets = []
    for file in files_list:
        # FIX: split on the platform path separator; the original '/' split
        # broke on Windows, where glob returns backslash-separated paths.
        dir_names = os.path.normpath(file).split(os.sep)
        scene_name = dir_names[-3]
        scene_video_id = dir_names[-2]
        scale = scales_yaml_content[scene_name][scene_video_id]['scale']

        partial_dataset = load_sdd(file,
                                   scale=scale,
                                   scene_id=scene_name +
                                   scene_video_id.replace('video', ''))
        partial_datasets.append(partial_dataset.data)

    traj_dataset = TrajDataset()
    traj_dataset.data = pd.concat(partial_datasets)

    # SDD videos are recorded at 30 fps
    fps = 30
    sampling_rate = kwargs.get('sampling_rate', 1)
    use_kalman = kwargs.get('use_kalman', False)
    traj_dataset.postprocess(fps=fps,
                             sampling_rate=sampling_rate,
                             use_kalman=use_kalman)
    return traj_dataset
예제 #6
0
def load_lcas(path, **kwargs):
    """Load the L-CAS people-tracking dataset (minerva subset).

    :param path: dataset root; expects minerva/**/data.csv files underneath
    :param kwargs: title, fps, sampling_rate, use_kalman
    :return: TrajDataset object
    """
    traj_dataset = TrajDataset()
    traj_dataset.title = kwargs.get('title', "LCAS")

    # FIX: '**' only matches nested directories when recursive=True; without
    # it the pattern silently behaved like a single '*' level and missed
    # deeper data.csv files.
    minerva_files_list = glob.glob(path + "/minerva/**/data.csv",
                                   recursive=True)
    minerva_columns = [
        'frame_id', 'person_id', 'pos_x', 'pos_y', 'rot_z', 'rot_w', 'scene_id'
    ]

    # read and concatenate every minerva data.csv
    minerva_parts = []
    for file in minerva_files_list:
        data = pd.read_csv(file, sep=",", header=None, names=minerva_columns)
        minerva_parts.append(data)
    minerva_raw_dataset = pd.concat(minerva_parts)
    minerva_raw_dataset['scene_id'] = 'minerva'
    minerva_raw_dataset.reset_index(inplace=True, drop=True)

    traj_dataset.data[["frame_id", "agent_id","pos_x", "pos_y","scene_id"]] = \
        minerva_raw_dataset[["frame_id", "person_id","pos_x","pos_y","scene_id"]]

    traj_dataset.data["label"] = "pedestrian"

    # post-process. For LCAS, raw data do not include velocity, velocity info is postprocessed
    fps = kwargs.get('fps', 2.5)
    sampling_rate = kwargs.get('sampling_rate', 1)
    use_kalman = kwargs.get('use_kalman', False)
    traj_dataset.postprocess(fps=fps,
                             sampling_rate=sampling_rate,
                             use_kalman=use_kalman)
    return traj_dataset
예제 #7
0
def load_kitti(path, **kwargs):
    """Load KITTI tracking annotations into a TrajDataset.

    :param path: dataset root containing label/, calib/ and oxts/ folders
    :param kwargs: fps, sampling_rate, use_kalman
    :return: TrajDataset object
    """
    traj_dataset = TrajDataset()
    traj_dataset.title = "KITTI"

    label_files = sorted(glob.glob(path + "/label/*.txt"))
    calib_files = sorted(glob.glob(path + "/calib/*.txt"))
    oxts_files = sorted(glob.glob(path + "/oxts/*.txt"))

    # per-scene raw tracks (left-camera coordinates), calibration and IMU data
    tracks = loadTrack(label_files)
    calibs = loadCalib(calib_files)
    imus = loadIMU(oxts_files)

    # camera -> world (IMU coordinates of each scene's first frame)
    world_pos = track_camToworld(tracks, calibs, imus)

    tracks = pd.concat(tracks)
    tracks.reset_index(inplace=True, drop=True)

    traj_dataset.data[["frame_id", "agent_id", "label", "scene_id"]] = \
        tracks[["frame", "agent_id", "type", "scene"]]
    traj_dataset.data[["pos_x", "pos_y", "pos_z"]] = \
        world_pos[["pos_x", "pos_y", "pos_z"]]

    # KITTI raw data has no velocities; postprocess() derives them
    traj_dataset.postprocess(fps=kwargs.get('fps', 10),
                             sampling_rate=kwargs.get('sampling_rate', 1),
                             use_kalman=kwargs.get('use_kalman', False))

    return traj_dataset
예제 #8
0
def load_town_center(path, **kwargs):
    """Load the Oxford Town-Centre tracks CSV into a TrajDataset.

    :param path: path to the tracks CSV file
    :param kwargs: calib_path (camera calibration file), title, fps,
                   sampling_rate, use_kalman
    :return: TrajDataset object
    """
    traj_dataset = TrajDataset()

    columns = ["personNumber", "frameNumber", "headValid", "bodyValid",
               "headLeft", "headTop", "headRight", "headBottom",
               "bodyLeft", "bodyTop", "bodyRight", "bodyBottom"]
    raw_dataset = pd.read_csv(path, sep=",", header=0, names=columns)

    # feet point = bottom-centre of the body bounding box
    raw_dataset["body_x"] = (raw_dataset["bodyLeft"] + raw_dataset["bodyRight"]) / 2.0
    raw_dataset["body_y"] = raw_dataset["bodyBottom"]
    # head point = centre of the head bounding box
    raw_dataset["head_x"] = (raw_dataset["headLeft"] + raw_dataset["headRight"]) / 2.0
    raw_dataset["head_y"] = (raw_dataset["headTop"] + raw_dataset["headBottom"]) / 2.0

    raw_dataset["label"] = "pedestrian"

    # camera calibration, then project the feet points onto the ground plane
    rvec, tvec, cameraMatrix, distCoeffs = \
        read_projection_parameters(kwargs.get('calib_path', 'none'))
    image_pts = np.array([raw_dataset["body_x"], raw_dataset["body_y"]]).T
    world_pts = obtainObjectPoints(image_pts, rvec, tvec,
                                   cameraMatrix, distCoeffs)
    raw_dataset['pos_x'] = world_pts[:, 0]
    raw_dataset['pos_y'] = world_pts[:, 1]
    raw_dataset['pos_z'] = world_pts[:, 2]

    # keep only rows whose body bounding box is valid
    raw_dataset = raw_dataset[raw_dataset.bodyValid == 1]

    traj_dataset.data[["frame_id", "agent_id", "pos_x", "pos_y"]] = \
        raw_dataset[["frameNumber", "personNumber", "pos_x", "pos_y"]]

    # FixMe: for debug
    traj_dataset.data[["body_x", "body_y"]] = \
        raw_dataset[["body_x", "body_y"]].astype(int)

    traj_dataset.title = kwargs.get('title', "Town-Center")
    traj_dataset.data["label"] = "pedestrian"

    # post-process: derive velocities etc.
    traj_dataset.postprocess(fps=kwargs.get('fps', 25),
                             sampling_rate=kwargs.get('sampling_rate', 1),
                             use_kalman=kwargs.get('use_kalman', False))

    return traj_dataset
예제 #9
0
def load_wildtrack(path: str, **kwargs):
    """Load WILDTRACK per-frame json annotations into a TrajDataset.

    :param path: path to annotations dir (one .json per frame)
    :param kwargs: title, scene_id, fps, sampling_rate, use_kalman
    :return: TrajDataset object
    """
    traj_dataset = TrajDataset()

    files_list = sorted(glob.glob(path + "/*.json"))
    raw_data = []
    for file_name in files_list:
        # the file name is the frame number
        frame_id = int(os.path.basename(file_name).replace('.json', ''))

        with open(file_name, 'r') as json_file:
            json_content = json_file.read()
            annots_list = json.loads(json_content)
            for annot in annots_list:
                person_id = annot["personID"]
                position_id = annot["positionID"]

                # positionID encodes a cell on a 480-column ground grid
                # (2.5 cm resolution, origin at (-3, -9) meters)
                X = -3.0 + 0.025 * (position_id % 480)
                # FIX: the row index needs integer division; with true
                # division ('/') every Y picked up a spurious fractional
                # offset from the column part of position_id.
                Y = -9.0 + 0.025 * (position_id // 480)
                raw_data.append([frame_id, person_id, X, Y])

    csv_columns = ["frame_id", "agent_id", "pos_x", "pos_y"]
    raw_dataset = pd.DataFrame(np.array(raw_data), columns=csv_columns)

    # FIX: default title said "Grand Central" (copy-paste from load_gcs)
    traj_dataset.title = kwargs.get('title', "WildTrack")

    # copy columns
    traj_dataset.data[["frame_id", "agent_id", "pos_x", "pos_y"]] = \
        raw_dataset[["frame_id", "agent_id", "pos_x", "pos_y"]]

    traj_dataset.data["scene_id"] = kwargs.get('scene_id', 0)
    traj_dataset.data["label"] = "pedestrian"

    # post-process
    fps = kwargs.get('fps', 10)
    sampling_rate = kwargs.get('sampling_rate', 1)
    use_kalman = kwargs.get('use_kalman', False)
    traj_dataset.postprocess(fps=fps,
                             sampling_rate=sampling_rate,
                             use_kalman=use_kalman)

    return traj_dataset
예제 #10
0
def load_sdd(path, **kwargs):
    """Load a single Stanford Drone Dataset annotations.txt file.

    :param path: path to an annotations.txt file
    :param kwargs: scale (pixel-to-meter factor), scene_id,
                   drop_lost_frames, sampling_rate, use_kalman
    :return: TrajDataset object
    """
    sdd_dataset = TrajDataset()
    sdd_dataset.title = "SDD"

    columns = ["agent_id", "x_min", "y_min", "x_max", "y_max",
               "frame_id", "lost", "occluded", "generated", "label"]
    raw_dataset = pd.read_csv(path, sep=" ", header=None, names=columns)

    # position = centre of the bounding box, scaled from pixels to meters
    scale = kwargs.get("scale", 1)
    raw_dataset["pos_x"] = scale * (raw_dataset["x_min"] + raw_dataset["x_max"]) / 2
    raw_dataset["pos_y"] = scale * (raw_dataset["y_min"] + raw_dataset["y_max"]) / 2

    # optionally drop annotations flagged as lost
    if kwargs.get('drop_lost_frames', False):
        raw_dataset = raw_dataset.loc[raw_dataset["lost"] != 1]

    # copy columns
    kept = ["frame_id", "agent_id",
            "pos_x", "pos_y",
            "label", "lost", "occluded", "generated"]
    sdd_dataset.data[kept] = raw_dataset[kept]
    sdd_dataset.data["scene_id"] = kwargs.get("scene_id", 0)

    # calculate velocities + perform some checks (SDD videos are 30 fps)
    sdd_dataset.postprocess(fps=30,
                            sampling_rate=kwargs.get('sampling_rate', 1),
                            use_kalman=kwargs.get('use_kalman', False))

    return sdd_dataset
예제 #11
0
def load_eth(path, **kwargs):
    """Load an ETH/UCY 'obsmat'-style annotation file.

    :param path: whitespace-separated file with columns
                 frame_id agent_id pos_x pos_z pos_y vel_x vel_z vel_y
    :param kwargs: title, scene_id, fps, sampling_rate, use_kalman
    :return: TrajDataset object
    """
    traj_dataset = TrajDataset()
    traj_dataset.title = kwargs.get('title', "no_title")

    columns = ["frame_id", "agent_id", "pos_x", "pos_z", "pos_y",
               "vel_x", "vel_z", "vel_y"]
    raw_dataset = pd.read_csv(path, sep=r"\s+", header=None, names=columns)

    # copy columns (the z axis is discarded)
    kept = ["frame_id", "agent_id", "pos_x", "pos_y", "vel_x", "vel_y"]
    traj_dataset.data[kept] = raw_dataset[kept]

    traj_dataset.data["scene_id"] = kwargs.get('scene_id', 0)
    traj_dataset.data["label"] = "pedestrian"

    fps = kwargs.get('fps', -1)
    if fps < 0:
        # infer the video fps from the frame-id stride:
        # 2.5 Hz is the common annotation rate for all ETH+UCY datasets
        frame_gaps = np.diff(pd.unique(raw_dataset["frame_id"]))
        fps = frame_gaps[0] * 2.5

    traj_dataset.postprocess(fps=fps,
                             sampling_rate=kwargs.get('sampling_rate', 1),
                             use_kalman=kwargs.get('use_kalman', False))

    return traj_dataset
예제 #12
0
def load_edinburgh(path, **kwargs):
    """Load the Edinburgh Informatics Forum pedestrian dataset.

    :param path: a directory of per-day .txt tracking files, or one such file
    :param kwargs: scene_id, title, fps, sampling_rate, use_kalman
    :return: TrajDataset object
    :raises ValueError: if `path` is neither a directory nor an existing file
    """
    traj_dataset = TrajDataset()
    traj_dataset.title = "Edinburgh"

    if os.path.isdir(path):
        files_list = sorted(glob.glob(path + "/*.txt"))
    elif os.path.exists(path):
        files_list = [path]
    else:
        raise ValueError("loadEdinburgh: input file is invalid")

    csv_columns = ['centre_x', 'centre_y', 'frame', 'agent_id', 'length']

    # read from csv => fill traj table
    raw_dataset = []
    scene = []
    # NOTE(review): last_scene_frame and scale are never used below
    last_scene_frame = 0
    new_id = 0
    scale = 0.0247
    # load data from all files
    for file in files_list:
        # each line is "TRACK.Rk=[[...];...]" or "Properties.Rk=[...];...";
        # splitting on '=' yields the key and the value part.
        # NOTE(review): the 'index' column access after reset_index assumes
        # pandas put the key part into the index -- verify against the
        # actual file format.
        data = pd.read_csv(file, sep="\n|=", header=None, index_col=None)
        data.reset_index(inplace=True)
        properties = data[data['index'].str.startswith('Properties')]
        data = data[data['index'].str.startswith('TRACK')]

        # reconstruct the data in arrays: one row per observation
        track_data = []
        print("reading:" + str(file))
        for row in range(len(data)):
            # properties value: ';'-separated tuples; the first entry's
            # first element is the track length
            one_prop = properties.iloc[row, 1].split(";")
            one_prop.pop()  # drop the empty token after the trailing ';'
            one_prop = [
                ast.literal_eval(i.replace(' ', ',')) for i in one_prop
            ]
            track_length = one_prop[0][0]

            # track value: ';'-separated "[x y f]" observations wrapped in
            # an extra pair of brackets that is stripped from both ends
            one_track = data.iloc[row, 1].split(";")
            one_track.pop()  # drop the empty token after the trailing ';'
            one_track[0] = one_track[0].replace('[[', '[')
            one_track[-1] = one_track[-1].replace(']]', ']')
            one_track = np.array([
                ast.literal_eval(i.replace(' [', '[').replace(' ', ','))
                for i in one_track
            ])
            # append two columns: the per-file row id (temporary agent id)
            # and the track length
            one_track = np.c_[one_track,
                              np.ones(one_track.shape[0], dtype=int) * row,
                              track_length *
                              np.ones(one_track.shape[0], dtype=int)]
            track_data.extend(one_track)

        # clear repeated trajectories
        track_data_pd = pd.DataFrame(data=np.array(track_data),
                                     columns=csv_columns)

        clean_track = []
        for i in tqdm(track_data_pd.groupby('agent_id')):
            i[1].drop_duplicates(subset="frame", keep='first', inplace=True)
            # clean repeated trajectory for the same agent
            # NOTE(review): in-place drop on a groupby slice -- pandas may
            # warn (SettingWithCopy) and the drop may not propagate; the
            # second grouping below appears to do the real de-duplication

            for j in i[1].groupby(['frame', 'centre_x', 'centre_y']):
                j[1].drop_duplicates(subset="frame",
                                     keep='first',
                                     inplace=True)
                clean_track.append(j[1])
        clean_track = np.concatenate(clean_track)

        # re-id: replace per-file row ids with globally unique agent ids
        uid = np.unique(clean_track[:, 3])
        ##added!!
        copy_id = deepcopy(clean_track[:, 3])

        for oneid in uid:
            # find all rows of this original id (from the untouched copy,
            # since column 3 is overwritten as we go)
            oneid_idx = [idx for idx, x in enumerate(copy_id) if x == oneid]
            for j in oneid_idx:
                clean_track[j, 3] = new_id
            new_id += 1

        # scene id = position of the file in files_list
        scene.extend([files_list.index(file)] * len(clean_track))

        raw_dataset.extend(clean_track.tolist())
    raw_dataset = pd.DataFrame(np.array(raw_dataset), columns=csv_columns)
    raw_dataset.reset_index(inplace=True, drop=True)

    # find homog matrix (image -> world)
    H = get_homog()
    # apply H matrix to each image point (homogeneous coordinates)
    img_data = raw_dataset[["centre_x", "centre_y"]].values
    world_data = []
    for row in img_data:
        augImg_data = np.c_[[row], np.array([1])]
        world_data.append(np.matmul(H, augImg_data.reshape(3, 1)).tolist()[:2])
    # NOTE(review): the result is not divided by the third homogeneous
    # coordinate -- confirm H's last row makes this unnecessary

    raw_dataset["centre_x"] = np.array(world_data)[:, 0]
    raw_dataset["centre_y"] = np.array(world_data)[:, 1]

    traj_dataset.data[["frame_id", "agent_id", "pos_x",
                       "pos_y"]] = raw_dataset[[
                           "frame", "agent_id", "centre_x", "centre_y"
                       ]]
    traj_dataset.data["scene_id"] = kwargs.get("scene_id", scene)

    traj_dataset.data["label"] = "pedestrian"

    traj_dataset.title = kwargs.get('title', "Edinburgh")

    # post-process. For Edinburgh, raw data do not include velocity, velocity info is postprocessed
    fps = kwargs.get('fps', 9)
    sampling_rate = kwargs.get('sampling_rate', 1)
    use_kalman = kwargs.get('use_kalman', False)
    traj_dataset.postprocess(fps=fps,
                             sampling_rate=sampling_rate,
                             use_kalman=use_kalman)
    print("finish")
    return traj_dataset
예제 #13
0
def load_pets(path, **kwargs):
    """Load a PETS-2009 XML annotation file.

    :param path: address of annotation file
    :param kwargs:
    :param  calib_path: address of calibration file (Tsai model); when
        missing, world positions are filled with NaN
    :return: TrajectoryDataset object
    """
    traj_dataset = TrajDataset()

    annot_xtree = et.parse(path)
    annot_xroot = annot_xtree.getroot()  # dataset

    cp, cc = None, None  # calibration parameters

    # load calibration
    calib_path = kwargs.get('calib_path', "")
    if calib_path:
        cp = CameraParameters()
        cc = CalibrationConstants()

        calib_xtree = et.parse(calib_path)
        calib_xroot = calib_xtree.getroot()  # Camera

        geometry_node = calib_xroot.find("Geometry")
        width = int(geometry_node.attrib["width"])
        height = int(geometry_node.attrib["height"])

        cp.Ncx = float(geometry_node.attrib["ncx"])
        cp.Nfx = float(geometry_node.attrib["nfx"])
        cp.dx = float(geometry_node.attrib["dx"])
        cp.dy = float(geometry_node.attrib["dy"])
        cp.dpx = float(geometry_node.attrib["dpx"])
        cp.dpy = float(geometry_node.attrib["dpy"])

        intrinsic_node = calib_xroot.find("Intrinsic")
        cc.f = float(intrinsic_node.attrib["focal"])
        cc.kappa1 = float(
            intrinsic_node.attrib["kappa1"])  # 1st order radial distortion

        cp.Cx = float(intrinsic_node.attrib["cx"])
        cp.Cy = float(intrinsic_node.attrib["cy"])
        cp.sx = float(intrinsic_node.attrib["sx"])

        extrinsic_node = calib_xroot.find("Extrinsic")
        cc.Tx = float(extrinsic_node.attrib["tx"])
        cc.Ty = float(extrinsic_node.attrib["ty"])
        cc.Tz = float(extrinsic_node.attrib["tz"])
        cc.Rx = float(extrinsic_node.attrib["rx"])
        cc.Ry = float(extrinsic_node.attrib["ry"])
        cc.Rz = float(extrinsic_node.attrib["rz"])

        cc.calc_rr()  # Calculate Rotation Matrix

    loaded_data = []  # frame_id, agent_id, pos_x, pos_y, xc, yc, h, w
    for frame_node in annot_xroot:
        objectlist_node = frame_node.find("objectlist")  # .text
        object_nodes = objectlist_node.findall("object")
        frame_id = int(frame_node.attrib.get("number"))

        for obj_node in object_nodes:
            # FIX: convert the id to int; keeping it as a string made
            # np.array(loaded_data) below promote the WHOLE array to string
            # dtype, so pos_x/pos_y ended up as text columns.
            agent_id = int(obj_node.attrib["id"])

            box_node = obj_node.find("box")
            xc = float(box_node.attrib["xc"])
            yc = float(box_node.attrib["yc"])
            h = float(box_node.attrib["h"])
            w = float(box_node.attrib["w"])

            # ground contact point: bottom-centre of the bounding box
            x_ground = xc
            y_ground = yc + h / 2

            if cp:
                pos_x, pos_y = image_coord_to_world_coord(
                    x_ground, y_ground, 0, cp, cc)
            else:
                pos_x, pos_y = np.nan, np.nan

            # world coordinates come back in mm => store meters
            loaded_data.append([
                frame_id, agent_id, pos_x / 1000., pos_y / 1000., xc, yc, h, w
            ])

    data_columns = [
        "frame_id", "agent_id", "pos_x", "pos_y", "xc", "yc", "h", "w"
    ]
    raw_dataset = pd.DataFrame(np.array(loaded_data), columns=data_columns)

    traj_dataset.title = kwargs.get('title', "PETS")

    # copy columns
    traj_dataset.data[["frame_id", "agent_id",
                       "pos_x", "pos_y"]] = \
        raw_dataset[["frame_id", "agent_id",
                     "pos_x", "pos_y"]]
    traj_dataset.data["scene_id"] = kwargs.get('scene_id', 0)
    traj_dataset.data["label"] = "pedestrian"

    # post-process
    fps = kwargs.get('fps', 7)
    sampling_rate = kwargs.get('sampling_rate', 1)
    use_kalman = kwargs.get('use_kalman', False)
    traj_dataset.postprocess(fps=fps,
                             sampling_rate=sampling_rate,
                             use_kalman=use_kalman)

    return traj_dataset
예제 #14
0
def load_gcs(path, **kwargs):
    """Load the Grand Central Station dataset.

    :param path: directory of per-agent annotation .txt files
        (each file holds repeated [y, x, frame_id] triplets)
    :param kwargs: frames (range of frame_ids to keep), title, scene_id,
                   sampling_rate, use_kalman
    :return: TrajDataset object
    """
    traj_dataset = TrajDataset()

    file_list = sorted(os.listdir(path))
    raw_data_list = []  # rows of [frame_id, agent_id, pos_x, pos_y]

    selected_frames = kwargs.get("frames", range(0, 120001))
    agent_id_incremental = 0

    for annot_file in file_list:
        annot_file_full_path = os.path.join(path, annot_file)
        with open(annot_file_full_path, 'r') as f:
            annot_contents = f.read().split()

        # file name is the original agent id (also validates the file name)
        agent_id = int(annot_file.replace('.txt', ''))
        agent_id_incremental += 1
        last_frame_id = -1

        for i in range(len(annot_contents) // 3):
            py = float(annot_contents[3 * i])
            px = float(annot_contents[3 * i + 1])
            frame_id = int(annot_contents[3 * i + 2])

            # there are trajectory files with non-continuous timestamps;
            # they need to be counted as different agents
            if last_frame_id > 0 and (frame_id - last_frame_id) > 20:
                agent_id_incremental += 1
            last_frame_id = frame_id

            if selected_frames.start <= frame_id < selected_frames.stop:
                raw_data_list.append([frame_id, agent_id_incremental, px, py])

    csv_columns = ["frame_id", "agent_id", "pos_x", "pos_y"]
    raw_data_df = pd.DataFrame(np.stack(raw_data_list), columns=csv_columns)

    # 2x up-sample each trajectory by linear interpolation (10-frame step)
    interp_parts = []
    for _, tr in raw_data_df.groupby("agent_id"):
        if len(tr) < 2:
            continue
        interp_F = np.arange(tr["frame_id"].iloc[0], tr["frame_id"].iloc[-1],
                             10).astype(int)
        interp_X_ = interp1d(tr["frame_id"], tr["pos_x"], kind='linear')(interp_F)
        interp_Y_ = interp1d(tr["frame_id"], tr["pos_y"], kind='linear')(interp_F)
        interp_parts.append(
            pd.DataFrame({
                "frame_id": interp_F,
                "agent_id": tr["agent_id"].iloc[0],
                "pos_x": interp_X_,
                "pos_y": interp_Y_
            }))
    # FIX: pd.DataFrame.append was deprecated and removed in pandas 2.0 =>
    # collect parts and concat once. Also removed the per-agent debug print
    # and the unused tr0_/tr1_ locals.
    if interp_parts:
        raw_dataset = pd.concat(interp_parts)
    else:
        raw_dataset = pd.DataFrame(columns=csv_columns)
    raw_dataset = raw_dataset.reset_index()

    # hard-coded image->world homography for this camera
    homog = [[4.97412897e-02, -4.24730883e-02, 7.25543911e+01],
             [1.45017874e-01, -3.35678711e-03, 7.97920970e+00],
             [1.36068797e-03, -4.98339188e-05, 1.00000000e+00]]

    world_coords = image_to_world(raw_dataset[["pos_x", "pos_y"]].to_numpy(),
                                  homog)
    # 0.8 is an empirical scale correction -- TODO confirm its origin
    raw_dataset[["pos_x", "pos_y"]] = pd.DataFrame(world_coords * 0.8)

    # copy columns
    traj_dataset.data[["frame_id", "agent_id", "pos_x", "pos_y"]] = \
        raw_dataset[["frame_id", "agent_id", "pos_x", "pos_y"]]

    traj_dataset.title = kwargs.get('title', "Grand Central")
    traj_dataset.data["scene_id"] = kwargs.get('scene_id', 0)
    traj_dataset.data["label"] = "pedestrian"

    # post-process (video is 30 fps; raw data has no velocities).
    # FIX: removed a dead `fps = kwargs.get('fps', 25)` that was immediately
    # overwritten by the hard-coded 30 below.
    fps = 30
    sampling_rate = kwargs.get('sampling_rate', 1)
    use_kalman = kwargs.get('use_kalman', False)
    traj_dataset.postprocess(fps=fps,
                             sampling_rate=sampling_rate,
                             use_kalman=use_kalman)

    return traj_dataset
예제 #15
0
def load_ind(path, **kwargs):
    """Load one inD recording (tracks CSV plus its meta files).

    :param path: path to the XX_tracks.csv file; the recording and tracks
        meta CSVs are expected in the same directory
    :param kwargs: title, scene_id, sampling_rate, use_kalman
    :return: TrajDataset object (pedestrian tracks only)
    """
    traj_dataset = TrajDataset()
    traj_dataset.title = kwargs.get('title', "inD")

    # read the tracks
    track_columns = [
        "recordingId", "trackId", "frame", "trackLifetime", "xCenter",
        "yCenter", "heading", "width", "length", "xVelocity", "yVelocity",
        "xAcceleration", "yAcceleration", "lonVelocity", "latVelocity",
        "lonAcceleration", "latAcceleration"
    ]
    raw_dataset = pd.read_csv(path, sep=",", header=0, names=track_columns)

    datadir_path = pathlib.Path(path).parent
    recording_id = raw_dataset['recordingId'][0]

    # read the recording meta data
    recording_path = str(datadir_path) + '/{:02d}_recordingMeta.csv'.format(
        recording_id)
    recording_columns = [
        "recordingId", "locationId", "frameRate", "speedLimit", "weekday",
        "startTime", "duration", "numTracks", "numVehicles", "numVRUs",
        "latLocation", "lonLocation", "xUtmOrigin", "yUtmOrigin",
        "orthoPxToMeter"
    ]
    recording_data = pd.read_csv(recording_path, sep=",", header=0,
                                 names=recording_columns)

    # read the per-track meta data
    tracks_path = str(datadir_path) + '/{:02d}_tracksMeta.csv'.format(
        recording_id)
    tracks_meta_columns = [
        "recordingId", "trackId", "initialFrame", "finalFrame", "numFrames",
        "width", "length", "class"
    ]
    tracks_data = pd.read_csv(tracks_path, sep=",", header=0,
                              names=tracks_meta_columns)

    # keep pedestrian tracks only
    ped_ids = tracks_data[tracks_data["class"] == "pedestrian"]["trackId"].values
    raw_dataset = raw_dataset[raw_dataset['trackId'].isin(ped_ids)]

    # copy columns
    traj_dataset.data[["frame_id", "agent_id",
                      "pos_x", "pos_y", "vel_x", "vel_y"]] = \
        raw_dataset[["frame", "trackId",
                     "xCenter", "yCenter", "xVelocity", "yVelocity"]]

    traj_dataset.data["label"] = "pedestrian"
    traj_dataset.data["scene_id"] = kwargs.get(
        "scene_id", recording_data["locationId"][0])

    # post-process at the recording's native frame rate (typically 25)
    traj_dataset.postprocess(fps=int(recording_data["frameRate"][0]),
                             sampling_rate=kwargs.get('sampling_rate', 1),
                             use_kalman=kwargs.get('use_kalman', False))
    return traj_dataset