Example #1
import os
import cv2
import numpy as np
import parseTrackletXML as xmlParser  # KITTI tracklet XML parser module

def write_label(transform, P2):
    # `transform` maps velodyne to rectified camera coordinates;
    # P2 is the left color camera projection matrix from the KITTI calibration.
    for trackletObj in xmlParser.parseXML(os.path.join(tracklet_path, 'tracklet_labels.xml')):
        for translation, rotation, state, occlusion, truncation, amtOcclusion, amtBorders, absoluteFrameNumber in trackletObj:
            label_file = label_path + str(absoluteFrameNumber).zfill(10) + '.txt'
            image_file = image_path + str(absoluteFrameNumber).zfill(10) + '.png'
            img = cv2.imread(image_file)
            img_xmax, img_ymax = img.shape[1], img.shape[0]

            # move the box center from velodyne to camera coordinates
            # (homogeneous coordinates, then de-homogenize)
            translation = np.append(translation, 1)
            translation = np.dot(transform, translation)
            translation = translation[:3] / translation[3]

            # convert velodyne yaw to camera-frame rotation_y, wrapped to [-pi, pi]
            rot = -(rotation[2] + np.pi / 2)
            if rot > np.pi:
                rot -= 2 * np.pi
            elif rot < -np.pi:
                rot += 2 * np.pi
            rot = round(rot, 2)

            local_rot = local_ori(translation, rot)

            bbox = obtain_2Dbox(trackletObj.size, translation, rot, P2, img_xmax, img_ymax)

            with open(label_file, 'a') as file_writer:
                line = ([trackletObj.objectType]
                        + [int(truncation), int(occlusion[0]), local_rot]
                        + bbox
                        + [round(size, 2) for size in trackletObj.size]
                        + [round(tran, 2) for tran in translation]
                        + [rot])
                file_writer.write(' '.join(str(item) for item in line) + '\n')
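write_label also depends on module-level names defined elsewhere in its source module (tracklet_path, label_path, image_path, local_ori, obtain_2Dbox). A minimal calling sketch, assuming the transform is the usual KITTI composition of the rectifying rotation and the velodyne-to-camera transform; the calibration values below are placeholders:

import numpy as np

# Placeholder calibration; in practice these come from calib_velo_to_cam.txt
# and calib_cam_to_cam.txt of the KITTI raw dataset.
R0_rect = np.eye(4)          # 4x4, rectifying rotation in the top-left 3x3
Tr_velo_to_cam = np.eye(4)   # 4x4 rigid transform, velodyne -> camera
P2 = np.zeros((3, 4))        # left color camera projection matrix

write_label(R0_rect @ Tr_velo_to_cam, P2)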
Example #2
def load_tracklets_for_frames(n_frames, xml_path):
    """
    Loads dataset labels also referred to as tracklets, saving them individually for each frame.

    Parameters
    ----------
    n_frames    : Number of frames in the dataset.
    xml_path    : Path to the tracklets XML.

    Returns
    -------
    Tuple of three dictionaries with integer keys corresponding to absolute frame numbers and lists as values. The
    first maps each frame to the 8-corner coordinate arrays of the objects' 3D bounding boxes, the second to the
    object types as strings, and the third to the objects' yaw angles.
    """
    tracklets = xmlParser.parseXML(xml_path)

    frame_tracklets = {}
    frame_tracklets_types = {}
    frame_tracklets_yaw = {}
    for i in range(n_frames):
        frame_tracklets[i] = []
        frame_tracklets_types[i] = []
        frame_tracklets_yaw[i] = []

    # loop over tracklets
    for i, tracklet in enumerate(tracklets):
        # this part is inspired by kitti object development kit matlab code: computeBox3D
        h, w, l = tracklet.size
        # in velodyne coordinates around zero point and without orientation yet
        trackletBox = np.array([
            [-l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2],
            [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2],
            [0.0, 0.0, 0.0, 0.0, h, h, h, h]
        ])
        # loop over all data in tracklet
        for translation, rotation, state, occlusion, truncation, amtOcclusion, amtBorders, absoluteFrameNumber in tracklet:
            # determine if object is in the image; otherwise continue
            if truncation not in (xmlParser.TRUNC_IN_IMAGE, xmlParser.TRUNC_TRUNCATED):
                continue
            # re-create 3D bounding box in velodyne coordinate system
            yaw = rotation[2]  # other rotations are supposedly 0
            assert np.abs(rotation[:2]).sum() == 0, 'object rotations other than yaw given!'
            rotMat = np.array([
                [np.cos(yaw), -np.sin(yaw), 0.0],
                [np.sin(yaw), np.cos(yaw), 0.0],
                [0.0, 0.0, 1.0]
            ])
            cornerPosInVelo = np.dot(rotMat, trackletBox) + np.tile(translation, (8, 1)).T
            frame_tracklets[absoluteFrameNumber].append(cornerPosInVelo)
            frame_tracklets_types[absoluteFrameNumber].append(tracklet.objectType)
            frame_tracklets_yaw[absoluteFrameNumber].append(yaw)

    return (frame_tracklets, frame_tracklets_types, frame_tracklets_yaw)
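A minimal usage sketch (the path and frame count are placeholders):

frames, types, yaws = load_tracklets_for_frames(
    n_frames=108, xml_path='2011_09_26_drive_0001_sync/tracklet_labels.xml')
for corners, obj_type, yaw in zip(frames[0], types[0], yaws[0]):
    print(obj_type, yaw, corners.shape)   # e.g. Car 0.02 (3, 8)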
Example #3
from collections import defaultdict
import numpy as np
from parseTrackletXML import parseXML

def read_label_video(label_file, calib):
    tracklets = parseXML(trackletFile=label_file)
    frame_dict_2d = defaultdict(list)
    frame_dict_3d = defaultdict(list)
    frame_dict_obj = defaultdict(list)
    frame_dict_id = defaultdict(list)
    for iTracklet, tracklet in enumerate(tracklets):
        h, w, l = tracklet.size
        # in velodyne coordinates around zero point and without orientation yet
        trackletBox = np.array([
            [-l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2],
            [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2],
            [0.0, 0.0, 0.0, 0.0, h, h, h, h]])

        for translation, rotation, state, occlusion, truncation, amtOcclusion, amtBorders, absoluteFrameNumber \
                in tracklet:
            # unlike the loaders above, this keeps every pose regardless of truncation
            # re-create 3D bounding box in velodyne coordinate system
            yaw = rotation[2]  # other rotations are 0 in all xml files I checked
            assert np.abs(rotation[:2]).sum() == 0, 'object rotations other than yaw given!'
            rotMat = np.array([
                [np.cos(yaw), -np.sin(yaw), 0.0],
                [np.sin(yaw), np.cos(yaw), 0.0],
                [0.0, 0.0, 1.0]])
            cornerPosInVelo = np.dot(rotMat, trackletBox) + np.tile(translation, (8, 1)).T

            # calc yaw as seen from the camera (i.e. 0 degree = facing away from cam),
            #   as opposed to car-centered yaw (i.e. 0 degree = same orientation as car).
            #   makes quite a difference for objects in periphery!
            # Result is in [0, 2pi]
            x, y, z = translation
            yawVisual = (yaw - np.arctan2(y, x)) % (2. * np.pi)
            frame_dict_3d[absoluteFrameNumber].append(cornerPosInVelo.T)
            cornerPosInImg = calib.project_velo_to_image(cornerPosInVelo.T)
            # record projected corners, object type and tracklet id per frame
            frame_dict_2d[absoluteFrameNumber].append(cornerPosInImg)
            frame_dict_obj[absoluteFrameNumber].append(tracklet.objectType)
            frame_dict_id[absoluteFrameNumber].append(iTracklet)

    return frame_dict_2d, frame_dict_3d, frame_dict_obj, frame_dict_id
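The yawVisual value corrects the car-centered yaw by the viewing angle toward the object, which matters most for objects in the periphery. A small numeric check with made-up values:

import numpy as np

yaw = 0.0          # car-centered yaw: object heading matches the ego vehicle
x, y = 5.0, 5.0    # object ahead and to the left in velodyne coordinates
yawVisual = (yaw - np.arctan2(y, x)) % (2.0 * np.pi)
print(yawVisual)   # ~5.50 rad, even though the car-centered yaw is 0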
Example #4
def load_tracklets(n_frames, xml_path):
    tracklets = xmlParser.parseXML(xml_path)

    frame_tracklets = {}
    frame_tracklets_types = {}
    for i in range(n_frames):
        frame_tracklets[i] = []
        frame_tracklets_types[i] = []

    # loop over tracklets
    for i, tracklet in enumerate(tracklets):
        # this part is inspired by kitti object development kit matlab code: computeBox3D
        h, w, l = tracklet.size
        # in velodyne coordinates around zero point and without orientation yet
        trackletBox = np.array(
            [[-l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2],
             [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2],
             [0.0, 0.0, 0.0, 0.0, h, h, h, h]])
        # loop over all data in tracklet
        for translation, rotation, state, occlusion, truncation, amtOcclusion, amtBorders, absoluteFrameNumber in tracklet:
            # determine if object is in the image; otherwise continue
            if truncation not in (xmlParser.TRUNC_IN_IMAGE,
                                  xmlParser.TRUNC_TRUNCATED):
                continue
            # re-create 3D bounding box in velodyne coordinate system
            yaw = rotation[2]  # other rotations are supposedly 0
            assert np.abs(rotation[:2]).sum() == 0, 'object rotations other than yaw given!'
            rotMat = np.array([[np.cos(yaw), -np.sin(yaw), 0.0],
                               [np.sin(yaw), np.cos(yaw), 0.0],
                               [0.0, 0.0, 1.0]])
            cornerPosInVelo = np.dot(rotMat, trackletBox) + np.tile(translation, (8, 1)).T
            frame_tracklets[absoluteFrameNumber].append(cornerPosInVelo)
            frame_tracklets_types[absoluteFrameNumber].append(tracklet.objectType)

    return (frame_tracklets, frame_tracklets_types)
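This is the same box construction as load_tracklets_for_frames above, minus the per-frame yaw dictionary. A minimal usage sketch (the path and frame count are placeholders):

boxes, types = load_tracklets(n_frames=108,
                              xml_path='2011_09_26_drive_0001_sync/tracklet_labels.xml')
print(len(boxes[0]), types[0])   # number of boxes in frame 0 and their types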
Example #6
from parseTrackletXML import parseXML
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.cm as cm
import numpy as np

dataDir = "/nh/compneuro/Data/KITTI/2011_09_26/2011_09_26_drive_0001_sync/"
trackFile = dataDir + "tracklet_labels.xml"
imageDir = dataDir + "image_02/data/"

for trackletObj in parseXML(trackFile):
    [h, w, l] = trackletObj.size
    for translation, rotation, state, occlusion, truncation, amtOcclusion, amtBorders, absoluteFrameNumber in trackletObj:

        imageFile = imageDir + str(absoluteFrameNumber).rjust(10, '0') + ".png"
        img = mpimg.imread(imageFile)
        # NOTE: translation and the box dimensions are in velodyne coordinates
        # (meters), not pixels; the indices are cast to int here so the slicing
        # runs, but a correct overlay would first project the box into the
        # image plane (see the sketch below).
        [x, y, z] = [int(v) for v in translation]
        h, w = int(h), int(w)
        # Draw box edges in red (R=1, G=0, B=0)
        img[y:y+w, x, 0] = 1
        img[y:y+w, x, 1] = 0
        img[y:y+w, x, 2] = 0
        img[y:y+w, x+h, 0] = 1
        img[y:y+w, x+h, 1] = 0
        img[y:y+w, x+h, 2] = 0
        img[y, x:x+h, 0] = 1
        img[y, x:x+h, 1] = 0
        img[y, x:x+h, 2] = 0
        img[y+w, x:x+h, 0] = 1
        img[y+w, x:x+h, 1] = 0
        img[y+w, x:x+h, 2] = 0
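A minimal sketch of the proper approach, assuming a 3x4 matrix P_velo_to_img (the product of the camera projection, rectification, and velodyne-to-camera matrices from the KITTI calibration files) is available; the helper name is hypothetical:

import numpy as np

def project_corners(corners_velo, P_velo_to_img):
    """Project Nx3 velodyne points to Nx2 pixel coordinates."""
    pts = np.hstack([corners_velo, np.ones((corners_velo.shape[0], 1))])
    uvw = pts @ P_velo_to_img.T      # N x 3 homogeneous image coordinates
    return uvw[:, :2] / uvw[:, 2:3]  # divide by depth

# With cornerPosInVelo as built in the examples above (shape 3 x 8):
# uv = project_corners(cornerPosInVelo.T, P_velo_to_img)
# xmin, ymin = uv.min(axis=0); xmax, ymax = uv.max(axis=0)  # 2D bbox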
Example #7
import argparse
import os
import sys
from datetime import datetime

import numpy as np
from numpy.linalg import inv

import rosbag
import pykitti
from cv_bridge import CvBridge

import parseTrackletXML

def run_kitti2bag():
    parser = argparse.ArgumentParser(
        description="Convert KITTI dataset to ROS bag file the easy way!")
    # Accepted argument values
    kitti_types = ["raw_synced", "odom_color", "odom_gray"]
    odometry_sequences = []
    for s in range(22):
        odometry_sequences.append(str(s).zfill(2))

    parser.add_argument("kitti_type",
                        choices=kitti_types,
                        help="KITTI dataset type")
    parser.add_argument(
        "dir",
        nargs="?",
        default=os.getcwd(),
        help="base directory of the dataset; if no directory is passed, the default is the current working directory")
    parser.add_argument(
        "-t",
        "--date",
        help="date of the raw dataset (i.e. 2011_09_26); option is only for RAW datasets.")
    parser.add_argument(
        "-r",
        "--drive",
        help="drive number of the raw dataset (i.e. 0001); option is only for RAW datasets.")
    parser.add_argument(
        "-s",
        "--sequence",
        choices=odometry_sequences,
        help="sequence of the odometry dataset (between 00 - 21); option is only for ODOMETRY datasets.")
    args = parser.parse_args()

    bridge = CvBridge()
    compression = rosbag.Compression.NONE
    # compression = rosbag.Compression.BZ2
    # compression = rosbag.Compression.LZ4

    # CAMERAS
    cameras = [(0, 'camera_gray_left', '/kitti/camera_gray_left'),
               (1, 'camera_gray_right', '/kitti/camera_gray_right'),
               (2, 'camera_color_left', '/kitti/camera_color_left'),
               (3, 'camera_color_right', '/kitti/camera_color_right')]

    if args.kitti_type.find("raw") != -1:

        if args.date is None:
            print("Date option is not given. It is mandatory for raw dataset.")
            print("Usage for raw dataset: kitti2bag raw_synced [dir] -t <date> -r <drive>")
            sys.exit(1)
        elif args.drive is None:
            print("Drive option is not given. It is mandatory for raw dataset.")
            print("Usage for raw dataset: kitti2bag raw_synced [dir] -t <date> -r <drive>")
            sys.exit(1)

        bag = rosbag.Bag("kitti_{}_drive_{}_{}.bag".format(
            args.date, args.drive, args.kitti_type[4:]),
                         'w',
                         compression=compression)
        kitti = pykitti.raw(args.dir, args.date, args.drive)
        if not os.path.exists(kitti.data_path):
            print('Path {} does not exist. Exiting.'.format(kitti.data_path))
            sys.exit(1)

        if len(kitti.timestamps) == 0:
            print('Dataset is empty? Exiting.')
            sys.exit(1)

        try:
            # IMU
            imu_frame_id = 'imu_link'
            imu_topic = '/kitti/oxts/imu'
            gps_fix_topic = '/kitti/oxts/gps/fix'
            gps_vel_topic = '/kitti/oxts/gps/vel'
            velo_frame_id = 'velo_link'
            velo_topic = '/kitti/velo'
            tracklet_topic = '/kitti/tracklet'

            T_base_link_to_imu = np.eye(4, 4)
            T_base_link_to_imu[0:3, 3] = [-2.71 / 2.0 - 0.05, 0.32, 0.93]

            # tf_static
            transforms = [
                ('base_link', imu_frame_id, T_base_link_to_imu),
                (imu_frame_id, velo_frame_id, inv(kitti.calib.T_velo_imu)),
                (imu_frame_id, cameras[0][1], inv(kitti.calib.T_cam0_imu)),
                (imu_frame_id, cameras[1][1], inv(kitti.calib.T_cam1_imu)),
                (imu_frame_id, cameras[2][1], inv(kitti.calib.T_cam2_imu)),
                (imu_frame_id, cameras[3][1], inv(kitti.calib.T_cam3_imu))
            ]
            util = pykitti.utils.read_calib_file(
                os.path.join(kitti.calib_path, 'calib_cam_to_cam.txt'))
            # Export
            save_static_transforms(bag, transforms, kitti.timestamps)
            save_dynamic_tf(bag, kitti, args.kitti_type, initial_time=None)
            save_imu_data(bag, kitti, imu_frame_id, imu_topic)
            save_gps_fix_data(bag, kitti, imu_frame_id, gps_fix_topic)
            save_gps_vel_data(bag, kitti, imu_frame_id, gps_vel_topic)
            for camera in cameras:
                save_camera_data(bag,
                                 args.kitti_type,
                                 kitti,
                                 util,
                                 bridge,
                                 camera=camera[0],
                                 camera_frame_id=camera[1],
                                 topic=camera[2],
                                 initial_time=None)
            save_velo_data(bag, kitti, velo_frame_id, velo_topic)
            tracklet_labels = os.path.join(kitti.data_path,
                                           "tracklet_labels.xml")
            if os.path.exists(tracklet_labels):
                tracklets = parseTrackletXML.parseXML(tracklet_labels)
                save_tracklet_data(bag, tracklets, kitti.timestamps,
                                   tracklet_topic)

        finally:
            bag.flush()
            print("## OVERVIEW ##")
            print(bag)
            bag.close()

    elif args.kitti_type.find("odom") != -1:

        if args.sequence is None:
            print("Sequence option is not given. It is mandatory for odometry dataset.")
            print("Usage for odometry dataset: kitti2bag {odom_color, odom_gray} [dir] -s <sequence>")
            sys.exit(1)

        bag = rosbag.Bag("kitti_data_odometry_{}_sequence_{}.bag".format(
            args.kitti_type[5:], args.sequence),
                         'w',
                         compression=compression)

        kitti = pykitti.odometry(args.dir, args.sequence)
        if not os.path.exists(kitti.sequence_path):
            print('Path {} does not exist. Exiting.'.format(kitti.sequence_path))
            sys.exit(1)

        kitti.load_calib()
        kitti.load_timestamps()

        if len(kitti.timestamps) == 0:
            print('Dataset is empty? Exiting.')
            sys.exit(1)

        if args.sequence in odometry_sequences[:11]:
            print(
                "Odometry dataset sequence {} has ground truth information (poses)."
                .format(args.sequence))
            kitti.load_poses()

        try:
            util = pykitti.utils.read_calib_file(
                os.path.join(args.dir, 'sequences', args.sequence,
                             'calib.txt'))
            current_epoch = (datetime.utcnow() -
                             datetime(1970, 1, 1)).total_seconds()
            # Export
            if args.kitti_type.find("gray") != -1:
                used_cameras = cameras[:2]
            elif args.kitti_type.find("color") != -1:
                used_cameras = cameras[-2:]

            save_dynamic_tf(bag,
                            kitti,
                            args.kitti_type,
                            initial_time=current_epoch)
            for camera in used_cameras:
                save_camera_data(bag,
                                 args.kitti_type,
                                 kitti,
                                 util,
                                 bridge,
                                 camera=camera[0],
                                 camera_frame_id=camera[1],
                                 topic=camera[2],
                                 initial_time=current_epoch)

        finally:
            print("## OVERVIEW ##")
            print(bag)
            bag.close()
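For reference, invocations matching the usage strings printed above (the directory, date, drive, and sequence values are placeholders):

import sys

# Raw dataset:
sys.argv = ['kitti2bag', 'raw_synced', '/data/kitti', '-t', '2011_09_26', '-r', '0001']
run_kitti2bag()

# Odometry dataset:
# sys.argv = ['kitti2bag', 'odom_color', '/data/kitti_odometry', '-s', '04']
# run_kitti2bag()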
Example #8
def method(dir_path, label_path, way):
    # dir_path is passed straight to parseXML, so it should point at the
    # tracklet XML file; label_path is the output directory.
    data = xmlParser.parseXML(dir_path)
    for IT, it in enumerate(data):
        h, w, l = it.size
        label = it.objectType
        for translation, rotation, state, occlusion, truncation, amtOcclusion, amtBorders, absoluteFrameNumber in it:
            frame_id = '%03d' % absoluteFrameNumber
            print(frame_id)
            x, y, z = translation

            path = label_path + '/0000000' + frame_id + '.txt'
            if way == 'dir':
                # only make sure the per-frame label file exists
                if os.path.exists(path):
                    print('pass')
                else:
                    os.mknod(path)
            else:
                # append one KITTI-style label line; the literal fields
                # '1'..'7' are placeholders for truncation, occlusion,
                # alpha and the 2D bounding box
                print(label)
                fields = [label, '1', '2', '3', '4', '5', '6', '7',
                          h, w, l, x, y, z, rotation[2]]
                with open(path, 'a') as f:
                    f.write(' '.join(str(v) for v in fields) + '\n')
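A usage sketch with hypothetical paths; the first call creates the per-frame files, the second appends the label lines:

xml = '2011_09_26_drive_0001_sync/tracklet_labels.xml'
method(xml, './label_2', 'dir')    # ensure the per-frame label files exist
method(xml, './label_2', 'write')  # append one label line per object pose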