Example no. 1
    def parse_ground_truth(self):
        camera_mat = transform_to_matrix(self.sensor_camera.transform)
        ego_mat = transform_to_matrix(self.ego_state.transform)
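        # Row-vector convention: dot(inv(ego), inv(camera)) == inv(dot(camera, ego)),
        # i.e. the world -> camera-space transform, applied on the right below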
        tf_mat = np.dot(np.linalg.inv(ego_mat), np.linalg.inv(camera_mat))

        labels = []
        for npc, npc_state in zip(self.npcs, self.npcs_state):
            npc_tf = self.get_npc_tf_in_cam_space(npc_state.transform, tf_mat)

            location = self.get_location(npc_tf)
            rotation_y = self.get_rotation_y(npc_tf)
            height, width, length = self.get_dimension(npc.bounding_box)
            alpha = self.get_alpha(location, rotation_y)

            corners_3D = self.get_corners_3D(location, rotation_y,
                                             (height, width, length))
            corners_2D = self.project_3D_to_2D(corners_3D)

            # Axis-aligned 2D box: per-axis extremes over all projected corners
            p_min = corners_2D.min(axis=1)
            p_max = corners_2D.max(axis=1)

            left, top = p_min
            right, bottom = p_max

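            # KITTI label fields: type, truncated, occluded, alpha,
            # 2D bbox (left top right bottom), dimensions (height width length),
            # location (x y z in camera space), rotation_y;
            # truncated and occluded are left as -1 (unknown)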
            label = "Car -1 -1 {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f}".format(
                alpha, left, top, right, bottom, height, width, length,
                location[0], location[1], location[2], rotation_y)
            labels.append(label)

        return labels
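The helpers used above (get_location, get_rotation_y, get_alpha, ...) are not shown in these snippets. For reference, KITTI defines the observation angle alpha in terms of rotation_y and the viewing ray to the object; a minimal sketch under that definition (a hypothetical reconstruction, not the source's implementation):

    import numpy as np

    def get_alpha(self, location, rotation_y):
        # KITTI: alpha = rotation_y minus the angle of the ray to the object,
        # with location = (x, y, z) in camera space, wrapped to [-pi, pi]
        alpha = rotation_y - np.arctan2(location[0], location[2])
        return (alpha + np.pi) % (2 * np.pi) - np.pi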
Example no. 2
    def is_npc_obscured(self, npc_transform):
        lidar_mat = np.dot(transform_to_matrix(self.sensor_lidar.transform),
                           transform_to_matrix(self.ego_state.transform))
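        # dot(lidar_local, ego) composes lidar -> world (row-vector convention);
        # the sensor's world position therefore sits in row 3 of the matrix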
        start = lgsvl.Vector(
            lidar_mat[3][0],
            lidar_mat[3][1],
            lidar_mat[3][2],
        )
        end = npc_transform.position
        direction = lgsvl.Vector(
            end.x - start.x,
            end.y - start.y,
            end.z - start.z,
        )
        distance = np.linalg.norm(
            np.array([direction.x, direction.y, direction.z]))
        # Raycast only against Unity layer 0 ("Default")
        layer_mask = 1 << 0

        # Any hit on the masked geometry before the ray reaches the NPC's
        # position means the line of sight is blocked
        hit = self.sim.raycast(start, direction, layer_mask, distance)
        return hit is not None
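None of the snippets show the call site for is_npc_obscured; a hypothetical use (wiring assumed from the examples above, not confirmed by the source) is to drop labels for NPCs the sensor cannot actually see:

    labels = [
        label
        for npc_state, label in zip(self.npcs_state, self.parse_ground_truth())
        if not self.is_npc_obscured(npc_state.transform)
    ]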
Example no. 3
    def get_transform(self, parent_tf, child_tf):
        """Get the transformation matrix between parent and child."""
        tf = np.dot(transform_to_matrix(child_tf),
                    np.linalg.inv(transform_to_matrix(parent_tf)))
        tf = np.array(tf)
        # The translation sits in row 3 under the row-vector convention;
        # copy it into column 3 so the 3x4 slice below carries it in the
        # conventional (column) position
        tf[:, 3] = tf[3, :]
        tf = tf[:3, :]

        return tf.flatten()
Example no. 4
def parse_ground_truth(self):
    camera_mat = transform_to_matrix(self.sensor_camera.transform)
    ego_mat = transform_to_matrix(self.ego_state.transform)
    tf_mat = np.dot(np.linalg.inv(ego_mat), np.linalg.inv(camera_mat))

    labels = []
    for npc, npc_state in zip(self.npcs, self.npcs_state):

        # Unlike Example no. 1, this variant reads the pose from npc.transform
        # rather than npc_state.transform
        npc_tf = self.get_npc_tf_in_cam_space(npc.transform, tf_mat)

        location = self.get_location(npc_tf)
        rotation_y = self.get_rotation_y(npc_tf)
        height, width, length = self.get_dimension(npc.bounding_box)
        alpha = self.get_alpha(location, rotation_y)

        corners_3D = self.get_corners_3D(location, rotation_y,
                                         (height, width, length))
        corners_2D = self.project_3D_to_2D(corners_3D)

        # Axis-aligned 2D box: per-axis extremes over all projected corners
        p_min = corners_2D.min(axis=1)
        p_max = corners_2D.max(axis=1)

        left, top = p_min
        right, bottom = p_max

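        # Vehicle NPC types are labeled as KITTI "Car"; anything else
        # (pedestrian models) falls through to "Pedestrian"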
        if npc.name in [
                'Sedan', 'SUV', 'Jeep', 'Hatchback', 'BoxTruck', 'SchoolBus'
        ]:
            label = "Car -1 -1 {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f}" \
                        .format(alpha, left, top, right, bottom, height, width, length, location[0], location[1], location[2], rotation_y)

        else:
            # if height > 10.0:
            #     height = 1.70

            label = "Pedestrian -1 -1 {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f}" \
                        .format(alpha, left, top, right, bottom, height, width, length, location[0], location[1], location[2], rotation_y)

            print("{}: {}".format(npc.name, height))

        labels.append(label)

    return labels
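get_dimension is also not shown in these snippets; a plausible reconstruction (hypothetical; assumes lgsvl's BoundingBox exposes min/max Vectors in the NPC's y-up, z-forward local frame):

    def get_dimension(self, bounding_box):
        # KITTI orders dimensions as (height, width, length)
        height = bounding_box.max.y - bounding_box.min.y
        width = bounding_box.max.x - bounding_box.min.x
        length = bounding_box.max.z - bounding_box.min.z
        return height, width, length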
Example no. 5
    def get_npc_tf_in_cam_space(self, npc_transform, tf_mat):
        """Converts the world-space position of the NPC into the EGO camera space."""
        npc_tf = np.dot(transform_to_matrix(npc_transform), tf_mat)

        return npc_tf
Example no. 6
#!/usr/bin/env python3
#
# Copyright (c) 2019 LG Electronics, Inc.
#
# This software contains code licensed as described in LICENSE.
#

from lgsvl import Transform, Vector
from lgsvl.utils import (
    transform_to_matrix,
    matrix_inverse,
    matrix_multiply,
    vector_multiply,
)

# global "world" transform (for example, car position)
world = Transform(Vector(10, 20, 30), Vector(11, 22, 33))

# "local" transform, relative to world transform (for example, Lidar sensor)
local = Transform(Vector(0, 2, -0.5), Vector(0, 0, 0))

# combine them into a single transform matrix
mtx = matrix_multiply(transform_to_matrix(local), transform_to_matrix(world))

# compute inverse for transforming points
mtx = matrix_inverse(mtx)

# now transform a point from world space into the coordinate system of the Lidar's local transform
pt = vector_multiply(Vector(15, 20, 30), mtx)
print(pt)
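The other examples above perform the same composition with plain NumPy instead of the lgsvl.utils helpers; a minimal sketch of the equivalence (assuming transform_to_matrix returns a 4x4 row-major matrix, as the row-3 translation reads above suggest):

    import numpy as np
    from lgsvl import Transform, Vector
    from lgsvl.utils import transform_to_matrix

    world = Transform(Vector(10, 20, 30), Vector(11, 22, 33))
    local = Transform(Vector(0, 2, -0.5), Vector(0, 0, 0))

    # same as matrix_multiply(transform_to_matrix(local), transform_to_matrix(world))
    mtx = np.dot(np.array(transform_to_matrix(local)),
                 np.array(transform_to_matrix(world)))

    # same as matrix_inverse(mtx)
    inv = np.linalg.inv(mtx)

    # same as vector_multiply(Vector(15, 20, 30), inv): a row vector with a
    # homogeneous 1 appended, multiplied from the left
    pt = np.dot(np.array([15.0, 20.0, 30.0, 1.0]), inv)
    print(pt[:3])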