Example 1
    def load_img_with_targets(self, index):
        """Load images and targets for the training and validation phase"""
        sample_id = int(self.sample_id_list[index])
        img_path = os.path.join(self.image_dir, '{:06d}.png'.format(sample_id))
        lidarData = self.get_lidar(sample_id)
        calib = self.get_calib(sample_id)
        labels, has_labels = self.get_label(sample_id)
        if has_labels:
            labels[:, 1:] = transformation.camera_to_lidar_box(
                labels[:, 1:], calib.V2C, calib.R0, calib.P2)

        if self.lidar_aug:
            lidarData, labels[:, 1:] = self.lidar_aug(lidarData, labels[:, 1:])

        lidarData, labels = get_filtered_lidar(lidarData, cnf.boundary, labels)

        bev_map = makeBEVMap(lidarData, cnf.boundary)
        bev_map = torch.from_numpy(bev_map)

        # Random horizontal flip augmentation on the (C, H, W) BEV map
        hflipped = False
        if np.random.random() < self.hflip_prob:
            hflipped = True
            bev_map = torch.flip(bev_map, [-1])  # flip along the width axis

        targets = self.build_targets(labels, hflipped)

        metadatas = {'img_path': img_path, 'hflipped': hflipped}

        return metadatas, bev_map, targets
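
A loader like this is usually reached from a Dataset's __getitem__; the sketch below is only an illustration, assuming the same class also exposes load_img_only (Example 3) and an is_test flag (the flag name is an assumption, not taken from these snippets).

    # Sketch (assumption): dispatch between the train/val and test loaders.
    def __getitem__(self, index):
        if self.is_test:  # hypothetical flag
            return self.load_img_only(index)
        return self.load_img_with_targets(index)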
Example 2
    def load_bevmap_front_vs_back(self, index):
        """Load only image for the testing phase"""
        sample_id = int(self.sample_id_list[index])
        img_path, img_rgb = self.get_image(sample_id)
        lidarData = self.get_lidar(sample_id)

        front_lidar = get_filtered_lidar(lidarData, cnf.boundary)
        front_bevmap = makeBEVMap(front_lidar, cnf.boundary)
        front_bevmap = torch.from_numpy(front_bevmap)

        back_lidar = get_filtered_lidar(lidarData, cnf.boundary_back)
        back_bevmap = makeBEVMap(back_lidar, cnf.boundary_back)
        back_bevmap = torch.from_numpy(back_bevmap)

        metadatas = {
            'img_path': img_path,
        }

        return metadatas, front_bevmap, back_bevmap, img_rgb
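
Reusing the do_detect helper that appears in Example 5 below, a front/back pair like this could be pushed through the model in two passes; the call pattern is a sketch under that assumption, with configs, model, and dataset presumed to be set up elsewhere.

# Sketch (assumption): two-pass inference over the front and back BEV maps.
metadatas, front_bevmap, back_bevmap, img_rgb = dataset.load_bevmap_front_vs_back(idx)
with torch.no_grad():
    front_detections, _, _ = do_detect(configs, model, front_bevmap, is_front=True)
    back_detections, _, _ = do_detect(configs, model, back_bevmap, is_front=False)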
Example 3
    def load_img_only(self, index):
        """Load only image for the testing phase"""
        sample_id = int(self.sample_id_list[index])
        img_path, img_rgb = self.get_image(sample_id)
        lidarData = self.get_lidar(sample_id)
        lidarData = get_filtered_lidar(lidarData, cnf.boundary)
        bev_map = makeBEVMap(lidarData, cnf.boundary)
        bev_map = torch.from_numpy(bev_map)

        metadatas = {
            'img_path': img_path,
        }

        return metadatas, bev_map, img_rgb
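
For evaluation, a loader like this can sit behind a standard DataLoader; the snippet below is a sketch assuming batch size 1 and the default collate function, with model and device being hypothetical names.

# Sketch (assumption): iterating the test split one sample at a time.
from torch.utils.data import DataLoader

test_loader = DataLoader(dataset, batch_size=1, shuffle=False)
for metadatas, bev_map, img_rgb in test_loader:
    with torch.no_grad():
        outputs = model(bev_map.to(device).float())  # hypothetical model/device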
Example 4
    def draw_img_with_label(self, index):
        """Load the BEV map, labels, and RGB image for visualization"""
        sample_id = int(self.sample_id_list[index])
        img_path, img_rgb = self.get_image(sample_id)
        lidarData = self.get_lidar(sample_id)
        calib = self.get_calib(sample_id)
        labels, has_labels = self.get_label(sample_id)
        if has_labels:
            labels[:, 1:] = transformation.camera_to_lidar_box(
                labels[:, 1:], calib.V2C, calib.R0, calib.P2)

        if self.lidar_aug:
            lidarData, labels[:, 1:] = self.lidar_aug(lidarData, labels[:, 1:])

        lidarData, labels = get_filtered_lidar(lidarData, cnf.boundary, labels)
        bev_map = makeBEVMap(lidarData, cnf.boundary)

        return bev_map, labels, img_rgb, img_path
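
Since Example 1 notes the BEV map is channel-first (C, H, W), the output of draw_img_with_label can be checked visually by transposing it to H, W, C; the matplotlib code below is only a sketch for that purpose, with dataset and idx as placeholders.

# Sketch: side-by-side view of the BEV map and the camera image.
import matplotlib.pyplot as plt

bev_map, labels, img_rgb, img_path = dataset.draw_img_with_label(idx)
fig, (ax0, ax1) = plt.subplots(1, 2)
ax0.imshow(bev_map.transpose(1, 2, 0))  # (C, H, W) -> (H, W, C)
ax1.imshow(img_rgb)
plt.show()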
Example 5
def on_scan(scan):
    start = timeit.default_timer()
    rospy.loginfo("Got scan")
    # Collect (x, y, z, intensity) points; intensity is scaled down by 100
    gen = []
    for p in pc2.read_points(scan,
                             field_names=("x", "y", "z", "intensity"),
                             skip_nans=True):
        gen.append(np.array([p[0], p[1], p[2], p[3] / 100.0]))
    gen_numpy = np.array(gen, dtype=np.float32)

    front_lidar = get_filtered_lidar(gen_numpy, cnf.boundary)
    bev_map = makeBEVMap(front_lidar, cnf.boundary)
    bev_map = torch.from_numpy(bev_map)

    with torch.no_grad():
        detections, bev_map, fps = do_detect(configs,
                                             model,
                                             bev_map,
                                             is_front=True)
    print(fps)
    objects_msg = DetectedObjectArray()
    objects_msg.header.stamp = rospy.Time.now()
    objects_msg.header.frame_id = scan.header.frame_id
    flag = False
    for j in range(configs.num_classes):
        class_name = ID_TO_CLASS_NAME[j]

        if len(detections[j]) > 0:
            flag = True
            for det in detections[j]:
                _score, _x, _y, _z, _h, _w, _l, _yaw = det
                yaw = -_yaw
                # Map BEV pixel coordinates back to metric lidar coordinates
                # (note the x/y axis swap between the BEV image and lidar frame)
                x = _y / cnf.BEV_HEIGHT * cnf.bound_size_x + cnf.boundary['minX']
                y = _x / cnf.BEV_WIDTH * cnf.bound_size_y + cnf.boundary['minY']
                z = _z + cnf.boundary['minZ']
                w = _w / cnf.BEV_WIDTH * cnf.bound_size_y
                l = _l / cnf.BEV_HEIGHT * cnf.bound_size_x
                obj = DetectedObject()
                obj.header.stamp = rospy.Time.now()
                obj.header.frame_id = scan.header.frame_id

                obj.pose_reliable = True

                obj.space_frame = scan.header.frame_id
                obj.label = class_name
                obj.score = _score
                obj.pose.position.x = x
                obj.pose.position.y = y
                obj.pose.position.z = z
                [qx, qy, qz, qw] = euler_to_quaternion(yaw, 0, 0)
                obj.pose.orientation.x = qx
                obj.pose.orientation.y = qy
                obj.pose.orientation.z = qz
                obj.pose.orientation.w = qw

                obj.dimensions.x = l
                obj.dimensions.y = w
                obj.dimensions.z = _h
                objects_msg.objects.append(obj)
    if flag:
        pub.publish(objects_msg)

    stop = timeit.default_timer()
    print('Time: ', stop - start)
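
The callback references pub, model, and configs as globals; a minimal entry point in the spirit of this snippet could look like the sketch below, where the topic names and the parse_test_configs / create_model helpers are assumptions rather than something taken from the source (DetectedObjectArray is assumed to be imported alongside the callback).

# Sketch (assumptions: topic names, config/model helpers) of the node wiring.
from sensor_msgs.msg import PointCloud2

if __name__ == '__main__':
    rospy.init_node('bev_detection_node')
    configs = parse_test_configs()   # hypothetical config helper
    model = create_model(configs)    # hypothetical model factory
    model.eval()
    pub = rospy.Publisher('/detected_objects', DetectedObjectArray, queue_size=1)
    rospy.Subscriber('/points_raw', PointCloud2, on_scan, queue_size=1)
    rospy.spin()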