Code example #1
import os
from glob import glob

from point_viz.converter import PointvizConverter

# Project-local modules (import paths assumed from the PointPillars project layout).
from config import Parameters
from network import build_point_pillar_graph
from readers import KittiDataReader

DATA_ROOT = "/media/data3/tjtanaa/kitti_dataset/KITTI/object/training"
# MODEL_ROOT = "./logs_Car_Pedestrian_Original_2"
MODEL_ROOT = "./logs"

os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

if __name__ == "__main__":

    # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/custom_prediction_multiprocessing"
    save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/prediction"
    # Initialize the converter and set up the output directory.
    Converter = PointvizConverter(save_viz_path)

    params = Parameters()
    pillar_net = build_point_pillar_graph(params)
    pillar_net.load_weights(os.path.join(MODEL_ROOT, "model.h5"))
    # pillar_net.summary()

    data_reader = KittiDataReader()

    lidar_files = sorted(glob(os.path.join(DATA_ROOT, "velodyne",
                                           "*.bin")))[:100]
    print(len(lidar_files))
    print()
    label_files = sorted(glob(os.path.join(DATA_ROOT, "label_2",
                                           "*.txt")))[:100]
    calibration_files = sorted(glob(os.path.join(DATA_ROOT, "calib",
                                                 "*.txt")))[:100]
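
The listing above stops after the file lists are gathered. As a point of reference, here is a minimal sketch of how a KITTI velodyne .bin scan can be read with plain NumPy, assuming the standard float32 x, y, z, intensity layout (KittiDataReader.read_lidar in the second example below presumably does the equivalent), reusing lidar_files and Converter from the example above.

import numpy as np

def read_kitti_lidar(bin_path):
    """Load one KITTI velodyne scan as an (N, 4) float32 array: x, y, z, intensity."""
    return np.fromfile(bin_path, dtype=np.float32).reshape(-1, 4)

# Visualize the first scan with the converter created above.
lidar = read_kitti_lidar(lidar_files[0])
Converter.compile("raw_sample_0", coors=lidar[:, :3], intensity=lidar[:, 3])
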
Code example #2
File: processors.py Project: zq07075335/PointPillars
    def __getitem__(self, batch_id: int):
        file_ids = np.arange(batch_id * self.batch_size, self.batch_size * (batch_id + 1))
        #         print("inside getitem")
        pillars = []
        voxels = []
        occupancy = []
        position = []
        size = []
        angle = []
        heading = []
        classification = []
        pts_input = []
        gt_boxes3d = []

        save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/original_processor"
        # Initialize the converter and set up the output directory.
        Converter = PointvizConverter(save_viz_path)

        for i in file_ids:
            lidar = self.data_reader.read_lidar(self.lidar_files[i])


            # Export the raw point cloud (x, y, z and intensity) for visualization.
            Converter.compile("transform_sample_{}".format(i), coors=lidar[:, :3], intensity=lidar[:, 3])

            # For each file, divide the space into an x-y grid to create pillars.
            # "Voxels" here are the pillar indices into that grid.
            pillars_, voxels_ = self.make_point_pillars(lidar)
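            # Note: pillars_ holds the per-pillar point features and voxels_ the
            # corresponding pillar grid indices; the exact tensor shapes are set by
            # the pillar-count and points-per-pillar limits configured for the network.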

            # print(pillars_.shape, voxels_.shape)
            pillars.append(pillars_)
            voxels.append(voxels_)

            if self.label_files is not None:
                label = self.data_reader.read_label(self.label_files[i])
                R, t = self.data_reader.read_calibration(self.calibration_files[i])
                # Labels are transformed into bounding boxes in the lidar coordinate frame.
                # Each label has 7 values: centroid, dimensions, and yaw.
                label_transformed = self.transform_labels_into_lidar_coordinates(label, R, t)

                # These definitions can be found in the point_pillars.cpp file.
                # The 10-dim target vector is split into its components here.
                occupancy_, position_, size_, angle_, heading_, classification_ = self.make_ground_truth(
                    label_transformed)

                occupancy.append(occupancy_)
                position.append(position_)
                size.append(size_)
                angle.append(angle_)
                heading.append(heading_)
                classification.append(classification_)
                pts_input.append(lidar)
                gt_boxes3d.append(label_transformed)

        pillars = np.concatenate(pillars, axis=0)
        voxels = np.concatenate(voxels, axis=0)

        if self.label_files is not None:
            occupancy = np.array(occupancy)
            position = np.array(position)
            size = np.array(size)
            angle = np.array(angle)
            heading = np.array(heading)
            classification = np.array(classification)
            return [pillars, voxels], [occupancy, position, size, angle, heading, classification], [pts_input, gt_boxes3d]
        else:
            return [pillars, voxels]
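
For context, a minimal usage sketch of this __getitem__ is shown below. The generator class name and constructor arguments are assumptions, since they are not shown in this listing; indexing a batch returns the network inputs, the ground-truth targets, and the raw points and boxes kept for visualization.

# Minimal usage sketch; SimpleDataGenerator and its constructor arguments are
# assumed here and may differ from the actual class in processors.py.
gen = SimpleDataGenerator(data_reader, batch_size=4,
                          lidar_files=lidar_files,
                          label_files=label_files,
                          calibration_files=calibration_files)

inputs, targets, viz = gen[0]
pillars, voxels = inputs
occupancy, position, size, angle, heading, classification = targets
pts_input, gt_boxes3d = viz
print(pillars.shape, voxels.shape, occupancy.shape)
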