Example #1
import os

import numpy as np

# Parameters, build_point_pillar_graph, PointvizConverter, MODEL_ROOT,
# DATA_ROOT and AnalyseCustomDataGenerator are imported from the host project
# elsewhere in the original script.


def limit_period(val, offset=0.5, period=np.pi):
    return val - np.floor(val / period + offset) * period


if __name__ == "__main__":

    params = Parameters()
    pillar_net = build_point_pillar_graph(params)
    pillar_net.load_weights(os.path.join(MODEL_ROOT, "model.h5"))
    pillar_net.summary()

    # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/custom_prediction_multiprocessing"
    save_viz_path = os.path.join(
        "/home/tan/tjtanaa/PointPillars/visualization",
        os.path.basename(MODEL_ROOT))
    # Initialize and setup output directory.
    Converter = PointvizConverter(save_viz_path)

    gt_database_dir = os.path.join(DATA_ROOT, "gt_database")

    # validation_gen = AnalyseCustomDataGenerator(batch_size=params.batch_size,root_dir = DATA_ROOT,
    #                 npoints=20000, split='train',   classes=list(params.classes_map.keys()),
    #                 random_select=True, gt_database_dir=None, aug_hard_ratio=0.7)

    validation_gen = AnalyseCustomDataGenerator(batch_size=params.batch_size,
                                                root_dir=DATA_ROOT,
                                                npoints=20000,
                                                split='train_val_test',
                                                random_select=False,
                                                classes=list(
                                                    params.classes_map.keys()))
    # validation_gen = AnalyseCustomDataGenerator(batch_size=params.batch_size,  root_dir=DATA_ROOT,
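The snippet above opens mid-way through limit_period (Example #5 shows it in full). A minimal standalone check of what it computes, with made-up sample angles:

import numpy as np

def limit_period(val, offset=0.5, period=np.pi):
    return val - np.floor(val / period + offset) * period

# With offset=0.5 and period=pi the result always lands in [-pi/2, pi/2).
angles = np.array([0.0, np.pi / 4, np.pi / 2, np.pi, -3 * np.pi / 4])
print(limit_period(angles))  # -> [0, pi/4, -pi/2, 0, pi/4]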
Example #2
import os
from glob import glob

from point_viz.converter import PointvizConverter

# Parameters, build_point_pillar_graph and KittiDataReader are imported from
# the host project elsewhere in the original script.

DATA_ROOT = "/media/data3/tjtanaa/kitti_dataset/KITTI/object/training"
# MODEL_ROOT = "./logs_Car_Pedestrian_Original_2"
MODEL_ROOT = "./logs"

os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

if __name__ == "__main__":

    # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/custom_prediction_multiprocessing"
    save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/prediction"
    # Initialize and setup output directory.
    Converter = PointvizConverter(save_viz_path)

    params = Parameters()
    pillar_net = build_point_pillar_graph(params)
    pillar_net.load_weights(os.path.join(MODEL_ROOT, "model.h5"))
    # pillar_net.summary()

    data_reader = KittiDataReader()

    lidar_files = sorted(glob(os.path.join(DATA_ROOT, "velodyne",
                                           "*.bin")))[:100]
    print(len(lidar_files))
    print()
    label_files = sorted(glob(os.path.join(DATA_ROOT, "label_2",
                                           "*.txt")))[:100]
    calibration_files = sorted(glob(os.path.join(DATA_ROOT, "calib",
                                                 "*.txt")))[:100]
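KittiDataReader.read_lidar comes from the host project. For reference, a KITTI velodyne scan is a flat float32 buffer with (x, y, z, reflectance) per point; a minimal standalone reader, with a hypothetical path in the usage comment:

import numpy as np

def read_kitti_velodyne(path):
    # KITTI .bin scans are flat float32 buffers, four values per point.
    scan = np.fromfile(path, dtype=np.float32)
    return scan.reshape(-1, 4)

# lidar = read_kitti_velodyne("/path/to/velodyne/000000.bin")  # shape (N, 4)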
Example #3
import os

import numpy as np

from point_viz.converter import PointvizConverter

# PCKittiSingleStagePointwiseDataset is imported from the host project
# elsewhere in the original script.

if __name__ == "__main__":
    # Path to the kitti dataset
    dataset_path = '/media/data3/tjtanaa/kitti_dataset'
    database_path = os.path.join(dataset_path, "gt_database")
    # gt_database_dir = os.path.join(database_path, "train_gt_database_level_Car.pkl")
    aug_dataset = PCKittiSingleStagePointwiseDataset(root_dir=dataset_path,
                                                     split='train',
                                                     npoints=16384,
                                                     classes=['Car'],
                                                     random_select=True,
                                                     gt_database_dir=database_path,
                                                     aug_hard_ratio=0.7)

    print("============ Grab Aug Sample =====================")
    save_viz_path = "/home/tan/tjtanaa/det3d/demos/pc_kitti_single_stage_pointwise_dataset"
    # Initialize and setup output directory.
    Converter = PointvizConverter(save_viz_path)
    for i in range(20):
        sample_info = aug_dataset.get_rpn_sample(i) 
        # sample_info['pts_input'] = pts_input
        # sample_info['pts_rect'] = aug_pts_rect
        # sample_info['pts_features'] = ret_pts_features
        # sample_info['rpn_cls_label'] = rpn_cls_label
        # sample_info['rpn_reg_label'] = rpn_reg_label
        # sample_info['gt_boxes3d'] = aug_gt_boxes3d
        # Pass data and create html files.
        sample_info['pts_rect'][:, 1] *= -1  # mirror the y axis
        coors = sample_info['pts_rect']
        gt = sample_info['gt_boxes3d']
        bbox_params = np.stack([gt[:, 5], gt[:, 3], gt[:, 4],
                                gt[:, 0], -(gt[:, 1] - gt[:, 3] / 2),
                                gt[:, 2], gt[:, 6]], axis=1)
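The column shuffle above is easier to follow on a dummy box. gt_boxes3d rows appear to use the usual KITTI rect-frame layout (x, y, z, h, w, l, ry); a standalone sketch of the same indexing:

import numpy as np

# One dummy box laid out as (x, y, z, h, w, l, ry) in the KITTI rect frame.
gt_boxes3d = np.array([[10.0, 1.6, 20.0, 1.5, 1.7, 4.2, 0.3]])
bbox_params = np.stack([gt_boxes3d[:, 5],                            # l
                        gt_boxes3d[:, 3],                            # h
                        gt_boxes3d[:, 4],                            # w
                        gt_boxes3d[:, 0],                            # x
                        -(gt_boxes3d[:, 1] - gt_boxes3d[:, 3] / 2),  # y: bottom -> centre, mirrored like pts_rect
                        gt_boxes3d[:, 2],                            # z
                        gt_boxes3d[:, 6]], axis=1)                   # ry
print(bbox_params)  # [[ 4.2  1.5  1.7  10.  -0.85  20.  0.3 ]]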
Example #4
# currentdir, grandgrandparentdir and PointvizConverter are set up earlier in
# the original script.
# parser = argparse.ArgumentParser(description='Analyse the kitti dataset dimensions:\
#     mode: 0 := generate the numpy file ')
# parser.add_argument('integers', metavar='N', type=int, nargs='+',
#                     help='an integer for the accumulator')
# parser.add_argument('--sum', dest='accumulate', action='store_const',
#                     const=sum, default=max,
#                     help='sum the integers (default: find the max)')

# args = parser.parse_args()

if __name__ == "__main__":
    print("Current Directory: ", currentdir)
    print("Grand Grand Parent Directory: ", grandgrandparentdir)

    save_viz_path = os.path.join(currentdir, 'visualization/modelnet40')
    Converter = PointvizConverter(save_viz_path)
    # modelnet_40_dataset = ModelNet40(npoint=10000,
    #                      phase='test',
    #                      batch_size=16,
    #                      normal=False,
    #                      augmentation=False,
    #                      gauss_drop=False,
    #                      rotate_setting=[0., 0., np.pi],
    #                      scale_setting=[0.1, 0.1, 0.1],
    #                      normalization='0~1',
    #                      abs=True,
    #                      home="/media/data3/tjtanaa/ModelNet40_Tony/ModelNet40_10k")

    # aug_config = {"rotate_mode": "g",
    #               "rotate_range": np.pi,
    #               "scale_mode": "g",
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "2"


def limit_period(val, offset=0.5, period=np.pi):
    return val - np.floor(val / period + offset) * period

if __name__ == "__main__":

    params = Parameters()
    # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/custom_prediction_multiprocessing"
    # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/input_coordinate_analysis_point_pillar_v2_gt_only"
    # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/input_coordinate_analysis_point_pillar_v2_labels_only"
    save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/pedestrian_input_coordinate_analysis_point_pillar_v2_gt_and_labels"
    # Initialize and setup output directory.
    Converter = PointvizConverter(save_viz_path)

    gt_database_dir = os.path.join(DATA_ROOT, "gt_database")

    validation_gen = AnalyseCustomDataGenerator(batch_size=params.batch_size,
                                                root_dir=DATA_ROOT,
                                                npoints=16384,
                                                split='train_val_test',
                                                random_select=False,
                                                classes=list(
                                                    params.classes_map.keys()))

    for sample_id in validation_gen.sample_id_list:
        print(sample_id)

    # for batch_idx in range(0,20):
    #     [pillars, voxels], [occupancy_, position_, size_, angle_, heading_, classification_], [pts_input, gt_boxes3d, sample] = validation_gen[batch_idx]


    #     set_boxes, confidences = [], []
    #     loop_range = occupancy_.shape[0] if len(occupancy_.shape) == 4 else 1
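The commented loop shows how a batch would be consumed. A sketch of one iteration, assuming the generator returns the three groups listed there (network inputs, training targets, raw sample info); not standalone, it reuses validation_gen from above:

[pillars, voxels], [occupancy_, position_, size_, angle_, heading_,
                    classification_], [pts_input, gt_boxes3d, sample] = validation_gen[0]
print(pillars.shape, voxels.shape, occupancy_.shape)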
Example #6
    # Method excerpt: the self.* attributes are initialised by the generator's
    # __init__ in the host project.
    def __getitem__(self, batch_id: int):
        file_ids = np.arange(batch_id * self.batch_size,
                             (batch_id + 1) * self.batch_size)
        # print("inside getitem")
        pillars = []
        voxels = []
        occupancy = []
        position = []
        size = []
        angle = []
        heading = []
        classification = []
        pts_input = []
        gt_boxes3d = []

        save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/original_processor"
        # Initialize and setup output directory.
        Converter = PointvizConverter(save_viz_path)

        for i in file_ids:
            lidar = self.data_reader.read_lidar(self.lidar_files[i])


            Converter.compile("transform_sample_{}".format(i), coors=lidar[:,:3], intensity=lidar[:,3])

            # For each file, divide the space into an x-y grid to create
            # pillars; voxels_ holds the pillar (grid cell) indices.
            pillars_, voxels_ = self.make_point_pillars(lidar)

            # print(pillars_.shape, voxels_.shape)
            pillars.append(pillars_)
            voxels.append(voxels_)

            if self.label_files is not None:
                label = self.data_reader.read_label(self.label_files[i])
                R, t = self.data_reader.read_calibration(self.calibration_files[i])
                # Labels are transformed into lidar-coordinate bounding boxes.
                # Each label has 7 values: centroid, dimensions and yaw.
                label_transformed = self.transform_labels_into_lidar_coordinates(label, R, t)

                # These definitions can be found in the point_pillars.cpp file.
                # We split a 10-dim vector that contains this information.
                occupancy_, position_, size_, angle_, heading_, classification_ = self.make_ground_truth(
                    label_transformed)

                occupancy.append(occupancy_)
                position.append(position_)
                size.append(size_)
                angle.append(angle_)
                heading.append(heading_)
                classification.append(classification_)
                pts_input.append(lidar)
                gt_boxes3d.append(label_transformed)

        pillars = np.concatenate(pillars, axis=0)
        voxels = np.concatenate(voxels, axis=0)

        if self.label_files is not None:
            occupancy = np.array(occupancy)
            position = np.array(position)
            size = np.array(size)
            angle = np.array(angle)
            heading = np.array(heading)
            classification = np.array(classification)
            return [pillars, voxels], [occupancy, position, size, angle, heading, classification], [pts_input, gt_boxes3d]
        else:
            return [pillars, voxels]
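make_point_pillars is implemented in the project's C++ extension (point_pillars.cpp). As a rough numpy illustration of the x-y grid idea the comments describe, with made-up grid resolution and ranges:

import numpy as np

def pillar_indices(points, x_min=0.0, y_min=-40.0, step=0.16):
    # Map each point's (x, y) onto integer grid coordinates; points that share
    # an (ix, iy) cell belong to the same pillar.
    ix = ((points[:, 0] - x_min) / step).astype(np.int32)
    iy = ((points[:, 1] - y_min) / step).astype(np.int32)
    return np.stack([ix, iy], axis=1)

pts = np.random.uniform([0, -40, -3], [70, 40, 1], size=(100, 3))
print(np.unique(pillar_indices(pts), axis=0).shape)  # number of occupied pillars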
Example #7
if __name__ == "__main__":
    # currentdir, args, orientations and transform() are defined earlier in
    # the original script.
    print("Current Directory: ", currentdir)

    # pc_template_path
    pc_template_path = os.path.join(currentdir, args.pc_template_path)
    pc_bin_list = [
        filename for filename in os.listdir(pc_template_path)
        if '.bin' in filename
    ]
    print("List of raw point clouds: ", pc_bin_list)

    if args.step == 0:

        save_viz_path = os.path.join(
            currentdir, 'visualization/modelnet40_offset_sample/')
        Converter = PointvizConverter(save_viz_path)

        for idx, angle in enumerate(orientations):
            angle = -angle  # to return it to zero
            R = np.array([[np.cos(angle), -np.sin(angle), 0],
                          [np.sin(angle), np.cos(angle), 0],
                          [0, 0, 1]])  # rotation around z axis

            print(angle)
            coors = np.load(os.path.join(
                pc_template_path, pc_bin_list[idx]))[:, :3]  # ignore the rgb
            print(coors.shape)
            coors = transform(coors[:, [2, 0, 1]], R)
            bbox_params = [[0.0001, 0.0001, 0.0001, 10, 10, 10, 0]]
            # pts_coors = coors[i]
            # # pts_coors[:,1] *= -1
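transform() is defined elsewhere in the original script. A minimal stand-in, assuming it simply applies the rotation matrix to each (x, y, z) row:

import numpy as np

def transform(points, R):
    # Rotate an (N, 3) point array; rows are points, so multiply by R^T.
    return points @ R.T

angle = np.pi / 2
R = np.array([[np.cos(angle), -np.sin(angle), 0],
              [np.sin(angle), np.cos(angle), 0],
              [0, 0, 1]])  # rotation around the z axis
print(transform(np.array([[1.0, 0.0, 0.0]]), R))  # ~ [[0., 1., 0.]]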