    def convert_to_ply(cls, root_path, out_path):
        """Convert FacilityDataset to a PLY format compatible with the
        Synthia dataset. Assumes the file structure given by the dataset
        and writes the processed PLY files to `out_path`.
        """

        txtfiles = glob.glob(os.path.join(root_path, '*/*/*.txt'))
        for txtfile in tqdm(txtfiles):
            file_sp = os.path.normpath(txtfile).split(os.path.sep)
            target_path = os.path.join(out_path, file_sp[-3])
            out_file = os.path.join(target_path, file_sp[-2] + '.ply')

            if os.path.exists(out_file):
                print(out_file, ' exists')
                continue

            annotation, _ = os.path.split(txtfile)
            subclouds = glob.glob(os.path.join(annotation,
                                               'Annotations/*.txt'))
            coords, feats, labels = [], [], []
            for inst, subcloud in enumerate(subclouds):
                # Read the txt sub-cloud and parse its xyz and rgb/intensity values.
                xyz, rgbi = cls.read_txt(subcloud)
                _, annotation_subfile = os.path.split(subcloud)
                clsidx = cls.CLASSES.index(annotation_subfile.split('_')[0])

                coords.append(xyz)
                feats.append(rgbi)
                labels.append(np.ones((len(xyz), 1), dtype=np.int32) * clsidx)

            if len(coords) == 0:
                print(txtfile, ' has 0 files.')
            else:
                # Concat
                coords = np.concatenate(coords, 0)
                feats = np.concatenate(feats, 0)
                labels = np.concatenate(labels, 0)
                inds, collabels = ME.utils.sparse_quantize(
                    coords,
                    feats,
                    labels,
                    return_index=True,
                    ignore_label=255,
                    quantization_size=0.0001  # voxel size in coordinate units (0.01 would be 1cm)
                )
                pointcloud = np.concatenate(
                    (coords[inds], feats[inds], collabels[:, None]), axis=1)

                # Write ply file.
                mkdir_p(target_path)
                save_point_cloud(pointcloud,
                                 out_file,
                                 with_label=True,
                                 verbose=True,
                                 intensity=FacilityDatasetConverter.INTENSITY)

    def test_pointcloud(self, pred_dir):
        print('Running full pointcloud evaluation.')
        eval_path = os.path.join(pred_dir, 'fulleval')
        os.makedirs(eval_path, exist_ok=True)
        # Rooms are identified by their area and room id.
        # Test each room independently.
        sys.setrecursionlimit(100000)  # Increase recursion limit for k-d tree.
        hist = np.zeros((self.NUM_LABELS, self.NUM_LABELS))
        for i, data_path in enumerate(self.data_paths):
            room_id = self.get_output_id(i)
            pred = np.load(
                os.path.join(pred_dir, 'pred_%04d_%02d.npy' % (i, 0)))

            # save voxelized pointcloud predictions
            save_point_cloud(np.hstack(
                (pred[:, :3],
                 np.array([SCANNET_COLOR_MAP[i] for i in pred[:, -1]]))),
                             f'{eval_path}/{room_id}_voxel.ply',
                             verbose=False)

            fullply_f = self.data_root / data_path
            query_pointcloud = read_plyfile(fullply_f)
            query_xyz = query_pointcloud[:, :3]
            query_label = query_pointcloud[:, -1]
            # Run test for each room.
            pred_tree = spatial.KDTree(pred[:, :3], leafsize=500)
            _, result = pred_tree.query(query_xyz)
            ptc_pred = pred[result, 3].astype(int)
            # Save prediction in txt format for submission.
            np.savetxt(f'{eval_path}/{room_id}.txt', ptc_pred, fmt='%i')
            # Save prediction as a colored pointcloud for visualization.
            save_point_cloud(np.hstack(
                (query_xyz, np.array([SCANNET_COLOR_MAP[i]
                                      for i in ptc_pred]))),
                             f'{eval_path}/{room_id}.ply',
                             verbose=False)
            # Evaluate IoU.
            if self.IGNORE_LABELS is not None:
                ptc_pred = np.array([self.label_map[x] for x in ptc_pred],
                                    dtype=int)
                query_label = np.array(
                    [self.label_map[x] for x in query_label], dtype=int)
            hist += fast_hist(ptc_pred, query_label, self.NUM_LABELS)
        ious = per_class_iu(hist) * 100
        print('mIoU: ' + str(np.nanmean(ious)) + '\n'
              'Class names: ' + ', '.join(CLASS_LABELS) + '\n'
              'IoU: ' + ', '.join(np.round(ious, 2).astype(str)))
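fast_hist and per_class_iu are not defined in this snippet; a minimal sketch assuming the standard confusion-matrix-based IoU definitions (the repository's actual helpers may differ in detail):

import numpy as np

def fast_hist(pred, label, n):
    # Accumulate an n x n confusion matrix, ignoring labels outside [0, n).
    k = (label >= 0) & (label < n)
    return np.bincount(n * label[k].astype(int) + pred[k].astype(int),
                       minlength=n ** 2).reshape(n, n)

def per_class_iu(hist):
    # Per-class IoU = TP / (TP + FP + FN); rows are ground truth, columns predictions.
    return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))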
Example #3
 def visualize_groundtruth(self, datum, iteration):
     super().visualize_groundtruth(datum, iteration)
     coords = datum['coords'].numpy()
     batch_size = coords[:, 0].max() + 1
     output_path = pathlib.Path(self.config.visualize_path)
     for i in range(batch_size):
         coords_mask = coords[:, 0] == i
         coords_b = coords[coords_mask, 1:]
         instance_idxs = datum['instance_target'].cpu().numpy()
         instance_mask = instance_idxs != self.config.ignore_label
         if np.any(instance_mask):
             inst_ptc = pc_utils.colorize_pointcloud(
                 coords_b[instance_mask],
                 instance_idxs[instance_mask],
                 repeat_color=True)
             inst_dest = output_path / ('visualize_%04d_inst_gt.ply' %
                                        iteration)
             pc_utils.save_point_cloud(inst_ptc, inst_dest)
Example #4
def handle_process(path):
    f = Path(path.split(',')[0])
    phase_out_path = Path(path.split(',')[1])
    pointcloud = read_plyfile(f)
    # Make sure the alpha channel is constant (carries no information).
    assert np.unique(pointcloud[:, -1]).size == 1
    # Load label file.
    label_f = f.parent / (f.stem + '.labels' + f.suffix)
    if label_f.is_file():
        label = read_plyfile(label_f)
        # Sanity check that the pointcloud and its label file have the same vertices.
        assert pointcloud.shape[0] == label.shape[0]
        assert np.allclose(pointcloud[:, :3], label[:, :3])
    else:  # Label may not exist in test case.
        label = np.zeros_like(pointcloud)
    out_f = phase_out_path / (f.name[:-len(POINTCLOUD_FILE)] + f.suffix)
    processed = np.hstack((pointcloud[:, :6], np.array([label[:, -1]]).T))
    save_point_cloud(processed, out_f, with_label=True, verbose=False)
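read_plyfile is assumed here to load a PLY file's vertex data into an (N, C) array with the label in the last column; a minimal sketch using the plyfile package (the column order simply following the property order in the file is an assumption):

import numpy as np
from plyfile import PlyData

def read_plyfile(filepath):
    # Stack every per-vertex property (x, y, z, red, green, blue, alpha/label, ...)
    # into a single N x C float array, preserving the property order in the file.
    with open(filepath, 'rb') as f:
        plydata = PlyData.read(f)
    vertex = plydata['vertex']
    return np.array([vertex[prop.name] for prop in vertex.properties], dtype=np.float64).T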
Example #5
 def visualize_predictions(self, datum, output, iteration):
     super().visualize_predictions(datum, output, iteration)
     batch_size = datum['coords'].numpy()[:, 0].max() + 1
     output_path = pathlib.Path(self.config.visualize_path)
     for i in range(batch_size):
         if output.get('pred_instances') is not None:
             inst_coords, inst_idxs = [], []
             for j, (mask_pred, _,
                     _) in enumerate(output.get('pred_instances')):
                 inst_coords.append(datum['coords'][:,
                                                    1:].numpy()[mask_pred])
                 inst_idxs.append(np.ones(mask_pred.sum(), dtype=int) * j)
             inst_ptc = pc_utils.colorize_pointcloud(
                 np.vstack(inst_coords),
                 np.concatenate(inst_idxs),
                 repeat_color=True)
             inst_dest = output_path / ('visualize_%04d_inst_pred.ply' %
                                        iteration)
             pc_utils.save_point_cloud(inst_ptc, inst_dest)
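save_point_cloud / pc_utils.save_point_cloud is called throughout these snippets to dump an xyz, xyz+rgb, or xyz+rgb+label array to a PLY file; a rough sketch of such a writer using the plyfile package (the signature, supported layouts, and extra options such as intensity are assumptions and are not fully reproduced here):

import numpy as np
from plyfile import PlyData, PlyElement

def save_point_cloud(points, filename, with_label=False, verbose=True):
    # points: (N, 3) xyz, (N, 6) xyz + rgb, or (N, 7) xyz + rgb + label.
    fields = [('x', 'f4'), ('y', 'f4'), ('z', 'f4')]
    if points.shape[1] >= 6:
        fields += [('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
    if with_label:
        fields += [('label', 'u4')]
    vertices = np.empty(points.shape[0], dtype=fields)
    for i, (name, _) in enumerate(fields):
        vertices[name] = points[:, i]
    PlyData([PlyElement.describe(vertices, 'vertex')]).write(str(filename))
    if verbose:
        print(f'Saved {points.shape[0]} points to {filename}.')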
Example #6
def visualize_results(coords, input, target, upsampled_pred, config, iteration):
  # Get filter for valid predictions in the first batch.
  target_batch = coords[:, 3].numpy() == 0
  input_xyz = coords[:, :3].numpy()
  target_valid = target.numpy() != 255
  target_pred = np.logical_and(target_batch, target_valid)
  target_nonpred = np.logical_and(target_batch, ~target_valid)
  ptc_nonpred = np.hstack((input_xyz[target_nonpred], np.zeros((np.sum(target_nonpred), 3))))
  # Unwrap file index if tested with rotation.
  file_iter = iteration
  if config.test_rotation >= 1:
    file_iter = iteration // config.test_rotation
  # Create directory to save visualization results.
  os.makedirs(config.visualize_path, exist_ok=True)
  # Label visualization in RGB.
  xyzlabel = colorize_pointcloud(input_xyz[target_pred], upsampled_pred[target_pred])
  xyzlabel = np.vstack((xyzlabel, ptc_nonpred))
  filename = '_'.join([config.dataset, config.model, 'pred', '%04d.ply' % file_iter])
  save_point_cloud(xyzlabel, os.path.join(config.visualize_path, filename), verbose=False)
  # RGB input values visualization.
  xyzrgb = np.hstack((input_xyz[target_batch], input[:, :3].cpu().numpy()[target_batch]))
  filename = '_'.join([config.dataset, config.model, 'rgb', '%04d.ply' % file_iter])
  save_point_cloud(xyzrgb, os.path.join(config.visualize_path, filename), verbose=False)
  # Ground-truth visualization in RGB.
  xyzgt = colorize_pointcloud(input_xyz[target_pred], target.numpy()[target_pred])
  xyzgt = np.vstack((xyzgt, ptc_nonpred))
  filename = '_'.join([config.dataset, config.model, 'gt', '%04d.ply' % file_iter])
  save_point_cloud(xyzgt, os.path.join(config.visualize_path, filename), verbose=False)
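colorize_pointcloud maps integer labels to RGB colors and returns the points as an (N, 6) xyz+rgb array; a minimal sketch with an arbitrary palette (the original helper uses its own fixed color map, and the exact semantics of repeat_color are not reproduced here):

import numpy as np

def colorize_pointcloud(xyz, label, repeat_color=False, palette_size=256):
    # repeat_color is accepted only for compatibility with the call sites above.
    # Each integer label id is mapped to a color, wrapping around the palette.
    palette = np.random.RandomState(0).randint(0, 256, size=(palette_size, 3))
    colors = palette[label.astype(int) % palette_size]
    return np.hstack((xyz, colors))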
Example #7
 def visualize_predictions(self, datum, output, iteration):
   coords = datum['coords'].numpy()
   batch_size = coords[:, 0].max() + 1
   output_path = pathlib.Path(self.config.visualize_path)
   output_path.mkdir(exist_ok=True)
   for i in range(batch_size):
     # Visualize RGB input.
     coords_mask = coords[:, 0] == i
     coords_b = coords[coords_mask, 1:]
     if datum['input'].shape[1] > 3:
       rgb_b = ((datum['input'][:, :3] + 0.5) * 255).numpy()
     else:
       rgb_b = ((datum['input'].repeat(1, 3) - datum['input'].min())
                / (datum['input'].max() - datum['input'].min()) * 255).numpy()
     rgb_ply_dest = output_path / ('visualize_%04d_rgb.ply' % iteration)
     pc_utils.save_point_cloud(np.hstack((coords_b, rgb_b)), rgb_ply_dest)
     # Visualize positive anchor centers.
     positive_anchors_mask = F.softmax(output['rpn_class_logits'].cpu(), dim=2)[0, :, 1] > 0.5
     positive_anchors = datum['anchors'][torch.where(positive_anchors_mask)]
     if len(positive_anchors.shape) == 1:
       positive_anchors = np.array([positive_anchors])
     if positive_anchors.shape[0] > 0:
       positive_anchors = np.unique((positive_anchors[:, 3:] + positive_anchors[:, :3]) / 2,
                                    axis=0)
       anchors_all_ply_dest = output_path / ('visualize_%04d_anchors_pred.ply' % iteration)
       pc_utils.save_point_cloud(positive_anchors, anchors_all_ply_dest)
     # Visualize region proposals.
     rpn_rois = output['rpn_rois'][i].cpu().numpy()
     rpn_mask = ~np.all(rpn_rois == 0, 1)
     rpn_rois = rpn_rois[rpn_mask][:100]
     rpn_rois = detection_utils.unnormalize_boxes(rpn_rois, self.config.max_ptc_size)
     if output.get('rpn_rois_rotation') is None:
       rpn_rois_ptc = pc_utils.visualize_bboxes(rpn_rois)
     else:
       rpn_rois_rotation = output['rpn_rois_rotation'][i].cpu().numpy()[rpn_mask][:100]
       rpn_rois = np.hstack((rpn_rois, rpn_rois_rotation[:, None]))
       rpn_rois_ptc = pc_utils.visualize_bboxes(rpn_rois, bbox_param='xyzxyzr')
     rpn_rois_ply_dest = output_path / ('visualize_%04d_rpn_rois_pred.ply' % iteration)
     pc_utils.save_point_cloud(rpn_rois_ptc, rpn_rois_ply_dest)
     # Visualize final detection.
     detection_pred = output['detection'][i]
     if detection_pred.shape[1] == 8:
       detection_ptc = pc_utils.visualize_bboxes(detection_pred[:, :6], detection_pred[:, 6])
     elif detection_pred.shape[1] == 9:
       detection_ptc = pc_utils.visualize_bboxes(detection_pred[:, :7], detection_pred[:, 7],
                                                 bbox_param='xyzxyzr')
     else:
       raise ValueError('Unknown bounding box output.')
     detection_dest = output_path / ('visualize_%04d_detection_pred.ply' % iteration)
     pc_utils.save_point_cloud(detection_ptc, detection_dest)
Example #8
 def visualize_groundtruth(self, datum, iteration):
   coords = datum['coords'].numpy()
   batch_size = coords[:, 0].max() + 1
   output_path = pathlib.Path(self.config.visualize_path)
   output_path.mkdir(exist_ok=True)
   for i in range(batch_size):
     # Visualize ground-truth positive anchors.
     anchors_gt = datum['anchors'][torch.where(datum['rpn_match'].cpu() == 1)[1]]
     anchors_gt_ptc = pc_utils.visualize_bboxes(anchors_gt)
     anchors_gt_ply_dest = output_path / ('visualize_%04d_anchors_gt.ply' % iteration)
     pc_utils.save_point_cloud(anchors_gt_ptc, anchors_gt_ply_dest)
     # Visualize center location of all ground-truth anchors.
     anchors_all = np.unique((datum['anchors'][:, 3:] + datum['anchors'][:, :3]) / 2, axis=0)
     anchors_all_ply_dest = output_path / ('visualize_%04d_all_anchors_centers.ply' % iteration)
     pc_utils.save_point_cloud(anchors_all, anchors_all_ply_dest)
     # Visualize ground-truth positive anchors.
     if datum.get('bboxes_rotations') is None:
       bboxes_gt = pc_utils.visualize_bboxes(datum['bboxes_coords'][i], datum['bboxes_cls'][i])
     else:
       bboxes_gt = np.hstack((datum['bboxes_coords'][i], datum['bboxes_rotations'][i][:, None]))
       bboxes_gt = pc_utils.visualize_bboxes(bboxes_gt, datum['bboxes_cls'][i],
                                             bbox_param='xyzxyzr')
     bboxes_gt_ply_dest = output_path / ('visualize_%04d_bboxes_gt.ply' % iteration)
     pc_utils.save_point_cloud(bboxes_gt, bboxes_gt_ply_dest)
     # Visualize reconstructed ground-truth rpn targets.
     rpn_bbox_anchors = datum['anchors'][(datum['rpn_match'].flatten() == 1).cpu().numpy()]
     rpn_bbox_anchors = detection_utils.normalize_boxes(rpn_bbox_anchors, self.config.max_ptc_size)
     rpn_bbox_target = datum['rpn_bbox'].reshape(-1, 6)
     rpn_bbox_mask = ~torch.all(rpn_bbox_target == 0, 1)
     rpn_bbox_target = rpn_bbox_target[rpn_bbox_mask].cpu().numpy()
     rpn_bbox_target *= np.reshape(self.config.rpn_bbox_std, (1, len(self.config.rpn_bbox_std)))
     rpn_bbox_target = detection_utils.apply_box_deltas(torch.from_numpy(rpn_bbox_anchors),
                                                        torch.from_numpy(rpn_bbox_target),
                                                        self.config.normalize_bbox)
     rpn_bbox_target = detection_utils.unnormalize_boxes(rpn_bbox_target.numpy(),
                                                         self.config.max_ptc_size)
     if datum.get('rpn_rotation') is None:
       bboxes_gt_recon = pc_utils.visualize_bboxes(rpn_bbox_target)
     else:
       rpn_rot_target = datum['rpn_rotation'][i][rpn_bbox_mask].cpu().numpy()
       bboxes_gt_recon = np.hstack((rpn_bbox_target, rpn_rot_target[:, None]))
       bboxes_gt_recon = pc_utils.visualize_bboxes(bboxes_gt_recon, bbox_param='xyzxyzr')
     bboxes_gt_recon_ply_dest = output_path / ('visualize_%04d_bboxes_gt_recon.ply' % iteration)
     pc_utils.save_point_cloud(bboxes_gt_recon, bboxes_gt_recon_ply_dest)
Example #9
      crop_bound = np.maximum(np.ceil(xyz_range / crop_half), 2).astype(int)
      crop_min = xyz.min(0) - (crop_bound * crop_half - xyz_range) / 2

    all_points = np.empty((0, 3))
    if CROP_SIZE > 0:
      for i, crop_idx in enumerate(product(*map(range, crop_bound - 1))):
        curr_crop_min = crop_min + np.array(crop_idx) * crop_half
        mask = np.logical_and(np.all(xyz > curr_crop_min, 1),
                              np.all(xyz <= curr_crop_min + crop_half * 2, 1))
        if not np.any(mask):
          continue
        processed = np.hstack((pointcloud[mask, :6], np.array([label[mask, -1]]).T))
        # Check if the crop size is correctly applied.
        assert np.all((processed.max(0) - processed.min(0))[:3] < CROP_SIZE)
        out_f = phase_out_path / (f.name[:-len(POINTCLOUD_FILE)] + f'_{i:02}' + f.suffix)
        save_point_cloud(processed, out_f, with_label=True, verbose=False)
        all_points = np.vstack((all_points, xyz[mask]))
    else:
      processed = np.hstack((pointcloud[:, :6], np.array([label[:, -1]]).T))

    # Check that all points are included in the crops.
    assert set(tuple(l) for l in all_points.tolist()) == set(tuple(l) for l in xyz.tolist())

# Split trainval data to train/val according to scene.
trainval_files = [f.name for f in (SCANNET_OUT_PATH / TRAIN_DEST).glob('*.ply')]
trainval_scenes = list(set(f.split('_')[0] for f in trainval_files))
shuffle(trainval_scenes)
num_train = int(len(trainval_scenes) * 0.9)  # NOTE: 0.9 is an assumed train/val ratio; adjust as needed.
train_scenes = trainval_scenes[:num_train]
val_scenes = trainval_scenes[num_train:]
Example #10
        xyz = pointcloud[:, :3]
        pool = ProcessPoolExecutor(max_workers=9)
        all_points = np.empty((0, 3))
        out_f = k / (f.name[:-len(POINTCLOUD_FILE)] + f.suffix)
        processed = np.hstack((pointcloud[:, :6], np.array([label[:, -1]]).T))
        save_point_cloud(processed, out_f, with_label=True, verbose=False)
path_list = []
for out_path, in_path in SUBSETS.items():
    phase_out_path = SCANNET_OUT_PATH / out_path
    phase_out_path.mkdir(parents=True, exist_ok=True)
    for f in (SCANNET_RAW_PATH / in_path).glob('*/*' + POINTCLOUD_FILE):
        path_list.append(str(f) + ',' + str(phase_out_path))
print(path_list)
pool = ProcessPoolExecutor(max_workers=20)
result = list(pool.map(handle_process, path_list))
for i in result:
    pass

"""

# Fix bug in the data.
for files, bug_index in BUGS.items():
    print(files)

    for f in SCANNET_OUT_PATH.glob(files):
        pointcloud = read_plyfile(f)
        bug_mask = pointcloud[:, -1] == bug_index
        print(f'Fixing {f} bugged label {bug_index} x {bug_mask.sum()}')
        pointcloud[bug_mask, -1] = 0
        save_point_cloud(pointcloud, f, with_label=True, verbose=False)
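BUGS is not shown above; the loop expects a mapping from a glob pattern (relative to SCANNET_OUT_PATH) to the bugged label value that should be reset to 0, e.g. (hypothetical entries for illustration only):

BUGS = {
    'train/scene_example_00.ply': 50,
    'val/scene_example_01.ply': 50,
}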