Example No. 1
 def __call__(self, scan):
     prob = random.random()
     if prob > self.augm_prob:
         return scan
     else:
         diff = random.uniform(-self.max_height_diff, self.max_height_diff)
         # height is the z axis in the LiDAR frame, so shift the third coordinate
         points = scan.points + np.array([0, 0, diff])
         # recompute the elevation angles from the shifted points
         theta = np.arcsin(points[:, 2] / np.linalg.norm(points, axis=1))
         fov_up = np.min([
             np.max(theta) * 180 / np.pi,
             scan.proj_fov_up + self.max_angle_diff
         ])
         fov_down = np.max([
             np.min(theta) * 180 / np.pi,
             scan.proj_fov_down - self.max_angle_diff
         ])
         new_laserscan = SemLaserScan(self.color_map,
                                      project=scan.project,
                                      H=scan.proj_H,
                                      W=scan.proj_W,
                                      fov_up=fov_up,
                                      fov_down=fov_down)
         new_laserscan.set_points(points, scan.remissions)
         return new_laserscan
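For orientation, a minimal sketch of the transform object this __call__ could belong to. The attribute names (color_map, augm_prob, max_height_diff, max_angle_diff) come from the example itself; the class name and constructor defaults are assumptions, not the original code.

class RandomHeightShift:
    # hypothetical wrapper for the __call__ above (a sketch, not the original class)
    def __init__(self, color_map, augm_prob=0.5,
                 max_height_diff=0.25, max_angle_diff=1.0):
        self.color_map = color_map              # forwarded to SemLaserScan
        self.augm_prob = augm_prob              # probability of applying the shift
        self.max_height_diff = max_height_diff  # vertical shift range in meters
        self.max_angle_diff = max_angle_diff    # extra FOV slack in degrees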
Example No. 2
    def __getitem__(self, index):
        frame_num = self.frame_num
        proj_colors_list = []
        path_seq_list = []
        path_name_list = []

        for idx in range(index * frame_num, index * frame_num + frame_num):
            # get item in tensor shape
            scan_file = self.scan_files[idx]
            if self.gt:
                label_file = self.label_files[idx]

            # open a semantic laserscan
            if self.gt:
                scan = SemLaserScan(self.color_map,
                                    project=True,
                                    H=self.sensor_img_H,
                                    W=self.sensor_img_W,
                                    fov_up=self.sensor_fov_up,
                                    fov_down=self.sensor_fov_down)
            else:
                scan = LaserScan(project=True,
                                 H=self.sensor_img_H,
                                 W=self.sensor_img_W,
                                 fov_up=self.sensor_fov_up,
                                 fov_down=self.sensor_fov_down)

            # open and obtain scan
            scan.open_scan(scan_file)
            if self.gt:
                scan.open_label(label_file)
                # map unused classes to used classes (also for projection)
                scan.sem_label = self.map(scan.sem_label, self.learning_map)
                mask = scan.proj_idx >= 0
                scan.proj_sem_color[mask] = scan.sem_color_lut[scan.sem_label[
                    scan.proj_idx[mask]]]

            if self.gt:
                proj_colors = torch.from_numpy(scan.proj_sem_color).clone()
            else:
                proj_colors = []
            # get name and sequence
            path_norm = os.path.normpath(scan_file)
            path_split = path_norm.split(os.sep)
            path_seq = path_split[-3]
            path_name = path_split[-1].replace(".bin", ".label")
            if self.gt:
                proj_colors_list.append(proj_colors.unsqueeze(0))
            path_seq_list.append(path_seq)
            path_name_list.append(path_name)

        if self.gt:
            proj_colors_seq = torch.cat(proj_colors_list, dim=0)
        else:
            proj_colors_seq = []

        # return
        return proj_colors_seq, path_seq_list, path_name_list
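Unlike the single-frame loaders in the later examples, this __getitem__ reads frame_num consecutive scans per index and stacks their semantic color projections with torch.cat, so with ground truth enabled proj_colors_seq has shape (frame_num, H, W, 3); without labels it returns an empty list alongside the path metadata.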
Example No. 3
 def __call__(self, scan):
     v_prob = random.random()
     h_prob = random.random()
     if v_prob > self.augm_prob and h_prob > self.augm_prob:
         return scan
     else:
         v_flip = True
         h_flip = True
         if v_prob > self.augm_prob:
             v_flip = False
         if h_prob > self.augm_prob:
             h_flip = False
         new_laserscan = SemLaserScan(self.color_map,
                                      project=scan.project,
                                      H=scan.proj_H,
                                      W=scan.proj_W,
                                      fov_up=scan.proj_fov_up,
                                      fov_down=scan.proj_fov_down,
                                      h_flip=h_flip,
                                      v_flip=v_flip)
         new_laserscan.set_points(scan.points, scan.remissions)
         return new_laserscan
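Both flips are decided by independent draws, so each one is enabled with probability augm_prob, and the early return covers only the case where neither fires. The whole branch could be written more compactly as v_flip = v_prob <= self.augm_prob and h_flip = h_prob <= self.augm_prob.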
Example No. 4
def eval(test_sequences, splits, pred):
    # get scan paths
    scan_names = []
    for sequence in test_sequences:
        sequence = '{0:02d}'.format(int(sequence))
        scan_paths = os.path.join(FLAGS.dataset, "sequences", str(sequence),
                                  "velodyne")
        # populate the scan names
        seq_scan_names = [
            os.path.join(dp, f)
            for dp, dn, fn in os.walk(os.path.expanduser(scan_paths))
            for f in fn if ".bin" in f
        ]
        seq_scan_names.sort()
        scan_names.extend(seq_scan_names)
    # print(scan_names)

    # get label paths
    label_names = []
    for sequence in test_sequences:
        sequence = '{0:02d}'.format(int(sequence))
        label_paths = os.path.join(FLAGS.dataset, "sequences", str(sequence),
                                   "labels")
        # populate the label names
        seq_label_names = [
            os.path.join(dp, f)
            for dp, dn, fn in os.walk(os.path.expanduser(label_paths))
            for f in fn if ".label" in f
        ]
        seq_label_names.sort()
        label_names.extend(seq_label_names)
    # print(label_names)

    # get predictions paths
    pred_names = []
    for sequence in test_sequences:
        sequence = '{0:02d}'.format(int(sequence))
        pred_paths = os.path.join(FLAGS.predictions, "sequences", sequence,
                                  "predictions")
        # populate the prediction names
        seq_pred_names = [
            os.path.join(dp, f)
            for dp, dn, fn in os.walk(os.path.expanduser(pred_paths))
            for f in fn if ".label" in f
        ]
        seq_pred_names.sort()
        pred_names.extend(seq_pred_names)
    # print(pred_names)

    # check that I have the same number of files
    print("labels: ", len(label_names))
    # for label_name in label_names:
    #     print(label_name)
    print("predictions: ", len(pred_names))
    # for pred_name in pred_names:
    #     print(pred_name)

    assert (len(label_names) == len(scan_names)
            and len(label_names) == len(pred_names))

    print("Evaluating sequences: ")
    # open each file, get the tensor, and make the iou comparison
    for scan_file, label_file, pred_file in zip(scan_names, label_names,
                                                pred_names):
        print("evaluating label ", label_file, "with", pred_file)
        # open label
        label = SemLaserScan(project=False)
        label.open_scan(scan_file)
        label.open_label(label_file)
        u_label_sem = remap_lut[label.sem_label]  # remap to xentropy format
        if FLAGS.limit is not None:
            u_label_sem = u_label_sem[:FLAGS.limit]

        # open prediction
        pred = SemLaserScan(project=False)
        pred.open_scan(scan_file)
        pred.open_label(pred_file)
        u_pred_sem = remap_lut[pred.sem_label]  # remap to xentropy format
        if FLAGS.limit is not None:
            u_pred_sem = u_pred_sem[:FLAGS.limit]

        # add single scan to evaluation
        evaluator.addBatch(u_pred_sem, u_label_sem)

    # when I am done, print the evaluation
    m_accuracy = evaluator.getacc()
    m_jaccard, class_jaccard = evaluator.getIoU()

    print('{split} set:\n'
          'Acc avg {m_accuracy:.3f}\n'
          'IoU avg {m_jaccard:.3f}'.format(split=splits,
                                           m_accuracy=m_accuracy,
                                           m_jaccard=m_jaccard))

    save_to_log(
        FLAGS.predictions, 'pred.txt', '{split} set:\n'
        'Acc avg {m_accuracy:.3f}\n'
        'IoU avg {m_jaccard:.3f}'.format(split=splits,
                                         m_accuracy=m_accuracy,
                                         m_jaccard=m_jaccard))
    # print also classwise
    for i, jacc in enumerate(class_jaccard):
        if i not in ignore:
            print('IoU class {i:} [{class_str:}] = {jacc:.3f}'.format(
                i=i, class_str=class_strings[class_inv_remap[i]], jacc=jacc))
            save_to_log(
                FLAGS.predictions, 'pred.txt',
                'IoU class {i:} [{class_str:}] = {jacc:.3f}'.format(
                    i=i,
                    class_str=class_strings[class_inv_remap[i]],
                    jacc=jacc))

    # print for spreadsheet
    print("*" * 80)
    print("below can be copied straight for paper table")
    for i, jacc in enumerate(class_jaccard):
        if i not in ignore:
            sys.stdout.write('{jacc:.3f}'.format(jacc=jacc.item()))
            sys.stdout.write(",")
    sys.stdout.write('{jacc:.3f}'.format(jacc=m_jaccard.item()))
    sys.stdout.write(",")
    sys.stdout.write('{acc:.3f}'.format(acc=m_accuracy.item()))
    sys.stdout.write('\n')
    sys.stdout.flush()
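This function indexes remap_lut with raw SemanticKITTI label ids to obtain cross-entropy training ids. For reference, such a lookup table is typically built from the learning_map in the dataset config; a minimal sketch, assuming the standard semantic-kitti.yaml layout:

import numpy as np
import yaml

CFG = yaml.safe_load(open("config/labels/semantic-kitti.yaml", 'r'))
learning_map = CFG["learning_map"]

# flat LUT: raw label id -> training (xentropy) id; the +100 of headroom
# tolerates stray ids that may appear in label files
maxkey = max(learning_map.keys())
remap_lut = np.zeros((maxkey + 100), dtype=np.int32)
remap_lut[list(learning_map.keys())] = list(learning_map.values())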
Example No. 5
import argparse

import yaml

from common.laserscan import LaserScan, SemLaserScan

parser = argparse.ArgumentParser("./visualize.py")
parser.add_argument(
    '--config',
    '-c',
    type=str,
    required=False,
    default="config/labels/semantic-kitti.yaml",
    help='Dataset config file. Defaults to %(default)s',
)
FLAGS, unparsed = parser.parse_known_args()

CFG = yaml.safe_load(open(FLAGS.config, 'r'))
color_dict = CFG["color_map"]
scan = SemLaserScan(color_dict, project=False)

scan_root = r'G:\DataSet\semanticKITTI\dataset\sequences'
gt_label_root = r'G:\DataSet\semanticKITTI\dataset\sequences'
gt_img_root = r'G:\DataSet\semanticKITTI\visualization\groundtruth\sequences'

segv2_label_root = r'G:\DataSet\semanticKITTI\prediction\SqueezeSegV2\Static\sequences'
segv2_img_root = r'G:\DataSet\semanticKITTI\visualization\SqueezeSegV2\Static\sequences'

segv2_sap1_label_root = r'G:\DataSet\semanticKITTI\prediction\SqueezeSegV2\SAP-1\sequences'
segv2_sap1_img_root = r'G:\DataSet\semanticKITTI\visualization\SqueezeSegV2\SAP-1\sequences'

segv2_asap1_label_root = r'G:\DataSet\semanticKITTI\prediction\SqueezeSegV2\ASAP-1\sequences'
segv2_asap1_img_root = r'G:\DataSet\semanticKITTI\visualization\SqueezeSegV2\ASAP-1\sequences'

segv2_asap2_label_root = r'G:\DataSet\semanticKITTI\prediction\SqueezeSegV2\ASAP-2\sequences'
Example No. 6
    def __getitem__(self, index):
        # get item in tensor shape
        scan_file = self.scan_files[index]
        if self.gt:
            label_file = self.label_files[index]

        # open a semantic laserscan
        if self.gt:
            scan = SemLaserScan(self.color_map,
                                project=True,
                                H=self.sensor_img_H,
                                W=self.sensor_img_W,
                                fov_up=self.sensor_fov_up,
                                fov_down=self.sensor_fov_down)
        else:
            scan = LaserScan(project=True,
                             H=self.sensor_img_H,
                             W=self.sensor_img_W,
                             fov_up=self.sensor_fov_up,
                             fov_down=self.sensor_fov_down)

        # open and obtain scan
        scan.open_scan(scan_file)
        if self.gt:
            scan.open_label(label_file)
            # map unused classes to used classes (also for projection)
            scan.sem_label = self.map(scan.sem_label, self.learning_map)
            scan.proj_sem_label = self.map(scan.proj_sem_label,
                                           self.learning_map)

        # make a tensor of the uncompressed data (with the max num points)
        unproj_n_points = scan.points.shape[0]
        unproj_xyz = torch.full((self.max_points, 3), -1.0, dtype=torch.float)
        unproj_xyz[:unproj_n_points] = torch.from_numpy(scan.points)
        unproj_range = torch.full([self.max_points], -1.0, dtype=torch.float)
        unproj_range[:unproj_n_points] = torch.from_numpy(scan.unproj_range)
        unproj_remissions = torch.full([self.max_points],
                                       -1.0,
                                       dtype=torch.float)
        unproj_remissions[:unproj_n_points] = torch.from_numpy(scan.remissions)
        if self.gt:
            unproj_labels = torch.full([self.max_points],
                                       -1.0,
                                       dtype=torch.int32)
            unproj_labels[:unproj_n_points] = torch.from_numpy(scan.sem_label)
        else:
            unproj_labels = []

        # get points and labels
        proj_range = torch.from_numpy(scan.proj_range).clone()
        proj_xyz = torch.from_numpy(scan.proj_xyz).clone()
        proj_remission = torch.from_numpy(scan.proj_remission).clone()
        proj_mask = torch.from_numpy(scan.proj_mask)
        if self.gt:
            proj_labels = torch.from_numpy(scan.proj_sem_label).clone()
            proj_labels = proj_labels * proj_mask
        else:
            proj_labels = []
        proj_x = torch.full([self.max_points], -1, dtype=torch.long)
        proj_x[:unproj_n_points] = torch.from_numpy(scan.proj_x)
        proj_y = torch.full([self.max_points], -1, dtype=torch.long)
        proj_y[:unproj_n_points] = torch.from_numpy(scan.proj_y)
        proj = torch.cat([
            proj_range.unsqueeze(0).clone(),
            proj_xyz.clone().permute(2, 0, 1),
            proj_remission.unsqueeze(0).clone()
        ])
        proj = (proj - self.sensor_img_means[:, None, None]
                ) / self.sensor_img_stds[:, None, None]
        proj = proj * proj_mask.float()

        # get name and sequence
        path_norm = os.path.normpath(scan_file)
        path_split = path_norm.split(os.sep)
        path_seq = path_split[-3]
        path_name = path_split[-1].replace(".bin", ".label")
        # print("path_norm: ", path_norm)
        # print("path_seq", path_seq)
        # print("path_name", path_name)

        # return
        return proj, proj_mask, proj_labels, unproj_labels, path_seq, path_name, proj_x, proj_y, proj_range, unproj_range, proj_xyz, unproj_xyz, proj_remission, unproj_remissions, unproj_n_points
Example No. 7
        pred_names.extend(seq_pred_names)
    # print(pred_names)

    # check that I have the same number of files
    # print("labels: ", len(label_names))
    # print("predictions: ", len(pred_names))
    assert (len(label_names) == len(scan_names)
            and len(label_names) == len(pred_names))

    print("Evaluating sequences: ")
    # open each file, get the tensor, and make the iou comparison
    for scan_file, label_file, pred_file in zip(scan_names, label_names,
                                                pred_names):
        print("evaluating label ", label_file, "with", pred_file)
        # open label
        label = SemLaserScan(project=False)
        label.open_scan(scan_file)
        label.open_label(label_file)
        u_label_sem = remap_lut[label.sem_label]  # remap to xentropy format
        if FLAGS.limit is not None:
            u_label_sem = u_label_sem[:FLAGS.limit]

        # open prediction
        pred = SemLaserScan(project=False)
        pred.open_scan(scan_file)
        pred.open_label(pred_file)
        u_pred_sem = remap_lut[pred.sem_label]  # remap to xentropy format
        if FLAGS.limit is not None:
            u_pred_sem = u_pred_sem[:FLAGS.limit]

        # add single scan to evaluation
        evaluator.addBatch(u_pred_sem, u_label_sem)
Example No. 8
      quit()
    # populate the pointclouds
    label_names = [os.path.join(dp, f) for dp, dn, fn in os.walk(
        os.path.expanduser(label_paths)) for f in fn]
    label_names.sort()

    # check that there are same amount of labels and scans
    if not FLAGS.ignore_safety:
      assert(len(label_names) == len(scan_names))

  # create a scan
  if FLAGS.ignore_semantics:
    scan = LaserScan(project=True)  # project all opened scans to spheric proj
  else:
    color_dict = CFG["color_map"]
    scan = SemLaserScan(color_dict, project=True)

  # create a visualizer
  semantics = not FLAGS.ignore_semantics
  if not semantics:
    label_names = None
  vis = LaserScanVis(scan=scan,
                     scan_names=scan_names,
                     label_names=label_names,
                     offset=FLAGS.offset,
                     semantics=semantics,
                     instances=False)

  # print instructions
  print("To navigate:")
  print("\tb: back (previous scan)")
Example No. 9
    # get device
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    # define the border mask
    bm = borderMask(300, device, FLAGS.border, FLAGS.conn, FLAGS.exclude_class)

    # imports for inference part
    import cv2
    import numpy as np
    from common.laserscan import SemLaserScan

    # open label and project
    scan = SemLaserScan(project=True, max_classes=300)
    scan.open_scan(FLAGS.scan)
    scan.open_label(FLAGS.label)

    # get the things I need
    proj_range = torch.from_numpy(scan.proj_range).to(device)
    proj_sem_label = torch.from_numpy(scan.proj_sem_label).long().to(device)
    proj_sem_color = torch.from_numpy(scan.proj_sem_color).to(device)

    # run the border mask
    border_mask = bm(proj_sem_label)

    # bring to numpy and normalize for showing
    proj_range = proj_range.cpu().numpy()
    proj_sem_label = proj_sem_label.cpu().numpy()
    proj_sem_color = proj_sem_color.cpu().numpy()
Example No. 10
  def __getitem__(self, index):
    # get item in tensor shape
    scan_file = self.scan_files[index]
    if self.gt:
      label_file = self.label_files[index]

    # open a semantic laserscan
    DA = False
    flip_sign = False
    rot = False
    drop_points = False
    if self.transform:
        if random.random() > 0.5:
            if random.random() > 0.5:
                DA = True
            if random.random() > 0.5:
                flip_sign = True
            if random.random() > 0.5:
                rot = True
            drop_points = random.uniform(0, 0.5)

    if self.gt:
      scan = SemLaserScan(self.color_map,
                          project=True,
                          H=self.sensor_img_H,
                          W=self.sensor_img_W,
                          fov_up=self.sensor_fov_up,
                          fov_down=self.sensor_fov_down,
                          DA=DA,
                          flip_sign=flip_sign,
                          drop_points=drop_points)
    else:
      scan = LaserScan(project=True,
                       H=self.sensor_img_H,
                       W=self.sensor_img_W,
                       fov_up=self.sensor_fov_up,
                       fov_down=self.sensor_fov_down,
                       DA=DA,
                       rot=rot,
                       flip_sign=flip_sign,
                       drop_points=drop_points)

    # open and obtain scan
    scan.open_scan(scan_file)
    if self.gt:
      scan.open_label(label_file)
      # map unused classes to used classes (also for projection)
      scan.sem_label = self.map(scan.sem_label, self.learning_map)
      scan.proj_sem_label = self.map(scan.proj_sem_label, self.learning_map)

    # get points and labels
    proj_range = torch.from_numpy(scan.proj_range).clone()
    proj_segment_angle = torch.from_numpy(scan.segment_angle).clone()
    proj_xyz = torch.from_numpy(scan.proj_xyz).clone()
    proj_remission = torch.from_numpy(scan.proj_remission).clone()
    proj_mask = torch.from_numpy(scan.proj_mask)
    if self.gt:
      proj_labels = torch.from_numpy(scan.proj_sem_label).clone()
      proj_labels = proj_labels * proj_mask
    else:
      proj_labels = []

    # return
    return proj_range, proj_segment_angle, proj_xyz, proj_remission, proj_mask, proj_labels, scan.points, scan_file, scan.sem_label
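A note on the nested coin flips: the outer random.random() > 0.5 gates augmentation as a whole, and each inner draw is an independent 0.5, so DA, flip_sign and rot each end up enabled with probability 0.25 per sample. drop_points doubles as flag and value: it stays False when augmentation is off and otherwise becomes a uniform ratio in [0, 0.5], which the scan classes receive alongside the other flags.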
Example No. 11
        label_names.sort()

        # check that there are same amount of labels and scans
        if not FLAGS.ignore_safety:
            assert (len(label_names) == len(scan_names))

    # create a scan
    if FLAGS.ignore_semantics:
        scan = LaserScan(
            project=True)  # project all opened scans to spheric proj
    else:
        color_dict = CFG["color_map"]
        #scan = SemLaserScan(color_dict, project=True)
        scan = SemLaserScan(color_dict,
                            project=True,
                            H=64,
                            W=1024,
                            fov_up=3,
                            fov_down=-25)

    # create a visualizer
    semantics = not FLAGS.ignore_semantics
    if not semantics:
        label_names = None
    vis = LaserScanVis(scan=scan,
                       scan_names=scan_names,
                       label_names=label_names,
                       offset=FLAGS.offset,
                       semantics=semantics,
                       instances=False)

    # print instructions
Example No. 12
    def __getitem__(self, index):
        # get item in tensor shape
        scan_file = self.scan_files[index]

        # JLLIU
        # print(scan_file)

        if self.gt:
            label_file = self.label_files[index]

        # open a semantic laserscan
        DA = False
        flip_sign = False
        rot = False
        drop_points = False
        if self.transform:
            if random.random() > 0.5:
                if random.random() > 0.5:
                    DA = True
                if random.random() > 0.5:
                    flip_sign = True
                if random.random() > 0.5:
                    rot = True
                drop_points = random.uniform(0, 0.5)

        if self.gt:
            scan = SemLaserScan(self.color_map,
                                project=True,
                                H=self.sensor_img_H,
                                W=self.sensor_img_W,
                                fov_up=self.sensor_fov_up,
                                fov_down=self.sensor_fov_down,
                                DA=DA,
                                flip_sign=flip_sign,
                                drop_points=drop_points)
        else:
            scan = LaserScan(project=True,
                             H=self.sensor_img_H,
                             W=self.sensor_img_W,
                             fov_up=self.sensor_fov_up,
                             fov_down=self.sensor_fov_down,
                             DA=DA,
                             rot=rot,
                             flip_sign=flip_sign,
                             drop_points=drop_points)

        # open and obtain scan
        scan.open_scan(scan_file)
        if self.gt:
            scan.open_label(label_file)
            # map unused classes to used classes (also for projection)
            scan.sem_label = self.map(scan.sem_label, self.learning_map)
            scan.proj_sem_label = self.map(scan.proj_sem_label,
                                           self.learning_map)

        #print("proj_x.shape: ",proj_x.shape)
        #print("proj_y.shape: ",proj_y.shape)

        # make a tensor of the uncompressed data (with the max num points)
        unproj_n_points = scan.points.shape[0]
        unproj_xyz = torch.full((self.max_points, 3), -1.0, dtype=torch.float)
        unproj_xyz[:unproj_n_points] = torch.from_numpy(scan.points)
        unproj_range = torch.full([self.max_points], -1.0, dtype=torch.float)
        unproj_range[:unproj_n_points] = torch.from_numpy(scan.unproj_range)
        unproj_remissions = torch.full([self.max_points],
                                       -1.0,
                                       dtype=torch.float)
        unproj_remissions[:unproj_n_points] = torch.from_numpy(scan.remissions)
        if self.gt:
            unproj_labels = torch.full([self.max_points],
                                       -1.0,
                                       dtype=torch.int32)
            unproj_labels[:unproj_n_points] = torch.from_numpy(scan.sem_label)
        else:
            unproj_labels = []


        # JLLIU:
        # the proj_xyz.shape of torch.Size([64, 2048, 3]) below is the same one
        # we saw earlier in laserscan.py, and so is the intensity lookup
        # proj_remission[proj_y, proj_x]
        """
    seq_now = os.path.normpath(scan_file).split(os.sep)[-3]
    file_now = os.path.normpath(scan_file).split(os.sep)[-1].replace(".bin", ".npy")

    proj_xyzi = np.zeros(shape=(64,2048,4))
    proj_xyzi[:,:,0:3] = scan.proj_xyz
    proj_xyzi[:,:,3] = scan.proj_remission
    np.save('/home/doggy/SalsaNext/train/tasks/semantic/dataset/xyzi_npy/'+ seq_now + '/' + file_now , proj_xyzi)
    
    print("\nnpy_name: ", file_now)
    print("proj_xyz[50,50]: ", scan.proj_xyz[50,50])
    print("proj_i[50,50]: ", scan.proj_remission[50,50])
    print("proj_xyzi[50,50]: ", proj_xyzi[50,50])
    """

        # get points and labels
        proj_range = torch.from_numpy(scan.proj_range).clone()
        proj_xyz = torch.from_numpy(scan.proj_xyz).clone()
        proj_remission = torch.from_numpy(scan.proj_remission).clone()
        proj_mask = torch.from_numpy(scan.proj_mask)
        if self.gt:
            proj_labels = torch.from_numpy(scan.proj_sem_label).clone()
            proj_labels = proj_labels * proj_mask
        else:
            proj_labels = []
        proj_x = torch.full([self.max_points], -1, dtype=torch.long)
        proj_x[:unproj_n_points] = torch.from_numpy(scan.proj_x)
        proj_y = torch.full([self.max_points], -1, dtype=torch.long)
        proj_y[:unproj_n_points] = torch.from_numpy(scan.proj_y)
        proj = torch.cat([
            proj_range.unsqueeze(0).clone(),
            proj_xyz.clone().permute(2, 0, 1),
            proj_remission.unsqueeze(0).clone()
        ])
        proj = (proj - self.sensor_img_means[:, None, None]
                ) / self.sensor_img_stds[:, None, None]
        proj = proj * proj_mask.float()

        # get name and sequence
        path_norm = os.path.normpath(scan_file)
        path_split = path_norm.split(os.sep)
        path_seq = path_split[-3]
        path_name = path_split[-1].replace(".bin", ".label")

        # return
        return proj, proj_mask, proj_labels, unproj_labels, path_seq, path_name, proj_x, proj_y, proj_range, unproj_range, proj_xyz, unproj_xyz, proj_remission, unproj_remissions, unproj_n_points
Example No. 13
            for dp, dn, fn in os.walk(os.path.expanduser(label_paths))
            for f in fn
        ]
        label_names.sort()

        # check that there are same amount of labels and scans
        if not FLAGS.ignore_safety:
            assert (len(label_names) == len(scan_names))

    # create a scan
    if FLAGS.ignore_semantics:
        scan = LaserScan(
            project=True)  # project all opened scans to spheric proj
    else:
        color_dict = CFG["color_map"]
        scan = SemLaserScan(color_dict, project=True, H=80, W=2048)

    # create a visualizer
    semantics = not FLAGS.ignore_semantics
    if not semantics:
        label_names = None
    vis = LaserScanVis(scan=scan,
                       scan_names=scan_names,
                       label_names=label_names,
                       offset=FLAGS.offset,
                       semantics=semantics,
                       instances=False)

    # print instructions
    print("To navigate:")
    print("\tb: back (previous scan)")
Example No. 14
    def __getitem__(self, index):
        # get item in tensor shape
        scan_file = self.scan_files[index]
        if self.gt:
            label_file = self.label_files[index]

        # open a semantic laserscan
        if self.gt:
            scan = SemLaserScan(self.color_map,
                                project=True,
                                H=self.sensor_img_H,
                                W=self.sensor_img_W,
                                fov_up=self.sensor_fov_up,
                                fov_down=self.sensor_fov_down)
        else:
            scan = LaserScan(project=True,
                             H=self.sensor_img_H,
                             W=self.sensor_img_W,
                             fov_up=self.sensor_fov_up,
                             fov_down=self.sensor_fov_down)

        # open and obtain scan
        scan.open_scan(scan_file)
        if self.gt:
            scan.open_label(label_file)
            # map unused classes to used classes (also for projection)
            scan.sem_label = self.map(scan.sem_label, self.learning_map)
            scan.proj_sem_label = self.map(scan.proj_sem_label,
                                           self.learning_map)

        # make a tensor of the uncompressed data (with the max num points)
        unproj_n_points = scan.points.shape[0]
        unproj_xyz = torch.full((self.max_points, 3), -1.0, dtype=torch.float)
        unproj_xyz[:unproj_n_points] = torch.from_numpy(scan.points)
        unproj_range = torch.full([self.max_points], -1.0, dtype=torch.float)
        unproj_range[:unproj_n_points] = torch.from_numpy(scan.unproj_range)
        unproj_remissions = torch.full([self.max_points],
                                       -1.0,
                                       dtype=torch.float)
        unproj_remissions[:unproj_n_points] = torch.from_numpy(scan.remissions)
        if self.gt:
            unproj_labels = torch.full([self.max_points],
                                       -1.0,
                                       dtype=torch.int32)
            unproj_labels[:unproj_n_points] = torch.from_numpy(scan.sem_label)
        else:
            unproj_labels = []

        # get points and labels
        proj_range = torch.from_numpy(scan.proj_range).clone()
        proj_xyz = torch.from_numpy(scan.proj_xyz).clone()
        proj_remission = torch.from_numpy(scan.proj_remission).clone()
        proj_mask = torch.from_numpy(scan.proj_mask)
        if self.gt:
            proj_labels = torch.from_numpy(scan.proj_sem_label).clone()
            proj_labels = proj_labels * proj_mask
        else:
            proj_labels = []
        proj_x = torch.full([self.max_points], -1, dtype=torch.long)
        proj_x[:unproj_n_points] = torch.from_numpy(scan.proj_x)
        proj_y = torch.full([self.max_points], -1, dtype=torch.long)
        proj_y[:unproj_n_points] = torch.from_numpy(scan.proj_y)
        proj = torch.cat([
            proj_range.unsqueeze(0).clone(),
            proj_xyz.clone().permute(2, 0, 1),
            proj_remission.unsqueeze(0).clone()
        ])

        proj_blocked = proj.unsqueeze(1)  # add a singleton channel dim: (C, 1, H, W)

        proj = (proj - self.sensor_img_means[:, None, None]
                ) / self.sensor_img_stds[:, None, None]

        proj = proj * proj_mask.float()

        # get name and sequence
        path_norm = os.path.normpath(scan_file)
        path_split = path_norm.split(os.sep)
        path_seq = path_split[-3]
        path_name = path_split[-1].replace(".bin", ".label")
        # print("path_norm: ", path_norm)
        # print("path_seq", path_seq)
        # print("path_name", path_name)

        # import time
        # import cv2
        # cv2.imwrite('/home/snowflake/Desktop/big8192-128.png', proj_blocked[0,0, :, :].numpy()*15)
        # print('proj_blocked.shape')
        # print(proj_blocked.shape)
        # time.sleep(1000)

        n, c, h, w = proj_blocked.size()  # n is proj's channel count C, since proj_blocked is (C, 1, H, W)
        proj2 = proj.clone()
        proj = proj.unsqueeze(0)
        mask_image = proj_mask.unsqueeze(0).unsqueeze(0).float()
        downsamplings = 4
        representations = {}
        representations['image'] = []
        representations['points'] = []
        windows_size = 3  # neighborhood window size, matching the unfold kernel_size

        for i in range(downsamplings):

            proj_chan_group_points = f.unfold(proj_blocked,
                                              kernel_size=3,
                                              stride=1,
                                              padding=1)
            projmask_chan_group_points = f.unfold(mask_image,
                                                  kernel_size=3,
                                                  stride=1,
                                                  padding=1)

            # Get the mean point (taking apart non-valid points)
            proj_chan_group_points_sum = torch.sum(proj_chan_group_points,
                                                   dim=1)
            projmask_chan_group_points_sum = torch.sum(
                projmask_chan_group_points, dim=1)
            proj_chan_group_points_mean = proj_chan_group_points_sum / projmask_chan_group_points_sum

            # tile it so it can be subtracted from the other points
            tiled_proj_chan_group_points_mean = proj_chan_group_points_mean.unsqueeze(
                1).repeat(1, windows_size * windows_size, 1)

            # remove NaNs due to empty blocks (NaN != NaN, so this flags them)
            is_nan = tiled_proj_chan_group_points_mean != tiled_proj_chan_group_points_mean
            tiled_proj_chan_group_points_mean[is_nan] = 0.

            # compute invalid-point mask (True where the input pixel was empty)
            tiled_projmask_chan_group_points = (
                1 - projmask_chan_group_points.repeat(n, 1, 1)).bool()

            # subtract the mean point from the points
            proj_chan_group_points_relative = proj_chan_group_points - tiled_proj_chan_group_points_mean

            # set to zero points which where non valid at the beginning
            proj_chan_group_points_relative[
                tiled_projmask_chan_group_points] = 0.

            # compute distance (radius) to mean point
            # xyz_relative = proj_chan_group_points_relative[1:4,...]
            # relative_distance = torch.norm(xyz_relative, dim=0).unsqueeze(0)

            # proj_chan_group_points_relative now holds Xr, Yr, Zr, Rr, Dr relative to the mean point
            proj_norm_chan_group_points = f.unfold(proj.permute(1, 0, 2, 3),
                                                   kernel_size=3,
                                                   stride=1,
                                                   padding=1)
            # proj_norm_chan_group_points now holds X, Y, Z, R, D; concatenate the two
            proj_chan_group_points_combined = torch.cat(
                [proj_norm_chan_group_points, proj_chan_group_points_relative],
                dim=0)
            # convert back to image for image-convolution-branch
            proj_out = f.fold(proj_chan_group_points_combined,
                              proj_blocked.shape[-2:],
                              kernel_size=3,
                              stride=1,
                              padding=1)
            proj_out = proj_out.squeeze(1)

            proj = nn.functional.interpolate(proj,
                                             size=(int(proj.shape[2] / 2),
                                                   int(proj.shape[3] / 2)),
                                             mode='nearest')
            proj_blocked = nn.functional.interpolate(
                proj_blocked.permute(1, 0, 2, 3),
                size=(int(proj_blocked.shape[2] / 2),
                      int(proj_blocked.shape[3] / 2)),
                mode='nearest').permute(1, 0, 2, 3)
            mask_image = nn.functional.interpolate(
                mask_image,
                size=(int(mask_image.shape[2] / 2),
                      int(mask_image.shape[3] / 2)),
                mode='nearest')

            representations['points'].append(proj_chan_group_points_combined)
            representations['image'].append(proj_out)
            # print('append' +str(i))
            #
            # print(proj_chan_group_points_combined.shape)
            # print(proj_out.shape)

        return proj2, proj_mask, proj_labels, unproj_labels, path_seq, path_name, proj_x, proj_y, proj_range, unproj_range, proj_xyz, unproj_xyz, proj_remission, unproj_remissions, unproj_n_points, representations
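The multi-scale loop in this example is built entirely on f.unfold/f.fold: unfold gathers each pixel's 3x3 neighborhood into a column, the mean-relative features are computed per column, and fold scatters the columns back into image form. A minimal, self-contained sketch of that shape round-trip on toy sizes (nothing here comes from the original code):

import torch
import torch.nn.functional as F

x = torch.arange(16.0).reshape(1, 1, 4, 4)             # (N=1, C=1, H=4, W=4)
cols = F.unfold(x, kernel_size=3, stride=1, padding=1)
print(cols.shape)   # torch.Size([1, 9, 16]): one 3x3 neighborhood per pixel

# a crude per-pixel neighborhood mean; the example instead divides by the
# unfolded validity mask so empty range-image pixels are left out
mean = cols.sum(dim=1) / (cols != 0).sum(dim=1).clamp(min=1)
print(mean.shape)   # torch.Size([1, 16])

back = F.fold(cols, output_size=(4, 4), kernel_size=3, stride=1, padding=1)
print(back.shape)   # torch.Size([1, 1, 4, 4]); fold SUMS overlapping patches,
                    # so back is a weighted copy of x, not x itself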