# check that I have the same number of files
# print("labels: ", len(label_names))
# print("predictions: ", len(pred_names))
assert(len(label_names) == len(scan_names) and
       len(label_names) == len(pred_names))

print("Evaluating sequences: ")

# open each file, get the tensor, and make the iou comparison
for scan_file, label_file, pred_file in zip(scan_names, label_names, pred_names):
    print("evaluating label ", label_file, "with", pred_file)

    # open label
    label = SemLaserScan(project=False)
    label.open_scan(scan_file)
    label.open_label(label_file)
    u_label_sem = remap_lut[label.sem_label]  # remap to xentropy format
    if FLAGS.limit is not None:
        u_label_sem = u_label_sem[:FLAGS.limit]

    # open prediction
    pred = SemLaserScan(project=False)
    pred.open_scan(scan_file)
    pred.open_label(pred_file)
    u_pred_sem = remap_lut[pred.sem_label]  # remap to xentropy format
    if FLAGS.limit is not None:
        u_pred_sem = u_pred_sem[:FLAGS.limit]

    # add single scan to evaluation
    evaluator.addBatch(u_pred_sem, u_label_sem)
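For context, remap_lut above is the lookup table that maps raw SemanticKITTI label ids to the contiguous cross-entropy ids. A minimal sketch of how such a table is typically built from the learning_map in semantic-kitti.yaml; the subset of entries below is illustrative, not the full mapping:

import numpy as np

learning_map = {0: 0, 10: 1, 11: 2, 40: 9}  # unlabeled, car, bicycle, road (subset)

maxkey = max(learning_map.keys())
remap_lut = np.zeros((maxkey + 100), dtype=np.int32)  # headroom for unseen raw ids
remap_lut[list(learning_map.keys())] = list(learning_map.values())

print(remap_lut[np.array([10, 40, 0])])  # -> [1 9 0]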
def __getitem__(self, index):
    # get item in tensor shape
    scan_file = self.scan_files[index]
    if self.gt:
        label_file = self.label_files[index]

    # open a semantic laserscan
    if self.gt:
        scan = SemLaserScan(self.color_map,
                            project=True,
                            H=self.sensor_img_H,
                            W=self.sensor_img_W,
                            fov_up=self.sensor_fov_up,
                            fov_down=self.sensor_fov_down)
    else:
        scan = LaserScan(project=True,
                         H=self.sensor_img_H,
                         W=self.sensor_img_W,
                         fov_up=self.sensor_fov_up,
                         fov_down=self.sensor_fov_down)

    # open and obtain scan
    scan.open_scan(scan_file)
    if self.gt:
        scan.open_label(label_file)
        # map unused classes to used classes (also for projection)
        scan.sem_label = self.map(scan.sem_label, self.learning_map)
        scan.proj_sem_label = self.map(scan.proj_sem_label, self.learning_map)

    # make a tensor of the uncompressed data (with the max num points)
    unproj_n_points = scan.points.shape[0]
    unproj_xyz = torch.full((self.max_points, 3), -1.0, dtype=torch.float)
    unproj_xyz[:unproj_n_points] = torch.from_numpy(scan.points)
    unproj_range = torch.full([self.max_points], -1.0, dtype=torch.float)
    unproj_range[:unproj_n_points] = torch.from_numpy(scan.unproj_range)
    unproj_remissions = torch.full([self.max_points], -1.0, dtype=torch.float)
    unproj_remissions[:unproj_n_points] = torch.from_numpy(scan.remissions)
    if self.gt:
        unproj_labels = torch.full([self.max_points], -1.0, dtype=torch.int32)
        unproj_labels[:unproj_n_points] = torch.from_numpy(scan.sem_label)
    else:
        unproj_labels = []

    # get points and labels
    proj_range = torch.from_numpy(scan.proj_range).clone()
    proj_xyz = torch.from_numpy(scan.proj_xyz).clone()
    proj_remission = torch.from_numpy(scan.proj_remission).clone()
    proj_mask = torch.from_numpy(scan.proj_mask)
    if self.gt:
        proj_labels = torch.from_numpy(scan.proj_sem_label).clone()
        proj_labels = proj_labels * proj_mask
    else:
        proj_labels = []
    proj_x = torch.full([self.max_points], -1, dtype=torch.long)
    proj_x[:unproj_n_points] = torch.from_numpy(scan.proj_x)
    proj_y = torch.full([self.max_points], -1, dtype=torch.long)
    proj_y[:unproj_n_points] = torch.from_numpy(scan.proj_y)
    proj = torch.cat([proj_range.unsqueeze(0).clone(),
                      proj_xyz.clone().permute(2, 0, 1),
                      proj_remission.unsqueeze(0).clone()])
    proj = (proj - self.sensor_img_means[:, None, None]
            ) / self.sensor_img_stds[:, None, None]
    proj = proj * proj_mask.float()

    # get name and sequence
    path_norm = os.path.normpath(scan_file)
    path_split = path_norm.split(os.sep)
    path_seq = path_split[-3]
    path_name = path_split[-1].replace(".bin", ".label")
    # print("path_norm: ", path_norm)
    # print("path_seq", path_seq)
    # print("path_name", path_name)

    # return
    return proj, proj_mask, proj_labels, unproj_labels, path_seq, path_name, \
        proj_x, proj_y, proj_range, unproj_range, proj_xyz, unproj_xyz, \
        proj_remission, unproj_remissions, unproj_n_points
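The self.map helper called above is not part of this excerpt; map_labels below is a hypothetical free-function equivalent, assuming the usual lookup-table implementation over the integer keys of learning_map:

import numpy as np

def map_labels(label, mapdict):
    # build a dense lookup table from the sparse mapping dict,
    # then remap the whole label array in one fancy-indexing step
    maxkey = max(mapdict.keys())
    lut = np.zeros((maxkey + 100), dtype=np.int32)
    lut[list(mapdict.keys())] = list(mapdict.values())
    return lut[label]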
    device = torch.device('cuda')
else:
    device = torch.device('cpu')

# define the border mask
bm = borderMask(300, device, FLAGS.border, FLAGS.conn, FLAGS.exclude_class)

# imports for inference part
import cv2
import numpy as np
from common.laserscan import LaserScan, SemLaserScan

# open label and project
scan = SemLaserScan(project=True, max_classes=300)
scan.open_scan(FLAGS.scan)
scan.open_label(FLAGS.label)

# get the things I need
proj_range = torch.from_numpy(scan.proj_range).to(device)
proj_sem_label = torch.from_numpy(scan.proj_sem_label).long().to(device)
proj_sem_color = torch.from_numpy(scan.proj_sem_color).to(device)

# run the border mask
border_mask = bm(proj_sem_label)

# bring to numpy and normalize for showing
proj_range = proj_range.cpu().numpy()
proj_sem_label = proj_sem_label.cpu().numpy()
proj_sem_color = proj_sem_color.cpu().numpy()
border_mask = border_mask.cpu().numpy().squeeze()
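To actually look at the arrays prepared above, one might continue along these lines; this is an illustrative sketch only, and the window names and scaling are assumptions:

# normalize the range image to [0, 1] for display; guard against a constant image
depth = (proj_range - proj_range.min()) / (proj_range.max() - proj_range.min() + 1e-6)
cv2.imshow("range", depth)
cv2.imshow("labels", proj_sem_color)  # color LUT values are floats in [0, 1]
cv2.imshow("border_mask", (border_mask * 255).astype(np.uint8))
cv2.waitKey(0)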
def eval(test_sequences, splits, pred):
    # get scan paths
    scan_names = []
    for sequence in test_sequences:
        sequence = '{0:02d}'.format(int(sequence))
        scan_paths = os.path.join(FLAGS.dataset, "sequences",
                                  str(sequence), "velodyne")
        # populate the scan names
        seq_scan_names = [os.path.join(dp, f)
                          for dp, dn, fn in os.walk(os.path.expanduser(scan_paths))
                          for f in fn if ".bin" in f]
        seq_scan_names.sort()
        scan_names.extend(seq_scan_names)
    # print(scan_names)

    # get label paths
    label_names = []
    for sequence in test_sequences:
        sequence = '{0:02d}'.format(int(sequence))
        label_paths = os.path.join(FLAGS.dataset, "sequences",
                                   str(sequence), "labels")
        # populate the label names
        seq_label_names = [os.path.join(dp, f)
                           for dp, dn, fn in os.walk(os.path.expanduser(label_paths))
                           for f in fn if ".label" in f]
        seq_label_names.sort()
        label_names.extend(seq_label_names)
    # print(label_names)

    # get prediction paths
    pred_names = []
    for sequence in test_sequences:
        sequence = '{0:02d}'.format(int(sequence))
        pred_paths = os.path.join(FLAGS.predictions, "sequences",
                                  sequence, "predictions")
        # populate the prediction names
        seq_pred_names = [os.path.join(dp, f)
                          for dp, dn, fn in os.walk(os.path.expanduser(pred_paths))
                          for f in fn if ".label" in f]
        seq_pred_names.sort()
        pred_names.extend(seq_pred_names)
    print("pred_names", pred_names)

    # check that I have the same number of files
    print("\n\n\nlabels: ", len(label_names))
    print("predictions: ", len(pred_names))
    print("\n\n")
    assert(len(label_names) == len(scan_names) and
           len(label_names) == len(pred_names))

    print("Evaluating sequences: ")

    # open each file, get the tensor, and make the iou comparison
    for scan_file, label_file, pred_file in zip(scan_names, label_names, pred_names):
        print("evaluating label ", label_file, "with", pred_file)

        # open label
        label = SemLaserScan(project=False)
        label.open_scan(scan_file)
        label.open_label(label_file)
        u_label_sem = remap_lut[label.sem_label]  # remap to xentropy format
        if FLAGS.limit is not None:
            u_label_sem = u_label_sem[:FLAGS.limit]

        # open prediction
        pred = SemLaserScan(project=False)
        pred.open_scan(scan_file)
        pred.open_label(pred_file)
        u_pred_sem = remap_lut[pred.sem_label]  # remap to xentropy format
        if FLAGS.limit is not None:
            u_pred_sem = u_pred_sem[:FLAGS.limit]

        # add single scan to evaluation
        evaluator.addBatch(u_pred_sem, u_label_sem)

    # when I am done, print the evaluation
    m_accuracy = evaluator.getacc()
    m_jaccard, class_jaccard = evaluator.getIoU()

    print('{split} set:\n'
          'Acc avg {m_accuracy:.3f}\n'
          'IoU avg {m_jaccard:.3f}'.format(split=splits,
                                           m_accuracy=m_accuracy,
                                           m_jaccard=m_jaccard))
    save_to_log(FLAGS.predictions, 'pred.txt',
                '{split} set:\n'
                'Acc avg {m_accuracy:.3f}\n'
                'IoU avg {m_jaccard:.3f}'.format(split=splits,
                                                 m_accuracy=m_accuracy,
                                                 m_jaccard=m_jaccard))

    # print also classwise
    for i, jacc in enumerate(class_jaccard):
        if i not in ignore:
            print('IoU class {i:} [{class_str:}] = {jacc:.3f}'.format(
                i=i, class_str=class_strings[class_inv_remap[i]], jacc=jacc))
            save_to_log(FLAGS.predictions, 'pred.txt',
                        'IoU class {i:} [{class_str:}] = {jacc:.3f}'.format(
                            i=i, class_str=class_strings[class_inv_remap[i]],
                            jacc=jacc))

    # print for spreadsheet
    print("*" * 80)
    print("below can be copied straight for paper table")
    for i, jacc in enumerate(class_jaccard):
        if i not in ignore:
            sys.stdout.write('{jacc:.3f}'.format(jacc=jacc.item()))
            sys.stdout.write(",")
    sys.stdout.write('{jacc:.3f}'.format(jacc=m_jaccard.item()))
    sys.stdout.write(",")
    sys.stdout.write('{acc:.3f}'.format(acc=m_accuracy.item()))
    sys.stdout.write('\n')
    sys.stdout.flush()
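A plausible call site for eval, assuming DATA holds the parsed semantic-kitti.yaml with the usual split lists; both the DATA keys and the flag name are assumptions here:

# evaluate only the validation split (sequence 08 in SemanticKITTI)
eval(DATA["split"]["valid"], "valid", FLAGS.predictions)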
def __getitem__(self, index):
    # get item in tensor shape
    scan_file = self.scan_files[index]
    # JLLIU
    # print(scan_file)
    if self.gt:
        label_file = self.label_files[index]

    # open a semantic laserscan
    DA = False
    flip_sign = False
    rot = False
    drop_points = False
    if self.transform:
        if random.random() > 0.5:
            if random.random() > 0.5:
                DA = True
            if random.random() > 0.5:
                flip_sign = True
            if random.random() > 0.5:
                rot = True
            drop_points = random.uniform(0, 0.5)

    if self.gt:
        scan = SemLaserScan(self.color_map,
                            project=True,
                            H=self.sensor_img_H,
                            W=self.sensor_img_W,
                            fov_up=self.sensor_fov_up,
                            fov_down=self.sensor_fov_down,
                            DA=DA,
                            flip_sign=flip_sign,
                            drop_points=drop_points)
    else:
        scan = LaserScan(project=True,
                         H=self.sensor_img_H,
                         W=self.sensor_img_W,
                         fov_up=self.sensor_fov_up,
                         fov_down=self.sensor_fov_down,
                         DA=DA,
                         rot=rot,
                         flip_sign=flip_sign,
                         drop_points=drop_points)

    # open and obtain scan
    scan.open_scan(scan_file)
    if self.gt:
        scan.open_label(label_file)
        # map unused classes to used classes (also for projection)
        scan.sem_label = self.map(scan.sem_label, self.learning_map)
        scan.proj_sem_label = self.map(scan.proj_sem_label, self.learning_map)

    # print("proj_x.shape: ", proj_x.shape)
    # print("proj_y.shape: ", proj_y.shape)

    # make a tensor of the uncompressed data (with the max num points)
    unproj_n_points = scan.points.shape[0]
    unproj_xyz = torch.full((self.max_points, 3), -1.0, dtype=torch.float)
    unproj_xyz[:unproj_n_points] = torch.from_numpy(scan.points)
    unproj_range = torch.full([self.max_points], -1.0, dtype=torch.float)
    unproj_range[:unproj_n_points] = torch.from_numpy(scan.unproj_range)
    unproj_remissions = torch.full([self.max_points], -1.0, dtype=torch.float)
    unproj_remissions[:unproj_n_points] = torch.from_numpy(scan.remissions)
    if self.gt:
        unproj_labels = torch.full([self.max_points], -1.0, dtype=torch.int32)
        unproj_labels[:unproj_n_points] = torch.from_numpy(scan.sem_label)
    else:
        unproj_labels = []

    # JLLIU: proj_xyz.shape below is torch.Size([64, 2048, 3]), the same array
    # seen earlier in laserscan.py; the intensity image
    # proj_remission[proj_y, proj_x] is laid out the same way.
    """
    seq_now = os.path.normpath(scan_file).split(os.sep)[-3]
    file_now = os.path.normpath(scan_file).split(os.sep)[-1].replace(".bin", ".npy")
    proj_xyzi = np.zeros(shape=(64, 2048, 4))
    proj_xyzi[:, :, 0:3] = scan.proj_xyz
    proj_xyzi[:, :, 3] = scan.proj_remission
    np.save('/home/doggy/SalsaNext/train/tasks/semantic/dataset/xyzi_npy/'
            + seq_now + '/' + file_now, proj_xyzi)
    print("\nnpy_name: ", file_now)
    print("proj_xyz[50,50]: ", scan.proj_xyz[50, 50])
    print("proj_i[50,50]: ", scan.proj_remission[50, 50])
    print("proj_xyzi[50,50]: ", proj_xyzi[50, 50])
    """

    # get points and labels
    proj_range = torch.from_numpy(scan.proj_range).clone()
    proj_xyz = torch.from_numpy(scan.proj_xyz).clone()
    proj_remission = torch.from_numpy(scan.proj_remission).clone()
    proj_mask = torch.from_numpy(scan.proj_mask)
    if self.gt:
        proj_labels = torch.from_numpy(scan.proj_sem_label).clone()
        proj_labels = proj_labels * proj_mask
    else:
        proj_labels = []
    proj_x = torch.full([self.max_points], -1, dtype=torch.long)
    proj_x[:unproj_n_points] = torch.from_numpy(scan.proj_x)
    proj_y = torch.full([self.max_points], -1, dtype=torch.long)
    proj_y[:unproj_n_points] = torch.from_numpy(scan.proj_y)
    proj = torch.cat([proj_range.unsqueeze(0).clone(),
                      proj_xyz.clone().permute(2, 0, 1),
                      proj_remission.unsqueeze(0).clone()])
    proj = (proj - self.sensor_img_means[:, None, None]
            ) / self.sensor_img_stds[:, None, None]
    proj = proj * proj_mask.float()

    # get name and sequence
    path_norm = os.path.normpath(scan_file)
    path_split = path_norm.split(os.sep)
    path_seq = path_split[-3]
    path_name = path_split[-1].replace(".bin", ".label")

    # return
    return proj, proj_mask, proj_labels, unproj_labels, path_seq, path_name, \
        proj_x, proj_y, proj_range, unproj_range, proj_xyz, unproj_xyz, \
        proj_remission, unproj_remissions, unproj_n_points
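One detail worth spelling out about the self.transform branch above: because the coin flips are nested, each of DA, flip_sign, and rot is enabled with probability 0.25, while drop_points becomes a uniform fraction in [0, 0.5) with probability 0.5. A quick self-contained check of the nested gating:

import random

trials = 100_000
# outer gate and inner gate must both pass, as in the dataset code
hits = sum(1 for _ in range(trials)
           if random.random() > 0.5 and random.random() > 0.5)
print(hits / trials)  # ~0.25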
if i % 10 == 0:
    continue
frame_idx = i
scan_path = os.path.join(seq_scan_path, 'velodyne', '%06d.bin' % frame_idx)
if seq_label_path == seq_scan_path:
    label_path = os.path.join(seq_label_path, 'labels', '%06d.label' % frame_idx)
else:
    label_path = os.path.join(seq_label_path, 'predictions', '%06d.label' % frame_idx)
scan.open_scan(scan_path)
scan.open_label(label_path)
scan.colorize()
# points = np.fromfile(path, dtype=np.float32).reshape(-1, 4)

# create scatter object and fill in the data
scatter = visuals.Markers()
scatter.set_data(scan.points,
                 face_color=scan.sem_label_color[..., ::-1],
                 edge_color=scan.sem_label_color[..., ::-1],
                 size=2,
                 edge_width=2.0)

canvas = vispy.scene.SceneCanvas(keys='interactive',
                                 show=False,
                                 bgcolor='w',
                                 size=(1980, 1000))
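The excerpt builds the scatter and the canvas but stops before connecting them; a minimal continuation following the standard vispy.scene pattern (the camera choice is an assumption):

# attach the scatter to a view and start the event loop
view = canvas.central_widget.add_view()
view.camera = 'turntable'  # orbit camera; other vispy cameras work too
view.add(scatter)
canvas.show()
canvas.app.run()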
def __getitem__(self, index):
    # get item in tensor shape
    scan_file = self.scan_files[index]
    if self.gt:
        label_file = self.label_files[index]

    # open a semantic laserscan
    if self.gt:
        scan = SemLaserScan(self.color_map,
                            project=True,
                            H=self.sensor_img_H,
                            W=self.sensor_img_W,
                            fov_up=self.sensor_fov_up,
                            fov_down=self.sensor_fov_down)
    else:
        scan = LaserScan(project=True,
                         H=self.sensor_img_H,
                         W=self.sensor_img_W,
                         fov_up=self.sensor_fov_up,
                         fov_down=self.sensor_fov_down)

    # open and obtain scan
    scan.open_scan(scan_file)
    if self.gt:
        scan.open_label(label_file)
        # map unused classes to used classes (also for projection)
        scan.sem_label = self.map(scan.sem_label, self.learning_map)
        scan.proj_sem_label = self.map(scan.proj_sem_label, self.learning_map)

    # make a tensor of the uncompressed data (with the max num points)
    unproj_n_points = scan.points.shape[0]
    unproj_xyz = torch.full((self.max_points, 3), -1.0, dtype=torch.float)
    unproj_xyz[:unproj_n_points] = torch.from_numpy(scan.points)
    unproj_range = torch.full([self.max_points], -1.0, dtype=torch.float)
    unproj_range[:unproj_n_points] = torch.from_numpy(scan.unproj_range)
    unproj_remissions = torch.full([self.max_points], -1.0, dtype=torch.float)
    unproj_remissions[:unproj_n_points] = torch.from_numpy(scan.remissions)
    if self.gt:
        unproj_labels = torch.full([self.max_points], -1.0, dtype=torch.int32)
        unproj_labels[:unproj_n_points] = torch.from_numpy(scan.sem_label)
    else:
        unproj_labels = []

    # get points and labels
    proj_range = torch.from_numpy(scan.proj_range).clone()
    proj_xyz = torch.from_numpy(scan.proj_xyz).clone()
    proj_remission = torch.from_numpy(scan.proj_remission).clone()
    proj_mask = torch.from_numpy(scan.proj_mask)
    if self.gt:
        proj_labels = torch.from_numpy(scan.proj_sem_label).clone()
        proj_labels = proj_labels * proj_mask
    else:
        proj_labels = []
    proj_x = torch.full([self.max_points], -1, dtype=torch.long)
    proj_x[:unproj_n_points] = torch.from_numpy(scan.proj_x)
    proj_y = torch.full([self.max_points], -1, dtype=torch.long)
    proj_y[:unproj_n_points] = torch.from_numpy(scan.proj_y)
    proj = torch.cat([proj_range.unsqueeze(0).clone(),
                      proj_xyz.clone().permute(2, 0, 1),
                      proj_remission.unsqueeze(0).clone()])
    proj_blocked = proj.unsqueeze(1)  # [5, 1, H, W]: each channel becomes its own batch entry
    proj = (proj - self.sensor_img_means[:, None, None]
            ) / self.sensor_img_stds[:, None, None]
    proj = proj * proj_mask.float()

    # get name and sequence
    path_norm = os.path.normpath(scan_file)
    path_split = path_norm.split(os.sep)
    path_seq = path_split[-3]
    path_name = path_split[-1].replace(".bin", ".label")
    # print("path_norm: ", path_norm)
    # print("path_seq", path_seq)
    # print("path_name", path_name)

    # import time
    # import cv2
    # cv2.imwrite('/home/snowflake/Desktop/big8192-128.png',
    #             proj_blocked[0, 0, :, :].numpy() * 15)
    # print('proj_blocked.shape')
    # print(proj_blocked.shape)
    # time.sleep(1000)

    n, c, h, w = proj_blocked.size()
    proj2 = proj.clone()
    proj = proj.unsqueeze(0)
    mask_image = proj_mask.unsqueeze(0).unsqueeze(0).float()

    downsamplings = 4
    representations = {}
    representations['image'] = []
    representations['points'] = []
    windows_size = 3  # window size

    for i in range(downsamplings):
        # f is torch.nn.functional
        proj_chan_group_points = f.unfold(proj_blocked,
                                          kernel_size=3, stride=1, padding=1)
        projmask_chan_group_points = f.unfold(mask_image,
                                              kernel_size=3, stride=1, padding=1)

        # get the mean point of each window (excluding non-valid points)
        proj_chan_group_points_sum = torch.sum(proj_chan_group_points, dim=1)
        projmask_chan_group_points_sum = torch.sum(projmask_chan_group_points, dim=1)
        proj_chan_group_points_mean = proj_chan_group_points_sum \
            / projmask_chan_group_points_sum

        # tile it so it can be subtracted from the other points
        tiled_proj_chan_group_points_mean = proj_chan_group_points_mean.unsqueeze(
            1).repeat(1, windows_size * windows_size, 1)

        # remove nans due to empty blocks
        is_nan = tiled_proj_chan_group_points_mean != tiled_proj_chan_group_points_mean
        tiled_proj_chan_group_points_mean[is_nan] = 0.

        # compute valid mask per point
        tiled_projmask_chan_group_points = (
            1 - projmask_chan_group_points.repeat(n, 1, 1)).byte()

        # subtract the mean point from the points
        proj_chan_group_points_relative = proj_chan_group_points \
            - tiled_proj_chan_group_points_mean

        # zero out points which were non-valid at the beginning
        proj_chan_group_points_relative[tiled_projmask_chan_group_points] = 0.

        # compute distance (radius) to mean point
        # xyz_relative = proj_chan_group_points_relative[1:4, ...]
        # relative_distance = torch.norm(xyz_relative, dim=0).unsqueeze(0)

        # now proj_chan_group_points_relative has Xr, Yr, Zr, Rr, Dr relative to the mean point
        proj_norm_chan_group_points = f.unfold(proj.permute(1, 0, 2, 3),
                                               kernel_size=3, stride=1, padding=1)

        # proj_norm_chan_group_points has X, Y, Z, R, D; concatenate the two sets
        proj_chan_group_points_combined = torch.cat(
            [proj_norm_chan_group_points, proj_chan_group_points_relative], dim=0)

        # convert back to image for the image-convolution branch
        proj_out = f.fold(proj_chan_group_points_combined,
                          proj_blocked.shape[-2:],
                          kernel_size=3, stride=1, padding=1)
        proj_out = proj_out.squeeze(1)

        # halve the resolution for the next iteration
        proj = nn.functional.interpolate(proj,
                                         size=(int(proj.shape[2] / 2),
                                               int(proj.shape[3] / 2)),
                                         mode='nearest')
        proj_blocked = nn.functional.interpolate(
            proj_blocked.permute(1, 0, 2, 3),
            size=(int(proj_blocked.shape[2] / 2), int(proj_blocked.shape[3] / 2)),
            mode='nearest').permute(1, 0, 2, 3)
        mask_image = nn.functional.interpolate(
            mask_image,
            size=(int(mask_image.shape[2] / 2), int(mask_image.shape[3] / 2)),
            mode='nearest')

        representations['points'].append(proj_chan_group_points_combined)
        representations['image'].append(proj_out)
        # print('append ' + str(i))
        # print(proj_chan_group_points_combined.shape)
        # print(proj_out.shape)

    return proj2, proj_mask, proj_labels, unproj_labels, path_seq, path_name, \
        proj_x, proj_y, proj_range, unproj_range, proj_xyz, unproj_xyz, \
        proj_remission, unproj_remissions, unproj_n_points, representations
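To make the tensor shapes in that loop concrete, here is a self-contained toy run at the usual 64 x 2048 resolution with 5 channels (range, x, y, z, remission); the random data stands in for a real projection:

import torch
import torch.nn.functional as f

proj_blocked = torch.randn(5, 1, 64, 2048)           # each channel as its own batch entry
cols = f.unfold(proj_blocked, kernel_size=3, stride=1, padding=1)
print(cols.shape)                                    # [5, 9, 131072]: a 3x3 window per pixel
combined = torch.cat([cols, cols], dim=0)            # absolute + mean-relative features -> 10
img = f.fold(combined, proj_blocked.shape[-2:],
             kernel_size=3, stride=1, padding=1).squeeze(1)
print(img.shape)                                     # [10, 64, 2048]: image for the conv branch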