def sum_std_values(laserscan_file: str, mean_vector):
    # Per-scan contribution to the dataset standard deviations, computed
    # relative to the given per-channel means.
    laserscan = LaserScan()
    laserscan.open_scan(laserscan_file)
    xmean = mean_vector[INDEX_ORDER.index("X")]
    ymean = mean_vector[INDEX_ORDER.index("Y")]
    zmean = mean_vector[INDEX_ORDER.index("Z")]
    remmean = mean_vector[INDEX_ORDER.index("REMISSION")]
    rangemean = mean_vector[INDEX_ORDER.index("RANGE")]
    return lh.sum_laserscan_std(laserscan, xmean, ymean, zmean, remmean,
                                rangemean)
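For context, these helpers feed a two-pass computation of per-channel normalization statistics: a first pass sums raw values to obtain the means, and a second pass sums squared deviations from those means to obtain the standard deviations (std = sqrt(sum((v - mean)^2) / N)). The sketch below is a self-contained illustration of that idea working directly on LaserScan attributes; compute_channel_stats and the explicit range computation are illustrative assumptions, not part of the original helpers, which delegate the per-scan accumulation to lh.

import numpy as np

def compute_channel_stats(laserscan_files):
    # Illustrative two-pass mean/std over the channels X, Y, Z, remission, range.
    # Pass 1: per-channel value sums and total point count -> means.
    sums = np.zeros(5)
    count = 0
    for f in laserscan_files:
        scan = LaserScan()
        scan.open_scan(f)
        ranges = np.linalg.norm(scan.points, axis=1)
        values = np.hstack([scan.points,
                            scan.remissions.reshape(-1, 1),
                            ranges.reshape(-1, 1)])
        sums += values.sum(axis=0)
        count += values.shape[0]
    means = sums / count

    # Pass 2: per-channel sums of squared deviations -> standard deviations.
    sq_dev = np.zeros(5)
    for f in laserscan_files:
        scan = LaserScan()
        scan.open_scan(f)
        ranges = np.linalg.norm(scan.points, axis=1)
        values = np.hstack([scan.points,
                            scan.remissions.reshape(-1, 1),
                            ranges.reshape(-1, 1)])
        sq_dev += ((values - means) ** 2).sum(axis=0)
    stds = np.sqrt(sq_dev / count)
    return means, stds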
def get_laserscan_matrix(laserscan_file: str, label_file: str):
    # Build an (N, 5) matrix [x, y, z, remission, label] from a matching
    # scan/label file pair.
    laser_filename = os.path.splitext(os.path.basename(laserscan_file))[0]
    label_filename = os.path.splitext(os.path.basename(label_file))[0]
    if laser_filename != label_filename:
        warnings.warn(
            f"The laser-file {laser_filename} and label-file {label_filename} are not "
            f"the same. Skipping these files...",
            UserWarning)
        return None, None
    laserscan = LaserScan()
    laserscan.open_scan(laserscan_file)
    laserscan.open_labels(label_file)
    laserscan_matrix = np.concatenate([
        laserscan.points,
        laserscan.remissions.reshape(-1, 1),
        laserscan.labels.reshape(-1, 1)
    ],
                                      axis=1)
    return laserscan_matrix, laser_filename
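A minimal usage sketch for stacking the per-frame matrices of a whole sequence into one point array; the glob patterns and directory layout are assumptions for illustration, not part of the original code.

import glob
import numpy as np

scan_files = sorted(glob.glob("sequence/velodyne/*.bin"))    # assumed layout
label_files = sorted(glob.glob("sequence/labels/*.label"))   # assumed layout

matrices = []
for scan_f, label_f in zip(scan_files, label_files):
    matrix, name = get_laserscan_matrix(scan_f, label_f)
    if matrix is not None:  # mismatched pairs return (None, None) and are skipped
        matrices.append(matrix)

# one (N, 5) array with columns x, y, z, remission, label for the sequence
all_points = np.vstack(matrices)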
Example #3
    def __getitem__(self, index):
        # get item in tensor shape
        scan_file = self.scan_files[index]
        if self.gt:
            mask_file = self.mask_files[index]
            # load the ground-truth mask and binarize it at 0.5
            mask = np.load(mask_file)
            mask[mask > 0.5] = 1
            mask[mask <= 0.5] = 0
            mask = np.expand_dims(mask, axis=0)
        scan = LaserScan(project=True,
                         H=self.sensor_img_H,
                         W=self.sensor_img_W,
                         fov_up=self.sensor_fov_up,
                         fov_down=self.sensor_fov_down)

        # open and obtain scan
        scan.open_scan(scan_file)

        # estimate per-pixel surface normals from central differences of the
        # projected range image
        depth_image = np.copy(scan.proj_range)
        rows = depth_image.shape[0]
        cols = depth_image.shape[1]
        row_gap = 1
        col_gap = 1
        max_range = 80
        normal_image = np.zeros((rows, cols, 3))
        for r in range(row_gap, rows - row_gap):
            for c in range(col_gap, cols - col_gap):
                if depth_image[r, c] > max_range:
                    continue
                if depth_image[r + row_gap, c] < 0 or depth_image[r - row_gap,
                                                                  c] < 0:
                    continue
                if depth_image[r, c + col_gap] < 0 or depth_image[r, c -
                                                                  col_gap] < 0:
                    continue
                dx = depth_image[r + row_gap, c] - depth_image[r - row_gap, c]
                dy = depth_image[r, c + col_gap] - depth_image[r, c - col_gap]
                d = np.array([-dx, -dy, 1])
                normal = d / np.linalg.norm(d)
                p = scan.proj_xyz[r, c]
                if np.dot(normal, p) > 0:
                    normal *= -1
                normal_image[r, c] = normal

        # curvature: magnitude of the normal's spatial gradient in the range image
        curvature_image = np.full(depth_image.shape, -1, dtype=np.float32)
        for r in range(row_gap, rows - row_gap):
            for c in range(col_gap, cols - col_gap):
                # skip pixels whose neighbours have no valid normal estimate
                if (np.linalg.norm(normal_image[r + row_gap, c]) < 1e-3
                        or np.linalg.norm(normal_image[r - row_gap, c]) < 1e-3):
                    continue
                if (np.linalg.norm(normal_image[r, c + col_gap]) < 1e-3
                        or np.linalg.norm(normal_image[r, c - col_gap]) < 1e-3):
                    continue
                dnx = normal_image[r + row_gap, c] - normal_image[r - row_gap, c]
                dny = normal_image[r, c + col_gap] - normal_image[r, c - col_gap]
                squared_curvature = (np.linalg.norm(dnx)**2 +
                                     np.linalg.norm(dny)**2)
                curvature_image[r, c] = np.sqrt(squared_curvature)
        # make a tensor of the uncompressed data (with the max num points)
        unproj_n_points = scan.points.shape[0]
        unproj_xyz = torch.full((self.max_points, 3), -1.0, dtype=torch.float)
        unproj_xyz[:unproj_n_points] = torch.from_numpy(scan.points)
        unproj_range = torch.full([self.max_points], -1.0, dtype=torch.float)
        unproj_range[:unproj_n_points] = torch.from_numpy(scan.unproj_range)
        unproj_remissions = torch.full([self.max_points],
                                       -1.0,
                                       dtype=torch.float)
        unproj_remissions[:unproj_n_points] = torch.from_numpy(scan.remissions)
        unproj_labels = []

        # get points and labels
        proj_range = torch.from_numpy(scan.proj_range).clone()
        proj_normals = torch.from_numpy(normal_image).clone()
        proj_normals = proj_normals.type(torch.FloatTensor)
        proj_curvature = torch.from_numpy(curvature_image).clone()
        proj_curvature = proj_curvature.type(torch.FloatTensor)
        proj_xyz = torch.from_numpy(scan.proj_xyz).clone()
        proj_remission = torch.from_numpy(scan.proj_remission).clone()
        proj_mask = torch.from_numpy(scan.proj_mask).clone()
        # proj_mask = proj_mask.type(torch.FloatTensor)
        if self.gt:
            proj_labels = torch.from_numpy(mask).clone()
            proj_labels = proj_labels * proj_mask
        else:
            proj_labels = []
        proj_x = torch.full([self.max_points], -1, dtype=torch.long)
        proj_x[:unproj_n_points] = torch.from_numpy(scan.proj_x)
        proj_y = torch.full([self.max_points], -1, dtype=torch.long)
        proj_y[:unproj_n_points] = torch.from_numpy(scan.proj_y)
        proj_normalized = torch.cat([
            proj_range.unsqueeze(0).clone(),
            proj_xyz.clone().permute(2, 0, 1),
            proj_remission.unsqueeze(0).clone()
        ])
        proj_normalized = (proj_normalized -
                           self.sensor_img_means[:, None, None]
                           ) / self.sensor_img_stds[:, None, None]
        proj = torch.cat([
            proj_normalized,
            proj_normals.clone().permute(2, 0, 1),
            proj_curvature.unsqueeze(0).clone()
        ])

        proj = proj * proj_mask.float()
        # get name and sequence
        path_norm = os.path.normpath(scan_file)
        path_split = path_norm.split(os.sep)
        path_seq = path_split[-3]
        path_name = path_split[-1].replace(".bin", ".label")
        # print("path_norm: ", path_norm)
        # print("path_seq", path_seq)
        # print("path_name", path_name)

        # return
        return (proj, proj_mask, proj_labels, unproj_labels, path_seq,
                path_name, proj_x, proj_y, proj_range, unproj_range, proj_xyz,
                unproj_xyz, proj_remission, unproj_remissions, unproj_n_points)
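The nested loops in this example estimate normals from central differences of the range image, flip them so they point towards the sensor, and then derive a curvature proxy from the normal gradients. Below is a vectorized sketch of the normal-estimation step, given as an illustrative alternative rather than the original implementation; it assumes only the depth_image and proj_xyz arrays used above.

import numpy as np

def estimate_normals(depth_image, proj_xyz, max_range=80.0, gap=1):
    # Illustrative vectorized equivalent of the per-pixel normal loop above.
    rows, cols = depth_image.shape
    normals = np.zeros((rows, cols, 3))
    # central differences of the range image (interior pixels only)
    dx = depth_image[2 * gap:, gap:-gap] - depth_image[:-2 * gap, gap:-gap]
    dy = depth_image[gap:-gap, 2 * gap:] - depth_image[gap:-gap, :-2 * gap]
    d = np.stack([-dx, -dy, np.ones_like(dx)], axis=-1)
    d /= np.linalg.norm(d, axis=-1, keepdims=True)
    # orient the normals towards the sensor
    centre = proj_xyz[gap:-gap, gap:-gap]
    flip = (d * centre).sum(axis=-1) > 0
    d[flip] *= -1
    # zero out pixels that are out of range or have invalid neighbours
    valid = depth_image[gap:-gap, gap:-gap] <= max_range
    valid &= (depth_image[2 * gap:, gap:-gap] >= 0) & (depth_image[:-2 * gap, gap:-gap] >= 0)
    valid &= (depth_image[gap:-gap, 2 * gap:] >= 0) & (depth_image[gap:-gap, :-2 * gap] >= 0)
    d[~valid] = 0
    normals[gap:-gap, gap:-gap] = d
    return normals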
def sum_mean_values(laserscan_file: str):
    # Per-scan contribution to the dataset means (summed channel values).
    laserscan = LaserScan()
    laserscan.open_scan(laserscan_file)
    return lh.sum_laserscan_properties(laserscan)
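Since each of these helpers processes a single file, a caller can map them over a file list, optionally in parallel. A minimal sketch follows; the glob pattern is an assumption, and mean_vector stands for the per-channel means derived from the first pass, whose exact reduction depends on what the lh helpers return.

import glob
from functools import partial
from multiprocessing import Pool

scan_files = sorted(glob.glob("sequence/velodyne/*.bin"))  # assumed layout

with Pool() as pool:
    # first pass: per-file sums, later reduced into the per-channel means
    mean_sums = pool.map(sum_mean_values, scan_files)
    # second pass, once mean_vector has been derived from mean_sums
    # (functools.partial binds the means to the per-file worker):
    # std_sums = pool.map(
    #     partial(sum_std_values, mean_vector=mean_vector), scan_files)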