def sum_std_values(laserscan_file: str, mean_vector):
    """Accumulate squared deviations from the given means over one scan file.

    Opens *laserscan_file*, extracts the per-channel means from
    *mean_vector* (laid out according to INDEX_ORDER) and delegates the
    actual accumulation to ``lh.sum_laserscan_std``.
    """
    scan = LaserScan()
    scan.open_scan(laserscan_file)
    # Look up each channel's mean by its position in the shared INDEX_ORDER.
    means = {key: mean_vector[INDEX_ORDER.index(key)]
             for key in ("X", "Y", "Z", "REMISSION", "RANGE")}
    return lh.sum_laserscan_std(scan, means["X"], means["Y"], means["Z"],
                                means["REMISSION"], means["RANGE"])
Esempio n. 2
0
    def __getitem__(self, index):
        """Return the projected semantic-color frames for one sample window.

        Loads ``self.frame_num`` consecutive scans starting at
        ``index * frame_num`` and, when ground truth is available
        (``self.gt``), their projected semantic color images.

        Returns:
            tuple: ``(proj_colors_seq, path_seq_list, path_name_list)``
            where ``proj_colors_seq`` is a stacked tensor of the per-frame
            color projections when ``self.gt`` is set, else an empty list.
        """
        frame_num = self.frame_num
        proj_colors_list = []
        path_seq_list = []
        path_name_list = []

        for idx in range(index * frame_num, index * frame_num + frame_num):
            # get item in tensor shape
            scan_file = self.scan_files[idx]
            if self.gt:
                label_file = self.label_files[idx]

            # open a semantic laserscan
            if self.gt:
                # BUGFIX: the width was hard-coded to 1024 here while the
                # non-gt branch below (and every sibling loader) uses the
                # configured sensor width; use the configured value.
                scan = SemLaserScan(self.color_map,
                                    project=True,
                                    H=self.sensor_img_H,
                                    W=self.sensor_img_W,
                                    fov_up=self.sensor_fov_up,
                                    fov_down=self.sensor_fov_down)
            else:
                scan = LaserScan(project=True,
                                 H=self.sensor_img_H,
                                 W=self.sensor_img_W,
                                 fov_up=self.sensor_fov_up,
                                 fov_down=self.sensor_fov_down)

            # open and obtain scan
            scan.open_scan(scan_file)
            if self.gt:
                scan.open_label(label_file)
                # map unused classes to used classes (also for projection)
                scan.sem_label = self.map(scan.sem_label, self.learning_map)
                # color only pixels that actually received a projected point
                mask = scan.proj_idx >= 0
                scan.proj_sem_color[mask] = scan.sem_color_lut[scan.sem_label[
                    scan.proj_idx[mask]]]

            if self.gt:
                proj_colors = torch.from_numpy(scan.proj_sem_color).clone()
            else:
                proj_colors = []
            # get name and sequence from the path (…/<seq>/…/<name>.bin)
            path_norm = os.path.normpath(scan_file)
            path_split = path_norm.split(os.sep)
            path_seq = path_split[-3]
            path_name = path_split[-1].replace(".bin", ".label")
            if self.gt:
                proj_colors_list.append(proj_colors.unsqueeze(0))
            path_seq_list.append(path_seq)
            path_name_list.append(path_name)

        if self.gt:
            proj_colors_seq = torch.cat(proj_colors_list, dim=0)
        else:
            proj_colors_seq = []

        # return
        return proj_colors_seq, path_seq_list, path_name_list
def get_laserscan_matrix(laserscan_file: str, label_file: str):
    """Load a scan/label pair into a single (N, 5) matrix.

    Columns are x, y, z, remission, label. The two file basenames (without
    extension) must match; otherwise a UserWarning is issued and
    ``(None, None)`` is returned.

    Returns:
        tuple: ``(laserscan_matrix, laser_filename)`` on success, or
        ``(None, None)`` when the file names do not match.
    """
    laser_filename = os.path.splitext(os.path.basename(laserscan_file))[0]
    label_filename = os.path.splitext(os.path.basename(label_file))[0]
    if laser_filename != label_filename:
        # BUGFIX: warnings.warn takes (message, category) — the original
        # passed them swapped, which raises TypeError instead of warning.
        warnings.warn(
            f"The laser-file {laser_filename} and label-file {label_filename} are not "
            f"the same. Skipping these files...",
            UserWarning)
        return None, None
    laserscan = LaserScan()
    laserscan.open_scan(laserscan_file)
    laserscan.open_labels(label_file)
    # Stack points (N, 3), remission (N, 1) and labels (N, 1) column-wise.
    laserscan_matrix = np.concatenate([
        laserscan.points,
        laserscan.remissions.reshape(-1, 1),
        laserscan.labels.reshape(-1, 1)
    ],
                                      axis=1)
    return laserscan_matrix, laser_filename
    def __getitem__(self, index):
        """Load one scan (and optional label) and build the network input.

        Builds the 5-channel normalized range-image projection
        [range, x, y, z, remission], plus per-point (unprojected) tensors
        padded with -1 up to ``self.max_points``, and the sequence/file
        identifiers derived from the scan path.
        """
        # get item in tensor shape
        scan_file = self.scan_files[index]
        if self.gt:
            label_file = self.label_files[index]

        # open a semantic laserscan (only the gt path needs the color map)
        if self.gt:
            scan = SemLaserScan(self.color_map,
                                project=True,
                                H=self.sensor_img_H,
                                W=self.sensor_img_W,
                                fov_up=self.sensor_fov_up,
                                fov_down=self.sensor_fov_down)
        else:
            scan = LaserScan(project=True,
                             H=self.sensor_img_H,
                             W=self.sensor_img_W,
                             fov_up=self.sensor_fov_up,
                             fov_down=self.sensor_fov_down)

        # open and obtain scan
        scan.open_scan(scan_file)
        if self.gt:
            scan.open_label(label_file)
            # map unused classes to used classes (also for projection)
            scan.sem_label = self.map(scan.sem_label, self.learning_map)
            scan.proj_sem_label = self.map(scan.proj_sem_label,
                                           self.learning_map)

        # make a tensor of the uncompressed data (with the max num points);
        # entries past unproj_n_points keep the -1 padding value
        unproj_n_points = scan.points.shape[0]
        unproj_xyz = torch.full((self.max_points, 3), -1.0, dtype=torch.float)
        unproj_xyz[:unproj_n_points] = torch.from_numpy(scan.points)
        unproj_range = torch.full([self.max_points], -1.0, dtype=torch.float)
        unproj_range[:unproj_n_points] = torch.from_numpy(scan.unproj_range)
        unproj_remissions = torch.full([self.max_points],
                                       -1.0,
                                       dtype=torch.float)
        unproj_remissions[:unproj_n_points] = torch.from_numpy(scan.remissions)
        if self.gt:
            unproj_labels = torch.full([self.max_points],
                                       -1.0,
                                       dtype=torch.int32)
            unproj_labels[:unproj_n_points] = torch.from_numpy(scan.sem_label)
        else:
            unproj_labels = []

        # get points and labels
        proj_range = torch.from_numpy(scan.proj_range).clone()
        proj_xyz = torch.from_numpy(scan.proj_xyz).clone()
        proj_remission = torch.from_numpy(scan.proj_remission).clone()
        proj_mask = torch.from_numpy(scan.proj_mask)
        if self.gt:
            proj_labels = torch.from_numpy(scan.proj_sem_label).clone()
            # zero out labels on pixels that received no projected point
            proj_labels = proj_labels * proj_mask
        else:
            proj_labels = []
        proj_x = torch.full([self.max_points], -1, dtype=torch.long)
        proj_x[:unproj_n_points] = torch.from_numpy(scan.proj_x)
        proj_y = torch.full([self.max_points], -1, dtype=torch.long)
        proj_y[:unproj_n_points] = torch.from_numpy(scan.proj_y)
        # stack [range, x, y, z, remission] into a (5, H, W) image ...
        proj = torch.cat([
            proj_range.unsqueeze(0).clone(),
            proj_xyz.clone().permute(2, 0, 1),
            proj_remission.unsqueeze(0).clone()
        ])
        # ... then normalize per channel and mask out empty pixels
        proj = (proj - self.sensor_img_means[:, None, None]
                ) / self.sensor_img_stds[:, None, None]
        proj = proj * proj_mask.float()

        # get name and sequence from the path (…/<seq>/…/<name>.bin)
        path_norm = os.path.normpath(scan_file)
        path_split = path_norm.split(os.sep)
        path_seq = path_split[-3]
        path_name = path_split[-1].replace(".bin", ".label")
        # print("path_norm: ", path_norm)
        # print("path_seq", path_seq)
        # print("path_name", path_name)

        # return
        return proj, proj_mask, proj_labels, unproj_labels, path_seq, path_name, proj_x, proj_y, proj_range, unproj_range, proj_xyz, unproj_xyz, proj_remission, unproj_remissions, unproj_n_points
Esempio n. 5
0
      # NOTE(review): this chunk starts mid-`if`; the matching condition
      # (above this view) presumably checks that the labels folder exists —
      # confirm against the full script.
      print("Labels folder exists! Using labels from %s" % label_paths)
    else:
      print("Labels folder doesn't exist! Exiting...")
      quit()
    # populate the pointclouds: collect every file below label_paths
    label_names = [os.path.join(dp, f) for dp, dn, fn in os.walk(
        os.path.expanduser(label_paths)) for f in fn]
    label_names.sort()

    # check that there are same amount of labels and scans
    if not FLAGS.ignore_safety:
      assert(len(label_names) == len(scan_names))

  # create a scan
  if FLAGS.ignore_semantics:
    scan = LaserScan(project=True)  # project all opened scans to spheric proj
  else:
    color_dict = CFG["color_map"]
    scan = SemLaserScan(color_dict, project=True)

  # create a visualizer
  semantics = not FLAGS.ignore_semantics
  if not semantics:
    label_names = None  # the visualizer gets no labels when semantics are off
  vis = LaserScanVis(scan=scan,
                     scan_names=scan_names,
                     label_names=label_names,
                     offset=FLAGS.offset,
                     semantics=semantics,
                     instances=False)
Esempio n. 6
0
  def __getitem__(self, index):
    """Load one scan (and optional label) with random augmentations.

    Returns:
        tuple: (proj_range, proj_segment_angle, proj_xyz, proj_remission,
        proj_mask, proj_labels, scan.points, scan_file, scan.sem_label).
    """
    # get item in tensor shape
    scan_file = self.scan_files[index]
    if self.gt:
      label_file = self.label_files[index]

    # open a semantic laserscan.
    # Each augmentation fires with overall probability 0.25 (a coin flip
    # nested under an outer coin flip); drop_points becomes a uniform drop
    # ratio in [0, 0.5) instead of a bool when the outer flip succeeds.
    DA = False
    flip_sign = False
    rot = False
    drop_points = False
    if self.transform:
        if random.random() > 0.5:
            if random.random() > 0.5:
                DA = True
            if random.random() > 0.5:
                flip_sign = True
            if random.random() > 0.5:
                rot = True
            drop_points = random.uniform(0, 0.5)

    if self.gt:
      # NOTE(review): `rot` is not forwarded to SemLaserScan, unlike the
      # LaserScan branch below — confirm whether rotation is intentionally
      # disabled on the ground-truth path.
      scan = SemLaserScan(self.color_map,
                          project=True,
                          H=self.sensor_img_H,
                          W=self.sensor_img_W,
                          fov_up=self.sensor_fov_up,
                          fov_down=self.sensor_fov_down,
                          DA=DA,
                          flip_sign=flip_sign,
                          drop_points=drop_points)
    else:
      scan = LaserScan(project=True,
                       H=self.sensor_img_H,
                       W=self.sensor_img_W,
                       fov_up=self.sensor_fov_up,
                       fov_down=self.sensor_fov_down,
                       DA=DA,
                       rot=rot,
                       flip_sign=flip_sign,
                       drop_points=drop_points)

    # open and obtain scan
    scan.open_scan(scan_file)
    if self.gt:
      scan.open_label(label_file)
      # map unused classes to used classes (also for projection)
      scan.sem_label = self.map(scan.sem_label, self.learning_map)
      scan.proj_sem_label = self.map(scan.proj_sem_label, self.learning_map)

    # get points and labels
    proj_range = torch.from_numpy(scan.proj_range).clone()
    proj_segment_angle = torch.from_numpy(scan.segment_angle).clone()
    proj_xyz = torch.from_numpy(scan.proj_xyz).clone()
    proj_remission = torch.from_numpy(scan.proj_remission).clone()
    proj_mask = torch.from_numpy(scan.proj_mask)
    if self.gt:
      proj_labels = torch.from_numpy(scan.proj_sem_label).clone()
      # zero out labels on pixels that received no projected point
      proj_labels = proj_labels * proj_mask
    else:
      proj_labels = []

    # return
    return proj_range, proj_segment_angle, proj_xyz, proj_remission, proj_mask, proj_labels, scan.points, scan_file, scan.sem_label
Esempio n. 7
0
    def __getitem__(self, index):
        """Load one scan plus a binary mask and build a 9-channel input.

        The input image is the 5 standard channels (range, x, y, z,
        remission — normalized with the sensor statistics), followed by a
        3-channel surface-normal image and a 1-channel curvature image,
        both estimated from the projected range image.
        """
        # get item in tensor shape
        scan_file = self.scan_files[index]
        if self.gt:
            # BUGFIX: the mask was loaded unconditionally although
            # mask_file is only defined when ground truth is present,
            # raising NameError for self.gt == False. Load under the guard.
            mask_file = self.mask_files[index]
            mask = np.load(mask_file)
            # binarize at 0.5 and add a leading channel dimension
            mask[mask > 0.5] = 1
            mask[mask <= 0.5] = 0
            mask = np.expand_dims(mask, axis=0)

        scan = LaserScan(project=True,
                         H=self.sensor_img_H,
                         W=self.sensor_img_W,
                         fov_up=self.sensor_fov_up,
                         fov_down=self.sensor_fov_down)

        # open and obtain scan
        scan.open_scan(scan_file)

        # --- estimate per-pixel surface normals from the range image ------
        depth_image = np.copy(scan.proj_range)
        rows = depth_image.shape[0]
        cols = depth_image.shape[1]
        row_gap = 1
        col_gap = 1
        max_range = 80  # skip returns beyond this range (meters, presumably)
        normal_image = np.zeros((rows, cols, 3))
        for r in range(row_gap, rows - row_gap):
            for c in range(col_gap, cols - col_gap):
                if depth_image[r, c] > max_range:
                    continue
                # skip pixels with invalid (negative-range) neighbours
                if depth_image[r + row_gap, c] < 0 or depth_image[r - row_gap,
                                                                  c] < 0:
                    continue
                if depth_image[r, c + col_gap] < 0 or depth_image[r, c -
                                                                  col_gap] < 0:
                    continue
                # central differences of depth -> gradient -> unit normal
                dx = depth_image[r + row_gap, c] - depth_image[r - row_gap, c]
                dy = depth_image[r, c + col_gap] - depth_image[r, c - col_gap]
                d = np.array([-dx, -dy, 1])
                normal = d / np.linalg.norm(d)
                # orient the normal towards the sensor
                p = scan.proj_xyz[r, c]
                if np.dot(normal, p) > 0:
                    normal *= -1
                normal_image[r, c] = normal

        # --- curvature as the gradient magnitude of the normal field ------
        curvature_image = np.full(depth_image.shape, -1, dtype=np.float32)
        for r in range(row_gap, rows - row_gap):
            for c in range(col_gap, cols - col_gap):
                dnx = normal_image[r + row_gap, c] - normal_image[r - row_gap,
                                                                  c]
                dny = normal_image[r, c + col_gap] - normal_image[r,
                                                                  c - col_gap]
                # skip pixels with (near-)zero normals on either side.
                # Consistency: the original mixed literal 1 with
                # row_gap/col_gap here; both are 1, so behavior is unchanged.
                if np.linalg.norm(
                        normal_image[r + row_gap, c]) < 1e-3 or np.linalg.norm(
                            normal_image[r - row_gap, c]) < 1e-3:
                    continue
                if np.linalg.norm(
                        normal_image[r, c + col_gap]) < 1e-3 or np.linalg.norm(
                            normal_image[r, c - col_gap]) < 1e-3:
                    continue
                squared_curvature = np.linalg.norm(dnx)**2 + np.linalg.norm(
                    dny)**2
                curvature_image[r, c] = np.sqrt(squared_curvature)

        # make a tensor of the uncompressed data (with the max num points);
        # entries past unproj_n_points keep the -1 padding value
        unproj_n_points = scan.points.shape[0]
        unproj_xyz = torch.full((self.max_points, 3), -1.0, dtype=torch.float)
        unproj_xyz[:unproj_n_points] = torch.from_numpy(scan.points)
        unproj_range = torch.full([self.max_points], -1.0, dtype=torch.float)
        unproj_range[:unproj_n_points] = torch.from_numpy(scan.unproj_range)
        unproj_remissions = torch.full([self.max_points],
                                       -1.0,
                                       dtype=torch.float)
        unproj_remissions[:unproj_n_points] = torch.from_numpy(scan.remissions)
        unproj_labels = []

        # get points and labels
        proj_range = torch.from_numpy(scan.proj_range).clone()
        proj_normals = torch.from_numpy(normal_image).clone()
        proj_normals = proj_normals.type(torch.FloatTensor)
        proj_curvature = torch.from_numpy(curvature_image).clone()
        proj_curvature = proj_curvature.type(torch.FloatTensor)
        proj_xyz = torch.from_numpy(scan.proj_xyz).clone()
        proj_remission = torch.from_numpy(scan.proj_remission).clone()
        proj_mask = torch.from_numpy(scan.proj_mask).clone()
        # proj_mask = proj_mask.type(torch.FloatTensor)
        if self.gt:
            proj_labels = torch.from_numpy(mask).clone()
            proj_labels = proj_labels * proj_mask
        else:
            proj_labels = []
        proj_x = torch.full([self.max_points], -1, dtype=torch.long)
        proj_x[:unproj_n_points] = torch.from_numpy(scan.proj_x)
        proj_y = torch.full([self.max_points], -1, dtype=torch.long)
        proj_y[:unproj_n_points] = torch.from_numpy(scan.proj_y)
        # normalize only the 5 standard channels; normals/curvature are
        # appended un-normalized afterwards
        proj_normalized = torch.cat([
            proj_range.unsqueeze(0).clone(),
            proj_xyz.clone().permute(2, 0, 1),
            proj_remission.unsqueeze(0).clone()
        ])
        proj_normalized = (proj_normalized -
                           self.sensor_img_means[:, None, None]
                           ) / self.sensor_img_stds[:, None, None]
        proj = torch.cat([
            proj_normalized,
            proj_normals.clone().permute(2, 0, 1),
            proj_curvature.unsqueeze(0).clone()
        ])

        proj = proj * proj_mask.float()
        # get name and sequence from the path (…/<seq>/…/<name>.bin)
        path_norm = os.path.normpath(scan_file)
        path_split = path_norm.split(os.sep)
        path_seq = path_split[-3]
        path_name = path_split[-1].replace(".bin", ".label")

        # return
        return proj, proj_mask, proj_labels, unproj_labels, path_seq, path_name, proj_x, proj_y, proj_range, unproj_range, proj_xyz, unproj_xyz, proj_remission, unproj_remissions, unproj_n_points
Esempio n. 8
0
        # check that there are same amount of labels and scans
        # (fragment starts mid-function: the enclosing def/if is above this view)
        if not FLAGS.ignore_safety:

            print("label names: \n")
            print(label_names)
            print("\nscan_names: \n")
            print(scan_names)
            # NOTE(review): the first scan is dropped before the sanity
            # check — presumably the two lists are offset by one; confirm
            # against the dataset layout before relying on this.
            scan_names = scan_names[1:]

            assert (len(label_names) == len(scan_names))

    # create a scan
    if FLAGS.ignore_semantics:
        # project all opened scans to spheric proj
        scan = LaserScan(project=True)
    else:
        color_dict = CFG["color_map"]
        scan = SemLaserScan(color_dict, project=True)

    # create a visualizer
    semantics = not FLAGS.ignore_semantics
    if not semantics:
        label_names = None  # visualizer gets no labels when semantics are off
    vis = LaserScanVis(scan=scan,
                       scan_names=scan_names,
                       label_names=label_names,
                       offset=FLAGS.offset,
                       semantics=semantics,
                       instances=False)
Esempio n. 9
0
    def __getitem__(self, index):
        """Load one scan (and optional label) with random augmentations.

        Like the plain loader, but with optional dropout augmentation (DA),
        sign flip, rotation and point dropping, each enabled with overall
        probability 0.25 when ``self.transform`` is set. Returns the
        5-channel normalized projection plus masks, labels, padded
        per-point tensors and the sequence/file identifiers.
        """
        # get item in tensor shape
        scan_file = self.scan_files[index]

        # JLLIU
        # print(scan_file)

        if self.gt:
            label_file = self.label_files[index]

        # open a semantic laserscan.
        # Each augmentation fires with overall probability 0.25 (a coin
        # flip nested under an outer coin flip); drop_points becomes a
        # uniform drop ratio in [0, 0.5) when the outer flip succeeds.
        DA = False
        flip_sign = False
        rot = False
        drop_points = False
        if self.transform:
            if random.random() > 0.5:
                if random.random() > 0.5:
                    DA = True
                if random.random() > 0.5:
                    flip_sign = True
                if random.random() > 0.5:
                    rot = True
                drop_points = random.uniform(0, 0.5)

        if self.gt:
            # NOTE(review): `rot` is not forwarded to SemLaserScan, unlike
            # the LaserScan branch below — confirm this is intentional.
            scan = SemLaserScan(self.color_map,
                                project=True,
                                H=self.sensor_img_H,
                                W=self.sensor_img_W,
                                fov_up=self.sensor_fov_up,
                                fov_down=self.sensor_fov_down,
                                DA=DA,
                                flip_sign=flip_sign,
                                drop_points=drop_points)
        else:
            scan = LaserScan(project=True,
                             H=self.sensor_img_H,
                             W=self.sensor_img_W,
                             fov_up=self.sensor_fov_up,
                             fov_down=self.sensor_fov_down,
                             DA=DA,
                             rot=rot,
                             flip_sign=flip_sign,
                             drop_points=drop_points)

        # open and obtain scan
        scan.open_scan(scan_file)
        if self.gt:
            scan.open_label(label_file)
            # map unused classes to used classes (also for projection)
            scan.sem_label = self.map(scan.sem_label, self.learning_map)
            scan.proj_sem_label = self.map(scan.proj_sem_label,
                                           self.learning_map)

        #print("proj_x.shape: ",proj_x.shape)
        #print("proj_y.shape: ",proj_y.shape)

        # make a tensor of the uncompressed data (with the max num points);
        # entries past unproj_n_points keep the -1 padding value
        unproj_n_points = scan.points.shape[0]
        unproj_xyz = torch.full((self.max_points, 3), -1.0, dtype=torch.float)
        unproj_xyz[:unproj_n_points] = torch.from_numpy(scan.points)
        unproj_range = torch.full([self.max_points], -1.0, dtype=torch.float)
        unproj_range[:unproj_n_points] = torch.from_numpy(scan.unproj_range)
        unproj_remissions = torch.full([self.max_points],
                                       -1.0,
                                       dtype=torch.float)
        unproj_remissions[:unproj_n_points] = torch.from_numpy(scan.remissions)
        if self.gt:
            unproj_labels = torch.full([self.max_points],
                                       -1.0,
                                       dtype=torch.int32)
            unproj_labels[:unproj_n_points] = torch.from_numpy(scan.sem_label)
        else:
            unproj_labels = []


# JLLIU:
# The proj_xyz.shape below, torch.Size([64, 2048, 3]), is the same one seen
# earlier in laserscan.py; same for the intensity proj_remission[proj_y, proj_x].
        """
    seq_now = os.path.normpath(scan_file).split(os.sep)[-3]
    file_now = os.path.normpath(scan_file).split(os.sep)[-1].replace(".bin", ".npy")

    proj_xyzi = np.zeros(shape=(64,2048,4))
    proj_xyzi[:,:,0:3] = scan.proj_xyz
    proj_xyzi[:,:,3] = scan.proj_remission
    np.save('/home/doggy/SalsaNext/train/tasks/semantic/dataset/xyzi_npy/'+ seq_now + '/' + file_now , proj_xyzi)
    
    print("\nnpy_name: ", file_now)
    print("proj_xyz[50,50]: ", scan.proj_xyz[50,50])
    print("proj_i[50,50]: ", scan.proj_remission[50,50])
    print("proj_xyzi[50,50]: ", proj_xyzi[50,50])
    """

        # get points and labels
        proj_range = torch.from_numpy(scan.proj_range).clone()
        proj_xyz = torch.from_numpy(scan.proj_xyz).clone()
        proj_remission = torch.from_numpy(scan.proj_remission).clone()
        proj_mask = torch.from_numpy(scan.proj_mask)
        if self.gt:
            proj_labels = torch.from_numpy(scan.proj_sem_label).clone()
            # zero out labels on pixels that received no projected point
            proj_labels = proj_labels * proj_mask
        else:
            proj_labels = []
        proj_x = torch.full([self.max_points], -1, dtype=torch.long)
        proj_x[:unproj_n_points] = torch.from_numpy(scan.proj_x)
        proj_y = torch.full([self.max_points], -1, dtype=torch.long)
        proj_y[:unproj_n_points] = torch.from_numpy(scan.proj_y)
        # stack [range, x, y, z, remission] into a (5, H, W) image ...
        proj = torch.cat([
            proj_range.unsqueeze(0).clone(),
            proj_xyz.clone().permute(2, 0, 1),
            proj_remission.unsqueeze(0).clone()
        ])
        # ... then normalize per channel and mask out empty pixels
        proj = (proj - self.sensor_img_means[:, None, None]
                ) / self.sensor_img_stds[:, None, None]
        proj = proj * proj_mask.float()

        # get name and sequence from the path (…/<seq>/…/<name>.bin)
        path_norm = os.path.normpath(scan_file)
        path_split = path_norm.split(os.sep)
        path_seq = path_split[-3]
        path_name = path_split[-1].replace(".bin", ".label")

        # return
        return proj, proj_mask, proj_labels, unproj_labels, path_seq, path_name, proj_x, proj_y, proj_range, unproj_range, proj_xyz, unproj_xyz, proj_remission, unproj_remissions, unproj_n_points
Esempio n. 10
0
def sum_mean_values(laserscan_file: str):
    """Open one laser scan and return its summed per-channel properties.

    Thin wrapper: loads *laserscan_file* into a LaserScan and forwards it
    to ``lh.sum_laserscan_properties`` for the actual accumulation.
    """
    scan = LaserScan()
    scan.open_scan(laserscan_file)
    return lh.sum_laserscan_properties(scan)
Esempio n. 11
0
    def __getitem__(self, index):
        """Load one scan and build multi-resolution point/image features.

        Besides the standard 5-channel normalized projection, builds a
        pyramid of ``downsamplings`` levels: at each level the 3x3
        neighbourhood of every pixel is unfolded and each point is
        expressed both in absolute coordinates and relative to the mean of
        the valid points in its neighbourhood. Results are collected in
        ``representations['image']`` / ``representations['points']``.
        """
        # get item in tensor shape
        scan_file = self.scan_files[index]
        if self.gt:
            label_file = self.label_files[index]

        # open a semantic laserscan (only the gt path needs the color map)
        if self.gt:
            scan = SemLaserScan(self.color_map,
                                project=True,
                                H=self.sensor_img_H,
                                W=self.sensor_img_W,
                                fov_up=self.sensor_fov_up,
                                fov_down=self.sensor_fov_down)
        else:
            scan = LaserScan(project=True,
                             H=self.sensor_img_H,
                             W=self.sensor_img_W,
                             fov_up=self.sensor_fov_up,
                             fov_down=self.sensor_fov_down)

        # open and obtain scan
        scan.open_scan(scan_file)
        if self.gt:
            scan.open_label(label_file)
            # map unused classes to used classes (also for projection)
            scan.sem_label = self.map(scan.sem_label, self.learning_map)
            scan.proj_sem_label = self.map(scan.proj_sem_label,
                                           self.learning_map)

        # make a tensor of the uncompressed data (with the max num points);
        # entries past unproj_n_points keep the -1 padding value
        unproj_n_points = scan.points.shape[0]
        unproj_xyz = torch.full((self.max_points, 3), -1.0, dtype=torch.float)
        unproj_xyz[:unproj_n_points] = torch.from_numpy(scan.points)
        unproj_range = torch.full([self.max_points], -1.0, dtype=torch.float)
        unproj_range[:unproj_n_points] = torch.from_numpy(scan.unproj_range)
        unproj_remissions = torch.full([self.max_points],
                                       -1.0,
                                       dtype=torch.float)
        unproj_remissions[:unproj_n_points] = torch.from_numpy(scan.remissions)
        if self.gt:
            unproj_labels = torch.full([self.max_points],
                                       -1.0,
                                       dtype=torch.int32)
            unproj_labels[:unproj_n_points] = torch.from_numpy(scan.sem_label)
        else:
            unproj_labels = []

        # get points and labels
        proj_range = torch.from_numpy(scan.proj_range).clone()
        proj_xyz = torch.from_numpy(scan.proj_xyz).clone()
        proj_remission = torch.from_numpy(scan.proj_remission).clone()
        proj_mask = torch.from_numpy(scan.proj_mask)
        if self.gt:
            proj_labels = torch.from_numpy(scan.proj_sem_label).clone()
            # zero out labels on pixels that received no projected point
            proj_labels = proj_labels * proj_mask
        else:
            proj_labels = []
        proj_x = torch.full([self.max_points], -1, dtype=torch.long)
        proj_x[:unproj_n_points] = torch.from_numpy(scan.proj_x)
        proj_y = torch.full([self.max_points], -1, dtype=torch.long)
        proj_y[:unproj_n_points] = torch.from_numpy(scan.proj_y)
        # stack [range, x, y, z, remission] into a (5, H, W) image
        proj = torch.cat([
            proj_range.unsqueeze(0).clone(),
            proj_xyz.clone().permute(2, 0, 1),
            proj_remission.unsqueeze(0).clone()
        ])

        proj_blocked = proj.unsqueeze(1)  # Swap Batch and channel dimensions

        # normalize per channel and mask out empty pixels
        proj = (proj - self.sensor_img_means[:, None, None]
                ) / self.sensor_img_stds[:, None, None]

        proj = proj * proj_mask.float()

        # get name and sequence from the path (…/<seq>/…/<name>.bin)
        path_norm = os.path.normpath(scan_file)
        path_split = path_norm.split(os.sep)
        path_seq = path_split[-3]
        path_name = path_split[-1].replace(".bin", ".label")
        # print("path_norm: ", path_norm)
        # print("path_seq", path_seq)
        # print("path_name", path_name)

        # import time
        # import cv2
        # cv2.imwrite('/home/snowflake/Desktop/big8192-128.png', proj_blocked[0,0, :, :].numpy()*15)
        # print('proj_blocked.shape')
        # print(proj_blocked.shape)
        # time.sleep(1000)

        # proj2 keeps the full-resolution normalized image to return; proj
        # itself is progressively downsampled inside the loop below
        n, c, h, w = proj_blocked.size()
        proj2 = proj.clone()
        proj = proj.unsqueeze(0)
        mask_image = proj_mask.unsqueeze(0).unsqueeze(0).float()
        downsamplings = 4
        representations = {}
        representations['image'] = []
        representations['points'] = []
        windows_size = 3  # 3x3 unfold window edge length

        for i in range(downsamplings):

            # unfold every 3x3 neighbourhood into a column (per channel)
            proj_chan_group_points = f.unfold(proj_blocked,
                                              kernel_size=3,
                                              stride=1,
                                              padding=1)
            projmask_chan_group_points = f.unfold(mask_image,
                                                  kernel_size=3,
                                                  stride=1,
                                                  padding=1)

            # Get the mean point (taking apart non-valid points)
            proj_chan_group_points_sum = torch.sum(proj_chan_group_points,
                                                   dim=1)
            projmask_chan_group_points_sum = torch.sum(
                projmask_chan_group_points, dim=1)
            proj_chan_group_points_mean = proj_chan_group_points_sum / projmask_chan_group_points_sum

            # tile it for being able to substract it to the other points
            tiled_proj_chan_group_points_mean = proj_chan_group_points_mean.unsqueeze(
                1).repeat(1, windows_size * windows_size, 1)

            # remove nans due to empty blocks (x != x is true only for NaN)
            is_nan = tiled_proj_chan_group_points_mean != tiled_proj_chan_group_points_mean
            tiled_proj_chan_group_points_mean[is_nan] = 0.

            # compute valid mask per point
            # NOTE(review): indexing with a uint8 (.byte()) tensor is
            # deprecated in newer torch in favor of bool — verify the
            # targeted torch version still accepts it.
            tiled_projmask_chan_group_points = (
                1 - projmask_chan_group_points.repeat(n, 1, 1)).byte()

            # substract mean point to points
            proj_chan_group_points_relative = proj_chan_group_points - tiled_proj_chan_group_points_mean

            # set to zero points which where non valid at the beginning
            proj_chan_group_points_relative[
                tiled_projmask_chan_group_points] = 0.

            # compute distance (radius) to mean point
            # xyz_relative = proj_chan_group_points_relative[1:4,...]
            # relative_distance = torch.norm(xyz_relative, dim=0).unsqueeze(0)

            # NOW proj_chan_group_points_relative HAS Xr, Yr, Zr, Rr, Dr relative to the mean point
            proj_norm_chan_group_points = f.unfold(proj.permute(1, 0, 2, 3),
                                                   kernel_size=3,
                                                   stride=1,
                                                   padding=1)
            # NOW proj_norm_chan_group_points HAS X, Y, Z, R, D. Now we have to concat them both
            proj_chan_group_points_combined = torch.cat(
                [proj_norm_chan_group_points, proj_chan_group_points_relative],
                dim=0)
            # convert back to image for image-convolution-branch
            proj_out = f.fold(proj_chan_group_points_combined,
                              proj_blocked.shape[-2:],
                              kernel_size=3,
                              stride=1,
                              padding=1)
            proj_out = proj_out.squeeze(1)

            # halve the spatial resolution for the next pyramid level
            proj = nn.functional.interpolate(proj,
                                             size=(int(proj.shape[2] / 2),
                                                   int(proj.shape[3] / 2)),
                                             mode='nearest')
            proj_blocked = nn.functional.interpolate(
                proj_blocked.permute(1, 0, 2, 3),
                size=(int(proj_blocked.shape[2] / 2),
                      int(proj_blocked.shape[3] / 2)),
                mode='nearest').permute(1, 0, 2, 3)
            mask_image = nn.functional.interpolate(
                mask_image,
                size=(int(mask_image.shape[2] / 2),
                      int(mask_image.shape[3] / 2)),
                mode='nearest')

            representations['points'].append(proj_chan_group_points_combined)
            representations['image'].append(proj_out)
            # print('append' +str(i))
            #
            # print(proj_chan_group_points_combined.shape)
            # print(proj_out.shape)

        return proj2, proj_mask, proj_labels, unproj_labels, path_seq, path_name, proj_x, proj_y, proj_range, unproj_range, proj_xyz, unproj_xyz, proj_remission, unproj_remissions, unproj_n_points, representations