示例#1
0
def read_binary_files_factorized(filename, rootdir='./'):
    """Load a factorized-entropy-model compressed point cloud from disk.

    Reads, relative to `rootdir`:
      1) '<filename>.strings'   -- latent tensor shape, value range, coded bytes.
      2) '<filename>.pointnums' -- per-cube input point counts (uint16).
      3) '<filename>.cubepos'   -- GPCC-coded cube positions, decoded via a
                                   temporary '<filename>_cubepos.ply' file.
    """

    print('===== Read binary files =====')
    strings_path = os.path.join(rootdir, filename + '.strings')
    pointnums_path = os.path.join(rootdir, filename + '.pointnums')
    cubepos_path = os.path.join(rootdir, filename + '.cubepos')
    cubepos_ply = os.path.join(rootdir, filename + '_cubepos.ply')

    with open(strings_path, 'rb') as stream:
        # Five int16 values describing the latent tensor shape.
        shape = np.frombuffer(stream.read(2 * 5), dtype=np.int16)
        # Two int8 values: the quantized value range of the latents.
        min_v, max_v = np.frombuffer(stream.read(1 * 2), dtype=np.int8)
        # The rest of the file is the entropy-coded bit string.
        strings = stream.read()

    with open(pointnums_path, 'rb') as stream:
        points_numbers = np.frombuffer(stream.read(), dtype=np.uint16)

    gpcc_decode(cubepos_path, cubepos_ply)
    cube_positions = load_ply_data(cubepos_ply)

    return strings, points_numbers, cube_positions, min_v, max_v, shape
示例#2
0
def postprocess(output_file, cubes, points_numbers, cube_positions, scale,
                cube_size, rho):
    """Classify voxels to occupied or free, then extract points and write to file.

    Input: decoded cubes, cube positions, points numbers, cube size and
    rho = output point count / input point count.
    """

    print('===== Post process =====')
    # Classify voxels into occupied / free.
    tic = time.time()
    voxels = select_voxels(cubes, points_numbers, rho)

    # Turn the occupied voxels back into point coordinates.
    points = voxels2points(voxels)
    print("Classify and extract points: {}s".format(
        round(time.time() - tic, 4)))

    # Undo the optional down-scaling applied during preprocessing.
    tic = time.time()
    if scale != 1:
        scaling_output_file = './downsampling_rec.ply'
        save_points(points, cube_positions, scaling_output_file, cube_size)
        rec = load_ply_data(scaling_output_file)
        # Dividing by `scale` restores the original coordinate range.
        write_ply_data(output_file, rec.astype('float32') / float(scale))
    else:
        save_points(points, cube_positions, output_file, cube_size)
    print("Write point cloud to {}: {}s".format(output_file,
                                                round(time.time() - tic, 4)))

    return
示例#3
0
def preprocess(input_file, scale, cube_size, min_num):
    """Scaling, Partition & Voxelization.

    Input: .ply file and arguments for pre-process.
    Output: partitioned cubes, cube positions, and number of points in each cube.
    """

    print('===== Preprocess =====')
    # Optional down-scaling of the raw coordinates.
    tic = time.time()
    if scale != 1:
        raw = load_ply_data(input_file)
        scaled = np.round(raw.astype('float32') * scale)
        scaled = np.unique(scaled, axis=0)  # drop points that collapse together
        scaling_file = './downscaling.ply'
        write_ply_data(scaling_file, scaled)
    else:
        scaling_file = input_file
    print("Scaling: {}s".format(round(time.time() - tic, 4)))

    # Split the cloud into fixed-size cubes, keeping sufficiently dense ones.
    tic = time.time()
    partitioned_points, cube_positions = load_points(scaling_file, cube_size,
                                                     min_num)
    print("Partition: {}s".format(round(time.time() - tic, 4)))

    # Occupancy-voxelize each cube; summing occupancies counts the points.
    tic = time.time()
    cubes = points2voxels(partitioned_points, cube_size)
    points_numbers = np.sum(cubes, axis=(1, 2, 3, 4)).astype(np.uint16)
    print("Voxelization: {}s".format(round(time.time() - tic, 4)))

    print('cubes shape: {}'.format(cubes.shape))
    print('points numbers (sum/mean/max/min): {} {} {} {}'.format(
        points_numbers.sum(), round(points_numbers.mean()),
        points_numbers.max(), points_numbers.min()))

    return cubes, cube_positions, points_numbers
示例#4
0
def read_binary_files_hyper(filename, rootdir='./'):
    """Read from compressed binary files:
    1) Compressed latent features.
    2) Compressed hyperprior.
    3) Number of input points.
    4) Positions of each cube.

    Returns the entropy-coded strings plus the metadata (value ranges and
    tensor shapes) required to decode them.
    """

    print('===== Read binary files =====')
    file_strings = os.path.join(rootdir, filename + '.strings')
    file_strings_head = os.path.join(rootdir, filename + '.strings_head')
    file_strings_hyper = os.path.join(rootdir, filename + '.strings_hyper')
    file_pointnums = os.path.join(rootdir, filename + '.pointnums')
    file_cubepos = os.path.join(rootdir, filename + '.cubepos')
    ply_cubepos = os.path.join(rootdir, filename + '_cubepos.ply')

    with open(file_strings_head, 'rb') as f:
        # Number of per-cube latent strings (one int16).
        y_strings_num = int(np.frombuffer(f.read(1 * 2), dtype=np.int16))
        # One byte per string packs both range bounds: high nibble is max_v,
        # low nibble is the magnitude of min_v.
        y_max_min_vs = np.frombuffer(f.read(y_strings_num * 1),
                                     dtype=np.uint8).astype('int32')
        y_max_vs = y_max_min_vs // 16
        y_min_vs = -(y_max_min_vs % 16)

        # Per-string byte lengths: one uint8 each; a 0 marker means the real
        # length did not fit in one byte and follows as an int16 escape value.
        y_strings_lens = []
        for _ in range(y_strings_num):
            l = np.frombuffer(f.read(1 * 1), dtype=np.uint8)
            if l == 0:
                l = int(np.frombuffer(f.read(1 * 2), dtype=np.int16))
            y_strings_lens.append(l)
        y_strings_lens = np.array(y_strings_lens, dtype=np.int32)
        # Five int16 values: shape of the latent tensor.
        y_shape = np.frombuffer(f.read(2 * 5), dtype=np.int16)
        # NOTE: the redundant f.close() calls were removed -- the `with`
        # statement already closes the file on exit.

    with open(file_strings, 'rb') as f:
        # Slice the concatenated latent bit strings by their recorded lengths.
        y_strings = []
        for l in y_strings_lens:
            y_strings.append(f.read(int(l)))
        y_strings = np.array(y_strings)

    with open(file_strings_hyper, 'rb') as f:
        # Hyperprior: shape (5 x int16), value range (2 x int8), coded bytes.
        z_shape = np.frombuffer(f.read(2 * 5), dtype=np.int16)
        z_min_v, z_max_v = np.frombuffer(f.read(1 * 2), dtype=np.int8)
        z_strings = f.read()

    with open(file_pointnums, 'rb') as f:
        points_numbers = np.frombuffer(f.read(), dtype=np.uint16)

    gpcc_decode(file_cubepos, ply_cubepos)
    cube_positions = load_ply_data(ply_cubepos)

    return y_strings, z_strings, points_numbers, cube_positions, y_min_vs, y_max_vs, y_shape, z_min_v, z_max_v, z_shape
示例#5
0
def postprocess(output_file,
                cubes,
                points_numbers,
                cube_positions,
                scale,
                cube_size,
                rho,
                fixed_thres=None):
    """Classify voxels to occupied or free, then extract points and write to file.

    Input: decoded cubes, cube positions, points numbers, cube size and
    rho = output point count / input point count.  `fixed_thres`, when given,
    is forwarded to `select_voxels` as a fixed classification threshold.
    """
    # Randomized prefix keeps concurrent runs from clobbering each other's
    # temporary down-sampled file.
    prefix = output_file.split('/')[-1].split('_')[0] + str(
        random.randint(1, 100))
    print('===== Post process =====')
    # Classify.
    start = time.time()
    output = select_voxels(cubes, points_numbers, rho, fixed_thres=fixed_thres)

    # Extract points.
    points = voxels2points(output)
    print("Classify and extract points: {}s".format(
        round(time.time() - start, 4)))

    # Undo the optional pre-process scaling.
    start = time.time()
    if scale == 1:
        save_points(points, cube_positions, output_file, cube_size)
    else:
        scaling_output_file = prefix + 'downsampling_rec.ply'
        save_points(points, cube_positions, scaling_output_file, cube_size)
        pc = load_ply_data(scaling_output_file)
        pc_up = pc.astype('float32') * float(1 / scale)
        write_ply_data(output_file, pc_up)
        # os.remove instead of os.system("rm ..."): no shell involved, so it is
        # portable and safe for paths containing spaces or metacharacters.
        os.remove(scaling_output_file)
    print("Write point cloud to {}: {}s".format(output_file,
                                                round(time.time() - start, 4)))

    return