Code example #1
def eval(data, batch_size):
    bpps = 0.
    IoUs = 0.
    # generate input data.
    for i in range(len(data) // batch_size):
        samples = data[i * batch_size:(i + 1) * batch_size]
        samples_points = []
        for f in samples:
            points = h5py.File(f, 'r')['data'][:].astype('int')
            samples_points.append(points)
        voxels = points2voxels(samples_points, 64).astype('float32')

        x = tf.convert_to_tensor(voxels)
        y = analysis_transform(x)
        y_tilde, likelihoods = entropy_bottleneck(
            y, training=False)  # TODO: replace noise by quantization.
        x_tilde = synthesis_transform(y_tilde)

        num_points = tf.reduce_sum(
            tf.cast(tf.greater(tf.reduce_sum(x, -1), 0), 'float32'))
        train_bpp_ae = tf.reduce_sum(
            tf.log(likelihoods)) / (-np.log(2) * num_points)

        points_nums = tf.cast(tf.reduce_sum(x, axis=(1, 2, 3, 4)), 'int32')
        output = select_voxels(x_tilde, points_nums, 1.0)
        # output = output.numpy()
        _, _, IoU = get_classify_metrics(output, x)

        bpps = bpps + train_bpp_ae
        IoUs = IoUs + IoU

    return bpps / (i + 1), IoUs / (i + 1)
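
A minimal call-site sketch for the eval() above, assuming the same globals as code example #4: file_list (a list of .h5 voxelized point-cloud files), RATIO_EVAL for the held-out split, and already-restored analysis_transform, synthesis_transform and entropy_bottleneck models running under eager execution. The split and batch size are illustrative.

# Hypothetical usage: evaluate on the held-out slice of file_list.
eval_list = file_list[:len(file_list) // RATIO_EVAL]
bpp_eval, IoU_eval = eval(eval_list, batch_size=8)
print("BPP: {:.4f}, IoU: {:.4f}".format(bpp_eval.numpy(), IoU_eval.numpy()))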
Code example #2
def preprocess(input_file, scale, cube_size, min_num):
  """Scaling, Partition & Voxelization.
  Input: .ply file and arguments for pre-process.  
  Output: partitioned cubes, cube positions, and number of points in each cube. 
  """

  print('===== Preprocess =====')
  # scaling (optional)
  start = time.time()
  if scale == 1:
    scaling_file = input_file 
  else:
    pc = load_ply_data(input_file)
    pc_down = np.round(pc.astype('float32') * scale)
    pc_down = np.unique(pc_down, axis=0)  # remove duplicated points
    scaling_file = './downscaling.ply'
    write_ply_data(scaling_file, pc_down)
  print("Scaling: {}s".format(round(time.time()-start, 4)))

  # partition.
  start = time.time()
  partitioned_points, cube_positions = load_points(scaling_file, cube_size, min_num)
  print("Partition: {}s".format(round(time.time()-start, 4)))

  # voxelization.
  start = time.time()
  cubes = points2voxels(partitioned_points, cube_size)
  points_numbers = np.sum(cubes, axis=(1,2,3,4)).astype(np.uint16)
  print("Voxelization: {}s".format(round(time.time()-start, 4)))

  print('cubes shape: {}'.format(cubes.shape))
  print('points numbers (sum/mean/max/min): {} {} {} {}'.format(
      points_numbers.sum(), round(points_numbers.mean()),
      points_numbers.max(), points_numbers.min()))

  return cubes, cube_positions, points_numbers
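
A minimal usage sketch for preprocess(); the input path and min_num value are illustrative assumptions. scale=1 skips the rescaling branch, cube_size=64 matches the 64^3 voxel grids used in the other examples, and min_num is the minimum number of points a cube must contain to be kept. As implied by the axis=(1, 2, 3, 4) sum above, cubes is a 5-D binary occupancy array with one 64x64x64x1 volume per kept cube.

# Hypothetical usage: partition an input point cloud into 64^3 occupancy cubes.
cubes, cube_positions, points_numbers = preprocess(
    './testdata/longdress.ply', scale=1, cube_size=64, min_num=20)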
Code example #3
def eval(data, batch_size):
    bpps_ae = 0.
    bpps_hyper = 0.
    IoUs = 0.

    for i in range(len(data) // batch_size):
        samples = data[i * batch_size:(i + 1) * batch_size]
        samples_points = []
        for f in samples:
            points = h5py.File(f, 'r')['data'][:].astype('int')
            samples_points.append(points)
        voxels = points2voxels(samples_points, 64).astype('float32')

        x = tf.convert_to_tensor(voxels)
        y = analysis_transform(x)
        z = hyper_encoder(y)
        z_tilde, likelihoods_hyper = entropy_bottleneck(z, training=False)
        loc, scale = hyper_decoder(z_tilde)
        scale = tf.maximum(scale, lower_bound)
        y_tilde, likelihoods = conditional_entropy_model(y,
                                                         loc,
                                                         scale,
                                                         training=False)
        x_tilde = synthesis_transform(y_tilde)

        num_points = tf.reduce_sum(
            tf.cast(tf.greater(tf.reduce_sum(x, -1), 0), 'float32'))
        train_bpp_ae = tf.reduce_sum(
            tf.log(likelihoods)) / (-np.log(2) * num_points)
        train_bpp_hyper = tf.reduce_sum(
            tf.log(likelihoods_hyper)) / (-np.log(2) * num_points)

        points_nums = tf.cast(tf.reduce_sum(x, axis=(1, 2, 3, 4)), 'int32')
        output = select_voxels(x_tilde, points_nums, 1.0)
        output = output.numpy()
        _, _, IoU = get_classify_metrics(output, x)

        bpps_ae = bpps_ae + train_bpp_ae
        bpps_hyper = bpps_hyper + train_bpp_hyper
        IoUs = IoUs + IoU

    return bpps_ae / (i + 1), bpps_hyper / (i + 1), IoUs / (i + 1)
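
A sketch of how the two rate terms returned above would typically be combined at the call site: the total bitrate is simply the sum of the latent and hyper-latent bits per point. eval_list and the batch size are illustrative, built as in code example #4.

# Hypothetical usage: total rate = latent bpp + hyper-latent bpp.
bpp_ae_eval, bpp_hyper_eval, IoU_eval = eval(eval_list, batch_size=8)
bpp_total = bpp_ae_eval + bpp_hyper_eval
print("bpp(ae): {:.4f}, bpp(hyper): {:.4f}, bpp(total): {:.4f}, IoU: {:.4f}".format(
    bpp_ae_eval.numpy(), bpp_hyper_eval.numpy(), bpp_total.numpy(), IoU_eval.numpy()))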
Code example #4
def train():
    start = time.time()
    train_list = file_list[len(file_list) // RATIO_EVAL:]

    train_bpp_ae_sum = 0.
    train_IoU_sum = 0.
    num = 0.

    for step in range(int(global_step), int(NUM_ITEATION + 1)):
        # generate input data.
        samples = random.sample(train_list, BATCH_SIZE)
        samples_points = []
        for f in samples:
            points = h5py.File(f, 'r')['data'][:].astype('int')
            samples_points.append(points)
        voxels = points2voxels(samples_points, 64).astype('float32')
        x = tf.convert_to_tensor(voxels)

        with tf.GradientTape() as model_tape:
            y = analysis_transform(x)
            y_tilde, likelihoods = entropy_bottleneck(y, training=True)
            x_tilde = synthesis_transform(y_tilde)

            # losses.
            num_points = tf.reduce_sum(
                tf.cast(tf.greater(tf.reduce_sum(x, -1), 0), 'float32'))
            train_bpp_ae = tf.reduce_sum(
                tf.log(likelihoods)) / -np.log(2) / num_points
            train_zeros, train_ones = get_bce_loss(x_tilde, x)
            train_distortion = beta * train_zeros + 1.0 * train_ones
            train_loss = alpha * train_distortion + 1.0 * train_bpp_ae
            # metrics.
            _, _, IoU = get_classify_metrics(x_tilde, x)

            # gradients.
            gradients = model_tape.gradient(
                train_loss, analysis_transform.variables +
                synthesis_transform.variables + entropy_bottleneck.variables)
            # optimization.
            main_optimizer.apply_gradients(
                zip(
                    gradients, analysis_transform.variables +
                    synthesis_transform.variables +
                    entropy_bottleneck.variables))

        # post-process: classification.
        points_nums = tf.cast(tf.reduce_sum(x, axis=(1, 2, 3, 4)), 'int32')
        output = select_voxels(x_tilde, points_nums, 1.0)
        # output = output.numpy()

        train_bpp_ae_sum += train_bpp_ae
        _, _, IoU = get_classify_metrics(output, x)
        train_IoU_sum += IoU
        num += 1

        # Display.
        if (step + 1) % DISPLAY_STEP == 0:
            train_bpp_ae_sum /= num
            train_IoU_sum /= num

            print("Iteration:{0:}".format(step))
            print("Bpps: {0:.4f}".format(train_bpp_ae_sum.numpy()))
            print("IoU: ", train_IoU_sum.numpy())
            print('Running time (mins):', round((time.time() - start) / 60.))
            print()

            with writer.as_default(), \
                    tf.contrib.summary.record_summaries_every_n_global_steps(100):
                tf.contrib.summary.scalar('bpp', train_bpp_ae_sum)
                tf.contrib.summary.scalar('IoU', train_IoU_sum)

            num = 0.
            train_bpp_ae_sum = 0.
            train_IoU_sum = 0.

            print('evaluating...')
            eval_list = random.sample(file_list[:len(file_list) // RATIO_EVAL],
                                      16)
            bpp_eval, IoU_eval = eval(eval_list, batch_size=8)
            print("BPP:{0:.4f}, IoU:{1:.4f}".format(bpp_eval, IoU_eval))
            with eval_writer.as_default(), \
                    tf.contrib.summary.record_summaries_every_n_global_steps(1):
                tf.contrib.summary.scalar('bpp', bpp_eval)
                tf.contrib.summary.scalar('IoU', IoU_eval)

        # Update global steps.
        global_step.assign_add(1)

        # Save checkpoints.
        if (step + 1) % SAVE_STEP == 0:
            checkpoint.save(file_prefix=checkpoint_prefix)
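
train() relies on several globals defined outside the snippet (hyper-parameters, optimizer, summary writers, checkpoint, global step) and assumes the three model objects have already been constructed. A minimal setup sketch under TF 1.x eager execution; every path and hyper-parameter value below is an illustrative assumption, not the original configuration.

# Assumed setup (TF 1.x eager); names mirror the globals used by train().
import glob
import random

import h5py
import numpy as np
import tensorflow as tf

tf.enable_eager_execution()

# Illustrative hyper-parameters; the original values are not shown in the snippet.
BATCH_SIZE = 8
NUM_ITEATION = 2e5          # keeps the (misspelled) name referenced in train()
DISPLAY_STEP = 100
SAVE_STEP = 1000
RATIO_EVAL = 10
alpha, beta = 0.75, 3.0     # rate-distortion weights used in the loss (illustrative)

file_list = sorted(glob.glob('./traindata/*.h5'))   # illustrative data path

global_step = tf.train.get_or_create_global_step()
main_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)

writer = tf.contrib.summary.create_file_writer('./logs/train')
eval_writer = tf.contrib.summary.create_file_writer('./logs/eval')

# analysis_transform, synthesis_transform and entropy_bottleneck are assumed
# to be already-built model objects (their definitions are not shown here).
checkpoint = tf.train.Checkpoint(analysis_transform=analysis_transform,
                                 synthesis_transform=synthesis_transform,
                                 entropy_bottleneck=entropy_bottleneck,
                                 optimizer=main_optimizer,
                                 global_step=global_step)
checkpoint_prefix = './checkpoints/ckpt'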