Example #1
def eval(data, batch_size):
    bpps = 0.
    IoUs = 0.
    # generate input data.
    for i in range(len(data) // batch_size):
        samples = data[i * batch_size:(i + 1) * batch_size]
        samples_points = []
        for _, f in enumerate(samples):
            points = h5py.File(f, 'r')['data'][:].astype('int')
            samples_points.append(points)
        voxels = points2voxels(samples_points, 64).astype('float32')

        x = tf.convert_to_tensor(voxels)
        y = analysis_transform(x)
        y_tilde, likelihoods = entropy_bottleneck(
            y, training=False)  # TODO: replace noise by quantization.
        x_tilde = synthesis_transform(y_tilde)

        num_points = tf.reduce_sum(
            tf.cast(tf.greater(tf.reduce_sum(x, -1), 0), 'float32'))
        train_bpp_ae = tf.reduce_sum(
            tf.log(likelihoods)) / (-np.log(2) * num_points)

        points_nums = tf.cast(tf.reduce_sum(x, axis=(1, 2, 3, 4)), 'int32')
        x_tilde = x_tilde.numpy()
        output = select_voxels(x_tilde, points_nums, 1.0)
        _, _, IoU = get_classify_metrics(output, x)

        bpps = bpps + train_bpp_ae
        IoUs = IoUs + IoU

    return bpps / (i + 1), IoUs / (i + 1)
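
The rate term above divides the summed log-likelihoods by -log(2) times the number of occupied input points, which converts the total code length from nats to bits per point. A minimal NumPy sketch of that arithmetic, using made-up likelihood values (in the code above they come from entropy_bottleneck):

import numpy as np

likelihoods = np.array([0.5, 0.25, 0.125])  # hypothetical per-symbol likelihoods
num_points = 2.0                            # hypothetical number of occupied input points
bpp = np.sum(np.log(likelihoods)) / (-np.log(2) * num_points)
print(bpp)  # total code length is -log2(0.5 * 0.25 * 0.125) = 6 bits, over 2 points -> 3.0
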
Example #2
def postprocess(output_file,
                cubes,
                points_numbers,
                cube_positions,
                scale,
                cube_size,
                rho,
                fixed_thres=None):
    """Classify voxels to occupied or free, then extract points and write to file.
    Input:  deocded cubes, cube positions, points numbers, cube size and rho=ouput numbers/input numbers.
    """
    prefix = output_file.split('/')[-1].split('_')[0] + str(
        random.randint(1, 100))
    print('===== Post process =====')
    # Classify.
    start = time.time()
    output = select_voxels(cubes, points_numbers, rho, fixed_thres=fixed_thres)

    # Extract points.
    #points = voxels2points(output.numpy())
    points = voxels2points(output)
    print("Classify and extract points: {}s".format(
        round(time.time() - start, 4)))

    # scaling (optional)
    start = time.time()
    if scale == 1:
        save_points(points, cube_positions, output_file, cube_size)
    else:
        scaling_output_file = prefix + 'downsampling_rec.ply'
        save_points(points, cube_positions, scaling_output_file, cube_size)
        pc = load_ply_data(scaling_output_file)
        pc_up = pc.astype('float32') * float(1 / scale)
        write_ply_data(output_file, pc_up)
        os.remove(scaling_output_file)  # delete the intermediate downsampled file
    print("Write point cloud to {}: {}s".format(output_file,
                                                round(time.time() - start, 4)))

    return
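
select_voxels itself is not included in these examples. Going by its arguments (decoded cubes, per-cube point numbers, rho, optional fixed threshold) and the docstring's rho = output numbers / input numbers, a plausible reading is: keep the rho * points_number highest-scoring voxels of each cube, or binarize at the fixed threshold when one is given. The sketch below is an assumption about that behavior, not the repository's implementation:

import numpy as np

def select_voxels_sketch(cubes, points_numbers, rho, fixed_thres=None):
    """Binarize decoded cubes: top rho * points_number voxels per cube, or a fixed threshold."""
    output = np.zeros_like(cubes)
    for i, (cube, n) in enumerate(zip(cubes, points_numbers)):
        if fixed_thres is not None:
            output[i] = (cube > fixed_thres).astype(cube.dtype)
            continue
        k = max(int(round(float(n) * rho)), 1)
        kth_largest = np.sort(cube.reshape(-1))[-k]
        output[i] = (cube >= kth_largest).astype(cube.dtype)
    return output

scores = np.random.rand(2, 8, 8, 8, 1).astype('float32')
print(select_voxels_sketch(scores, [10, 20], 1.0).sum(axis=(1, 2, 3, 4)))  # ~[10. 20.]
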
Example #3
def eval(data, batch_size):
  bpps_ae = 0.
  bpps_hyper = 0.
  IoUs = 0. 

  for i in range(len(data)//batch_size):
    samples = data[i*batch_size:(i+1)*batch_size]
    samples_points = []
    for _, f in enumerate(samples):
      points = h5py.File(f, 'r')['data'][:].astype('int')
      samples_points.append(points)
    voxels = points2voxels(samples_points, 64).astype('float32')

    x = tf.convert_to_tensor(voxels)
    y = analysis_transform(x)
    z = hyper_encoder(y)
    z_tilde, likelihoods_hyper = entropy_bottleneck(z, training=False)
    loc, scale = hyper_decoder(z_tilde)
    scale = tf.maximum(scale, lower_bound)
    y_tilde, likelihoods = conditional_entropy_model(y, loc, scale, training=False)
    x_tilde = synthesis_transform(y_tilde)

    num_points = tf.reduce_sum(tf.cast(tf.greater(tf.reduce_sum(x, -1), 0), 'float32'))
    train_bpp_ae = tf.reduce_sum(tf.log(likelihoods)) / (-np.log(2) * num_points)
    train_bpp_hyper = tf.reduce_sum(tf.log(likelihoods_hyper)) / (-np.log(2) * num_points)

    points_nums = tf.cast(tf.reduce_sum(x, axis=(1,2,3,4)), 'int32')
    x_tilde = x_tilde.numpy()
    output = select_voxels(x_tilde, points_nums, 1.0)
    # output = output.numpy()
    _, _, IoU = get_classify_metrics(output, x)

    bpps_ae = bpps_ae + train_bpp_ae
    bpps_hyper = bpps_hyper + train_bpp_hyper
    IoUs = IoUs + IoU

  return bpps_ae/(i+1), bpps_hyper/(i+1), IoUs/(i+1)
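
get_classify_metrics is also not shown here; its third return value is used as a voxel IoU between the reconstructed and original occupancy grids. A rough NumPy sketch of such an IoU (the function name and the 0.5 threshold are assumptions, not the repository's code):

import numpy as np

def voxel_iou_sketch(pred, target, threshold=0.5):
    """IoU between two occupancy grids after binarizing at `threshold`."""
    p = pred > threshold
    t = target > threshold
    intersection = np.logical_and(p, t).sum()
    union = np.logical_or(p, t).sum()
    return intersection / union if union > 0 else 1.0

a = np.zeros((4, 4, 4))
a[0, 0, 0:2] = 1.0
b = np.zeros((4, 4, 4))
b[0, 0, 1:3] = 1.0
print(voxel_iou_sketch(a, b))  # 1 shared voxel out of 3 occupied -> 0.333...
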
Example #4
def train():
  start = time.time()
  train_list = file_list[len(file_list)//RATIO_EVAL:]

  train_bpp_ae_sum = 0.
  train_bpp_hyper_sum = 0.
  train_IoU_sum = 0.
  num = 0.

  for step in range(int(global_step), int(NUM_ITEATION+1)):
    # generate input data
    samples = random.sample(train_list, BATCH_SIZE)
    samples_points = []
    for _, f in enumerate(samples):
      points = h5py.File(f, 'r')['data'][:].astype('int')
      samples_points.append(points)
    voxels = points2voxels(samples_points, 64).astype('float32')
    x = tf.convert_to_tensor(voxels)
    
    with tf.GradientTape() as model_tape:
      y = analysis_transform(x)
      z = hyper_encoder(y)
      z_tilde, likelihoods_hyper = entropy_bottleneck(z, training=True)
      loc, scale = hyper_decoder(z_tilde)
      scale = tf.maximum(scale, lower_bound)  # start with a large lower bound to avoid crashes!
      y_tilde, likelihoods = conditional_entropy_model(y, loc, scale, training=True)
      x_tilde = synthesis_transform(y_tilde)

      # losses.
      num_points = tf.reduce_sum(tf.cast(tf.greater(tf.reduce_sum(x, -1), 0), 'float32')) 
      train_bpp_ae = tf.reduce_sum(tf.log(likelihoods)) / (-np.log(2) * num_points)
      train_bpp_hyper = tf.reduce_sum(tf.log(likelihoods_hyper)) / (-np.log(2) * num_points)
      train_zeros, train_ones = get_bce_loss(x_tilde, x)
      train_distortion = beta * train_zeros + 1.0 * train_ones
      train_loss = alpha * train_distortion + delta * train_bpp_ae + gamma * train_bpp_hyper

    # gradients (computed outside the tape context).
    gradients = model_tape.gradient(train_loss,
                                    analysis_transform.variables +
                                    synthesis_transform.variables +
                                    hyper_encoder.variables +
                                    hyper_decoder.variables +
                                    entropy_bottleneck.variables)
    # optimization.
    main_optimizer.apply_gradients(zip(gradients,
                                       analysis_transform.variables +
                                       synthesis_transform.variables +
                                       hyper_encoder.variables +
                                       hyper_decoder.variables +
                                       entropy_bottleneck.variables))

    # post-process: classification.
    points_nums = tf.cast(tf.reduce_sum(x, axis=(1,2,3,4)), 'int32')
    x_tilde = x_tilde.numpy()
    output = select_voxels(x_tilde, points_nums, 1.0)
    # output = output.numpy()  

    train_bpp_ae_sum += train_bpp_ae
    train_bpp_hyper_sum += train_bpp_hyper
    _, _, IoU = get_classify_metrics(output, x)
    train_IoU_sum += IoU
    num += 1

    # Display.
    if (step + 1) % DISPLAY_STEP == 0:
      train_bpp_ae_sum /= num
      train_bpp_hyper_sum /= num
      train_IoU_sum /= num

      print("Iteration:{0:}".format(step))
      print("Bpps: {0:.4f} + {1:.4f}".format(train_bpp_ae_sum, train_bpp_hyper_sum))
      print("IoU: ", train_IoU_sum.numpy())
      print('Running time (mins):', round((time.time() - start) / 60.))
      print()

      with writer.as_default(), tf.contrib.summary.record_summaries_every_n_global_steps(1):
        tf.contrib.summary.scalar('bpp_ae', train_bpp_ae_sum)
        tf.contrib.summary.scalar('bpp_hyper', train_bpp_hyper_sum)
        tf.contrib.summary.scalar('bpp', train_bpp_ae_sum + train_bpp_hyper_sum)
        tf.contrib.summary.scalar('IoU', train_IoU_sum)
      
      num = 0.
      train_bpp_ae_sum = 0.
      train_bpp_hyper_sum = 0.
      train_IoU_sum = 0.
 
    # update global steps.
    global_step.assign_add(1)

    # Save checkpoints.
    if (step + 1) % SAVE_STEP == 0:
      print('evaluating...')
      eval_list = random.sample(file_list[:len(file_list)//RATIO_EVAL], 256)
      eval_bpp_ae, eval_bpp_hyper, eval_IoU = eval(eval_list, batch_size=8)
      print("Bpps: {0:.4f} + {1:.4f}".format(eval_bpp_ae, eval_bpp_hyper))
      print("IoU: {0:.4f}".format(eval_IoU))

      with eval_writer.as_default(), tf.contrib.summary.record_summaries_every_n_global_steps(1):
        tf.contrib.summary.scalar('bpp_ae', eval_bpp_ae)
        tf.contrib.summary.scalar('bpp_hyper', eval_bpp_hyper)
        tf.contrib.summary.scalar('bpp', eval_bpp_ae + eval_bpp_hyper)
        tf.contrib.summary.scalar('IoU', eval_IoU)

      checkpoint.save(file_prefix=checkpoint_prefix)
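
points2voxels, used by both eval and train to rasterize the HDF5 point lists into 64^3 occupancy cubes with a trailing channel axis (the code above reduces over axis -1 and sums over axes (1, 2, 3, 4)), is likewise not part of these snippets. A minimal sketch of that conversion, offered as an assumption rather than the repository's implementation:

import numpy as np

def points2voxels_sketch(samples_points, cube_size):
    """Rasterize a list of (K, 3) integer point arrays into (N, cube_size, cube_size, cube_size, 1) occupancy grids."""
    voxels = np.zeros((len(samples_points), cube_size, cube_size, cube_size, 1), dtype='float32')
    for i, points in enumerate(samples_points):
        points = np.clip(points, 0, cube_size - 1)
        voxels[i, points[:, 0], points[:, 1], points[:, 2], 0] = 1.0
    return voxels

print(points2voxels_sketch([np.array([[0, 1, 2], [3, 3, 3]])], 64).sum())  # 2.0
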