def eval_one_epoch(sess,
                   ops,
                   val_writer,
                   val_writer_180,
                   epoch,
                   eval_only,
                   do_timings,
                   override_batch_size=None):
    """ ops: dict mapping from string to tf ops """

    is_training = False
    batch_size = cfg.training.batch_size if override_batch_size is None else override_batch_size

    val_idxs = VAL_INDICES
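    # ceil includes the trailing partial batch; floor counts only the full
    # batches, over which the loss is averaged below.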
    num_batches = int(np.ceil(len(val_idxs) / batch_size))
    num_full_batches = int(np.floor(len(val_idxs) / batch_size))

    loss_sum = 0
    global_step = sess.run([ops['step']])[0]
    #  step_in_epochs = epoch + 1
    eval_dir = f'{cfg.logging.logdir}/val/eval{str(epoch).zfill(6)}'
    base_eval_dir = eval_dir
    if FLAGS.refineICP:
        # FLAGS.its is a string flag (see int(FLAGS.its) below), so compare as int
        eval_dir = f'{eval_dir}/refined_{FLAGS.refineICPmethod}{"_" + FLAGS.its if int(FLAGS.its) != 30 else ""}'

    #if os.path.isdir(eval_dir):
    #    os.rename(eval_dir, f'{eval_dir}_backup_{int(time.time())}')

    os.makedirs(eval_dir, exist_ok=True)

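    # Per-sample prediction buffers; s1/s2 presumably denote the model's
    # stage-1 and stage-2 estimates.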
    all_pred_translations = np.empty((len(val_idxs), 3), dtype=np.float32)
    all_pred_angles = np.empty((len(val_idxs), 1), dtype=np.float32)

    all_pred_s1_pc1centers = np.empty((len(val_idxs), 3), dtype=np.float32)
    all_pred_s1_pc2centers = np.empty((len(val_idxs), 3), dtype=np.float32)
    all_pred_s2_pc1centers = np.empty((len(val_idxs), 3), dtype=np.float32)
    all_pred_s2_pc2centers = np.empty((len(val_idxs), 3), dtype=np.float32)
    all_pred_s2_pc1angles = np.empty((len(val_idxs), 1), dtype=np.float32)
    all_pred_s2_pc2angles = np.empty((len(val_idxs), 1), dtype=np.float32)

    all_gt_translations = np.empty((len(val_idxs), 3), dtype=np.float32)
    all_gt_angles = np.empty((len(val_idxs), 1), dtype=np.float32)
    all_gt_pc1centers = np.empty((len(val_idxs), 3), dtype=np.float32)

    if FLAGS.use_old_results:
        all_pred_translations = np.load(
            f'{base_eval_dir}/pred_translations.npy')
        all_pred_angles = np.load(f'{base_eval_dir}/pred_angles.npy')
        all_pred_s2_pc1centers = np.load(
            f'{base_eval_dir}/pred_s2_pc1centers.npy')

    cumulated_times = 0.
    for batch_idx in tqdm(range(num_batches), desc='val'):
        #  logger.info('----- batch ' + str(batch_idx) + ' -----')
        start_idx = batch_idx * batch_size
        end_idx = min((batch_idx + 1) * batch_size, len(val_idxs))

        pcs1, pcs2, translations, rel_angles, pc1centers, pc2centers, pc1angles, pc2angles = provider.load_batch(
            val_idxs[start_idx:end_idx],
            override_batch_size=override_batch_size)

        feed_dict = {
            ops['pcs1']: pcs1,
            ops['pcs2']: pcs2,
            ops['translations']: translations,
            ops['rel_angles']: rel_angles,
            ops['is_training_pl']: is_training,
            ops['pc1centers']: pc1centers,
            ops['pc2centers']: pc2centers,
            ops['pc1angles']: pc1angles,
            ops['pc2angles']: pc2angles,
        }

        start = time.time()
        summary, loss_val, pred_translations, pred_pc1angle_logits, pred_pc2angle_logits, pred_remaining_angle_logits, pred_s1_pc1centers, pred_s1_pc2centers, pred_s2_pc1centers, pred_s2_pc2centers = sess.run(
            [
                ops['merged'], ops['loss'], ops['pred_translations'],
                ops['pred_pc1angle_logits'], ops['pred_pc2angle_logits'],
                ops['pred_remaining_angle_logits'], ops['pred_s1_pc1centers'],
                ops['pred_s1_pc2centers'], ops['pred_s2_pc1centers'],
                ops['pred_s2_pc2centers']
            ],
            feed_dict=feed_dict)
        cumulated_times += time.time() - start
        #  val_writer.add_summary(summary, step)
        actual_batch_size = end_idx - start_idx
        pred_translations = pred_translations[:actual_batch_size]
        pred_angles_pc1 = MODEL.classLogits2angle(
            pred_pc1angle_logits[:actual_batch_size])
        pred_angles_pc2 = MODEL.classLogits2angle(
            pred_pc2angle_logits[:actual_batch_size])
        pred_angles_remaining = MODEL.classLogits2angle(
            pred_remaining_angle_logits[:actual_batch_size])
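        # Final relative angle: difference of the per-cloud orientation
        # estimates plus the predicted residual correction.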
        pred_angles = pred_angles_pc2 - pred_angles_pc1 + pred_angles_remaining

        if actual_batch_size == batch_size:  # last batch is not counted
            loss_sum += loss_val

        for idx in range(actual_batch_size):
            global_idx = start_idx + idx
            if eval_only and FLAGS.refineICP:
                if FLAGS.use_old_results:
                    init = get_mat_angle(
                        all_pred_translations[global_idx],
                        all_pred_angles[global_idx],
                        rotation_center=all_pred_s2_pc1centers[global_idx])
                else:
                    init = get_mat_angle(
                        pred_translations[idx],
                        pred_angles[idx],
                        rotation_center=pred_s2_pc1centers[idx])
                # Careful: Pass full point cloud, not subsampled one
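                # icp_p2point presumably refines the initial transform with
                # point-to-point ICP, where `radius` is the maximum
                # correspondence distance and `with_constraint` restricts the
                # solution to planar (x, y, yaw) motion.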
                refined_pred_transform, refined_pred_center, time_elapsed = icp.icp_p2point(
                    val_idxs[global_idx],
                    cfg,
                    with_constraint=True,
                    radius=0.1,
                    init=init,
                    its=int(FLAGS.its))
                cumulated_times += time_elapsed
                #  if refined_pred_transform[2, 2] == -1:
                #  refined_pred_transform = init
                #  Overwrite predicted translation and angle in place, then update all_pred_... later
                pred_translations[idx] = refined_pred_transform[:3, 3]
                rotation_mat = refined_pred_transform[:3, :3]
                # from_dcm was renamed to from_matrix in SciPy >= 1.4
                rot_vec = Rotation.from_matrix(rotation_mat).as_euler('xyz')
                #  if global_idx == 47:
                #  print(global_idx, '   ', evaluation.eval_angle(rot_vec[2], all_pred_angles[global_idx], True)[0])
                #  print(refined_pred_transform)
                #  print(init)
                #  print(rot_vec)
                pred_angles[idx] = rot_vec[2]
                #  The transformation ICP outputs is in world space, i.e. with
                #  a rotation around (0, 0, 0). Store this so that a comparable
                #  translation and rotation can be computed later.
                pred_s2_pc1centers[idx] = [0., 0, 0]

            all_pred_translations[global_idx] = pred_translations[idx]
            all_pred_angles[global_idx] = pred_angles[idx]

            all_pred_s1_pc1centers[global_idx] = pred_s1_pc1centers[idx]
            all_pred_s1_pc2centers[global_idx] = pred_s1_pc2centers[idx]
            all_pred_s2_pc1centers[global_idx] = pred_s2_pc1centers[idx]
            all_pred_s2_pc2centers[global_idx] = pred_s2_pc2centers[idx]

            all_pred_s2_pc1angles[global_idx] = pred_angles_pc1[idx]
            all_pred_s2_pc2angles[global_idx] = pred_angles_pc2[idx]

            all_gt_translations[global_idx] = translations[idx]
            all_gt_angles[global_idx] = rel_angles[idx]
            all_gt_pc1centers[global_idx] = pc1centers[idx]

    mean_per_transform_loss = loss_sum / num_full_batches if num_full_batches > 0 else 0.
    mean_execution_time = cumulated_times / float(len(val_idxs))

    if do_timings:
        print(f'Timing bs={override_batch_size}: {mean_execution_time}')
    elif (cfg.evaluation.has('special')
          and cfg.evaluation.special.mode == 'held'):
        #  print(all_pred_translations)
        _, eval_dict = evaluation.evaluate_held(cfg,
                                                val_idxs,
                                                all_pred_translations,
                                                all_pred_angles,
                                                all_gt_translations,
                                                all_gt_angles,
                                                eval_dir=eval_dir,
                                                mean_time=mean_execution_time)
    else:
        for accept_inverted_angle, _val_writer in zip(
            [False, True], [val_writer, val_writer_180]):
            eval_dict = evaluation.evaluate(
                cfg,
                val_idxs,
                all_pred_translations,
                all_pred_angles,
                all_gt_translations,
                all_gt_angles,
                all_pred_s2_pc1centers,
                all_gt_pc1centers,
                eval_dir=eval_dir,
                accept_inverted_angle=accept_inverted_angle,
                mean_time=mean_execution_time)
            corr_levels_translation_str = ' '.join(
                [f'{a*100.0:.2f}%' for a in eval_dict.corr_levels_translation])
            corr_levels_angles_str = ' '.join(
                [f'{a*100.0:.2f}%' for a in eval_dict.corr_levels_angles])
            corr_levels_str = ' '.join(
                [f'{a*100.0:.2f}%' for a in eval_dict.corr_levels])
            logger.info(
                f'Mean translation distance: {eval_dict.mean_dist_translation}, Mean angle distance: {eval_dict.mean_dist_angle}, Levels: {corr_levels_str}, Translation levels: {corr_levels_translation_str}, Angle levels: {corr_levels_angles_str}, Fitness: {eval_dict.reg_eval.fitness*100.0:.2f}%, Inlier RMSE: {eval_dict.reg_eval.inlier_rmse*100.0:.2f}%, Mean ex. time: {mean_execution_time:.5f}'
            )

            if not eval_only:
                # One scalar summary per tag, written in a single loop.
                scalar_summaries = [
                    ('losses/loss', mean_per_transform_loss),
                    ('accuracy/t_a_mean_dist', eval_dict.mean_dist_translation),
                    ('accuracy/t_b_1cm', eval_dict.corr_levels_translation[0]),
                    ('accuracy/t_c_10cm', eval_dict.corr_levels_translation[1]),
                    ('accuracy/t_d_1m', eval_dict.corr_levels_translation[2]),
                    ('accuracy/a_a_mean_dist', eval_dict.mean_dist_angle),
                    ('accuracy/a_b_1d', eval_dict.corr_levels_angles[0]),
                    ('accuracy/a_c_5d', eval_dict.corr_levels_angles[1]),
                    ('accuracy/a_d_10d', eval_dict.corr_levels_angles[2]),
                    ('accuracy/o_b_1cm', eval_dict.corr_levels[0]),
                    ('accuracy/o_c_10cm', eval_dict.corr_levels[1]),
                    ('accuracy/o_d_1m', eval_dict.corr_levels[2]),
                    ('accuracy/fitness', eval_dict.reg_eval.fitness),
                    ('accuracy/inlier_rmse', eval_dict.reg_eval.inlier_rmse),
                ]
                for tag, value in scalar_summaries:
                    _val_writer.add_summary(summary=tf.Summary(value=[
                        tf.Summary.Value(tag=tag, simple_value=value)
                    ]),
                                            global_step=global_step)
                _val_writer.flush()

    np.save(f'{eval_dir}/pred_translations.npy', all_pred_translations)
    np.save(f'{eval_dir}/pred_angles.npy', all_pred_angles)

    np.save(f'{eval_dir}/pred_s1_pc2centers.npy', all_pred_s1_pc2centers)
    np.save(f'{eval_dir}/pred_s1_pc1centers.npy', all_pred_s1_pc1centers)
    np.save(f'{eval_dir}/pred_s2_pc1centers.npy', all_pred_s2_pc1centers)
    np.save(f'{eval_dir}/pred_s2_pc2centers.npy', all_pred_s2_pc2centers)
    np.save(f'{eval_dir}/pred_s2_pc1angles.npy', all_pred_s2_pc1angles)
    np.save(f'{eval_dir}/pred_s2_pc2angles.npy', all_pred_s2_pc2angles)

    logger.info('val mean loss: %f' % (mean_per_transform_loss))
def evaluate(cfg, use_old_results=False):
    val_idxs = provider.getDataFiles(f'{cfg.data.basepath}/split/val.txt')
    #  val_idxs = val_idxs[:100]

    epoch = 0
    total_time = 0.

    do_refinement = cfg.evaluation.special.icp.has('refine')
    refinement_method = cfg.evaluation.special.icp.refine if do_refinement else None
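    # When `refine` is set, the coarse global alignment (Go-ICP / GICP) is
    # presumably polished by a local ICP pass inside the icp_* helpers.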

    if (cfg.evaluation.special.icp.variant in ['o3_gicp', 'o3_gicp_fast']
            and do_refinement):
        gicp_result_dir = f'{cfg.logging.logdir[:-4]}/val/eval{str(epoch).zfill(6)}'
        assert os.path.isdir(gicp_result_dir), gicp_result_dir
        assert os.path.isfile(f'{gicp_result_dir}/eval_180.json'
                              ), f'{gicp_result_dir}/eval_180.json'
        with open(f'{gicp_result_dir}/eval_180.json', 'r') as f:
            eval_dict = json.load(f)
        precomp_time = eval_dict['mean_time'] * float(len(val_idxs))
        total_time += precomp_time
        precomp_pred_translations = np.load(
            f'{gicp_result_dir}/pred_translations.npy')
        precomp_pred_angles = np.load(f'{gicp_result_dir}/pred_angles.npy')
        precomp_pred_centers = np.load(
            f'{gicp_result_dir}/pred_s1_pc1centers.npy')
        print('Precomputed results loaded')

    pcs1, pcs2, all_gt_translations, all_gt_angles, all_gt_pc1centers, all_gt_pc2centers, all_gt_pc1angles, all_gt_pc2angles = provider.load_batch(
        val_idxs, override_batch_size=len(val_idxs))
    eval_dir = f'{cfg.logging.logdir}/val/eval{str(epoch).zfill(6)}'
    if use_old_results and os.path.isfile(f'{eval_dir}/pred_translations.npy'):
        all_pred_translations = np.load(f'{eval_dir}/pred_translations.npy')
        all_pred_angles = np.load(f'{eval_dir}/pred_angles.npy')
        all_pred_centers = np.load(f'{eval_dir}/pred_s1_pc1centers.npy')
    else:
        all_pred_translations = np.empty((len(val_idxs), 3), dtype=np.float32)
        all_pred_angles = np.empty((len(val_idxs), 1), dtype=np.float32)
        all_pred_centers = np.empty((len(val_idxs), 3), dtype=np.float32)

        for idx, file_idx in enumerate(tqdm(val_idxs)):
            if cfg.evaluation.special.icp.variant == 'p2point':
                pred_transform, pred_center, time_elapsed = icp_p2point(
                    file_idx, cfg, radius=0.10)
            elif cfg.evaluation.special.icp.variant == 'p2plane':
                pred_transform, pred_center, time_elapsed = icp_p2plane(
                    file_idx, cfg)
            elif cfg.evaluation.special.icp.variant == 'goicp':
                pred_transform, pred_center, time_elapsed = icp_goicp(
                    file_idx,
                    cfg,
                    refine=refinement_method,
                    refine_radius=0.10)
            elif cfg.evaluation.special.icp.variant == 'o3_gicp':
                pred_transform, pred_center, time_elapsed = icp_o3_gicp(
                    file_idx,
                    cfg,
                    refine=refinement_method,
                    refine_radius=0.10,
                    precomputed_results=(precomp_pred_translations[idx],
                                         precomp_pred_angles[idx],
                                         precomp_pred_centers[idx])
                    if do_refinement else None)
            elif cfg.evaluation.special.icp.variant == 'o3_gicp_fast':
                pred_transform, pred_center, time_elapsed = icp_o3_gicp_fast(
                    file_idx,
                    cfg,
                    refine=refinement_method,
                    refine_radius=0.10,
                    precomputed_results=(precomp_pred_translations[idx],
                                         precomp_pred_angles[idx],
                                         precomp_pred_centers[idx])
                    if do_refinement else None)
            else:
                raise ValueError(
                    f'Unknown ICP variant: {cfg.evaluation.special.icp.variant}')
            #  all_pred_centers[idx] = pred_center
            #  Important! The output of the ICP functions is around the origin, not around the centroid as used internally
            all_pred_centers[idx] = np.array([0., 0, 0])

            all_pred_translations[idx] = pred_transform[:3, 3]
            rotation_mat = pred_transform[:3, :3]
            # from_dcm was renamed to from_matrix in SciPy >= 1.4; for a pure
            # z rotation, as_rotvec()[2] equals the Euler z angle used above
            rot_vec = Rotation.from_matrix(rotation_mat).as_rotvec()
            all_pred_angles[idx] = rot_vec[2]
            total_time += time_elapsed

        os.makedirs(eval_dir, exist_ok=True)
        np.save(f'{eval_dir}/pred_translations.npy', all_pred_translations)
        np.save(f'{eval_dir}/pred_angles.npy', all_pred_angles)
        np.save(f'{eval_dir}/pred_s1_pc1centers.npy', all_pred_centers)

    for accept_inverted_angle in [False, True]:
        eval_dict = evaluation.evaluate(
            cfg,
            val_idxs,
            all_pred_translations,
            all_pred_angles,
            all_gt_translations,
            all_gt_angles,
            all_pred_centers,
            all_gt_pc1centers,
            eval_dir=eval_dir,
            accept_inverted_angle=accept_inverted_angle,
            mean_time=total_time / len(val_idxs))
        logger.info(eval_dict)
def train_one_epoch(sess, ops, train_writer, epoch):
    """ ops: dict mapping from string to tf ops """
    is_training = True
    batch_size = cfg.training.batch_size

    train_idxs = copy.deepcopy(TRAIN_INDICES)
    np.random.shuffle(train_idxs)
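    # Integer division: the trailing partial batch is dropped during training.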
    num_batches = len(train_idxs) // batch_size

    loss_sum = 0

    pbar = tqdm(range(num_batches),
                desc='train',
                postfix=dict(last_loss_str=''))
    for batch_idx in pbar:
        #  logger.info('----- batch ' + str(batch_idx) + ' -----')
        start_idx = batch_idx * batch_size
        end_idx = (batch_idx + 1) * batch_size

        pcs1, pcs2, translations, rel_angles, pc1centers, pc2centers, pc1angles, pc2angles = provider.load_batch(
            train_idxs[start_idx:end_idx])

        # Augment batched point clouds by jittering
        pcs1 = provider.jitter_point_cloud(pcs1)
        pcs2 = provider.jitter_point_cloud(pcs2)
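        # jitter_point_cloud presumably adds small clipped Gaussian noise per
        # point (PointNet-style augmentation), roughly:
        #   pc += np.clip(sigma * np.random.randn(*pc.shape), -clip, clip)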
        feed_dict = {
            ops['pcs1']: pcs1,
            ops['pcs2']: pcs2,
            ops['translations']: translations,
            ops['rel_angles']: rel_angles,
            ops['is_training_pl']: is_training,
            ops['pc1centers']: pc1centers,
            ops['pc2centers']: pc2centers,
            ops['pc1angles']: pc1angles,
            ops['pc2angles']: pc2angles,
        }
        summary, step, _, loss_val, pred_translations, pred_remaining_angle_logits = sess.run(
            [
                ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                ops['pred_translations'], ops['pred_remaining_angle_logits']
            ],
            feed_dict=feed_dict)
        #  step_in_epochs = float(epoch) + float(end_idx / len(train_idxs))
        train_writer.add_summary(summary, step)

        #  pred_val = np.argmax(pred_val, 1)
        #  correct = np.sum(pred_val == current_label[start_idx:end_idx])
        #  total_correct += correct
        #  total_seen += cfg.training.batch_size
        loss_sum += loss_val
        pbar.set_postfix(last_loss_str=f'{loss_val:.5f}')
        #  if batch_idx == 0:
        #  logger.info(np.concatenate([pred_val, transforms], axis=1)[:5,:])

    logger.info('train mean loss: %f' % (loss_sum / float(num_batches)))
    #  logger.info('accuracy: %f' % (total_correct / float(total_seen)))
    train_writer.flush()
def run_inference(configs, ids, ppath):
    """  """

    print(configs)
    load_config(configs[0])

    cfg = configGlobal
    MODEL = MODEL_tp8
    eval_epoch = configs[1]

    VAL_INDICES = ids

    with tf.Graph().as_default():
        # Define model on the device
        with tf.device('/gpu:' + str(cfg.gpu_index)):
            pcs1, pcs2, translations, rel_angles, pc1centers, pc2centers, pc1angles, pc2angles = MODEL.placeholder_inputs(
                cfg.training.batch_size, cfg.model.num_points)
            is_training_pl = tf.placeholder(tf.bool, shape=())

            # Global-step counter. No optimizer is created during inference,
            # so it is never incremented; likely kept for checkpoint compatibility.
            batch = tf.Variable(0)

            # Get model and loss
            end_points = MODEL.get_model(pcs1, pcs2, is_training_pl)
            loss = MODEL.get_loss(pcs1, pcs2, translations, rel_angles,
                                  pc1centers, pc2centers, pc1angles, pc2angles,
                                  end_points)

            # Add ops to save and restore all the variables.
            saver = tf.train.Saver(max_to_keep=1000)

        # Create a session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False

        sess = tf.Session(config=config)

        merged = tf.summary.merge_all()  # merged summary op, fetched in sess.run below

        # Init variables
        init = tf.global_variables_initializer()

        # To fix the bug introduced in TF 0.12.1 as in
        # http://stackoverflow.com/questions/41543774/invalidargumenterror-for-tensor-bool-tensorflow-0-12-1
        # sess.run(init)
        sess.run(init, {is_training_pl: True})

        ops = {
            'pcs1': pcs1,  # input
            'pcs2': pcs2,  # input
            'translations': translations,  # GT
            'rel_angles': rel_angles,  # GT
            'is_training_pl': is_training_pl,  # toggles dropout/batch-norm mode
            'pred_translations': end_points['pred_translations'],
            'pred_remaining_angle_logits': end_points['pred_remaining_angle_logits'],
            'pc1centers': pc1centers,  # GT
            'pc2centers': pc2centers,  # GT
            'pc1angles': pc1angles,  # GT
            'pc2angles': pc2angles,  # GT
            'pred_s1_pc1centers': end_points['pred_s1_pc1centers'],
            'pred_s1_pc2centers': end_points['pred_s1_pc2centers'],
            'pred_s2_pc1centers': end_points['pred_s2_pc1centers'],
            'pred_s2_pc2centers': end_points['pred_s2_pc2centers'],
            'pred_pc1angle_logits': end_points['pred_pc1angle_logits'],
            'pred_pc2angle_logits': end_points['pred_pc2angle_logits'],
            'loss': loss,  # computed loss tensor
            'merged': merged,  # merged summary op
        }
        # 'train_op' and 'step' are omitted: nothing is trained during inference.

        # Load existing model!
        model_to_load = cfg.logging.logdir
        assert os.path.isfile(f'{model_to_load}/model-{eval_epoch}.index'
                              ), f'{model_to_load}/model-{eval_epoch}.index'
        saver.restore(sess, f'{model_to_load}/model-{eval_epoch}')

        start_epoch = int(eval_epoch)
        epoch = start_epoch

        is_training = False
        batch_size = cfg.training.batch_size

        val_idxs = VAL_INDICES
        num_batches = int(np.ceil(len(val_idxs) / batch_size))
        num_full_batches = int(np.floor(len(val_idxs) / batch_size))

        loss_sum = 0

        #  step_in_epochs = epoch + 1
        eval_dir = f'{cfg.logging.logdir}/val/eval{str(epoch).zfill(6)}'
        base_eval_dir = eval_dir

        if os.path.isdir(eval_dir):
            os.rename(eval_dir, f'{eval_dir}_backup_{int(time.time())}')
        os.makedirs(eval_dir, exist_ok=True)

        # Prediction containers
        all_pred_translations = np.empty((len(val_idxs), 3), dtype=np.float32)
        all_pred_angles = np.empty((len(val_idxs), 1), dtype=np.float32)
        all_pred_pc1centers = np.empty((len(val_idxs), 3), dtype=np.float32)
        all_pred_pc2centers = np.empty((len(val_idxs), 3), dtype=np.float32)

        # The conversion from logits to angles is done outside the model
        # (via MODEL.classLogits2angle below); TODO: move it into the model.
        all_pred_s2_pc1angles = np.empty((len(val_idxs), 1), dtype=np.float32)
        all_pred_s2_pc2angles = np.empty((len(val_idxs), 1), dtype=np.float32)

        # Ground-truth containers
        all_gt_translations = np.empty((len(val_idxs), 3), dtype=np.float32)
        all_gt_angles = np.empty((len(val_idxs), 1), dtype=np.float32)
        all_gt_pc1centers = np.empty((len(val_idxs), 3), dtype=np.float32)
        all_gt_pc1angles = np.empty((len(val_idxs), 1), dtype=np.float32)

        cumulated_times = 0.
        for batch_idx in range(num_batches):

            # FIXME: error here!
            start_idx = batch_idx * batch_size
            end_idx = min((batch_idx + 1) * batch_size, len(val_idxs))

            print(f'----- Samples {start_idx}/{len(VAL_INDICES)} -----')

            # TODO: Create a class to solve this mess
            pcs1, pcs2, translations, rel_angles, pc1centers, pc2centers, pc1angles, pc2angles = provider.load_batch(
                val_idxs[start_idx:end_idx], path=ppath)
            #print(pc1centers)

            # TODO: Investigate a better way to do this feed_dict
            feed_dict = {
                ops['pcs1']: pcs1,
                ops['pcs2']: pcs2,
                ops['translations']: translations,
                ops['rel_angles']: rel_angles,
                ops['is_training_pl']: is_training,
                ops['pc1centers']: pc1centers,
                ops['pc2centers']: pc2centers,
                ops['pc1angles']: pc1angles,
                ops['pc2angles']: pc2angles,
            }
            start = time.time()

            # TODO: IDEM Create class to solve this mess
            summary, loss_val, pred_translations, pred_pc1angle_logits, pred_pc2angle_logits, pred_remaining_angle_logits, _, _, pred_pc1centers, pred_pc2centers = sess.run(
                [
                    ops['merged'], ops['loss'], ops['pred_translations'],
                    ops['pred_pc1angle_logits'], ops['pred_pc2angle_logits'],
                    ops['pred_remaining_angle_logits'],
                    ops['pred_s1_pc1centers'], ops['pred_s1_pc2centers'],
                    ops['pred_s2_pc1centers'], ops['pred_s2_pc2centers']
                ],
                feed_dict=feed_dict)

            # Accumulate wall-clock inference time for the mean-time statistic.
            cumulated_times += time.time() - start
            # The last batch may be partial; the network always returns
            # batch_size rows, so truncate the padded tail.
            actual_batch_size = end_idx - start_idx

            pred_translations = pred_translations[:actual_batch_size]
            pred_pc1centers = pred_pc1centers[:actual_batch_size]
            pred_pc2centers = pred_pc2centers[:actual_batch_size]

            # Convert the angle class logits back to continuous angles
            pred_angles_pc1 = MODEL.classLogits2angle(
                pred_pc1angle_logits[:actual_batch_size])
            pred_angles_pc2 = MODEL.classLogits2angle(
                pred_pc2angle_logits[:actual_batch_size])
            pred_angles_remaining = MODEL.classLogits2angle(
                pred_remaining_angle_logits[:actual_batch_size])
            # Final relative angle: per-cloud orientation difference plus residual
            pred_angles = pred_angles_pc2 - pred_angles_pc1 + pred_angles_remaining

            # Only full batches contribute to the loss sum; a padded partial
            # batch would bias the mean.
            if actual_batch_size == batch_size:
                loss_sum += loss_val

            # Running statistics; recomputed every batch, only the final
            # values are meaningful.
            mean_per_transform_loss = loss_sum / num_full_batches if num_full_batches > 0 else 0.
            mean_execution_time = cumulated_times / float(len(val_idxs))

            print(f'batch loss: {loss_val}')

            # Scatter the batch results into the full arrays. TODO: vectorize.
            for idx in range(actual_batch_size):
                global_idx = start_idx + idx

                all_pred_translations[global_idx] = pred_translations[idx]
                all_pred_angles[global_idx] = pred_angles[idx]
                all_pred_pc1centers[global_idx] = pred_pc1centers[idx]
                all_pred_pc2centers[global_idx] = pred_pc2centers[idx]

                all_gt_translations[global_idx] = translations[idx]
                all_gt_angles[global_idx] = rel_angles[idx]
                all_gt_pc1centers[global_idx] = pc1centers[idx]
                all_gt_pc1angles[global_idx] = pc1angles[idx]

    print("Results fully computed")

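    # Keep only the planar (x, y) components plus angles for the 2-D summary.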
    info = np.hstack((all_pred_translations[:, :-1], all_pred_angles,
                      all_pred_pc1centers[:, :-1], all_pred_pc2centers[:, :-1],
                      all_gt_translations[:, :-1], all_gt_angles,
                      all_gt_pc1centers[:, :-1], all_gt_pc1angles))
    names = [
        'pred_trans_x', 'pred_trans_y', 'pred_angles', 'pred_pc1center_x',
        'pred_pc1center_y', 'pred_pc2center_x', 'pred_pc2center_y',
        'gt_trans_x', 'gt_trans_y', 'gt_angles', 'gt_pc1centers_x',
        'gt_pc1centers_y', 'gt_pc1angles'
    ]

    df = DataFrame(info, columns=names)

    return df
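
# Hypothetical usage sketch (config path, epoch, ids and data path are placeholders):
#   df = run_inference(('path/to/config.json', '100'), ids=[0, 1, 2],
#                      ppath='path/to/data')
#   print(df[['pred_trans_x', 'pred_trans_y', 'pred_angles']].head())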