Example #1
def test():
    with tf.Graph().as_default(), tf.device('/gpu:0'):  # Use GPU 0
        # Training parameters
        # Count the number of training & eval data
        num_data = utils.count_text_lines(args.test_filenames_file)
        print('===> Test: There are %d test files' % num_data)

        steps_per_epoch = np.ceil(num_data / args.batch_size).astype(np.int32)

        num_total_steps = args.max_epoches * steps_per_epoch
        # Load data
        data_loader = Dataloader(test_dataloader_params,
                                 shuffle=False)  # no shuffle
        # Debug test train_dataloader
        # test_synthetic_dataloader(data_loader, True)

        I1_batch = data_loader.I1_batch
        I2_batch = data_loader.I2_batch
        I1_aug_batch = data_loader.I1_aug_batch
        I2_aug_batch = data_loader.I2_aug_batch
        I_batch = data_loader.I_batch
        I_prime_batch = data_loader.I_prime_batch
        full_I_batch = data_loader.full_I_batch
        full_I_prime_batch = data_loader.full_I_prime_batch
        pts1_batch = data_loader.pts1_batch
        gt_batch = data_loader.gt_batch
        patch_indices_batch = data_loader.patch_indices_batch

        # Collect per-sample corner errors
        h_losses = []
        # Create a session
        gpu_options = tf.GPUOptions(
            allow_growth=True
        )  # Do not pre-allocate all GPU memory; grow allocation as needed
        config = tf.ConfigProto(
            allow_soft_placement=True, gpu_options=gpu_options
        )  # Soft placement falls back to CPU if no GPU is available

        sess = tf.Session(config=config)

        # Initialize
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        # Threads coordinator
        coordinator = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coordinator)

        num_samples = 0
        total_num_fail = 0
        h_losses = []
        total_time = 0

        # Start test
        for step in range(num_total_steps):
            full_I_value, full_I_prime_value, I2_value, I1_value, I1_aug_value, I2_aug_value, I_value, I_prime_value, pts_1_value, full_gt_corr_value = sess.run(
                [
                    full_I_batch, full_I_prime_batch, I2_batch, I1_batch,
                    I1_aug_batch, I2_aug_batch, I_batch, I_prime_batch,
                    pts1_batch, gt_batch
                ])
            for i in range(args.batch_size):
                num_samples += 1
                down_ratio = 2
                I_sample = utils.denorm_img(full_I_value[i]).astype(np.uint8)
                I_prime_sample = utils.denorm_img(
                    full_I_prime_value[i]).astype(np.uint8)
                corr1_sample = full_gt_corr_value[i, 0:8].reshape([
                    4, 2
                ]) / down_ratio  # (gt is for 480x640. Here, use 240x320)
                corr2_sample = full_gt_corr_value[i, 8:16].reshape(
                    [4, 2]) / down_ratio
                # Use RANSAC_homography/ Direct method to find the homography (delta 4 points)
                sample_start_time = timeit.default_timer()
                if args.method == 'direct':
                    pred_h4p, _, not_found = direct_h.find_homography(
                        I_sample,
                        I_prime_sample,
                        corr1_sample,
                        corr2_sample,
                        visual=args.visual,
                        method=args.method,
                        num_iterations=args.num_iterations,
                        return_h_inv=False)
                # RANSAC Methods
                else:
                    pred_h4p, _, not_found = ransac_h.find_homography(
                        I_sample,
                        I_prime_sample,
                        corr1_sample,
                        corr2_sample,
                        visual=args.visual,
                        method=args.method,
                        min_match_count=args.num_features,
                        return_h_inv=False)
                sample_run_time = timeit.default_timer() - sample_start_time
                total_time += sample_run_time
                # Set maximum value for every value of delta h4p
                pred_h4p[np.where(pred_h4p >= 80)] = 80
                pred_h4p[np.where(pred_h4p <= -80)] = -80

                pred_corr2_sample = pred_h4p[0] + corr1_sample
                h_loss_value = np.sqrt(
                    np.mean(np.square(pred_corr2_sample - corr2_sample)))
                # Evaluate the result
                # There are two cases of failure
                if not_found:  # Cannot find homography
                    total_num_fail += 1
                    print('===> Fail case 1: Not found homography')

                else:
                    # H_loss if homography is identity matrix
                    h_loss_identity = np.sqrt(
                        np.mean(np.square(corr1_sample - corr2_sample)))
                    if h_loss_identity < h_loss_value:
                        print('===> Fail case 2:  error > identity')
                        total_num_fail += 1
                        h_loss_value = h_loss_identity
                h_losses.append(h_loss_value)

                _ = utils.progress_bar(
                    step * args.batch_size + i,
                    num_total_steps * args.batch_size,
                    ' Test| image %d, h_loss %.3f, h_loss_average %.3f, fail %d/%d, time %.4f'
                    % (i, h_loss_value, np.mean(h_losses), total_num_fail,
                       num_samples, sample_run_time))

                # Save visualization
                if args.save_visual:
                    # Query full images
                    img1_with_4pts = I_sample.astype(np.uint8)
                    img2_with_4pts = I_prime_sample.astype(np.uint8)
                    # Draw prediction
                    cv2.polylines(img2_with_4pts,
                                  np.int32([pred_corr2_sample]), 1,
                                  (5, 225, 225), 3)

                    point_color = (0, 255, 255)
                    line_color_set = [(255, 102, 255), (51, 153, 255),
                                      (102, 255, 255), (255, 255, 0),
                                      (102, 102, 244), (150, 202, 178),
                                      (153, 240, 142), (102, 0, 51),
                                      (51, 51, 0)]
                    # Draw 4 points (ground truth)
                    full_stack_images = utils.draw_matches(
                        img1_with_4pts,
                        corr1_sample,
                        img2_with_4pts,
                        corr2_sample,
                        'tmp.jpg',
                        color_set=line_color_set,
                        show=False)

                    # Save image
                    visual_file_name = os.path.join(
                        args.results_dir,
                        str(step * args.batch_size + i) + '_loss_' +
                        str(h_loss_value) + '.jpg')
                    #cv2.putText(full_stack_images, 'RMSE %.2f'%h_loss,(800, 100), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,255),2)
                    cv2.imwrite(visual_file_name, full_stack_images)
                    print('Wrote file %s' % visual_file_name)

        result_dict = {
            'method': args.method,
            'h_losses': h_losses,
            'h_loss_mu': np.mean(h_losses),
            'h_loss_std': np.std(h_losses)
        }
        import pickle  # cPickle is Python 2 only; pickle works in both
        result_file_dir = os.path.join(args.results_dir, 'h_losses.pkl')
        with open(result_file_dir, 'wb') as f:
            pickle.dump(result_dict, f)
            print('===> Successfully wrote results to %s' % result_file_dir)
        print('==========================================================')
        mean_h_loss, std_h_loss = np.mean(np.array(h_losses)), np.std(
            np.array(h_losses))
        print('===> H_loss:', mean_h_loss, '+/-', std_h_loss)
        print('Running time:', total_time / num_samples)
        fail_percent = total_num_fail * 1.0 / (num_samples)
        print('Failure %.3f' % (fail_percent))
        output_line = [
            num_samples, mean_h_loss, std_h_loss, fail_percent,
            total_time / num_samples
        ]
        print('output_line:', output_line)
        with open(os.path.join(args.log_dir, 'results.txt'), 'w') as f:
            np.savetxt(f, [output_line], delimiter=' ', fmt='%.5f')
            print('===> Wrote results to file %s' %
                  os.path.join(args.log_dir, 'results.txt'))
        tops_list = utils.find_percentile(h_losses)
        print('===> Percentile Values: (20, 50, 80, 100):')
        print(tops_list)
        print('======> End! ====================================')
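The per-sample error above is the RMSE between predicted and ground-truth corner positions, with two failure modes: the estimator finds no homography at all, or its error exceeds that of the identity warp. A minimal sketch of that evaluation logic, assuming 4x2 NumPy corner arrays (the function names are illustrative, not from the repository):

import numpy as np

def corner_rmse(pred_corr2, corr2):
    # RMSE between predicted and ground-truth corner positions, in pixels
    return np.sqrt(np.mean(np.square(pred_corr2 - corr2)))

def evaluate_sample(pred_h4p, corr1, corr2, not_found):
    # Mirrors the two failure cases handled in the loop above
    pred_corr2 = corr1 + pred_h4p                     # predicted corner positions
    h_loss = corner_rmse(pred_corr2, corr2)
    failed = False
    if not_found:                                     # case 1: no homography found
        failed = True
    else:
        h_loss_identity = corner_rmse(corr1, corr2)   # error of the identity warp
        if h_loss_identity < h_loss:                  # case 2: worse than doing nothing
            failed = True
            h_loss = h_loss_identity
    return h_loss, failed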
Example #2
    def run_test(self, model_index=0):
        # Create a session
        gpu_options = tf.GPUOptions(
            allow_growth=True
        )  # Do not pre-allocate all GPU memory; grow allocation as needed
        config = tf.ConfigProto(
            allow_soft_placement=True, gpu_options=gpu_options
        )  # Soft placement falls back to CPU if no GPU is available

        sess = tf.Session(config=config)

        # Saver
        log_name = args.loss_type
        summary_writer = tf.summary.FileWriter(args.log_dir, sess.graph)
        train_saver = tf.train.Saver(max_to_keep=20)  # Keep maximum 20 models

        # Initialize
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        # Threads coordinator
        coordinator = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coordinator)

        # Restore
        print(args.model_dir)
        train_saver.restore(sess, tf.train.latest_checkpoint(args.model_dir))
        # Index of the image want to display
        index = 0
        h_total_loss_value = 0
        rec_total_loss_value = 0
        ssim_total_loss_value = 0
        l1_total_loss_value = 0
        total_num_fail_value = 0
        # Start testing
        h_losses_array = []
        total_num_fail = 0

        for step in range(self.num_total_steps):
            num_fail_value, h_loss_value, rec_loss_value, ssim_loss_value, l1_loss_value, l1_smooth_loss_value, pred_I2_value, I1_aug_value, I2_aug_value, I_value, I_prime_value, pts1_value, gt_value, pred_h4p_value = sess.run(
                [
                    self.total_num_fail, self.total_h_loss,
                    self.total_rec_loss, self.total_ssim_loss,
                    self.total_l1_loss, self.total_l1_smooth_loss,
                    self.pred_I2, self.I1_aug, self.I2_aug, self.I,
                    self.I_prime, self.pts1, self.gt, self.pred_h4p
                ])

            h_total_loss_value += h_loss_value
            rec_total_loss_value += rec_loss_value
            ssim_total_loss_value += ssim_loss_value
            l1_total_loss_value += l1_loss_value
            total_num_fail_value += num_fail_value
            h_losses_array.append(h_loss_value)

            if args.save_visual:
                I_sample = utils.denorm_img(I_value[0]).astype(np.uint8)
                I_prime_sample = utils.denorm_img(I_prime_value[0]).astype(
                    np.uint8)
                pts1_sample = pts1_value[0].reshape([4, 2]).astype(np.float32)
                gt_h4p_sample = gt_value[0].reshape([4, 2]).astype(np.float32)

                pts2_sample = pts1_sample + gt_h4p_sample

                pred_h4p_sample = pred_h4p_value[0].reshape([4, 2]).astype(
                    np.float32)
                pred_pts2_sample = pts1_sample + pred_h4p_sample

                # Save
                visual_file_name = str(
                    step *
                    args.batch_size) + '_' + args.loss_type + '_loss_' + str(
                        h_loss_value) + '.jpg'
                utils.save_correspondences_img(I_prime_sample, I_sample,
                                               pts1_sample, pts2_sample,
                                               pred_pts2_sample,
                                               args.results_dir,
                                               visual_file_name)

            if step % 10 == 0:
                print('===> This iteration num Fail: %d \n' % num_fail_value)
                total_time = utils.progress_bar(
                    step, self.num_total_steps,
                    'Test, h_loss %4.3f, rec_loss %4.3f, ssim_loss %4.3f, l1_loss %4.3f, fail_percent %4.4f'
                    % (h_total_loss_value / (step + 1), rec_total_loss_value /
                       (step + 1), ssim_total_loss_value /
                       (step + 1), l1_total_loss_value /
                       (step + 1), total_num_fail_value /
                       (step + 1) / args.batch_size))

            if args.visual and step % 10 == 0:
                plt.subplot(2, 1, 1)
                plt.imshow(np.concatenate([
                    pred_I2_value[index, :, :, 0].astype(np.uint8),
                    I2_aug_value[index, :, :, 0]
                ], 1),
                           cmap='gray')
                plt.title('Pred I2 vs I2')
                plt.subplot(2, 1, 2)
                plt.imshow(np.concatenate([
                    I1_aug_value[index, :, :, 0], I2_aug_value[index, :, :, 0]
                ], 1),
                           cmap='gray')
                plt.title('I1_aug vs I2_aug')
                plt.show()
                plt.pause(0.05)

        # Final result
        total_time = utils.progress_bar(
            step, self.num_total_steps,
            'Test, h_loss %4.3f, rec_loss %4.3f, ssim_loss %4.3f, l1_loss %4.3f, fail_percent %4.4f'
            %
            (h_total_loss_value / (step + 1), rec_total_loss_value /
             (step + 1), ssim_total_loss_value /
             (step + 1), l1_total_loss_value /
             (step + 1), total_num_fail_value / (step + 1) / args.batch_size))

        # Summarize results
        print('====> Result for RHO:', RHO, ' loss ', args.loss_type,
              ' noise ', args.do_augment)
        print('|Steps  |   h_loss   |    l1_loss   |  Fail percent    |')
        print(step, h_total_loss_value / (step + 1),
              l1_total_loss_value / (step + 1),
              100 * total_num_fail_value / (step + 1) / args.batch_size)

        tops_list = utils.find_percentile(h_losses_array)
        print('===> Percentile Values: (20, 50, 80, 100):')
        print(tops_list)
        print('======> End! ====================================')
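The session setup, variable initialization, checkpoint restore, and queue-runner startup in Example #2 follow the standard TF1 pattern. A condensed sketch of just that plumbing, assuming the graph and its variables are already built (the helper name is illustrative):

import tensorflow as tf

def restore_latest(model_dir):
    # TF1-style session creation, initialization, checkpoint restore and queue runners
    gpu_options = tf.GPUOptions(allow_growth=True)        # grow GPU memory on demand
    config = tf.ConfigProto(allow_soft_placement=True,    # fall back to CPU if needed
                            gpu_options=gpu_options)
    sess = tf.Session(config=config)
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    saver = tf.train.Saver(max_to_keep=20)
    saver.restore(sess, tf.train.latest_checkpoint(model_dir))
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    return sess, saver, coord, threads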
Example #3
def test(args):
    # Load data
    TestDataset = SyntheticDataset(data_path=args.data_path,
                                   mode=args.mode,
                                   img_h=args.img_h,
                                   img_w=args.img_w,
                                   patch_size=args.patch_size,
                                   do_augment=args.do_augment)
    test_loader = DataLoader(TestDataset, batch_size=1)
    print('===> Test: There are {} test files'.format(len(TestDataset)))

    # Load model
    net = HomographyModel()
    model_path = os.path.join(args.model_dir, args.model_name)
    state = torch.load(model_path)
    net.load_state_dict(state.state_dict())
    if torch.cuda.is_available():
        net = net.cuda()

    print("start testing")

    with torch.no_grad():
        net.eval()
        test_l1_loss = 0.0
        test_h_loss = 0.0
        h_losses_array = []
        for i, batch_value in enumerate(test_loader):
            I1_aug_batch = batch_value[2].float()
            I2_aug_batch = batch_value[3].float()
            I_batch = batch_value[4].float()
            I_prime_batch = batch_value[5].float()
            pts1_batch = batch_value[6].float()
            gt_batch = batch_value[7].float()
            patch_indices_batch = batch_value[8].float()

            if torch.cuda.is_available():
                I1_aug_batch = I1_aug_batch.cuda()
                I2_aug_batch = I2_aug_batch.cuda()
                I_batch = I_batch.cuda()
                pts1_batch = pts1_batch.cuda()
                gt_batch = gt_batch.cuda()
                patch_indices_batch = patch_indices_batch.cuda()

            batch_out = net(I1_aug_batch, I2_aug_batch, I_batch, pts1_batch, gt_batch, patch_indices_batch)
            h_loss = batch_out['h_loss']
            rec_loss = batch_out['rec_loss']
            ssim_loss = batch_out['ssim_loss']
            l1_loss = batch_out['l1_loss']
            pred_h4p_value = batch_out['pred_h4p']

            test_h_loss += h_loss.item()
            test_l1_loss += l1_loss.item()
            h_losses_array.append(h_loss.item())

            if args.save_visual:
                I_sample = utils.denorm_img(I_batch[0].cpu().numpy()).astype(np.uint8)
                I_prime_sample = utils.denorm_img(I_prime_batch[0].numpy()).astype(np.uint8)
                pts1_sample = pts1_batch[0].cpu().numpy().reshape([4, 2]).astype(np.float32)
                gt_h4p_sample = gt_batch[0].cpu().numpy().reshape([4, 2]).astype(np.float32)

                pts2_sample = pts1_sample + gt_h4p_sample

                pred_h4p_sample = pred_h4p_value[0].cpu().numpy().reshape([4, 2]).astype(np.float32)
                pred_pts2_sample = pts1_sample + pred_h4p_sample

                # Save
                visual_file_name = ('%s' % i).zfill(4) + '.jpg'
                utils.save_correspondences_img(I_prime_sample, I_sample, pts1_sample, pts2_sample, pred_pts2_sample,
                                               args.results_dir, visual_file_name)

            print("Testing: h_loss: {:4.3f}, rec_loss: {:4.3f}, ssim_loss: {:4.3f}, l1_loss: {:4.3f}".format(
                h_loss.item(), rec_loss.item(), ssim_loss.item(), l1_loss.item()
            ))

    print('|Test size  |   h_loss   |    l1_loss   |')
    print(len(test_loader), test_h_loss / len(test_loader), test_l1_loss / len(test_loader))

    tops_list = utils.find_percentile(h_losses_array)
    print('===> Percentile Values: (20, 50, 80, 100):')
    print(tops_list)
    print('======> End! ====================================')
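Example #3 reads a number of attributes from args. A hypothetical argparse wiring for calling test(args); the flag names mirror the attributes the function accesses, but the default values are placeholders, not the repository's actual settings:

import argparse

if __name__ == '__main__':
    # Illustrative CLI wiring only; defaults are guesses, not canonical values
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', default='data/synthetic')
    parser.add_argument('--mode', default='test')
    parser.add_argument('--img_h', type=int, default=240)
    parser.add_argument('--img_w', type=int, default=320)
    parser.add_argument('--patch_size', type=int, default=128)
    parser.add_argument('--do_augment', type=float, default=0.0)
    parser.add_argument('--model_dir', default='checkpoints')
    parser.add_argument('--model_name', default='model.pt')
    parser.add_argument('--results_dir', default='results')
    parser.add_argument('--save_visual', action='store_true')
    args = parser.parse_args()
    test(args)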
Example #4
    def run_test(self, model_index=0):
        # Create a session
        gpu_options = tf.GPUOptions(
            allow_growth=True
        )  # Do not pre-allocate all GPU memory; grow allocation as needed
        config = tf.ConfigProto(
            allow_soft_placement=True, gpu_options=gpu_options
        )  # Soft placement falls back to CPU if no GPU is available

        sess = tf.Session(config=config)

        # Saver
        log_name = args.loss_type
        summary_writer = tf.summary.FileWriter(args.log_dir, sess.graph)
        train_saver = tf.train.Saver(max_to_keep=20)  # Keep maximum 20 models

        # Initialize
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        # Threads coordinator
        coordinator = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coordinator)

        # Restore
        print(args.save_model_dir)
        train_saver.restore(sess,
                            tf.train.latest_checkpoint(args.save_model_dir))
        # Index of the image want to display
        index = 0
        rec_total_loss_value = 0
        ssim_total_loss_value = 0
        l1_total_loss_value = 0
        # Start testing
        total_num_fail_value = 0
        h_losses = []
        num_samples = 0
        for step in range(self.num_total_steps):
            full_gt_corr_value, pts1_value, pred_h4p_value, rec_loss_value, l1_loss_value, l1_smooth_loss_value, pred_I2_value, I1_aug_value, I2_aug_value, full_I_value, full_I_prime_value = sess.run(
                [
                    self.gt_corr, self.pts1, self.pred_h4p,
                    self.total_rec_loss, self.total_l1_loss,
                    self.total_l1_smooth_loss, self.pred_I2, self.I1_aug,
                    self.I2_aug, self.full_I, self.full_I_prime
                ])

            rec_total_loss_value += rec_loss_value
            l1_total_loss_value += l1_loss_value

            # Use the manually chosen correspondences (ground truth) to compute the homography error
            # pred_h4p_value: batch_size x 8
            # These values cannot be compared with the ground truth directly, since the four points on the first image differ between samples.
            for i in range(args.batch_size):
                full_I_sample = utils.denorm_img(full_I_value[i]).astype(
                    np.uint8)
                full_I_prime_sample = utils.denorm_img(
                    full_I_prime_value[i]).astype(np.uint8)
                # Ratio between the full image and the image used for deep learning
                down_ratio = args.full_img_h / args.img_h

                delta_h4p_sample = pred_h4p_value[i].reshape([4, 2]).astype(
                    np.float32)
                pts1_sample = pts1_value[i].reshape([4, 2]).astype(np.float32)

                # Project onto the full image
                full_pts2_sample = (delta_h4p_sample +
                                    pts1_sample) * down_ratio
                full_pts1_sample = pts1_sample * down_ratio
                # Find homography
                full_H = cv2.getPerspectiveTransform(full_pts1_sample,
                                                     full_pts2_sample)
                full_H_inv = np.linalg.inv(full_H)
                # Obtain the chosen correspondences on which we compute the homography error
                full_corr1_sample = full_gt_corr_value[i, 0:8].reshape(
                    [4, 2]) / 2  # gt points were chosen on 480x640 images; evaluated at half size
                full_corr2_sample = full_gt_corr_value[i, 8:16].reshape(
                    [4, 2]) / 2
                # Apply estimated homography to the corr1
                pred_full_corr2_sample = cv2.perspectiveTransform(
                    np.array([full_corr1_sample]), full_H_inv)[0]

                # Find RMSE
                h_loss = np.sqrt(
                    np.mean(
                        np.square(pred_full_corr2_sample - full_corr2_sample))
                )  # RMSE, comparable with the SIFT, ORB and direct baselines
                h_loss_identity = np.sqrt(
                    np.mean(np.square(full_corr1_sample - full_corr2_sample)))

                if h_loss > h_loss_identity:
                    print("===> Found %.3f > %.3f = RMSE_identity" %
                          (h_loss, h_loss_identity))
                    total_num_fail_value += 1
                    #TODO: conditionally
                    h_loss = h_loss_identity

                num_samples += 1
                h_losses.append(h_loss)

                # VISUALIZATION
                # Save visualization
                if args.save_visual:
                    # Query full images
                    visual_file_name = str(
                        step * args.batch_size +
                        i) + '_Unsupervised_' + '_loss_' + str(h_loss) + '.jpg'
                    utils.save_correspondences_img(
                        full_I_sample, full_I_prime_sample, full_corr1_sample,
                        full_corr2_sample, pred_full_corr2_sample,
                        args.results_dir, visual_file_name)
                # Save visualization for other methods - for report
                if args.do_report:
                    for fb_method in ['SIFT']:
                        SIFT_pred_h4p, _, not_found = ransac_h.find_homography(
                            full_I_sample,
                            full_I_prime_sample,
                            full_corr1_sample,
                            visual=False,
                            method='SIFT',
                            min_match_count=25,
                            return_h_inv=False)
                        SIFT_pred_full_corr2_sample = SIFT_pred_h4p[
                            0] + full_corr1_sample
                        SIFT_h_loss = np.sqrt(
                            np.mean(
                                np.square(SIFT_pred_full_corr2_sample -
                                          full_corr2_sample)))
                        visual_file_name = str(
                            step * args.batch_size +
                            i) + '_' + fb_method + '_loss_' + str(
                                SIFT_h_loss) + '.jpg'
                        utils.save_correspondences_img(
                            full_I_sample, full_I_prime_sample,
                            full_corr1_sample, full_corr2_sample,
                            SIFT_pred_full_corr2_sample, args.results_dir,
                            visual_file_name)

                    # Direct method
                    direct_pred_h4p, _, not_found = direct_h.find_homography(
                        full_I_sample,
                        full_I_prime_sample,
                        full_corr1_sample,
                        visual=False,
                        method='direct',
                        num_iterations=1000,
                        return_h_inv=False)
                    direct_pred_full_corr2_sample = direct_pred_h4p[
                        0] + full_corr1_sample
                    direct_h_loss = np.sqrt(
                        np.mean(
                            np.square(direct_pred_full_corr2_sample -
                                      full_corr2_sample)))
                    visual_file_name = str(step * args.batch_size +
                                           i) + '_direct' + '_loss_' + str(
                                               direct_h_loss) + '.jpg'
                    utils.save_correspondences_img(
                        full_I_sample, full_I_prime_sample, full_corr1_sample,
                        full_corr2_sample, direct_pred_full_corr2_sample,
                        args.results_dir, visual_file_name)

            if step % 1 == 0:
                total_time = utils.progress_bar(
                    step, self.num_total_steps,
                    'Test, h_loss %4.3f, rec_loss %4.3f, l1_loss %4.3f, fail_percent %4.4f'
                    % (np.mean(h_losses), rec_total_loss_value /
                       (step + 1), l1_total_loss_value /
                       (step + 1), total_num_fail_value /
                       (step + 1) / args.batch_size))

            if args.visual and step % 10 == 0:
                plt.subplot(2, 1, 1)
                plt.imshow(np.concatenate([
                    utils.denorm_img(pred_I2_value[index, :, :, 0]).astype(
                        np.uint8),
                    utils.denorm_img(I2_aug_value[index, :, :, 0])
                ], 1),
                           cmap='gray')
                plt.title('Pred I2 vs I2')
                plt.subplot(2, 1, 2)
                plt.imshow(np.concatenate([
                    I1_aug_value[index, :, :, 0], I2_aug_value[index, :, :, 0]
                ], 1),
                           cmap='gray')
                plt.title('I1_aug vs I2_aug')
                plt.show()
                plt.pause(0.05)

        # Final result
        total_time = utils.progress_bar(
            step, self.num_total_steps,
            'Test, h_loss %4.3f, rec_loss %4.3f, ssim_loss %4.3f, l1_loss %4.3f, fail_percent %4.4f'
            %
            (np.mean(h_losses), rec_total_loss_value /
             (step + 1), ssim_total_loss_value /
             (step + 1), l1_total_loss_value /
             (step + 1), total_num_fail_value / (step + 1) / args.batch_size))

        print('====> Result for RHO:', RHO, ' loss ', args.loss_type,
              ' noise ', args.do_augment)
        print('|Steps  |   h_loss   |    l1_loss   |  Fail percent    |')
        print(step, np.mean(h_losses), l1_total_loss_value / (step + 1),
              100 * total_num_fail_value / (step + 1) / args.batch_size)
        tops_list = utils.find_percentile(h_losses)
        print('===> Percentile Values: (20, 50, 80, 100):')
        print(tops_list)
        print('======> End! ====================================')
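The core of the evaluation in Example #4 is lifting the predicted 4-point displacement to the full-resolution image, fitting a homography with cv2.getPerspectiveTransform, and measuring the RMSE of the transferred ground-truth correspondences. A self-contained sketch of that computation, assuming the same array shapes as above (the function name is illustrative; the fixed /2 scaling of the ground-truth points follows the example):

import numpy as np
import cv2

def full_image_corner_error(pts1, pred_h4p, gt_corr, down_ratio):
    # Patch corners and predicted displacement, reshaped to 4x2
    pts1 = pts1.reshape(4, 2).astype(np.float32)
    delta = pred_h4p.reshape(4, 2).astype(np.float32)
    full_pts1 = pts1 * down_ratio                    # corners on the full image
    full_pts2 = (pts1 + delta) * down_ratio          # their predicted destinations
    # Homography implied by the predicted 4-point displacement
    H = cv2.getPerspectiveTransform(full_pts1.astype(np.float32),
                                    full_pts2.astype(np.float32))
    H_inv = np.linalg.inv(H)
    # Ground-truth correspondences were picked on 480x640 images, used at half size
    corr1 = gt_corr[0:8].reshape(4, 2) / 2
    corr2 = gt_corr[8:16].reshape(4, 2) / 2
    pred_corr2 = cv2.perspectiveTransform(np.array([corr1], np.float32), H_inv)[0]
    return np.sqrt(np.mean(np.square(pred_corr2 - corr2)))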
Example #5
def test():
  with tf.Graph().as_default(), tf.device('/gpu:0'): # Use GPU 0
    # Training parameters
    # Count the number of training & eval data
    num_data = utils.count_text_lines(args.test_filenames_file)
    print('===> Test: There are %d test files' % num_data)

    steps_per_epoch = np.ceil(num_data/args.batch_size).astype(np.int32)

    num_total_steps = args.max_epoches*steps_per_epoch
    # Load data
    data_loader = Dataloader(test_dataloader_params, shuffle=False) # no shuffle
    # Debug test train_dataloader
    # test_synthetic_dataloader(data_loader, True)

    I1_batch =  data_loader.I1_batch
    I2_batch =  data_loader.I2_batch
    I1_aug_batch =  data_loader.I1_aug_batch
    I2_aug_batch =  data_loader.I2_aug_batch
    I_batch  =  data_loader.I_batch
    I_prime_batch = data_loader.I_prime_batch
    pts1_batch     = data_loader.pts1_batch
    gt_batch      = data_loader.gt_batch
    patch_indices_batch = data_loader.patch_indices_batch

    # Collect per-sample corner errors
    h_losses = []
    # Create a session
    gpu_options = tf.GPUOptions(allow_growth=True) # Do not pre-allocate all GPU memory; grow allocation as needed
    config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options) # Soft placement falls back to CPU if no GPU is available

    sess = tf.Session(config=config)

    # Initialize
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    # Threads coordinator
    coordinator = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coordinator)

    num_samples = 0
    total_num_fail = 0
    h_total_loss_value = 0
    h_losses = []
    total_time = 0

    # Start test
    for step in range(num_total_steps):
      I2_value, I1_value, I1_aug_value, I2_aug_value, I_value, I_prime_value, pts_1_value, gt_value = sess.run([I2_batch, I1_batch, I1_aug_batch, I2_aug_batch, I_batch, I_prime_batch, pts1_batch, gt_batch])
      for i in range(args.batch_size):
        num_samples += 1
        I_sample = utils.denorm_img(I_value[i]).astype(np.uint8)
        I_prime_sample = utils.denorm_img(I_prime_value[i]).astype(np.uint8)
        pts_1_sample = pts_1_value[i].reshape([4,2])
        gt_sample = gt_value[i].reshape([4,2])
        pts_2_sample = pts_1_sample + gt_sample

        # Use RANSAC_homography to find the homography (delta 4 points)
        sample_start_time = timeit.default_timer()
        if args.method == 'direct':
          pred_h4p, _, not_found  = direct_h.find_homography(I_sample, I_prime_sample, pts_1_sample, pts_2_sample, visual=args.visual, method=args.method, num_iterations=args.num_iterations)
        else:
          pred_h4p, _, not_found  = ransac_h.find_homography(I_sample, I_prime_sample, pts_1_sample, pts_2_sample, visual=args.visual, method=args.method, min_match_count=args.num_features)
        sample_run_time = timeit.default_timer() - sample_start_time
        total_time += sample_run_time
        # Replace out-of-range values of delta h4p (outside [-RHO, RHO])
        pred_h4p[np.where(pred_h4p >= RHO)] = RHO * 2
        pred_h4p[np.where(pred_h4p <= -RHO)] = -RHO * 2

        h_loss_value = np.sqrt(np.mean(np.square(pred_h4p[0] - gt_sample)))
        # Evaluate the result

        # There are two cases of failure
        if not_found:  # Cannot find homography
          total_num_fail += 1
          print('===> Fail case 1: Not found homography')

        else:
          # H_loss if homography is identity matrix
          h_loss_identity = np.sqrt(np.mean(np.square(gt_sample)))
          if h_loss_identity < h_loss_value:
            print('===> Fail case 2:  error > identity')
            total_num_fail += 1
            h_loss_value = h_loss_identity
        h_losses.append(h_loss_value)
        h_loss_value = np.sqrt(np.mean(np.square(pred_h4p - gt_sample)))
        _ = utils.progress_bar(step*args.batch_size+i, num_total_steps*args.batch_size, ' Test| image %d, h_loss %.3f, h_loss_average %.3f, fail %d/%d, time %.4f'%(i, h_loss_value, np.mean(h_losses), total_num_fail, num_samples, sample_run_time))

    print('==========================================================')
    mean_h_loss, std_h_loss = np.mean(np.array(h_losses)), np.std(np.array(h_losses))
    print('===> H_loss:', mean_h_loss, '+/-', std_h_loss)
    print('Running time:', total_time / num_samples)
    fail_percent = total_num_fail * 1.0 / num_samples
    print('Failure %.3f' % fail_percent)
    output_line = [num_samples, mean_h_loss, std_h_loss, fail_percent, total_time / num_samples]
    print('output_line:', output_line)
    with open(os.path.join(args.log_dir, 'results.txt'), 'w') as f:
      np.savetxt(f, [output_line], delimiter=' ', fmt='%.5f')
      print('===> Wrote results to file %s' % os.path.join(args.log_dir, 'results.txt'))

    tops_list = utils.find_percentile(h_losses)
    print('===> Percentile Values: (30, 60, 100):')
    print(tops_list)
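utils.find_percentile itself is not shown in these examples. A plausible NumPy-based equivalent that reports the corner-error value below which a given fraction of test samples fall; this is an assumption about the helper, not its actual implementation, and note the percentile labels printed by the examples are inconsistent ((20, 50, 80, 100) versus (30, 60, 100)):

import numpy as np

def find_percentile(h_losses, percentiles=(20, 50, 80, 100)):
    # Hypothetical stand-in for utils.find_percentile: corner-error values
    # at the requested percentiles of the test distribution
    return [float(np.percentile(np.asarray(h_losses), p)) for p in percentiles]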