import time

import numpy as np
import tensorflow as tf  # Uses the TensorFlow 1.x API (tf.Session, tf.train.Saver)
from tqdm import tqdm

# NOTE: Dataloader, Test, Plot, load_model, metrics, settings, imsave_as_uint16_png,
# args, SELECTED_EVALUATION_SUBSET and SAVE_TEST_DISPARITIES are assumed to come
# from this project's own modules.


def test():
    # Local Variables
    num_test_images = None

    # -----------------------------------------
    #  Network Testing Model - Importing Graph
    # -----------------------------------------
    # Loads the dataset and restores a specified trained model.
    data = Dataloader()

    # Searches the dataset for the image/depth filenames of the selected subset
    if SELECTED_EVALUATION_SUBSET == 'test':
        _, _, _, _, num_test_images = data.get_test_data(
            test_split=args.test_split, test_file_path=args.test_file_path)
    elif SELECTED_EVALUATION_SUBSET == 'train':
        data.test_image_filenames, data.test_depth_filenames, _, _, num_test_images = data.get_train_data()
    else:
        raise ValueError(
            "Invalid SELECTED_EVALUATION_SUBSET: '{}'".format(SELECTED_EVALUATION_SUBSET))

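    # Builds the test-time graph; Test is expected to expose the filename
    # placeholders (tf_image_key, tf_depth_key) and the ops evaluated below.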
    model = Test(data)

    with tf.Session() as sess:
        print('\n[Test] Loading the model...')
        load_model(saver=tf.train.Saver(), sess=sess)

        # ==============
        #  Testing Loop
        # ==============
        if args.debug:
            num_test_images = 5  # Debug mode: only evaluate the first 5 images.

        # TODO: Create a Test class, as was done for Train and Valid, and declare this object inside it.
        if args.show_test_results:
            test_plot_obj = Plot(args.mode, title='Test Predictions')

        print("[Test] Generating Predictions...")
        pred_list, gt_list = [], []

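        # timer/timer2 use the "-time.time()" idiom: adding time.time() back at the end yields elapsed seconds.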
        timer = -time.time()
        for i in tqdm(range(num_test_images)):
            timer2 = -time.time()

            # Evaluate the network for the given image
            # data.test_depth_filenames = []  # Only for testing the following condition!!!  # FIXME: Currently, the code does not support this situation.
            if data.test_depth_filenames:  # It's not empty
                feed_test = {
                    model.tf_image_key: data.test_image_filenames[i],
                    model.tf_depth_key: data.test_depth_filenames[i]
                }

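                # Fetches the ground-truth depth at its original and network input resolutions (the first fetched value is unused).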
                _, depth, depth_resized = sess.run(model.depth_op, feed_test)

            else:
                feed_test = {model.tf_image_key: data.test_image_filenames[i]}
                depth = depth_resized = None  # No ground truth available; downstream uses must check for None.

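            # Runs the image and prediction ops in a single call (pred_up: prediction upsampled to the input image size).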
            _, image, image_resized, pred, pred_up = sess.run(
                model.image_op + model.pred_op, feed_test)

            # Clip predictions at 50 and 80 meters. If the model doesn't define these
            # ops, fall back to zero-filled placeholders so the display code still works.
            try:
                pred_50, pred_80 = sess.run(
                    [model.tf_pred_50, model.tf_pred_80], feed_test)
            except AttributeError:
                pred_50 = np.zeros((model.batch_size, ) +
                                   model.output_size.get_size())
                pred_80 = np.zeros((model.batch_size, ) +
                                   model.output_size.get_size())

            # Fill arrays for the later metrics evaluation
            if args.eval_tool == 'monodepth' and depth is not None:
                pred_list.append(pred_up[0, :, :, 0])
                gt_list.append(depth[:, :, 0])

            # Saves the predictions and ground-truth depth images as uint16 PNGs
            if SAVE_TEST_DISPARITIES or args.eval_tool in ('monodepth', 'kitti_depth'):
                # Converts the prediction images from float32 to uint16
                imsave_as_uint16_png(
                    settings.output_tmp_pred_dir + 'pred' + str(i) + '.png',
                    pred_up[0])

                if depth is not None:
                    imsave_as_uint16_png(
                        settings.output_tmp_gt_dir + 'gt' + str(i) + '.png',
                        depth)

            timer2 += time.time()

            # Show Results
            # FIXME: Plotting currently requires ground-truth depth, hence the None check.
            if args.show_test_results and depth is not None:
                # TODO: Make an 'images_to_display' list and append the images to it.
                test_plot_obj.show_test_results(
                    image=image,
                    depth=depth[:, :, 0],
                    image_resized=image_resized,
                    depth_resized=depth_resized[:, :, 0],
                    pred=pred[0, :, :, 0],
                    pred_up=pred_up[0, :, :, 0],
                    pred_50=pred_50[0, :, :, 0],
                    pred_80=pred_80[0, :, :, 0],
                    i=i + 1)

        # Testing Finished.
        timer += time.time()
        print("Time elapsed: {} s\n".format(timer))

        # =========
        #  Results
        # =========
        # Calculates Metrics
        if args.eval_tool:
            if data.test_depth_filenames:
                print(
                    "[Test/Metrics] Calculating Metrics based on Test Predictions..."
                )

                print('args.test_split:', args.test_split)
                print('args.test_file_path:', args.test_file_path)
                print('dataset_path:', data.dataset.dataset_path)
                print()

                # Invokes Evaluation Tools
                if args.eval_tool == 'monodepth':
                    pred_depths, gt_depths = metrics.generate_depth_maps(
                        pred_list, gt_list, data.dataset.dataset_path)
                    metrics.evaluation_tool_monodepth(pred_depths, gt_depths)
                elif args.eval_tool == 'kitti_depth':
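                    # Presumably consumes the uint16 PNGs written to the settings.output_tmp_* dirs above.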
                    metrics.evaluation_tool_kitti_depth(num_test_images)
            else:
                print(
                    "[Test/Metrics] It's not possible to calculate metrics. There are no corresponding labels for generated predictions!"
                )
        else:
            print("[Test/Metrics] Metrics calculation wasn't requested!")