Example #1
 def test_benchmark_text(self):
     self.benchmark_setUp()
     calibrator = Calibrator(Text('Text'), Image('all_shapes'))
     for calibration, random_starts in [(False, 0), (False, 1), (True, 0), (True, 1)]:
         finder = TextFinder()
         # the text backend has too many benchmarking combinations, so restrict them here
         finder.algorithms["threshold_filters2"] = ("adaptive",)
         finder.algorithms["threshold_filters3"] = ("adaptive",)
         finder.algorithms["threshold_filters2"] = ("adaptive",)
         finder.algorithms["threshold_filters3"] = ("adaptive",)
         # also get rid of these since they are not implemented anyway
         finder.algorithms["text_detectors"] = list(finder.algorithms["text_detectors"])
         finder.algorithms["text_detectors"].remove("components")
         import cv2
         # TODO: deprecate OpenCV 3.X versions after some time
         if cv2.__version__.startswith("3."):
             finder.algorithms["text_detectors"].remove("east")
         finder.algorithms["text_recognizers"] = list(finder.algorithms["text_recognizers"])
         finder.algorithms["text_recognizers"].remove("beamSearch")
         # one tesseract backend is enough for the unit test
         finder.algorithms["text_recognizers"].remove("tesseract")
         finder.algorithms["text_recognizers"].remove("pytesseract")
         results = calibrator.benchmark(finder, calibration=calibration, random_starts=random_starts)
         # pprint.pprint(results)
         self.assertGreater(len(results), 0, "There should be at least one benchmarked method")
         for result in results:
             self.assertIn("mixed", result[0], "Incorrect backend names for case '%s' %s %s" % result)
             self.assertGreaterEqual(result[1], 0.0, "Incorrect similarity for case '%s' %s %s" % result)
             self.assertLessEqual(result[1], 1.0, "Incorrect similarity for case '%s' %s %s" % result)
             self.assertGreater(result[2], 0.0, "Strictly positive time is required to run case '%s' %s %s" % result)
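The snippets in these examples come from a larger test class and rely on its module-level imports. A minimal sketch of the imports needed to run them standalone, assuming guibot's usual module layout (the exact import paths are an assumption and are not shown in the snippets themselves):

# Assumed imports for the test snippets above and below (module paths are an
# educated guess at guibot's package layout; adjust to the actual structure).
from guibot.calibrator import Calibrator
from guibot.target import Image, Text, Pattern
from guibot.finder import (TextFinder, CascadeFinder, FeatureFinder,
                           TemplateFinder, ContourFinder, DeepFinder)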
Example #2
 def test_benchmark_cascade(self):
     self.benchmark_setUp()
     calibrator = Calibrator(Pattern('shape_blue_circle.xml'), Image('all_shapes'))
     for calibration, random_starts in [(False, 0), (False, 1), (True, 0), (True, 1)]:
         results = calibrator.benchmark(CascadeFinder(), calibration=calibration, random_starts=random_starts)
         # pprint.pprint(results)
         self.assertGreater(len(results), 0, "There should be at least one benchmarked method")
         for result in results:
             self.assertEqual(result[0], "", "Incorrect backend names for case '%s' %s %s" % result)
             # similarity is not available in the cascade backend
             self.assertEqual(result[1], 0.0, "Incorrect similarity for case '%s' %s %s" % result)
             self.assertGreater(result[2], 0.0, "Strictly positive time is required to run case '%s' %s %s" % result)
Example #3
 def test_benchmark_feature(self):
     self.benchmark_setUp()
     calibrator = Calibrator(Image('n_ibs'), Image('n_ibs'))
     for calibration, random_starts in [(False, 0), (False, 1), (True, 0), (True, 1)]:
         results = calibrator.benchmark(FeatureFinder(), calibration=calibration, random_starts=random_starts)
         # pprint.pprint(results)
         self.assertGreater(len(results), 0, "There should be at least one benchmarked method")
         for result in results:
             self.assertIn("mixed", result[0], "Incorrect backend names for case '%s' %s %s" % result)
             self.assertGreaterEqual(result[1], 0.0, "Incorrect similarity for case '%s' %s %s" % result)
             self.assertLessEqual(result[1], 1.0, "Incorrect similarity for case '%s' %s %s" % result)
             self.assertGreater(result[2], 0.0, "Strictly positive time is required to run case '%s' %s %s" % result)
Example #4
 def test_benchmark_template(self):
     self.benchmark_setUp()
     calibrator = Calibrator(Image('shape_blue_circle'), Image('all_shapes'))
     for calibration, random_starts in [(False, 0), (False, 1), (True, 0), (True, 1)]:
         results = calibrator.benchmark(TemplateFinder(), calibration=calibration, random_starts=random_starts)
         # pprint.pprint(results)
         self.assertGreater(len(results), 0, "There should be at least one benchmarked method")
         for result in results:
             # only normed backends are supported
             self.assertIn("_normed", result[0], "Incorrect backend names for case '%s' %s %s" % result)
             self.assertEqual(result[1], 1.0, "Incorrect similarity for case '%s' %s %s" % result)
             self.assertGreater(result[2], 0.0, "Strictly positive time is required to run case '%s' %s %s" % result)
Example #5
 def test_benchmark_contour(self):
     self.benchmark_setUp()
     # matching all shapes will require a modification of the minArea parameter
     calibrator = Calibrator(Image('shape_blue_circle'), Image('shape_blue_circle'))
     for calibration, random_starts in [(False, 0), (False, 1), (True, 0), (True, 1)]:
         results = calibrator.benchmark(ContourFinder(), calibration=calibration, random_starts=random_starts)
         # pprint.pprint(results)
         self.assertGreater(len(results), 0, "There should be at least one benchmarked method")
         for result in results:
             self.assertIn("mixed", result[0], "Incorrect backend names for case '%s' %s %s" % result)
             self.assertEqual(result[1], 1.0, "Incorrect similarity for case '%s' %s %s" % result)
             self.assertGreater(result[2], 0.0, "Strictly positive time is required to run case '%s' %s %s" % result)
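The minArea comment in this example hints at tuning individual finder parameters before benchmarking. A minimal sketch of such a tweak, reusing the finder.params access pattern from Example #7 below; the "contour" category name and the .value attribute are assumptions here, only .fixed appears in the original code:

# Hypothetical parameter path (category/key names assumed); this presumes the
# finder's backend is already configured so that finder.params is populated.
finder.params["contour"]["minArea"].value = 100   # .value is an assumed attribute
finder.params["contour"]["minArea"].fixed = True  # .fixed as used in Example #7
results = calibrator.benchmark(finder, calibration=True, random_starts=1)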
Example #6
 def test_benchmark_deep(self):
     self.benchmark_setUp()
     calibrator = Calibrator(Pattern('cat'), Image('coco_cat'))
     for calibration, random_starts in [(False, 0), (False, 1), (True, 0), (True, 1)]:
         finder = DeepFinder()
         # get rid of backends that are not implemented anyway
         finder.algorithms["deep_learners"] = list(finder.algorithms["deep_learners"])
         finder.algorithms["deep_learners"].remove("tensorflow")
         results = calibrator.benchmark(finder, calibration=calibration, random_starts=random_starts)
         # pprint.pprint(results)
         self.assertGreater(len(results), 0, "There should be at least one benchmarked method")
         for result in results:
             self.assertEqual("pytorch", result[0], "Incorrect backend names for case '%s' %s %s" % result)
             # TODO: the needle is found but with very low similarity - possibly due to different haystack size
             #self.assertEqual(result[1], 1.0, "Incorrect similarity for case '%s' %s %s" % result)
             self.assertGreater(result[2], 0.0, "Strictly positive time is required to run case '%s' %s %s" % result)
Example #7
#finder.params["threshold2"]["blockSize"].fixed = False
similarity_after = calibrator.calibrate(finder,
                                        max_attempts=MAX_ATTEMPTS,
                                        max_exec_time=MAX_EXEC_TIME)
logging.info("Similarity before and after calibration: %s -> %s",
             similarity_before, similarity_after)
similarity_global = calibrator.search(finder,
                                      random_starts=RANDOM_STARTS,
                                      uniform=UNIFORM_DRAW,
                                      calibration=CALIBRATED_SEARCH,
                                      max_attempts=MAX_ATTEMPTS,
                                      max_exec_time=MAX_EXEC_TIME)
logging.info("Similarity after search (Monte Carlo calibration): %s -> %s",
             similarity_before, similarity_global)
logging.info("Best found parameters:\n%s", pprint.pformat(finder.params))
results = calibrator.benchmark(finder,
                               random_starts=BENCHMARK_RANDOM_STARTS,
                               uniform=UNIFORM_DRAW,
                               calibration=CALIBRATED_BENCHMARK,
                               max_attempts=MAX_ATTEMPTS,
                               max_exec_time=MAX_EXEC_TIME)
logging.info("Benchmarking results (method, similarity, location, time):\n%s",
             pprint.pformat(results))
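Assuming the (method, similarity, ...) ordering of each result tuple, which both the unit tests above and the log line here rely on, the benchmark output can be reduced to a single best candidate with a short post-processing step, for example:

# Pick the benchmarked configuration with the highest similarity (sketch only;
# indices 0 and 1 are the method name and similarity in the tuples logged above).
best = max(results, key=lambda result: result[1])
logging.info("Best benchmarked method: %s with similarity %s", best[0], best[1])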

# Final cleanup steps
if REMOVE_LOGPATH:
    shutil.rmtree(LOGPATH)
GlobalConfig.image_logging_level = logging.ERROR
GlobalConfig.image_logging_destination = "./imglog"
GlobalConfig.image_logging_step_width = 3