Code example #1
 def test_benchmark_text(self):
     self.benchmark_setUp()
     calibrator = Calibrator(Text('Text'), Image('all_shapes'))
     for calibration, random_starts in [(False, 0), (False, 1), (True, 0), (True, 1)]:
         finder = TextFinder()
         # the text backend has too many benchmarking combinations so let's restrict here
         finder.algorithms["threshold_filters2"] = ("adaptive",)
         finder.algorithms["threshold_filters3"] = ("adaptive",)
         finder.algorithms["threshold_filters2"] = ("adaptive",)
         finder.algorithms["threshold_filters3"] = ("adaptive",)
         # also get rid of these since they are not implemented anyway
         finder.algorithms["text_detectors"] = list(finder.algorithms["text_detectors"])
         finder.algorithms["text_detectors"].remove("components")
         import cv2
         # TODO: deprecate OpenCV 3.X versions after time
         if cv2.__version__.startswith("3."):
             finder.algorithms["text_detectors"].remove("east")
         finder.algorithms["text_recognizers"] = list(finder.algorithms["text_recognizers"])
         finder.algorithms["text_recognizers"].remove("beamSearch")
         # one tesseract backend is enough for the unit test
         finder.algorithms["text_recognizers"].remove("tesseract")
         finder.algorithms["text_recognizers"].remove("pytesseract")
         results = calibrator.benchmark(finder, calibration=calibration, random_starts=random_starts)
         # pprint.pprint(results)
         self.assertGreater(len(results), 0, "There should be at least one benchmarked method")
         for result in results:
             self.assertIn("mixed", result[0], "Incorrect backend names for case '%s' %s %s" % result)
             self.assertGreaterEqual(result[1], 0.0, "Incorrect similarity for case '%s' %s %s" % result)
             self.assertLessEqual(result[1], 1.0, "Incorrect similarity for case '%s' %s %s" % result)
             self.assertGreater(result[2], 0.0, "Strictly positive time is required to run case '%s' %s %s" % result)
Code example #2
 def calibration_setUp(self, needle, haystack, calibrate_backends):
     # use a single finder type for these tests
     finder = FeatureFinder()
     for category in calibrate_backends:
         finder.can_calibrate(category, True)
     calibrator = Calibrator(Image(needle), Image(haystack))
     return calibrator.calibrate(finder)
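A usage sketch for this helper, reusing the n_ibs image from the other tests and enabling a single calibration category (the "fdetect" category name is only taken from the commented configuration in code example #8, so treat it as an assumption):

similarity = self.calibration_setUp('n_ibs', 'n_ibs', ["fdetect"])
# the returned value is the similarity reached after calibration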
Code example #3
 def test_benchmark_cascade(self):
     self.benchmark_setUp()
     calibrator = Calibrator(Pattern('shape_blue_circle.xml'), Image('all_shapes'))
     for calibration, random_starts in [(False, 0), (False, 1), (True, 0), (True, 1)]:
         results = calibrator.benchmark(CascadeFinder(), calibration=calibration, random_starts=random_starts)
         # pprint.pprint(results)
         self.assertGreater(len(results), 0, "There should be at least one benchmarked method")
         for result in results:
             self.assertEqual(result[0], "", "Incorrect backend names for case '%s' %s %s" % result)
             # similarity is not available in the cascade backend
             self.assertEqual(result[1], 0.0, "Incorrect similarity for case '%s' %s %s" % result)
             self.assertGreater(result[2], 0.0, "Strictly positive time is required to run case '%s' %s %s" % result)
Code example #4
 def test_benchmark_feature(self):
     self.benchmark_setUp()
     calibrator = Calibrator(Image('n_ibs'), Image('n_ibs'))
     for calibration, random_starts in [(False, 0), (False, 1), (True, 0), (True, 1)]:
         results = calibrator.benchmark(FeatureFinder(), calibration=calibration, random_starts=random_starts)
         # pprint.pprint(results)
         self.assertGreater(len(results), 0, "There should be at least one benchmarked method")
         for result in results:
             self.assertIn("mixed", result[0], "Incorrect backend names for case '%s' %s %s" % result)
             self.assertGreaterEqual(result[1], 0.0, "Incorrect similarity for case '%s' %s %s" % result)
             self.assertLessEqual(result[1], 1.0, "Incorrect similarity for case '%s' %s %s" % result)
             self.assertGreater(result[2], 0.0, "Strictly positive time is required to run case '%s' %s %s" % result)
Code example #5
 def test_benchmark_template(self):
     self.benchmark_setUp()
     calibrator = Calibrator(Image('shape_blue_circle'), Image('all_shapes'))
     for calibration, random_starts in [(False, 0), (False, 1), (True, 0), (True, 1)]:
         results = calibrator.benchmark(TemplateFinder(), calibration=calibration, random_starts=random_starts)
         # pprint.pprint(results)
         self.assertGreater(len(results), 0, "There should be at least one benchmarked method")
         for result in results:
             # only normed backends are supported
             self.assertIn("_normed", result[0], "Incorrect backend names for case '%s' %s %s" % result)
             self.assertEqual(result[1], 1.0, "Incorrect similarity for case '%s' %s %s" % result)
             self.assertGreater(result[2], 0.0, "Strictly positive time is required to run case '%s' %s %s" % result)
Code example #6
 def test_benchmark_contour(self):
     self.benchmark_setUp()
     # matching all shapes will require a modification of the minArea parameter
     calibrator = Calibrator(Image('shape_blue_circle'), Image('shape_blue_circle'))
     for calibration, random_starts in [(False, 0), (False, 1), (True, 0), (True, 1)]:
         results = calibrator.benchmark(ContourFinder(), calibration=calibration, random_starts=random_starts)
         # pprint.pprint(results)
         self.assertGreater(len(results), 0, "There should be at least one benchmarked method")
         for result in results:
             self.assertIn("mixed", result[0], "Incorrect backend names for case '%s' %s %s" % result)
             self.assertEqual(result[1], 1.0, "Incorrect similarity for case '%s' %s %s" % result)
             self.assertGreater(result[2], 0.0, "Strictly positive time is required to run case '%s' %s %s" % result)
Code example #7
 def test_benchmark_deep(self):
     self.benchmark_setUp()
     calibrator = Calibrator(Pattern('cat'), Image('coco_cat'))
     for calibration, random_starts in [(False, 0), (False, 1), (True, 0), (True, 1)]:
         finder = DeepFinder()
         # get rid of backends that are not implemented anyway
         finder.algorithms["deep_learners"] = list(finder.algorithms["deep_learners"])
         finder.algorithms["deep_learners"].remove("tensorflow")
         results = calibrator.benchmark(finder, calibration=calibration, random_starts=random_starts)
         # pprint.pprint(results)
         self.assertGreater(len(results), 0, "There should be at least one benchmarked method")
         for result in results:
             self.assertEqual("pytorch", result[0], "Incorrect backend names for case '%s' %s %s" % result)
             # TODO: the needle is found but with very low similarity - possibly due to different haystack size
             #self.assertEqual(result[1], 1.0, "Incorrect similarity for case '%s' %s %s" % result)
             self.assertGreater(result[2], 0.0, "Strictly positive time is required to run case '%s' %s %s" % result)
Code example #8
# non-default initial conditions for the calibration of various finder types
#finder.configure(text_detector="contours")
#finder.configure_backend(backend="sqdiff_normed", category="template")
#finder.params["find"]["similarity"].value = 0.7
#finder.params["tempfeat"]["front_similarity"].value = 0.5
#finder.params["feature"]["ransacReprojThreshold"].value = 25.0
#finder.params["fdetect"]["MaxFeatures"].value = 10
#finder.params["text"]["datapath"].value = "../../misc"
#finder.params["ocr"]["oem"].value = 0
#finder.params["tdetect"]["verticalVariance"].value = 5
#finder.params["threshold"]["blockSize"].value = 3
# synchronize at this stage to take into account all configuration
finder.synchronize()

# Main steps: calibration, searching, and benchmarking
calibrator = Calibrator(NEEDLE, HAYSTACK)
# uncomment this to use a list of needles and haystacks instead
#calibrator = Calibrator(config="pairs.list")
calibrator.run = calibrator.run_performance
# uncomment these for alternative run functions
#calibrator.run = calibrator.run_default
#calibrator.run = calibrator.run_peak
similarity_before = calibrator.calibrate(finder, max_attempts=1)
# categories to calibrate
for category in ENABLED:
    finder.can_calibrate(category, True)
# example of allowing only a single parameter to be calibrated:
#finder.params["threshold2"]["blockSize"].fixed = False
similarity_after = calibrator.calibrate(finder,
                                        max_attempts=MAX_ATTEMPTS,
                                        max_exec_time=MAX_EXEC_TIME)
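
The "Main steps" comment above also mentions benchmarking, which the snippet stops short of. A minimal sketch of that step, reusing the benchmark() call from the unit tests above (the results are the (backend names, similarity, time) triples those tests check; the exact print formatting is an assumption):

results = calibrator.benchmark(finder, calibration=True, random_starts=1)
for backend, similarity, duration in results:
    # each entry is a (backend names, similarity, execution time) triple
    print("%s: similarity=%.2f, time=%.2fs" % (backend, similarity, duration))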
Code example #9
 def list_setUp(self):
     with open("pairs.list", "w") as f:
         f.write("n_ibs" + " " + "h_ibs_viewport" + " max" + "\n")
         f.write("n_ibs" + " " + "h_ibs_rotated" + " max" + "\n")
         f.write("n_ibs" + " " + "h_ibs_scaled" + " max" + "\n")
     return Calibrator(config="pairs.list")