Code example #1
File: test_benchmark.py  Project: Borda/BIRL
 def test_fail_visual(self):
     """ visualisation helpers shall return None for invalid inputs """
     fig = ImRegBenchmark._visual_image_move_warp_lnds_move_warp(
         {ImRegBenchmark.COL_POINTS_MOVE_WARP: 'abc'})
     self.assertIsNone(fig)
     fig = ImRegBenchmark._visual_image_move_warp_lnds_ref_warp(
         {ImRegBenchmark.COL_POINTS_REF_WARP: 'abc'})
     self.assertIsNone(fig)
     fig = ImRegBenchmark.visualise_registration((0, {}))
     self.assertIsNone(fig)
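
All of the snippets on this page are method bodies lifted out of a test class, so they are not runnable on their own. A minimal sketch of the scaffolding they assume follows; the import path, class name, and helper stub are assumptions based on the Borda/BIRL layout, not copied from the repository:

import unittest

# assumed import path for the benchmark class used in all four examples
from birl.benchmark import ImRegBenchmark


class TestBmRegistration(unittest.TestCase):
    """Hypothetical container for the example methods on this page."""

    path_out = 'output'  # placeholder; the real tests set this up themselves

    def _remove_default_experiment(self, bm_name):
        # helper referenced by the examples; its body is not shown in the
        # excerpts, so only a stub appears here
        pass

    # check_benchmark_results and the PATH_* constants are likewise
    # defined elsewhere in the real test module


if __name__ == '__main__':
    unittest.main()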
Code example #2
File: test_benchmark.py  Project: Borda/BIRL
 def test_benchmark_simple(self):
     """ test run in sequence (1 thread) """
     self._remove_default_experiment(ImRegBenchmark.__name__)
     params = {
         'path_table': PATH_CSV_COVER_ANHIR,
         'path_dataset': PATH_DATA,
         'path_out': self.path_out,
         'preprocessing': ['matching-hsv', 'gray'],
         'nb_workers': 1,
         'visual': True,
         'unique': False,
     }
     benchmark = ImRegBenchmark(params)
     benchmark.run()
     self.check_benchmark_results(benchmark, final_means=[0., 0.], final_stds=[0., 0.])
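
Outside the test harness, the same parameter dictionary drives a benchmark run directly. A minimal sketch, assuming the import path from the scaffolding above and with every path replaced by a hypothetical placeholder:

from birl.benchmark import ImRegBenchmark  # assumed import path

params = {
    'path_table': 'data/pairs-imgs-lnds.csv',   # placeholder: CSV listing image/landmark pairs
    'path_dataset': 'data/',                    # placeholder: root folder of the images
    'path_out': 'output/',                      # experiment folders are created here
    'preprocessing': ['matching-hsv', 'gray'],  # preprocessing steps, as in the test above
    'nb_workers': 1,                            # sequential run (single thread)
    'visual': True,                             # export registration visualisations
    'unique': False,                            # reuse a stable experiment folder name
}
benchmark = ImRegBenchmark(params)
benchmark.run()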
Code example #3
 def test_benchmark_failing(self):
     """ test run in parallel with failing experiment """
     params = {
         'path_table': PATH_CSV_COVER_MIX,
         'path_dataset': PATH_DATA,
         'path_out': self.path_out,
         'preprocessing': 'nothing',
         'nb_workers': 4,
         'visual': True,
         'unique': True,
     }
     benchmark = ImRegBenchmark(params)
     benchmark.run()
     # no landmarks were copied and no experiment results were produced
     list_csv = [len([csv for csv in files if os.path.splitext(csv)[1] == '.csv'])
                 for _, _, files in os.walk(benchmark.params['path_exp'])]
     self.assertEqual(sum(list_csv), 0)
     del benchmark
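
The os.walk pattern at the end of this test, which counts CSV files anywhere under the experiment folder, is useful on its own; a standalone version:

import os

def count_files_with_ext(root, ext='.csv'):
    """Count files with the given extension anywhere under ``root``."""
    return sum(
        sum(1 for name in files if os.path.splitext(name)[1] == ext)
        for _, _, files in os.walk(root)
    )

# e.g. count_files_with_ext(benchmark.params['path_exp'])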
Code example #4
File: test_benchmark.py  Project: Borda/BIRL
 def test_benchmark_parallel(self):
     """ test run in parallel (2 threads) """
     self._remove_default_experiment(ImRegBenchmark.__name__)
     params = {
         'path_table': PATH_CSV_COVER_MIX,
         'path_out': self.path_out,
         'preprocessing': ['gray', 'matching-rgb'],
         'nb_workers': 2,
         'visual': True,
         'unique': False,
     }
     benchmark = ImRegBenchmark(params)
     # first run: a complete experiment from scratch
     benchmark.run()
     # rerun to simulate resuming an unfinished benchmark
     benchmark.run()
     self.check_benchmark_results(benchmark, final_means=[0., 0., 0., 0., 0.], final_stds=[0., 0., 0., 0., 0.])
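
The back-to-back run() calls only make sense with 'unique': False: the experiment folder name stays stable, so the second run resumes in the folder the first one created instead of starting fresh. A hedged sketch of that folder logic; the naming scheme below is an illustration, not BIRL's actual implementation:

import os
from datetime import datetime

def experiment_folder(path_out, name, unique):
    # With unique=True each run gets a fresh time-stamped folder;
    # with unique=False the name is stable, so a repeated run() can
    # pick up where the previous one left off.
    if unique:
        name = '%s_%s' % (name, datetime.now().strftime('%Y%m%d-%H%M%S'))
    path_exp = os.path.join(path_out, name)
    os.makedirs(path_exp, exist_ok=True)
    return path_exp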