import json
import os
import sys


def run_export_test(self, config_path, snapshot, metrics=('bbox', ), thr=0.025):
    print('\n\ntesting export ' + config_path, file=sys.stderr)
    name = config_path.replace('configs/', '')[:-3]
    test_dir = f'/tmp/{name}'
    export_dir = os.path.join(test_dir, 'export')
    log_file = os.path.join(export_dir, 'test_export.log')
    os.makedirs(export_dir, exist_ok=True)
    target_config_path = os.path.join(export_dir, os.path.basename(config_path))
    os.system(f'cp {config_path} {target_config_path}')
    # Point the config at the local COCO data and disable aspect-ratio
    # preservation so that results are comparable with the references.
    assert replace_text_in_file(target_config_path, "data_root = 'data/coco/'",
                                f"data_root = '{self.coco_dir}/'")
    replace_text_in_file(target_config_path, "keep_ratio=True", "keep_ratio=False")
    metrics = ' '.join(metrics)
    # The OpenVINO environment script is sourced (`.`) rather than executed,
    # so its environment variables apply to the commands that follow;
    # os.system runs through /bin/sh, where `source` may be unavailable.
    os.system(
        f'. /opt/intel/openvino/bin/setupvars.sh;'
        f'python tools/export.py '
        f'{target_config_path} '
        f'{snapshot} '
        f'{export_dir} '
        f'openvino ;'
        f'python tools/test_exported.py '
        f'{target_config_path} '
        f'{os.path.join(export_dir, os.path.basename(name) + ".xml")} '
        f'--out res.pkl --eval {metrics} 2>&1 | tee {log_file}')
    ap = collect_ap(log_file)
    with open(f'tests/expected_outputs/public/{name}.json') as read_file:
        content = json.load(read_file)
    reference_ap = content['map']
    # Exported models may lose a little accuracy; each metric may fall at
    # most `thr` below its reference value.
    for expected, actual in zip(reference_ap, ap):
        self.assertLessEqual(expected - thr, actual)
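# A minimal sketch of the `replace_text_in_file` helper assumed above; the
# project's real helper may differ. It returns True only when the pattern is
# found, which is what lets the `assert` above fail fast if a config no
# longer contains the expected `data_root` line.
def replace_text_in_file(path, old_text, new_text):
    with open(path) as f:
        content = f.read()
    if old_text not in content:
        return False
    with open(path, 'w') as f:
        f.write(content.replace(old_text, new_text))
    return True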
def run_test(self, config_path, snapshot, metrics=('bbox', )):
    print('\n\ntesting ' + config_path, file=sys.stderr)
    name = config_path.replace('configs/', '')[:-3]
    test_dir = f'/tmp/{name}'
    log_file = os.path.join(test_dir, 'log.txt')
    os.makedirs(test_dir, exist_ok=True)
    target_config_path = os.path.join(test_dir, 'config.py')
    os.system(f'cp {config_path} {target_config_path}')
    assert replace_text_in_file(target_config_path, "data_root = 'data/coco/'",
                                f"data_root = '{self.coco_dir}/'")
    replace_text_in_file(target_config_path, "keep_ratio=True", "keep_ratio=False")
    metrics = ' '.join(metrics)
    os.system(f'python tools/test.py '
              f'{target_config_path} '
              f'{snapshot} '
              f'--out {test_dir}/res.pkl --eval {metrics} 2>&1 | tee {log_file}')
    ap = collect_ap(log_file)
    with open(f'tests/expected_outputs/public/{name}.json') as read_file:
        content = json.load(read_file)
    reference_ap = content['map']
    # Unlike the export test, this path expects exact reproduction of the
    # reference AP values.
    self.assertListEqual(reference_ap, ap)
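# A plausible sketch of the `collect_ap` log parser these tests rely on; it
# assumes the log contains pycocotools summary lines of the form
# 'Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.367'
# and collects every such AP value in order of appearance. The real project
# helper may parse the log differently.
def collect_ap(log_path):
    prefix = ('Average Precision  (AP) '
              '@[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = ')
    ap_values = []
    with open(log_path) as f:
        for line in f:
            line = line.strip()
            if line.startswith(prefix):
                ap_values.append(float(line[len(prefix):]))
    return ap_values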
def test_fine_tuning(self):
    log_file = os.path.join(self.work_dir, 'test_fine_tuning.log')
    os.system(
        f'../../external/mmdetection/tools/dist_train.sh {self.configuration_file} 1 --validate 2>&1 |'
        f' tee {log_file}')
    ap = collect_ap(log_file)
    # The log is expected to contain five AP values (one per validation
    # run), and fine-tuning must improve AP between the first and the last.
    self.assertEqual(len(ap), 5)
    self.assertLess(ap[0], ap[-1])
def postrun(self, log_file, expected_output_file, metrics, thr):
    print('expected outputs', expected_output_file)
    ap = collect_ap(log_file)
    with open(expected_output_file) as read_file:
        content = json.load(read_file)
    reference_ap = content['map']
    print(f'expected {reference_ap} vs actual {ap}')
    for expected, actual, m in zip(reference_ap, ap, metrics):
        if expected - thr > actual:
            raise AssertionError(
                f'{m}: {expected} (expected) - {thr} (threshold) > {actual}')
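# A worked example of the tolerance rule in `postrun` above; the numbers are
# illustrative, not reference values from the repository. With thr=0.025, an
# actual AP may fall at most 0.025 below its expected value:
#   expected 0.367, actual 0.345 -> 0.367 - 0.025 = 0.342 <= 0.345, passes
#   expected 0.367, actual 0.341 -> raises
#   'bbox: 0.367 (expected) - 0.025 (threshold) > 0.341'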
def test_quality_metrics(self):
    log_file = os.path.join(self.work_dir, 'test_quality_metrics.log')
    os.system(
        f'python ../../external/mmdetection/tools/test.py '
        f'{self.configuration_file} '
        f'{os.path.join(self.work_dir, self.snapshot_name)} '
        f'--out res.pkl --eval bbox 2>&1 | tee {log_file}')
    ap = collect_ap(log_file)
    with open(f'tests/expected_outputs/{self.model_name}.json') as read_file:
        content = json.load(read_file)
    self.assertEqual(content['map'], ap[0])
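# A hypothetical sketch of how these helpers might be wired into a concrete
# unittest case; the class name, `coco_dir` value, config path, and snapshot
# path below are illustrative assumptions, not part of the repository.
import unittest


class PublicModelsTestCase(unittest.TestCase):
    coco_dir = '/data/coco'

    def test_ssd300_coco(self):
        run_test(self, 'configs/ssd300_coco.py', '/snapshots/ssd300_coco.pth')


if __name__ == '__main__':
    unittest.main()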