def export_test(self, alt_ssd_export, thr):
    """Evaluate an exported OpenVINO model and check its AP against expectations.

    Runs eval.py (inside the template's venv) on the model.bin found either in
    the plain "export" folder or in "export/alt_ssd_export", parses the
    produced metrics.yaml, and asserts the measured AP is not worse than the
    stored expected mAP minus ``thr``.

    :param alt_ssd_export: if True, evaluate the alternative SSD export output.
    :param thr: allowed AP drop relative to the expected value.
    """
    if alt_ssd_export:
        export_dir = os.path.join(self.template_folder, "export", "alt_ssd_export")
    else:
        export_dir = os.path.join(self.template_folder, "export")

    metrics_path = os.path.join(export_dir, "metrics.yaml")
    # Consistency fix: use the class attributes set in setUpClass instead of
    # the module-level ann_file/img_root names (same values, clearer source).
    run_through_shell(
        f'cd {os.path.dirname(self.template_file)};'
        f'. {self.venv_activate_path};'
        f'python eval.py'
        f' --test-ann-files {self.ann_file}'
        f' --test-data-roots {self.img_root}'
        f' --load-weights {os.path.join(export_dir, "model.bin")}'
        f' --save-metrics-to {metrics_path}'
    )

    with open(metrics_path) as read_file:
        # safe_load is the idiomatic spelling of load(..., SafeLoader), and is
        # what the sibling NNCF tests in this file already use.
        metrics = yaml.safe_load(read_file)
    # next() fails loudly if the 'ap' metric is missing, instead of the
    # opaque IndexError that [...][0] would raise.
    ap = next(metric['value'] for metric in metrics['metrics']
              if metric['key'] == 'ap')

    expected_path = (f'{os.path.dirname(__file__)}/../expected_outputs/'
                     f'{problem_name}/{model_name}.json')
    with open(expected_path) as read_file:
        expected = json.load(read_file)
    # NOTE(review): unlike test_evaluation_on_gpu, ap is NOT divided by 100
    # here — presumably eval.py already reports it on the same scale as
    # 'map'; confirm against the eval.py output format.
    self.assertGreater(ap, expected['map'] - thr)
def test_fine_tuning(self):
    """Run distributed fine-tuning on one GPU and expect five logged AP values."""
    log_path = os.path.join(self.work_dir, 'test_fine_tuning.log')
    command = (
        f'../../external/mmdetection/tools/dist_train.sh {self.configuration_file} 1 2>&1 |'
        f' tee {log_path}')
    run_through_shell(command)
    average_precisions = collect_ap(log_path)
    self.assertEqual(len(average_precisions), 5)
def setUp(self):
    """Prepare a working copy of the model config, patched for a short fine-tuning run."""
    self.model_name = model_name
    self.data_folder = '../../data'
    self.work_dir = os.path.join('/tmp/', self.model_name)
    os.makedirs(self.work_dir, exist_ok=True)

    src_config = f'./person-vehicle-bike-detection/{self.model_name}/config.py'
    self.configuration_file = src_config
    run_through_shell(f'cp {self.configuration_file} {self.work_dir}/')
    # From here on, work on the copy — never patch the original template config.
    self.configuration_file = os.path.join(self.work_dir,
                                           os.path.basename(src_config))

    self.ote_url = 'https://download.01.org/opencv/openvino_training_extensions'
    self.url = f'{self.ote_url}/models/object_detection/v2/{snapshot_name}'
    download_if_not_yet(self.work_dir, self.url)

    # Patch the copied config: batch size 1, five extra epochs, a dedicated
    # output folder, train annotations reused for validation, and resume
    # from the downloaded snapshot.
    patches = [
        ('samples_per_gpu=', 'samples_per_gpu=1 ,#'),
        ('total_epochs = 20', 'total_epochs = 25'),
        ('work_dir =', f'work_dir = "{os.path.join(self.work_dir, "outputs")}" #'),
        ('annotation_example_val.json', 'annotation_example_train.json'),
        ("data_root + 'val'", "data_root + 'train'"),
        ('resume_from = None',
         f'resume_from = "{os.path.join(self.work_dir, snapshot_name)}"'),
    ]
    for old_text, new_text in patches:
        assert replace_text_in_file(self.configuration_file, old_text, new_text)
def copy_template_folder(src_template_folder, template_folder):
    """Copy a model template folder to a test-specific destination.

    Warns when the destination already exists, since leftovers from a
    previous run may cause side effects between tests; then copies the tree
    with ``cp -a`` and asserts the destination now exists.

    :param src_template_folder: source template directory.
    :param template_folder: destination directory for this test case.
    """
    logging.info(f'Copying {src_template_folder} to {template_folder}')
    if os.path.isdir(template_folder):
        # Blank line in the log to make the warning stand out.
        logging.warning('')
        # Fix: these messages had f-string prefixes with no placeholders.
        logging.warning(
            'ATTENTION: the folder that should be created for this test case exists!'
        )
        logging.warning(' It may cause side effects between tests!')
        logging.warning(f'The folder is `{template_folder}`.\n')
    run_through_shell(
        f'cp -a "{src_template_folder}" "{template_folder}"')
    assert os.path.isdir(
        template_folder), f'Cannot create {template_folder}'
def setUpClass(cls):
    """Download the model snapshot and export it to OpenVINO format once per class."""
    cls.template_file = f'model_templates/{problem_name}/{model_name}/template.yaml'
    cls.work_dir = tempfile.mkdtemp()
    cls.dependencies = get_dependencies(cls.template_file)
    download_if_not_yet(cls.work_dir, cls.dependencies['snapshot'])
    cls.test_export_thr = 0.031
    snapshot_path = os.path.join(
        cls.work_dir, os.path.basename(cls.dependencies["snapshot"]))
    run_through_shell(
        f'cd {os.path.dirname(cls.template_file)};'
        # Fix: setupvars.sh must be *sourced* ('.'); executing it runs in a
        # child shell and its environment variables never reach the export
        # command that follows.
        f'. /opt/intel/openvino/bin/setupvars.sh;'
        f'python {cls.dependencies["export"]}'
        f' --load-weights {snapshot_path}'
        f' --save-model-to {os.path.join(cls.work_dir, "export")}')
def test_quality_metrics(self):
    """Evaluate the snapshot with mmdetection's test.py and compare mAP to the stored value."""
    log_path = os.path.join(self.work_dir, 'test_quality_metrics.log')
    checkpoint = os.path.join(self.work_dir, self.model_name + ".pth")
    run_through_shell(
        f'python ../../external/mmdetection/tools/test.py '
        f'{self.configuration_file} '
        f'{checkpoint} '
        f'--out res.pkl --eval bbox 2>&1 | tee {log_path}')
    measured_ap = collect_ap(log_path)
    expected_path = (
        f'tests/expected_outputs/horizontal-text-detection/{self.model_name}.json')
    with open(expected_path) as read_file:
        expected = json.load(read_file)
    self.assertEqual(expected['map'], measured_ap[0])
def test_nncf_compress_on_gpu(self):
    """Run NNCF compression training on one GPU and check the final AP is positive."""
    log_path = os.path.join(self.template_folder, f'log__{self.id()}.txt')
    command = ''.join([
        f'cd {self.template_folder};',
        f'python compress.py',
        f' --train-ann-files {self.ann_file}',
        f' --train-data-roots {self.img_root}',
        f' --val-ann-files {self.ann_file}',
        f' --val-data-roots {self.img_root}',
        f' --load-weights snapshot.pth',
        f' --save-checkpoints-to {self.template_folder}/output_{self.id()}',
        f' --gpu-num 1',
        f' --batch-size 1',
        f' | tee {log_path}',
    ])
    run_through_shell(command)
    average_precisions = collect_ap(log_path)
    self.assertGreater(average_precisions[-1], 0)
def setUpClass(cls):
    """Resolve template paths, download the snapshot and install the requirements."""
    templates_root = os.environ['MODEL_TEMPLATES']
    cls.templates_folder = templates_root
    cls.template_folder = os.path.join(templates_root, 'object_detection',
                                       problem_name, model_name)
    cls.template_file = os.path.join(cls.template_folder, 'template.yaml')
    cls.ann_file = ann_file
    cls.img_root = img_root
    cls.dependencies = get_dependencies(cls.template_file)
    # Fine-tune for a couple of epochs beyond the template's schedule.
    cls.epochs_delta = 2
    cls.total_epochs = cls.epochs_delta + get_epochs(cls.template_file)
    download_snapshot_if_not_yet(cls.template_file, cls.template_folder)
    setup_cmd = (f'cd {cls.template_folder};'
                 f'pip install -r requirements.txt;')
    run_through_shell(setup_cmd)
def setUpClass(cls):
    """Create an isolated, parameter-patched copy of the template and prepare it for tests."""
    description = cls.generate_template_updates_description(template_update_dict)
    cls.template_updates_description = description
    logging.info(
        f'Begin setting up class for {problem_name}/{model_name}, {description}')

    templates_root = os.environ['MODEL_TEMPLATES']
    cls.templates_folder = templates_root
    cls.src_template_folder = os.path.join(templates_root, 'object_detection',
                                           problem_name, model_name)
    skip_non_instantiated_template_if_its_allowed(cls.src_template_folder,
                                                  problem_name, model_name)
    # Every parameter-update combination gets its own folder so test cases
    # cannot interfere with each other.
    cls.template_folder = cls.generate_template_folder_name(
        cls.src_template_folder, description)
    cls.copy_template_folder(cls.src_template_folder, cls.template_folder)

    cls.template_file = os.path.join(cls.template_folder, 'template.yaml')
    cls.apply_update_dict_params_to_template_file(cls.template_file,
                                                  template_update_dict,
                                                  compression_cfg_update_dict)
    cls.ann_file = ann_file
    cls.img_root = img_root
    cls.dependencies = get_dependencies(cls.template_file)
    # Note that such big threshold is required, since
    # we have very small dataset for training and evaluation:
    # if network compression causes other detections
    # on 2-4 images, the accuracy drop will be significant.
    cls.test_export_thr = 0.05

    download_snapshot_if_not_yet(cls.template_file, cls.template_folder)
    run_through_shell(f'cd {cls.template_folder};'
                      f'pip install -r requirements.txt;')
    logging.info(
        f'End setting up class for {problem_name}/{model_name}, {description}')
def test_finetuning_on_gpu(self):
    """Resume training from the snapshot for extra epochs; expect one AP entry per extra epoch."""
    log_path = os.path.join(self.template_folder, 'test_finetuning.log')
    command = ''.join([
        f'cd {self.template_folder};',
        f'python train.py',
        f' --train-ann-files {self.ann_file}',
        f' --train-data-roots {self.img_root}',
        f' --val-ann-files {self.ann_file}',
        f' --val-data-roots {self.img_root}',
        f' --resume-from snapshot.pth',
        f' --save-checkpoints-to {self.template_folder}',
        f' --gpu-num 1',
        f' --batch-size 1',
        f' --epochs {self.total_epochs}',
        f' | tee {log_path}',
    ])
    run_through_shell(command)
    average_precisions = collect_ap(log_path)
    self.assertEqual(len(average_precisions), self.epochs_delta)
    self.assertGreater(average_precisions[-1], 0)
def test_finetuning(self):
    """Resume training from the downloaded snapshot and check the logged AP values."""
    log_path = os.path.join(self.work_dir, 'test_finetuning.log')
    snapshot_path = os.path.join(
        self.work_dir, os.path.basename(self.dependencies["snapshot"]))
    command = ''.join([
        f'cd {os.path.dirname(self.template_file)};',
        f'python {self.dependencies["train"]}',
        f' --train-ann-files {self.ann_file}',
        f' --train-data-roots {self.img_root}',
        f' --val-ann-files {self.ann_file}',
        f' --val-data-roots {self.img_root}',
        f' --resume-from {snapshot_path}',
        f' --save-checkpoints-to {self.work_dir}',
        f' --gpu-num 1',
        f' --batch-size 1',
        f' --epochs {self.total_epochs}',
        f' | tee {log_path}',
    ])
    run_through_shell(command)
    average_precisions = collect_ap(log_path)
    # One AP entry is expected per epoch trained beyond the resumed snapshot.
    self.assertEqual(len(average_precisions), self.epochs_delta)
    self.assertGreater(average_precisions[-1], 0)
def test_nncf_compress_and_eval_on_gpu(self):
    """Compress with NNCF, then verify eval.py reproduces the last training AP."""
    log_path = os.path.join(self.template_folder, f'log__{self.id()}.txt')
    checkpoints_dir = f'{self.template_folder}/output_{self.id()}'
    compress_cmd = ''.join([
        f'cd {self.template_folder};',
        f'python compress.py',
        f' --train-ann-files {self.ann_file}',
        f' --train-data-roots {self.img_root}',
        f' --val-ann-files {self.ann_file}',
        f' --val-data-roots {self.img_root}',
        f' --load-weights snapshot.pth',
        f' --save-checkpoints-to {checkpoints_dir}',
        f' --gpu-num 1',
        f' --batch-size 1',
        f' | tee {log_path}',
    ])
    run_through_shell(compress_cmd)
    last_compress_ap = collect_ap(log_path)[-1]
    logging.info(f'From training last_compress_ap={last_compress_ap}')

    latest_file = f'{checkpoints_dir}/latest.pth'
    self.assertTrue(os.path.isfile(latest_file),
                    f'Cannot find the latest.pth in path `{latest_file}`')

    metrics_path = f'{checkpoints_dir}/metrics.yaml'
    eval_cmd = ''.join([
        f'cd {self.template_folder};',
        f'python eval.py',
        f' --test-ann-files {self.ann_file}',
        f' --test-data-roots {self.img_root}',
        f' --save-metrics-to {metrics_path}',
        f' --load-weights {latest_file}',
    ])
    run_through_shell(eval_cmd)

    with open(metrics_path) as read_file:
        content = yaml.safe_load(read_file)
    # eval.py reports AP in percent; training logs it as a fraction.
    ap = [metric['value'] for metric in content['metrics']
          if metric['key'] == 'ap'][0] / 100
    logging.info(f'Evaluation result ap={ap}')
    self.assertLess(abs(last_compress_ap - ap), 1e-6)
def setUpClass(cls):
    """Resolve template paths and install requirements inside the shared venv."""
    templates_root = os.environ['MODEL_TEMPLATES']
    cls.templates_folder = templates_root
    cls.template_folder = os.path.join(templates_root, 'object_detection',
                                       problem_name, model_name)
    cls.template_file = os.path.join(cls.template_folder, 'template.yaml')
    cls.ann_file = ann_file
    cls.img_root = img_root
    cls.dependencies = get_dependencies(cls.template_file)
    # A couple of extra epochs on top of the template's schedule.
    cls.epochs_delta = 2
    cls.total_epochs = cls.epochs_delta + get_epochs(cls.template_file)
    cls.venv_activate_path = os.path.join(templates_root, 'object_detection',
                                          'venv', 'bin', 'activate')
    install_cmd = (f'cd {cls.template_folder};'
                   f'. {cls.venv_activate_path};'
                   f'pip install -r requirements.txt;')
    run_through_shell(install_cmd)
def test_nncf_finetune_and_compress_on_gpu(self):
    """Train past the template's epoch count and check the final AP is positive."""
    log_path = os.path.join(self.template_folder, f'log__{self.id()}.txt')
    # Two epochs beyond the template's own schedule.
    total_epochs_with_finetuning = get_epochs(self.template_file) + 2
    command = ''.join([
        f'cd {self.template_folder};',
        f'python train.py',
        f' --train-ann-files {self.ann_file}',
        f' --train-data-roots {self.img_root}',
        f' --val-ann-files {self.ann_file}',
        f' --val-data-roots {self.img_root}',
        f' --resume-from snapshot.pth',
        f' --save-checkpoints-to {self.template_folder}/output_{self.id()}',
        f' --gpu-num 1',
        f' --batch-size 1',
        f' --epochs {total_epochs_with_finetuning}',
        f' | tee {log_path}',
    ])
    run_through_shell(command)
    average_precisions = collect_ap(log_path)
    self.assertGreater(average_precisions[-1], 0)
def setUpClass(cls):
    """Resolve template paths, fetch the snapshot and COCO val2017, and install requirements."""
    templates_root = os.environ['MODEL_TEMPLATES']
    cls.templates_folder = templates_root
    cls.template_folder = os.path.join(templates_root, 'instance_segmentation_2',
                                       problem_name, model_name)
    cls.template_file = os.path.join(cls.template_folder, 'template.yaml')
    cls.ann_file = ann_file
    cls.img_root = img_root
    cls.dependencies = get_dependencies(cls.template_file)
    # A couple of extra epochs on top of the template's schedule.
    cls.epochs_delta = 2
    cls.total_epochs = cls.epochs_delta + get_epochs(cls.template_file)
    download_snapshot_if_not_yet(cls.template_file, cls.template_folder)

    coco_dir = os.path.abspath(
        f'{os.path.dirname(__file__)}/../../../../data/coco')
    val_dir = os.path.join(coco_dir, 'val2017')
    # Download and unpack COCO val2017 only when it is not already in place;
    # reuse a previously downloaded archive if present.
    if not os.path.exists(val_dir):
        zip_file = os.path.join(coco_dir, 'val2017.zip')
        link = 'http://images.cocodataset.org/zips/val2017.zip'
        if not os.path.exists(zip_file):
            run_through_shell(f'wget --no-verbose {link} -P {coco_dir}')
        run_through_shell(f'unzip {zip_file} -d {coco_dir}')

    run_through_shell(f'cd {cls.template_folder};'
                      f'pip install -r requirements.txt;')
def setUpClass(cls):
    """Resolve template paths, install requirements in the venv and export the snapshot."""
    templates_root = os.environ['MODEL_TEMPLATES']
    cls.templates_folder = templates_root
    cls.template_folder = os.path.join(templates_root, 'object_detection',
                                       problem_name, model_name)
    cls.template_file = os.path.join(cls.template_folder, 'template.yaml')
    cls.ann_file = ann_file
    cls.img_root = img_root
    cls.dependencies = get_dependencies(cls.template_file)
    cls.test_export_thr = 0.031
    cls.venv_activate_path = os.path.join(templates_root, 'object_detection',
                                          'venv', 'bin', 'activate')
    setup_cmd = ''.join([
        f'cd {os.path.dirname(cls.template_file)};',
        f'. {cls.venv_activate_path};',
        f'pip install -r requirements.txt;',
        f'python export.py',
        f' --load-weights snapshot.pth',
        f' --save-model-to export',
    ])
    run_through_shell(setup_cmd)
def test_evaluation_on_gpu(self):
    """Evaluate the pretrained snapshot and compare its AP with the expected value.

    Runs eval.py inside the template folder, parses the produced
    metrics.yaml, and checks that the measured AP (reported in percent)
    matches the stored expected mAP (a fraction) up to 1e-6.
    """
    run_through_shell(f'cd {self.template_folder};'
                      f'python eval.py'
                      f' --test-ann-files {self.ann_file}'
                      f' --test-data-roots {self.img_root}'
                      f' --save-metrics-to metrics.yaml'
                      f' --load-weights snapshot.pth')
    with open(os.path.join(self.template_folder, "metrics.yaml")) as read_file:
        # safe_load is the idiomatic spelling of load(..., SafeLoader), and is
        # what the sibling NNCF tests in this file already use.
        content = yaml.safe_load(read_file)
    # next() fails loudly if the 'ap' metric is missing, instead of the
    # opaque IndexError that [...][0] would raise.
    ap = next(metric['value'] for metric in content['metrics']
              if metric['key'] == 'ap')
    with open(
            f'{os.path.dirname(__file__)}/../expected_outputs/{problem_name}/{model_name}.json'
    ) as read_file:
        expected = json.load(read_file)
    self.assertLess(abs(expected['map'] - ap / 100), 1e-6)
def test_nncf_compress_and_export(self):
    """Compress with NNCF, export the compressed model to OpenVINO, and check its AP.

    The AP of the exported model may drop by at most ``self.test_export_thr``
    relative to the last AP reported during compression training.
    """
    log_file = os.path.join(self.template_folder, f'log__{self.id()}.txt')
    checkpoints_dir = f'{self.template_folder}/output_{self.id()}'
    run_through_shell(f'cd {self.template_folder};'
                      f'python compress.py'
                      f' --train-ann-files {self.ann_file}'
                      f' --train-data-roots {self.img_root}'
                      f' --val-ann-files {self.ann_file}'
                      f' --val-data-roots {self.img_root}'
                      f' --load-weights snapshot.pth'
                      f' --save-checkpoints-to {checkpoints_dir}'
                      f' --gpu-num 1'
                      f' --batch-size 1'
                      f' | tee {log_file}')
    compress_ap = collect_ap(log_file)
    last_compress_ap = compress_ap[-1]
    latest_file = f'{checkpoints_dir}/latest.pth'
    self.assertTrue(
        os.path.isfile(latest_file),
        f'Cannot find the latest.pth in path `{latest_file}`')
    run_through_shell(f'cd {os.path.dirname(self.template_file)};'
                      f'python export.py'
                      f' --load-weights {latest_file}'
                      f' --save-model-to {checkpoints_dir}')
    model_bin_paths = list(
        glob.glob(os.path.join(checkpoints_dir, '*.bin')))
    assert len(model_bin_paths) == 1, (
        f'Wrong result of export.py: globbing "*.bin" in'
        f' {checkpoints_dir} gives {model_bin_paths}')
    # Consistency fix: the compress step above uses self.ann_file/self.img_root,
    # but the eval step referenced the module-level ann_file/img_root names.
    # setUpClass copies the former from the latter, so the values are equal.
    run_through_shell(
        f'cd {os.path.dirname(self.template_file)};'
        f'python eval.py'
        f' --test-ann-files {self.ann_file}'
        f' --test-data-roots {self.img_root}'
        f' --load-weights {model_bin_paths[0]}'
        f' --save-metrics-to {os.path.join(checkpoints_dir, "metrics.yaml")}'
    )
    with open(os.path.join(checkpoints_dir, "metrics.yaml")) as read_file:
        content = yaml.safe_load(read_file)
    # next() fails loudly if the 'ap' metric is missing; eval.py reports AP
    # in percent while training logs it as a fraction.
    ap = next(metric['value'] for metric in content['metrics']
              if metric['key'] == 'ap') / 100
    logging.info(f'From training last_compress_ap={last_compress_ap}')
    logging.info(f'From evaluation of OpenVINO(TM) model ap={ap}')
    self.assertGreater(ap, last_compress_ap - self.test_export_thr)
def do_export(self, folder):
    """Install requirements and export the snapshot as an OpenVINO model into `folder`."""
    template_dir = os.path.dirname(self.template_file)
    export_cmd = (f'cd {template_dir};'
                  f'pip install -r requirements.txt;'
                  f'python export.py'
                  f' --load-weights snapshot.pth'
                  f' --save-model-to {folder}')
    run_through_shell(export_cmd)