def setUpClass(cls):
    """Prepare class-level fixtures: locate the template, load expected
    metrics, fetch the snapshot and install template requirements.

    Relies on closure variables of the enclosing test factory
    (``domain_name``, ``problem_name``, ``model_name``, ``ann_file``,
    ``img_root``, ``expected_outputs_dir``) — presumably bound per test
    case; confirm against the generator that builds this class.
    """
    cls.templates_folder = os.environ['MODEL_TEMPLATES']
    cls.template_folder = os.path.join(
        cls.templates_folder, domain_name, problem_name, model_name)
    skip_non_instantiated_template_if_its_allowed(
        cls.template_folder, problem_name, model_name)
    cls.template_file = os.path.join(cls.template_folder, 'template.yaml')

    # Dataset locations and template metadata used by the test methods.
    cls.ann_file = ann_file
    cls.img_root = img_root
    cls.dependencies = get_dependencies(cls.template_file)

    # Fine-tuning runs one epoch past the template's configured count.
    cls.epochs_delta = 1
    cls.total_epochs = get_epochs(cls.template_file) + cls.epochs_delta

    expected_json_path = f'{expected_outputs_dir}/{problem_name}/{model_name}.json'
    with open(expected_json_path) as json_stream:
        cls.expected_outputs = json.load(json_stream)

    download_snapshot_if_not_yet(cls.template_file, cls.template_folder)
    run_through_shell(f'cd {cls.template_folder};'
                      f'pip install -r requirements.txt;')
def download_and_extract_coco_val2017(coco_dir):
    """Ensure the COCO val2017 image set is present under *coco_dir*.

    Skips all work when the extracted folder already exists; otherwise
    downloads the archive (unless cached) and unzips it.
    """
    extracted_dir = os.path.join(coco_dir, 'val2017')
    archive_path = os.path.join(coco_dir, 'val2017.zip')
    download_url = 'http://images.cocodataset.org/zips/val2017.zip'

    # Guard clause: nothing to do if the dataset is already extracted.
    if os.path.exists(extracted_dir):
        return

    if not os.path.exists(archive_path):
        run_through_shell(f'wget --no-verbose {download_url} -P {coco_dir}')
    run_through_shell(f'unzip {archive_path} -d {coco_dir}')
def copy_template_folder(src_template_folder, template_folder):
    """Copy a model-template folder to a per-test location.

    Warns loudly when the destination already exists, since leftovers
    from a previous run may cause cross-test side effects.
    """
    logging.info(f'Copying {src_template_folder} to {template_folder}')
    if os.path.isdir(template_folder):
        logging.warning('')
        logging.warning(f'ATTENTION: the folder that should be created for this test case exists!')
        logging.warning(f' It may cause side effects between tests!')
        logging.warning(f'The folder is `{template_folder}`.\n')
    # `cp -a` preserves attributes and copies recursively.
    run_through_shell(f'cp -a "{src_template_folder}" "{template_folder}"')
    assert os.path.isdir(template_folder), f'Cannot create {template_folder}'
def do_eval(self, file_to_eval):
    """Run the template's eval.py on *file_to_eval* and return the path
    of the YAML file where the metrics were saved."""
    metrics_path = os.path.join(self.output_folder, 'metrics.yaml')
    # Assemble the shell command from its parts; pieces concatenate
    # with no separators, exactly as the original single expression.
    command = ''.join([
        f'cd {self.template_folder};',
        f'python eval.py',
        f' --test-ann-files {self.ann_file}',
        f' --test-data-roots {self.img_root}',
        f' --save-metrics-to {metrics_path}',
        f' --load-weights {file_to_eval}',
    ])
    run_through_shell(command)
    return metrics_path
def do_export(self, export_dir, on_gpu):
    """Export the snapshot via the template's export.py into *export_dir*.

    Does nothing if *export_dir* already exists. When *on_gpu* is falsy,
    CUDA devices are hidden so the export runs on CPU.
    """
    if os.path.exists(export_dir):
        return
    prefix = '' if on_gpu else 'export CUDA_VISIBLE_DEVICES=;'
    run_through_shell(f'{prefix}'
                      f'cd {os.path.dirname(self.template_file)};'
                      f'pip install -r requirements.txt;'
                      f'python export.py'
                      f' --load-weights snapshot.pth'
                      f' --save-model-to {export_dir}')
def do_compress(self):
    """Run NNCF compression via the template's compress.py.

    Output is teed to a per-test log file; returns that log file's path.
    """
    log_file = os.path.join(self.output_folder, f'log__{self.id()}.txt')
    # The extra compression parameters are injected after a single
    # space, matching the original concatenated command byte-for-byte.
    run_through_shell(
        f'cd {self.template_folder};'
        f'python compress.py'
        f' --train-ann-files {self.ann_file}'
        f' --train-data-roots {self.img_root}'
        f' --val-ann-files {self.ann_file}'
        f' --val-data-roots {self.img_root}'
        f' --load-weights snapshot.pth'
        f' --save-checkpoints-to {self.output_folder}'
        f' --gpu-num 1'
        f' --batch-size 1 {self.compress_cmd_line_params}'
        f' | tee {log_file}')
    return log_file
def do_evaluation(self, export_dir):
    """Evaluate the exported IR model in *export_dir* and check metrics.

    Runs eval.py on the exported `model.bin`, then asserts that every
    metric in `metric_keys` is within `self.test_export_thr` of the
    expected value recorded in `self.expected_outputs`.
    """
    metrics_path = os.path.join(export_dir, "metrics.yaml")
    # Consistency fix: use the dataset paths stored on the class in
    # setUpClass (self.ann_file / self.img_root), as every sibling
    # method does, instead of reaching for the module-level names.
    run_through_shell(
        f'cd {os.path.dirname(self.template_file)};'
        f'python eval.py'
        f' --test-ann-files {self.ann_file}'
        f' --test-data-roots {self.img_root}'
        f' --load-weights {os.path.join(export_dir, "model.bin")}'
        f' --save-metrics-to {metrics_path}'
    )
    with open(metrics_path) as read_file:
        content = yaml.safe_load(read_file)
    for metric_key in metric_keys:
        # First (and expected only) entry matching this key; IndexError
        # here means eval.py did not report the metric at all.
        value = [metrics['value'] for metrics in content['metrics']
                 if metrics['key'] == metric_key][0]
        self.assertGreaterEqual(
            value, self.expected_outputs[metric_key] - self.test_export_thr)
def setUpClass(cls):
    """Prepare fixtures for a compression test case.

    Copies the source template into a case-specific folder, applies the
    template/compression-config updates, downloads the snapshot and
    installs the template's requirements. Uses closure variables of the
    enclosing factory (``domain_name``, ``problem_name``, ``model_name``,
    ``ann_file``, ``img_root``, ``template_update_dict``,
    ``compress_cmd_line_params``, ``compression_cfg_update_dict``).
    """
    cls.compress_cmd_line_params = compress_cmd_line_params
    cls.test_case_description = cls.generate_test_case_description(
        template_update_dict, compress_cmd_line_params,
        compression_cfg_update_dict)
    logging.info(
        f'Begin setting up class for {problem_name}/{model_name}, {cls.test_case_description}'
    )

    # Locate the pristine source template and make sure it is usable.
    cls.templates_folder = os.environ['MODEL_TEMPLATES']
    cls.src_template_folder = os.path.join(
        cls.templates_folder, domain_name, problem_name, model_name)
    skip_non_instantiated_template_if_its_allowed(
        cls.src_template_folder, problem_name, model_name)
    src_template_file = os.path.join(cls.src_template_folder, 'template.yaml')
    download_snapshot_if_not_yet(src_template_file, cls.src_template_folder)

    # Work on a per-case copy so template mutations stay isolated.
    cls.template_folder = cls.generate_template_folder_name(
        cls.src_template_folder, cls.test_case_description)
    cls.copy_template_folder(cls.src_template_folder, cls.template_folder)
    cls.template_file = os.path.join(cls.template_folder, 'template.yaml')
    cls.apply_update_dict_params_to_template_file(
        cls.template_file, template_update_dict, compression_cfg_update_dict)

    cls.ann_file = ann_file
    cls.img_root = img_root
    cls.dependencies = get_dependencies(cls.template_file)

    download_snapshot_if_not_yet(cls.template_file, cls.template_folder)
    run_through_shell(f'cd {cls.template_folder};'
                      f'pip install -r requirements.txt;')
    logging.info(
        f'End setting up class for {problem_name}/{model_name}, {cls.test_case_description}'
    )
def do_finetuning(self, on_gpu):
    """Resume training from the snapshot for `self.total_epochs` epochs.

    When *on_gpu* is falsy, CUDA devices are hidden so training runs on
    CPU. Asserts that a `latest.pth` checkpoint was produced.
    """
    log_file = os.path.join(self.output_folder, 'test_finetuning.log')
    prefix = '' if on_gpu else 'export CUDA_VISIBLE_DEVICES=;'
    run_through_shell(
        f'{prefix}'
        f'cd {self.template_folder};'
        f'python train.py'
        f' --train-ann-files {self.ann_file}'
        f' --train-data-roots {self.img_root}'
        f' --val-ann-files {self.ann_file}'
        f' --val-data-roots {self.img_root}'
        f' --resume-from snapshot.pth'
        f' --save-checkpoints-to {self.output_folder}'
        f' --gpu-num 1'
        f' --batch-size 1'
        f' --epochs {self.total_epochs}'
        f' | tee {log_file}')
    latest_checkpoint = os.path.join(self.output_folder, 'latest.pth')
    self.assertTrue(os.path.exists(latest_checkpoint))
def do_evaluation(self, on_gpu):
    """Evaluate the pristine snapshot and compare metrics to expected.

    When *on_gpu* is falsy, CUDA devices are hidden so evaluation runs
    on CPU. Each metric must match the recorded expected value to
    within 1e-4 absolute tolerance.
    """
    prefix = '' if on_gpu else 'export CUDA_VISIBLE_DEVICES=;'
    metrics_path = os.path.join(self.output_folder, "metrics.yaml")
    run_through_shell(
        f'{prefix}'
        f'cd {self.template_folder};'
        f'python eval.py'
        f' --test-ann-files {self.ann_file}'
        f' --test-data-roots {self.img_root}'
        f' --save-metrics-to {metrics_path}'
        f' --load-weights snapshot.pth'
    )
    with open(metrics_path) as read_file:
        content = yaml.safe_load(read_file)
    for metric_key in metric_keys:
        matching = [metrics['value'] for metrics in content['metrics']
                    if metrics['key'] == metric_key]
        value = matching[0]
        self.assertLess(abs(self.expected_outputs[metric_key] - value), 1e-4)
def test_nncf_compress_and_export(self):
    """Compress the model, export the resulting checkpoint, then
    evaluate the exported `.bin`; returns (compress log, metrics path)."""
    log_file = self.do_compress()

    latest_file = os.path.join(self.output_folder, 'latest.pth')
    self.assertTrue(os.path.isfile(latest_file),
                    f'Cannot find the latest.pth in path `{latest_file}`')

    run_through_shell(
        f'cd {os.path.dirname(self.template_file)};'
        f'python export.py'
        f' --load-weights {latest_file}'
        f' --save-model-to {self.output_folder}'
    )

    # export.py must produce exactly one .bin model file.
    model_bin_paths = glob.glob(os.path.join(self.output_folder, '*.bin'))
    assert len(model_bin_paths) == 1, (
        f'Wrong result of export.py: globbing "*.bin" in'
        f' {self.output_folder} gives {model_bin_paths}')

    metrics_path = self.do_eval(model_bin_paths[0])
    return log_file, metrics_path