def test_empty_local_config_raises_value_error_exception(self, mocker):
    mocker.patch(self.module + '._read_configs', return_value=(self.global_config, {}))

    with pytest.raises(ValueError) as exception:
        ConfigReader.merge(self.arguments)

    error_message = str(exception).split(sep=': ')[-1]
    assert error_message == 'Missing local config'

def test_invalid_model_raises_value_error_exception(self, mocker):
    mocker.patch(self.module + '._read_configs', return_value=(
        self.global_config, {'models': [{'name': None, 'launchers': None, 'datasets': None}]}
    ))

    with pytest.raises(ConfigError) as exception:
        ConfigReader.merge(self.arguments)

    error_message = str(exception).split(sep=': ')[-1]
    assert error_message == 'Each model must specify {}'.format(['name', 'launchers', 'datasets'])

def test_empty_models_in_local_config_raises_value_error_exception(self, mocker):
    mocker.patch(self.module + '._read_configs', return_value=(
        self.global_config, {'models': []}
    ))

    with pytest.raises(ConfigError) as exception:
        ConfigReader.merge(self.arguments)

    error_message = str(exception).split(sep=': ')[-1]
    assert error_message == 'Missed "{}" in local config'.format('models')

def test_only_appropriate_launcher_is_filtered_by_another_framework(self, mocker):
    config_launchers = [
        {
            'framework': 'dlsdk',
            'model': Path('/absolute_path1'),
            'weights': Path('/absolute_path1'),
            'adapter': 'classification',
            'device': 'CPU',
            '_model_optimizer': self.arguments.model_optimizer,
            '_models_prefix': self.arguments.models
        },
        {
            'framework': 'caffe',
            'model': Path('/absolute_path2'),
            'weights': Path('/absolute_path2'),
            'adapter': 'classification',
            'device': 'GPU'
        }
    ]
    local_config = {'models': [{'name': 'name', 'launchers': config_launchers, 'datasets': [{'name': 'dataset'}]}]}
    mocker.patch(self.module + '._read_configs', return_value=(None, local_config))
    self.arguments.target_framework = 'caffe'

    config = ConfigReader.merge(self.arguments)

    launchers = config['models'][0]['launchers']
    assert len(launchers) == 1
    assert launchers[0] == config_launchers[1]

def test_both_launchers_are_not_filtered_by_the_same_framework(self, mocker):
    config_launchers = [
        {
            'framework': 'dlsdk',
            'model': Path('/absolute_path1'),
            'weights': Path('/absolute_path1'),
            'adapter': 'classification',
            'device': 'CPU',
            '_model_optimizer': self.arguments.model_optimizer,
            '_models_prefix': self.arguments.models
        },
        {
            'framework': 'dlsdk',
            'model': Path('/absolute_path2'),
            'weights': Path('/absolute_path2'),
            'adapter': 'classification',
            'device': 'GPU',
            '_model_optimizer': self.arguments.model_optimizer,
            '_models_prefix': self.arguments.models
        }
    ]
    local_config = {'models': [{'name': 'name', 'launchers': config_launchers, 'datasets': [{'name': 'dataset'}]}]}
    mocker.patch(self.module + '._read_configs', return_value=(None, local_config))
    args = copy.deepcopy(self.arguments)
    args.model_optimizer = None
    args.converted_models = None
    args.target_framework = 'dlsdk'

    config = ConfigReader.merge(args)

    launchers = config['models'][0]['launchers']
    assert launchers == config_launchers

def test_launcher_with_several_tags_contained_at_least_one_from_target_tags_is_not_filtered(self, mocker):
    config_launchers = [
        {
            'framework': 'dlsdk',
            'tags': ['tag1', 'tag2'],
            'model': Path('/absolute_path1'),
            'weights': Path('/absolute_path1'),
            'adapter': 'classification',
            'device': 'CPU',
            '_model_optimizer': self.arguments.model_optimizer,
            '_models_prefix': self.arguments.models
        }
    ]
    local_config = {'models': [{'name': 'name', 'launchers': config_launchers, 'datasets': [{'name': 'dataset'}]}]}
    mocker.patch(self.module + '._read_configs', return_value=(None, local_config))
    args = copy.deepcopy(self.arguments)
    args.model_optimizer = None
    args.converted_models = None
    args.target_tags = ['tag2']

    config = ConfigReader.merge(args)

    launchers = config['models'][0]['launchers']
    assert len(launchers) == 1
    assert launchers[0] == config_launchers[0]

def test_both_launchers_are_filtered_by_another_tag(self, mocker):
    config_launchers = [
        {
            'framework': 'dlsdk',
            'tags': ['some_tag'],
            'model': '/absolute_path1',
            'weights': '/absolute_path1',
            'adapter': 'classification',
            'device': 'CPU',
            '_model_optimizer': self.arguments.model_optimizer,
            '_models_prefix': self.arguments.models
        },
        {
            'framework': 'dlsdk',
            'tags': ['some_tag'],
            'model': '/absolute_path2',
            'weights': '/absolute_path2',
            'adapter': 'classification',
            'device': 'GPU',
            '_model_optimizer': self.arguments.model_optimizer,
            '_models_prefix': self.arguments.models
        }
    ]
    local_config = {'models': [{'name': 'name', 'launchers': config_launchers, 'datasets': [{'name': 'dataset'}]}]}
    mocker.patch(self.module + '._read_configs', return_value=(None, local_config))
    args = copy.deepcopy(self.arguments)
    args.model_optimizer = None
    args.converted_models = None
    args.target_tags = ['other_tag']

    with pytest.warns(Warning):
        config = ConfigReader.merge(args)

    launchers = config['models'][0]['launchers']
    assert len(launchers) == 0

def test_both_launchers_are_filtered_by_target_tags_if_tags_not_provided_in_config(self, mocker):
    config_launchers = [
        {
            'framework': 'dlsdk',
            'model': '/absolute_path1',
            'weights': '/absolute_path1',
            'adapter': 'classification',
            'device': 'CPU'
        },
        {
            'framework': 'dlsdk',
            'model': '/absolute_path2',
            'weights': '/absolute_path2',
            'adapter': 'classification',
            'device': 'GPU'
        }
    ]
    local_config = {'models': [{'name': 'name', 'launchers': config_launchers, 'datasets': [{'name': 'dataset'}]}]}
    self.arguments.target_tags = ['some_tag']
    mocker.patch(self.module + '._read_configs', return_value=(None, local_config))

    with pytest.warns(Warning):
        config = ConfigReader.merge(self.arguments)

    launchers = config['models'][0]['launchers']
    assert len(launchers) == 0

def test_expand_relative_paths_in_datasets_config_using_command_line(self, mocker):
    local_config = {'models': [{
        'name': 'model',
        'launchers': [{'framework': 'caffe'}],
        'datasets': [{
            'name': 'global_dataset',
            'dataset_meta': 'relative_annotation_path',
            'data_source': 'relative_source_path',
            'segmentation_masks_source': 'relative_source_path',
            'annotation': 'relative_annotation_path'
        }]
    }]}
    mocker.patch(self.module + '._read_configs', return_value=(None, local_config))
    expected = copy.deepcopy(local_config['models'][0]['datasets'][0])
    expected['annotation'] = self.arguments.annotations / 'relative_annotation_path'
    expected['dataset_meta'] = self.arguments.annotations / 'relative_annotation_path'
    expected['segmentation_masks_source'] = self.arguments.source / 'relative_source_path'
    expected['data_source'] = self.arguments.source / 'relative_source_path'

    config = ConfigReader.merge(self.arguments)

    assert config['models'][0]['datasets'][0] == expected

def test_both_launchers_are_filtered_by_other_devices(self, mocker):
    config_launchers = [
        {
            'framework': 'dlsdk',
            'model': '/absolute_path1',
            'weights': '/absolute_path1',
            'adapter': 'classification',
            'device': 'CPU'
        },
        {
            'framework': 'caffe',
            'model': '/absolute_path2',
            'weights': '/absolute_path2',
            'adapter': 'classification',
            'device': 'CPU'
        }
    ]
    local_config = {'models': [{'name': 'name', 'launchers': config_launchers, 'datasets': [{'name': 'dataset'}]}]}
    mocker.patch(self.module + '._read_configs', return_value=(None, local_config))
    self.arguments.target_devices = ['FPGA', 'MYRIAD']

    with pytest.warns(Warning):
        config = ConfigReader.merge(self.arguments)

    launchers = config['models'][0]['launchers']
    assert len(launchers) == 0

def test_launcher_is_not_filtered_by_device_with_tail(self, mocker):
    config_launchers = [
        {
            'framework': 'dlsdk',
            'model': Path('/absolute_path1'),
            'weights': Path('/absolute_path1'),
            'adapter': 'classification',
            'device': 'CPU',
            '_model_optimizer': self.arguments.model_optimizer,
            '_models_prefix': self.arguments.models
        },
        {
            'framework': 'caffe',
            'model': Path('/absolute_path2'),
            'weights': Path('/absolute_path2'),
            'adapter': 'classification',
            'device': 'GPU'
        }
    ]
    local_config = {'models': [{'name': 'name', 'launchers': config_launchers, 'datasets': [{'name': 'dataset'}]}]}
    mocker.patch(self.module + '._read_configs', return_value=(None, local_config))
    args = copy.deepcopy(self.arguments)
    args.converted_models = None
    args.target_devices = ['CPU', 'GPU_unexpected_tail']

    config = ConfigReader.merge(args)

    launchers = config['models'][0]['launchers']
    assert len(launchers) == 1
    assert launchers[0] == config_launchers[0]

def test_merge_datasets_with_definitions_and_meta_is_not_modified(self, mocker):
    local_config = {'models': [{
        'name': 'model',
        'launchers': [{'framework': 'dlsdk', 'model': '/absolute_path', 'weights': '/absolute_path'}],
        'datasets': [{'name': 'global_dataset', 'dataset_meta': '/absolute_path'}]
    }]}
    expected = self.global_datasets[0]
    expected['dataset_meta'] = Path('/absolute_path')
    mocker.patch(self.module + '._read_configs', return_value=(
        self.global_config, local_config
    ))

    config = ConfigReader.merge(self.arguments)

    assert config['models'][0]['datasets'][0] == expected

def test_merge_datasets_with_definitions(self, mocker):
    local_config = {'models': [{
        'name': 'model',
        'launchers': [{'framework': 'dlsdk', 'model': '/absolute_path', 'weights': '/absolute_path'}],
        'datasets': [{'name': 'global_dataset'}]
    }]}
    mocker.patch(self.module + '._read_configs', return_value=(
        self.global_config, local_config
    ))
    arguments = copy.deepcopy(self.arguments)
    arguments.model_optimizer = None

    config = ConfigReader.merge(arguments)

    assert config['models'][0]['datasets'][0] == self.global_datasets[0]

def test_merge_launchers_with_model_is_not_modified(self, mocker):
    local_config = {'models': [{
        'name': 'model',
        'launchers': [{'framework': 'dlsdk', 'model': 'custom'}],
        'datasets': [{'name': 'global_dataset'}]
    }]}
    expected = copy.deepcopy(self.get_global_launcher('dlsdk'))
    expected['model'] = 'custom'
    expected['bitstream'] = self.arguments.bitstreams / expected['bitstream']
    expected['cpu_extensions'] = self.arguments.extensions / expected['cpu_extensions']
    mocker.patch(self.module + '._read_configs', return_value=(
        self.global_config, local_config
    ))
    args = copy.deepcopy(self.arguments)
    args.model_optimizer = None
    args.models = None
    args.converted_models = None

    config = ConfigReader.merge(args)

    assert config['models'][0]['launchers'][0] == expected

def test_read_configs_without_global_config(self, mocker):
    config = {'models': [{
        'name': 'model',
        'launchers': [{'framework': 'dlsdk', 'model': Path('/absolute_path'), 'weights': Path('/absolute_path')}],
        'datasets': [{'name': 'global_dataset'}]
    }]}
    empty_args = Namespace(**{
        'models': None, 'extensions': None, 'source': None, 'annotations': None,
        'converted_models': None, 'model_optimizer': None, 'bitstreams': None,
        'definitions': None, 'config': None, 'stored_predictions': None,
        'tf_custom_op_config': None, 'progress': 'bar', 'target_framework': None,
        'target_devices': None, 'log_file': None,
        'tf_obj_detection_api_pipeline_config_path': None, 'target_tags': None,
        'cpu_extensions_mode': None, 'aocl': None
    })
    mocker.patch('accuracy_checker.utils.get_path', return_value=Path.cwd())
    mocker.patch('yaml.load', return_value=config)
    mocker.patch('pathlib.Path.open')

    result = ConfigReader.merge(empty_args)

    assert config == result

def test_only_appropriate_launcher_is_filtered_by_user_input_devices(self, mocker):
    config_launchers = [
        {
            'framework': 'dlsdk',
            'model': Path('/absolute_path1'),
            'weights': Path('/absolute_path1'),
            'adapter': 'classification',
            'device': 'CPU',
            '_model_optimizer': self.arguments.model_optimizer,
            '_models_prefix': self.arguments.models
        },
        {
            'framework': 'dlsdk',
            'model': Path('/absolute_path1'),
            'weights': Path('/absolute_path1'),
            'adapter': 'classification',
            'device': 'HETERO:CPU,GPU',
            '_model_optimizer': self.arguments.model_optimizer,
            '_models_prefix': self.arguments.models
        },
        {
            'framework': 'caffe',
            'model': Path('/absolute_path2'),
            'weights': Path('/absolute_path2'),
            'adapter': 'classification',
            'device': 'GPU'
        }
    ]
    local_config = {'models': [{'name': 'name', 'launchers': config_launchers, 'datasets': [{'name': 'dataset'}]}]}
    mocker.patch(self.module + '._read_configs', return_value=(None, local_config))
    args = copy.deepcopy(self.arguments)
    args.converted_models = None
    args.target_devices = ['GPU', 'CPU']

    config = ConfigReader.merge(args)

    launchers = config['models'][0]['launchers']
    assert launchers == [config_launchers[0], config_launchers[2]]

def test_expand_relative_paths_in_launchers_config_using_command_line(self, mocker):
    local_config = {'models': [{
        'name': 'model',
        'launchers': [{
            'framework': 'dlsdk',
            'model': 'relative_model_path',
            'weights': 'relative_weights_path',
            'cpu_extensions': 'relative_extensions_path',
            'gpu_extensions': 'relative_extensions_path',
            'caffe_model': 'relative_model_path',
            'caffe_weights': 'relative_weights_path',
            'tf_model': 'relative_model_path',
            'mxnet_weights': 'relative_weights_path',
            'bitstream': 'relative_bitstreams_path'
        }],
        'datasets': [{'name': 'dataset'}]
    }]}
    mocker.patch(self.module + '._read_configs', return_value=(None, local_config))
    expected = copy.deepcopy(local_config['models'][0]['launchers'][0])
    expected['model'] = self.arguments.models / 'relative_model_path'
    expected['caffe_model'] = self.arguments.models / 'relative_model_path'
    expected['tf_model'] = self.arguments.models / 'relative_model_path'
    expected['weights'] = self.arguments.models / 'relative_weights_path'
    expected['caffe_weights'] = self.arguments.models / 'relative_weights_path'
    expected['mxnet_weights'] = self.arguments.models / 'relative_weights_path'
    expected['cpu_extensions'] = self.arguments.extensions / 'relative_extensions_path'
    expected['gpu_extensions'] = self.arguments.extensions / 'relative_extensions_path'
    expected['bitstream'] = self.arguments.bitstreams / 'relative_bitstreams_path'
    expected['_models_prefix'] = self.arguments.models
    args = copy.deepcopy(self.arguments)
    args.model_optimizer = None
    args.converted_models = None

    config = ConfigReader.merge(args)

    assert config['models'][0]['launchers'][0] == expected

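# The tests above rely on fixtures such as `self.module`, `self.arguments`,
# `self.global_config`, `self.global_datasets` and `self.get_global_launcher`
# that are defined in the test-class setup outside this section. A minimal
# sketch of what such a setup might look like is shown below purely for
# orientation; the attribute values are illustrative assumptions, not the
# original fixture contents.
#
# class TestConfigReader:
#     def setup_method(self):
#         self.module = 'accuracy_checker.config.ConfigReader'
#         self.arguments = Namespace(
#             models=Path('models'), extensions=Path('extensions'),
#             source=Path('source'), annotations=Path('annotations'),
#             bitstreams=Path('bitstreams'), converted_models=Path('converted'),
#             model_optimizer='model_optimizer',
#             target_framework=None, target_devices=None, target_tags=None
#         )
#         self.global_datasets = [{'name': 'global_dataset'}]
#         self.global_config = {
#             'launchers': [...],          # see get_global_launcher()
#             'datasets': self.global_datasets
#         }
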
def process() -> CalibrationConfiguration:
    """Parse command line arguments and build a CalibrationConfiguration for
    either the full (accuracy-checker based) or the simplified calibration mode."""
    args, unknown_args = CommandLineReader.parser().parse_known_args()
    if unknown_args:
        info("unknown command line arguments: {0}".format(unknown_args))

    if not args.simplified_mode:
        args.target_framework = "dlsdk"
        args.aocl = None

        merged_config, mode = ConfigReader.merge(args)
        updated_config = ConfigurationFilter.filter(
            merged_config, args.metric_name, args.metric_type, default_logger)

        if len(updated_config['models']) > 1:
            raise ValueError("too many models")

        if len(updated_config['models'][0]['launchers']) > 1:
            raise ValueError("too many launchers")

        launcher = updated_config['models'][0]['launchers'][0]
        if ('caffe_model' in launcher or 'tf_model' in launcher or 'tf_meta' in launcher
                or 'mxnet_weights' in launcher or 'onnx_model' in launcher):
            # The model is not in IR format yet and has to be converted via Model Optimizer.
            if args.converted_models:
                tmp_directory = None
            else:
                tmp_directory = tempfile.mkdtemp(".converted_models")
                launcher['mo_params']['output_dir'] = tmp_directory

            if 'caffe_model' in launcher:
                framework = FrameworkParameters('caffe', False)
                output_model = Path.get_model(
                    str(launcher['caffe_model']), "_i8", str(args.output_dir) if args.output_dir else None)
                output_weights = Path.get_weights(
                    str(launcher['caffe_weights']), "_i8", str(args.output_dir) if args.output_dir else None)
            elif 'tf_model' in launcher:
                framework = FrameworkParameters('tf', False)
                output_model = Path.get_model(
                    str(launcher['tf_model']), "_i8", str(args.output_dir) if args.output_dir else None)
                output_weights = Path.get_weights(
                    str(launcher['tf_model']), "_i8", str(args.output_dir) if args.output_dir else None)
            elif 'tf_meta' in launcher:
                framework = FrameworkParameters('tf', True)
                output_model = Path.get_model(
                    str(launcher['tf_meta']), "_i8", str(args.output_dir) if args.output_dir else None)
                output_weights = Path.get_weights(
                    str(launcher['tf_meta']), "_i8", str(args.output_dir) if args.output_dir else None)
            elif 'mxnet_weights' in launcher:
                framework = FrameworkParameters('mxnet', False)
                output_model = Path.get_model(
                    str(launcher['mxnet_weights']), "_i8", str(args.output_dir) if args.output_dir else None)
                output_weights = Path.get_weights(
                    str(launcher['mxnet_weights']), "_i8", str(args.output_dir) if args.output_dir else None)
            elif 'onnx_model' in launcher:
                framework = FrameworkParameters('onnx', False)
                output_model = Path.get_model(
                    str(launcher['onnx_model']), "_i8", str(args.output_dir) if args.output_dir else None)
                output_weights = Path.get_weights(
                    str(launcher['onnx_model']), "_i8", str(args.output_dir) if args.output_dir else None)
            else:
                raise ValueError("unknown model framework")

            model, weights = DLSDKLauncher.convert_model(launcher, framework)
            launcher['model'] = model
            launcher['weights'] = weights

            launcher.pop('caffe_model', None)
            launcher.pop('caffe_weights', None)
            launcher.pop('tf_model', None)
            launcher.pop('tf_meta', None)
            launcher.pop('mxnet_weights', None)
            launcher.pop('onnx_model', None)
        else:
            model = launcher['model']
            output_model = Path.get_model(
                str(model), "_i8", str(args.output_dir) if args.output_dir else None)
            weights = launcher['weights']
            output_weights = Path.get_weights(
                str(weights), "_i8", str(args.output_dir) if args.output_dir else None)
            tmp_directory = None

        # Fall back to the batch size stored in the IR if it was not given explicitly.
        batch_size = args.batch_size if args.batch_size else (
            launcher['batch'] if 'batch' in launcher else None)
        if not batch_size:
            with Network(str(launcher['model']), str(launcher['weights'])) as network:
                batch_size = network.ie_network.batch_size

        if 'cpu_extensions' in launcher:
            cpu_extension = DLSDKLauncher.get_cpu_extension(
                launcher['cpu_extensions'], args.cpu_extensions_mode)
            launcher['cpu_extensions'] = cpu_extension
        else:
            cpu_extension = None

        if not args.calibrate_fully_connected:
            if args.ignore_layer_types is None:
                args.ignore_layer_types = []
            args.ignore_layer_types.append("FullyConnected")

        return CalibrationConfiguration(
            config=updated_config,
            precision=args.precision,
            model=str(model),
            weights=str(weights),
            tmp_directory=tmp_directory,
            output_model=output_model,
            output_weights=output_weights,
            cpu_extension=str(cpu_extension) if cpu_extension else None,
            gpu_extension=str(launcher['gpu_extensions']) if 'gpu_extensions' in launcher else None,
            device=launcher['device'],
            batch_size=batch_size,
            threshold=args.threshold,
            ignore_layer_types=args.ignore_layer_types,
            ignore_layer_types_path=args.ignore_layer_types_path,
            ignore_layer_names=args.ignore_layer_names,
            ignore_layer_names_path=args.ignore_layer_names_path,
            benchmark_iterations_count=args.benchmark_iterations_count,
            progress=(None if args.progress == 'None' else args.progress),
            threshold_step=args.threshold_step,
            threshold_boundary=args.threshold_boundary,
            simplified_mode=args.simplified_mode)
    else:
        # Simplified mode: build the configuration directly from the raw command line arguments.
        file_name = ntpath.basename(str(args.models))
        model = os.path.splitext(file_name)
        output_model = model[0] + "_i8"
        if args.output_dir:
            output_model = str(args.output_dir.joinpath(output_model))

        batch_size = args.batch_size if args.batch_size else 0
        precision = args.precision if args.precision.lower() in ['fp16', 'fp32'] else ''

        return CalibrationConfiguration(
            config=args,
            precision=precision,
            model=str(args.models),
            weights=None,
            tmp_directory=None,
            output_model=output_model,
            output_weights=None,
            cpu_extension=str(args.extensions) if args.extensions else '',
            gpu_extension=None,
            device=args.target_devices,
            batch_size=batch_size,
            threshold=None,
            ignore_layer_types=None,
            ignore_layer_types_path=None,
            ignore_layer_names=None,
            ignore_layer_names_path=None,
            benchmark_iterations_count=None,
            progress=(None if args.progress == 'None' else args.progress),
            threshold_step=None,
            threshold_boundary=None,
            simplified_mode=args.simplified_mode)
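
# A minimal usage sketch (an assumption about the calling code, not part of
# this module): a calibration entry point would typically build the
# configuration once and hand it to a calibrator, along the lines of
#
#     configuration = CommandLineProcessor.process()
#     calibrator = CalibratorFactory.create(configuration.precision, configuration)
#     calibrator.run()
#
# `CommandLineProcessor` and `CalibratorFactory` are assumed names here; only
# `process()` and `CalibrationConfiguration` appear in the code above.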