Code Example #1
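This test checks that ConfigReader.merge raises ConfigError when a model entry leaves 'name', 'launchers', and 'datasets' unset, and that the error message lists the required fields.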
    def test_invalid_model_raises_value_error_exception(self, mocker):
        mocker.patch(self.module + '._read_configs', return_value=(
            self.global_config, {'models': [{'name': None, 'launchers': None, 'datasets': None}]}
        ))

        with pytest.raises(ConfigError) as exception:
            ConfigReader.merge(self.arguments)

        error_message = str(exception.value).split(sep=': ')[-1]
        assert error_message == 'Each model must specify {}'.format(', '.join(['name', 'launchers', 'datasets']))
Code Example #2
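This test checks that an empty 'models' list in the local config makes ConfigReader.merge raise ConfigError with a 'Missed "models" in local config' message.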
    def test_empty_models_in_local_config_raises_value_error_exception(self, mocker):
        mocker.patch(self.module + '._read_configs', return_value=(
            self.global_config, {'models': []}
        ))

        with pytest.raises(ConfigError) as exception:
            ConfigReader.merge(self.arguments)

        error_message = str(exception.value).split(sep=': ')[-1]
        assert error_message == 'Missed "{}" in local config'.format('models')
Code Example #3
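This test checks that a local config whose top-level key is neither 'models' nor 'evaluations' is rejected with a ConfigError naming the unsupported mode.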
    def test_missed_models_in_local_config_raises_value_error_exception(self, mocker):
        mocker.patch(self.module + '._read_configs', return_value=(
            self.global_config, {'not_models': 'custom'}
        ))

        with pytest.raises(ConfigError) as exception:
            ConfigReader.merge(self.arguments)

        error_message = str(exception.value).split(sep=': ')[-1]
        assert error_message == 'Accuracy Checker not_models mode is not supported. Please select between evaluations and models.'
Code Example #4
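This test checks that neither launcher is filtered out when the requested target devices ('GPU', 'CPU') match the devices already declared in the launcher configs.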
    def test_both_launchers_are_not_filtered_by_same_devices(self, mocker):
        config_launchers = [
            {
                'framework': 'dlsdk',
                'model': Path('/absolute_path1').absolute(),
                'weights': Path('/absolute_path1').absolute(),
                'adapter': 'classification',
                'device': 'CPU',
            },
            {
                'framework': 'caffe',
                'model': Path('/absolute_path2').absolute(),
                'weights': Path('/absolute_path2').absolute(),
                'adapter': 'classification',
                'device': 'GPU'
            }
        ]
        local_config = {'models': [{'name': 'name', 'launchers': config_launchers, 'datasets': [{'name': 'dataset'}]}]}
        mocker.patch(self.module + '._read_configs', return_value=(None, local_config))
        args = copy.deepcopy(self.arguments)
        args.extensions = None
        args.target_devices = ['GPU', 'CPU']

        config = ConfigReader.merge(args)[0]

        assert len(config['models']) == 2
        assert len(config['models'][0]['launchers']) == 1
        assert len(config['models'][1]['launchers']) == 1
        launchers = [config['models'][0]['launchers'][0], config['models'][1]['launchers'][0]]
        assert launchers == config_launchers
Code Example #5
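This test checks that, when several target tags are given, only the launcher whose tag matches one of them is kept.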
    def test_only_appropriate_launcher_is_filtered_by_another_tag_if_provided_several_target_tags(self, mocker):
        config_launchers = [
            {
                'framework': 'dlsdk',
                'tags': ['tag1'],
                'model': Path('/absolute_path1').absolute(),
                'weights': Path('/absolute_path1').absolute(),
                'adapter': 'classification',
                'device': 'CPU',
            },
            {
                'framework': 'caffe',
                'tags': ['tag2'],
                'model': Path('/absolute_path2').absolute(),
                'weights': Path('/absolute_path2').absolute(),
                'adapter': 'classification',
                'device': 'GPU',
            }
        ]
        local_config = {'models': [{'name': 'name', 'launchers': config_launchers, 'datasets': [{'name': 'dataset'}]}]}
        mocker.patch(self.module + '._read_configs', return_value=(None, local_config))
        args = copy.deepcopy(self.arguments)
        args.extensions = None
        args.target_tags = ['tag2', 'tag3']

        config = ConfigReader.merge(args)[0]

        launchers = config['models'][0]['launchers']
        assert len(launchers) == 1
        assert launchers[0] == config_launchers[1]
Code Example #6
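This test checks that relative 'model', 'weights', and extension paths in a launcher config are expanded against the model and extension prefixes passed on the command line.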
    def test_expand_relative_paths_in_launchers_config_using_command_line(self, mocker):
        local_config = {'models': [{
            'name': 'model',
            'launchers': [{
                'framework': 'dlsdk',
                'model': 'relative_model_path',
                'weights': 'relative_weights_path',
                'cpu_extensions': 'relative_extensions_path',
                'gpu_extensions': 'relative_extensions_path',
            }],
            'datasets': [{'name': 'dataset'}]
        }]}
        mocker.patch(self.module + '._read_configs', return_value=(None, local_config))
        with mock_filesystem(['extensions/', 'models/']) as prefix:
            expected = copy.deepcopy(local_config['models'][0]['launchers'][0])
            expected['model'] = prefix / self.arguments.models / 'relative_model_path'
            expected['weights'] = prefix / self.arguments.models / 'relative_weights_path'
            expected['cpu_extensions'] = prefix / self.arguments.extensions / 'relative_extensions_path'
            expected['gpu_extensions'] = prefix / self.arguments.extensions / 'relative_extensions_path'
            args = copy.deepcopy(self.arguments)
            args.extensions = prefix / self.arguments.extensions
            args.models = prefix / self.arguments.models

            config = ConfigReader.merge(args)[0]

            assert config['models'][0]['launchers'][0] == expected
Code Example #7
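This test checks that relative dataset paths are expanded with the prefix returned by the environment lookup when the corresponding command-line arguments are left unset.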
    def test_not_overwrite_relative_paths_in_datasets_config_using_env_variable_if_commandline_provided(self, mocker):
        local_config = {'models': [{
            'name': 'model',
            'launchers': [{'framework': 'caffe'}],
            'datasets': [{
                'name': 'global_dataset',
                'dataset_meta': 'relative_annotation_path',
                'data_source': 'relative_source_path',
                'segmentation_masks_source': 'relative_source_path',
                'annotation': 'relative_annotation_path'
            }]
        }]}

        mocker.patch(self.module + '._read_configs', return_value=(
            None, local_config
        ))
        expected = copy.deepcopy(local_config['models'][0]['datasets'][0])
        with mock_filesystem(['source/']) as prefix:
            mocker.patch('os.environ.get', return_value=str(prefix))
            expected['dataset_meta'] = prefix / 'relative_annotation_path'
            expected['segmentation_masks_source'] = prefix / 'relative_source_path'
            expected['data_source'] = prefix / 'relative_source_path'
            expected['annotation'] = prefix / 'relative_annotation_path'

            arguments = copy.deepcopy(self.arguments)
            arguments.extensions = None
            arguments.source = None
            arguments.annotations = None

            config = ConfigReader.merge(arguments)[0]

            assert config['models'][0]['datasets'][0] == expected
Code Example #8
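This test checks that every launcher is filtered out, and a warning is emitted, when none of the requested target devices match the devices in the config.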
    def test_both_launchers_are_filtered_by_other_devices(self, mocker):
        config_launchers = [
            {
                'framework': 'dlsdk',
                'model': Path('/absolute_path1').absolute(),
                'weights': Path('/absolute_path1').absolute(),
                'adapter': 'classification',
                'device': 'CPU',
            },
            {
                'framework': 'caffe',
                'model': Path('/absolute_path2').absolute(),
                'weights': Path('/absolute_path2').absolute(),
                'adapter': 'classification',
                'device': 'CPU'
            }
        ]
        local_config = {'models': [{'name': 'name', 'launchers': config_launchers, 'datasets': [{'name': 'dataset'}]}]}
        mocker.patch(self.module + '._read_configs', return_value=(None, local_config))
        args = copy.deepcopy(self.arguments)
        args.target_devices = ['FPGA', 'MYRIAD']
        args.extensions = None

        with pytest.warns(Warning):
            config = ConfigReader.merge(args)[0]

        assert len(config['models']) == 0
Code Example #9
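This test checks that, with target devices 'CPU' and 'GPU_unexpected_tail', only the CPU launcher remains in the merged config.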
    def test_launcher_is_not_filtered_by_device_with_tail(self, mocker):
        config_launchers = [
            {
                'framework': 'dlsdk',
                'model': Path('/absolute_path1').absolute(),
                'weights': Path('/absolute_path1').absolute(),
                'adapter': 'classification',
                'device': 'CPU',
            },
            {
                'framework': 'caffe',
                'model': Path('/absolute_path2').absolute(),
                'weights': Path('/absolute_path2').absolute(),
                'adapter': 'classification',
                'device': 'GPU'
            }
        ]
        local_config = {'models': [{'name': 'name', 'launchers': config_launchers, 'datasets': [{'name': 'dataset'}]}]}
        mocker.patch(self.module + '._read_configs', return_value=(None, local_config))
        args = copy.deepcopy(self.arguments)
        args.converted_models = None
        args.extensions = None
        args.target_devices = ['CPU', 'GPU_unexpected_tail']

        config = ConfigReader.merge(args)[0]

        launchers = config['models'][0]['launchers']
        assert len(launchers) == 1
        assert launchers[0] == config_launchers[0]
Code Example #10
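This test checks that _merge_paths_with_prefixes leaves absolute dataset paths untouched.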
    def test_not_modify_absolute_paths_in_datasets_config_using_command_line(self):
        local_config = {'models': [{
            'name': 'model',
            'datasets': [{
                'name': 'global_dataset',
                'dataset_meta': Path('/absolute_annotation_meta_path').absolute(),
                'data_source': Path('/absolute_source_path').absolute(),
                'annotation': Path('/absolute_annotation_path').absolute(),
            }]
        }]}

        expected = copy.deepcopy(local_config['models'][0]['datasets'][0])
        expected['annotation'] = Path('/absolute_annotation_path').absolute()
        expected['dataset_meta'] = Path('/absolute_annotation_meta_path').absolute()
        expected['data_source'] = Path('/absolute_source_path').absolute()

        ConfigReader._merge_paths_with_prefixes(self.arguments, local_config)

        assert local_config['models'][0]['datasets'][0] == expected
Code Example #11
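This test checks that a dataset referenced only by name in the local config is filled in from the global dataset definitions.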
    def test_merge_datasets_with_definitions(self, mocker):
        local_config = {'models': [{
            'name': 'model',
            'launchers': [{'framework': 'dlsdk', 'model': Path('/absolute_path').absolute(), 'weights': Path('/absolute_path').absolute()}],
            'datasets': [{'name': 'global_dataset'}]
        }]}
        mocker.patch(self.module + '._read_configs', return_value=(
            self.global_config, local_config
        ))
        arguments = copy.deepcopy(self.arguments)
        arguments.extensions = None

        config = ConfigReader.merge(arguments)[0]

        assert config['models'][0]['datasets'][0] == self.global_datasets[0]
Code Example #12
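This test checks that a launcher is kept when target_framework matches its own framework.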
    def test_launcher_is_not_filtered_by_the_same_framework(self, mocker):
        config_launchers = [{
            'framework': 'dlsdk',
            'model': Path('/absolute_path1').absolute(),
            'weights': Path('/absolute_path1').absolute(),
            'adapter': 'classification',
            'device': 'CPU',
        }]
        local_config = {'models': [{'name': 'name', 'launchers': config_launchers, 'datasets': [{'name': 'dataset'}]}]}
        mocker.patch(self.module + '._read_configs', return_value=(None, local_config))
        args = copy.deepcopy(self.arguments)
        args.extensions = None
        args.target_framework = 'dlsdk'

        config = ConfigReader.merge(args)[0]

        launchers = config['models'][0]['launchers']
        assert launchers == config_launchers
Code Example #13
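This test checks that a launcher without an explicit 'device' gets one from the target_devices command-line argument in models mode.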
    def test_replace_empty_device_by_target_in_models_mode(self, mocker):
        local_config = {
            'models': [{
                'name': 'model',
                'launchers': [{
                    'framework': 'caffe',
                    'model': Path('/absolute_path2').absolute(),
                    'weights': Path('/absolute_path2').absolute(),
                    'adapter': 'classification',
                }],
                'datasets': [{'name': 'dataset'}]
            }]
        }
        mocker.patch(self.module + '._read_configs', return_value=(None, local_config))
        args = copy.deepcopy(self.arguments)
        args.target_devices = ['CPU']

        config, _ = ConfigReader.merge(args)

        launchers = config['models'][0]['launchers']
        assert len(launchers) == 1
        assert 'device' in launchers[0]
        assert launchers[0]['device'].upper() == 'CPU'
Code Example #14
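This test checks that merging a local launcher with the global 'dlsdk' launcher definition keeps the locally overridden 'model' path while still expanding the global 'cpu_extensions' entry against the extensions prefix.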
    def test_merge_launchers_with_model_is_not_modified(self, mocker):
        local_config = {'models': [{
            'name': 'model',
            'launchers': [{'framework': 'dlsdk', 'model': Path('/custom').absolute()}],
            'datasets': [{'name': 'global_dataset'}]
        }]}
        expected = copy.deepcopy(self.get_global_launcher('dlsdk'))
        expected['model'] = Path('/custom').absolute()
        mocker.patch(self.module + '._read_configs', return_value=(
            self.global_config, local_config
        ))
        with mock_filesystem(['extensions/']) as prefix:
            expected['cpu_extensions'] = prefix / self.arguments.extensions / expected['cpu_extensions']
            args = copy.deepcopy(self.arguments)
            args.models = None
            args.extensions = prefix / self.arguments.extensions

            config = ConfigReader.merge(args)[0]

        assert config['models'][0]['launchers'][0] == expected
Code Example #15
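This test checks that ConfigReader.merge works without a global config: the merged result equals the local config and the detected mode is 'models'.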
    def test_read_configs_without_global_config(self, mocker):
        config = {'models': [{
            'name': 'model',
            'launchers': [{'framework': 'dlsdk', 'model': Path('/absolute_path').absolute(), 'weights': Path('/absolute_path').absolute()}],
            'datasets': [{'name': 'global_dataset'}]
        }]}
        empty_args = Namespace(**{
            'models': Path.cwd(), 'extensions': Path.cwd(), 'source': Path.cwd(), 'annotations': Path.cwd(),
            'definitions': None, 'config': None, 'stored_predictions': None,
            'progress': 'bar', 'target_framework': None, 'target_devices': None, 'target_backend': None, 'log_file': None,
            'target_tags': None, 'cpu_extensions_mode': None,
            'model_attributes': None
        })
        mocker.patch('openvino.tools.accuracy_checker.utils.get_path', return_value=Path.cwd())
        mocker.patch('yaml.safe_load', return_value=config)
        mocker.patch('pathlib.Path.open')

        result = ConfigReader.merge(empty_args)

        assert 'models' == result[1]
        assert config == result[0]
Code Example #16
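This test checks that a launcher with several tags is kept as long as at least one of them appears in target_tags.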
    def test_launcher_with_several_tags_contained_at_least_one_from_target_tegs_is_not_filtered(self, mocker):
        config_launchers = [
            {
                'framework': 'dlsdk',
                'tags': ['tag1', 'tag2'],
                'model': Path('/absolute_path1').absolute(),
                'weights': Path('/absolute_path1').absolute(),
                'adapter': 'classification',
                'device': 'CPU',
            }
        ]
        local_config = {'models': [{'name': 'name', 'launchers': config_launchers, 'datasets': [{'name': 'dataset'}]}]}
        mocker.patch(self.module + '._read_configs', return_value=(None, local_config))
        args = copy.deepcopy(self.arguments)
        args.extensions = None
        args.target_tags = ['tag2']

        config = ConfigReader.merge(args)[0]

        launchers = config['models'][0]['launchers']
        assert len(launchers) == 1
        assert launchers[0] == config_launchers[0]
Code Example #17
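This test checks that a model whose launchers are all filtered out by target_framework is dropped entirely, with a warning, leaving only the model whose launcher matches.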
    def test_all_model_launchers_filtered_in_config_with_several_models(self, mocker):
        model1_launchers = [
            {
                'framework': 'dlsdk',
                'model': Path('/absolute_path1').absolute(),
                'weights': Path('/absolute_path1').absolute(),
                'adapter': 'classification',
                'device': 'CPU',
            },
            {
                'framework': 'caffe',
                'model': Path('/absolute_path2').absolute(),
                'weights': Path('/absolute_path2').absolute(),
                'adapter': 'classification',
                'device': 'CPU'
            }
        ]
        model2_launchers = [
            {
                'framework': 'tf',
                'model': Path('/absolute_path3').absolute(),
                'adapter': 'classification',
                'device': 'CPU'
            }
        ]
        local_config = {'models': [
            {'name': 'model_1', 'launchers': model1_launchers, 'datasets': [{'name': 'dataset'}]},
            {'name': 'model_2', 'launchers': model2_launchers, 'datasets': [{'name': 'dataset'}]}
        ]}
        mocker.patch(self.module + '._read_configs', return_value=(None, local_config))
        args = copy.deepcopy(self.arguments)
        args.target_framework = 'tf'

        with pytest.warns(Warning):
            config = ConfigReader.merge(args)[0]

        assert len(config['models']) == 1
        assert config['models'][0]['name'] == 'model_2'
        assert config['models'][0]['launchers'] == model2_launchers