示例#1
0
def load_image_meta(annotation, image_root):
    """Populate per-image metadata for every entry of *annotation*.

    Each entry's image is loaded from *image_root* and handed to
    Dataset.set_image_metadata; a missing file aborts with
    FileNotFoundError. Returns the same annotation sequence.
    """
    print_info("Loading image level metadata")
    for entry in tqdm(annotation):
        full_path = os.path.join(image_root, entry.identifier)
        loaded = cv2.imread(full_path)
        if loaded is None:
            raise FileNotFoundError('Image file not found: {}'.format(full_path))
        Dataset.set_image_metadata(entry, loaded)
    return annotation
示例#2
0
 def test_annotation_conversion_converter_without_required_options_raise_config_error(
         self):
     # A converter entry missing its mandatory options must be rejected
     # with ConfigError while constructing the dataset.
     config = copy_dataset_config(self.dataset_config)
     config.update({'annotation_conversion': {'converter': 'wider'}})
     with pytest.raises(ConfigError):
         Dataset(config, MockPreprocessor())
示例#3
0
    def from_configs(cls, config):
        """Build the evaluator from a full configuration dictionary.

        Wires together the dataset, data reader, launcher, processing
        stages, metrics executor and postprocessing executor described
        by *config*.

        Raises:
            ConfigError: when the reader entry is neither str nor dict.
        """
        dataset_config = config['datasets'][0]
        dataset = Dataset(dataset_config)
        reader_config = dataset_config.get('reader', 'opencv_imread')
        data_source = dataset_config['data_source']
        if isinstance(reader_config, str):
            reader_type, reader_config = reader_config, None
        elif isinstance(reader_config, dict):
            reader_type = reader_config['type']
        else:
            raise ConfigError('reader should be dict or string')
        # Annotation-backed readers take the annotation itself as source.
        if reader_type in REQUIRES_ANNOTATIONS:
            data_source = dataset.annotation
        data_reader = BaseReader.provide(reader_type, data_source, reader_config)
        launcher = create_launcher(config['launchers'][0], delayed_model_loading=True)
        stages = build_stages(
            config['network_info'],
            dataset_config.get('preprocessing', []),
            launcher
        )
        metrics_executor = MetricsExecutor(dataset_config['metrics'], dataset)
        postprocessing = PostprocessingExecutor(dataset_config['postprocessing'])

        return cls(dataset, data_reader, stages, postprocessing, metrics_executor)
示例#4
0
 def test_setting_custom_dataset_with_missed_data_source_raises_config_error_exception(self, mocker):
     # A dataset config without 'data_source' is incomplete and
     # construction must fail with ConfigError.
     local_dataset = {
         'name': 'custom',
         'annotation': 'custom',
         'metrics': [{'type': 'map'}],
     }
     with pytest.raises(ConfigError):
         Dataset(local_dataset, MockPreprocessor())
示例#5
0
 def test_annotation_conversion_closer_to_zero_subset_ratio(self, mocker):
     # Even a vanishingly small percentage requests a subset of size 1.
     config = copy_dataset_config(self.dataset_config)
     config.update({
         'annotation_conversion': {'converter': 'wider', 'annotation_file': Path('file')},
         'subsample_size': '0.001%'
     })
     converted_annotation = make_representation(['0 0 0 5 5', '0 1 1 10 10'], True)
     mocker.patch(
         'accuracy_checker.annotation_converters.WiderFormatConverter.convert',
         return_value=ConverterReturn(converted_annotation, None, None)
     )
     subset_maker_mock = mocker.patch('accuracy_checker.dataset.make_subset')
     Dataset.load_annotation(config)
     subset_maker_mock.assert_called_once_with(converted_annotation, 1, 666, True)
示例#6
0
 def test_successful_annotation_conversion(self, mocker):
     # The configured converter is invoked exactly once, with no arguments.
     config = copy_dataset_config(self.dataset_config)
     config.update(
         {'annotation_conversion': {'converter': 'wider', 'annotation_file': Path('file')}}
     )
     converter_mock = mocker.patch(
         'accuracy_checker.annotation_converters.WiderFormatConverter.convert',
         return_value=ConverterReturn(make_representation("0 0 0 5 5", True), None, None)
     )
     Dataset(config)
     converter_mock.assert_called_once_with()
示例#7
0
    def test_create_data_provider_with_subset_file(self, mocker):
        # A subset file restricts the data provider to the listed
        # identifiers while full_size still reports the whole dataset.
        mocker.patch('pathlib.Path.exists', return_value=True)
        mocker.patch('accuracy_checker.dataset.read_yaml', return_value=['1'])

        config = copy_dataset_config(self.dataset_config)
        config.update({
            'annotation_conversion': {'converter': 'wider', 'annotation_file': Path('file')},
            "subset_file": "subset.yml"
        })
        converted_annotation = make_representation(['0 0 0 5 5', '0 1 1 10 10'], True)
        mocker.patch(
            'accuracy_checker.annotation_converters.WiderFormatConverter.convert',
            return_value=ConverterReturn(converted_annotation, None, None)
        )
        dataset = Dataset(config)
        assert len(dataset.data_provider) == 1
        assert dataset.identifiers() == ['1']
        assert dataset.data_provider.full_size == 2
示例#8
0
 def test_annotation_conversion_raise_config_error_on_extra_args(self):
     # Unknown keys inside annotation_conversion must be rejected.
     config = copy_dataset_config(self.dataset_config)
     config.update({
         'annotation_conversion': {
             'converter': 'wider',
             'annotation_file': 'file',
             'something_extra': 'extra'
         }
     })
     with pytest.raises(ConfigError):
         Dataset(config, MockPreprocessor())
示例#9
0
    def test_missed_name_raises_config_error_exception(self):
        # A dataset config without the mandatory 'name' key fails validation.
        local_dataset = {
            'annotation': 'custom',
            'data_source': 'custom',
            'metrics': [{'type': 'fppi', 'mr_rates': [0.0, 0.1]}],
        }

        with pytest.raises(ConfigError):
            Dataset(local_dataset, MockPreprocessor())
示例#10
0
    def test_ignore_subset_parameters_if_file_provided(self, mocker):
        # When subset_file is provided, subsample_size/shuffle are ignored
        # and make_subset is never invoked.
        mocker.patch('pathlib.Path.exists', return_value=True)
        mocker.patch('accuracy_checker.utils.read_yaml', return_value=['1'])

        subset_maker_mock = mocker.patch('accuracy_checker.dataset.make_subset')
        config = copy_dataset_config(self.dataset_config)
        config.update({
            'annotation_conversion': {'converter': 'wider', 'annotation_file': Path('file')},
            'subsample_size': 1,
            'shuffle': False,
            "subset_file": "subset.yml"
        })
        converted_annotation = make_representation(['0 0 0 5 5', '0 1 1 10 10'], True)
        mocker.patch(
            'accuracy_checker.annotation_converters.WiderFormatConverter.convert',
            return_value=ConverterReturn(converted_annotation, None, None)
        )
        Dataset.load_annotation(config)
        assert not subset_maker_mock.called
示例#11
0
 def test_annotation_conversion_subset_size(self, mocker):
     # subsample_size=1 leaves only the last converted entry in the buffer.
     config = copy_dataset_config(self.dataset_config)
     config.update({
         'annotation_conversion': {'converter': 'wider', 'annotation_file': Path('file')},
         'subsample_size': 1
     })
     converted_annotation = make_representation(['0 0 0 5 5', '0 1 1 10 10'], True)
     mocker.patch(
         'accuracy_checker.annotation_converters.WiderFormatConverter.convert',
         return_value=ConverterReturn(converted_annotation, None, None)
     )
     dataset = Dataset(config)
     expected = {converted_annotation[1].identifier: converted_annotation[1]}
     assert dataset.data_provider.annotation_provider._data_buffer == expected
示例#12
0
 def test_annotation_conversion_invalid_subset_size_raise_config_error(self, mocker):
     # A non-numeric subsample_size is a configuration error.
     config = copy_dataset_config(self.dataset_config)
     config.update({
         'annotation_conversion': {'converter': 'wider', 'annotation_file': Path('file')},
         'subsample_size': 'aaa'
     })
     converted_annotation = make_representation(['0 0 0 5 5', '0 1 1 10 10'], True)
     mocker.patch(
         'accuracy_checker.annotation_converters.WiderFormatConverter.convert',
         return_value=ConverterReturn(converted_annotation, None, None)
     )
     with pytest.raises(ConfigError):
         Dataset(config)
示例#13
0
    def test_dataset_validation_scheme(self):
        # The validation scheme is a single-element list whose dict maps
        # every public Dataset parameter to its expected field descriptor.
        scheme = Dataset.validation_scheme()
        assert isinstance(scheme, list)
        assert len(scheme) == 1
        scheme_entry = scheme[0]
        assert isinstance(scheme_entry, dict)

        dataset_params = [
            key for key in Dataset.parameters() if not key.startswith('_')
        ]
        assert contains_all(scheme_entry, dataset_params)
        assert len(scheme_entry) == len(dataset_params)

        # Parameters validated through typed field instances.
        expected_field_types = {
            'name': StringField,
            'annotation': PathField,
            'data_source': PathField,
            'dataset_meta': PathField,
            'subsample_size': BaseField,
            'shuffle': BoolField,
            'subsample_seed': NumberField,
            'analyze_dataset': BoolField,
            'segmentation_masks_source': PathField,
            'additional_data_source': PathField,
            'batch': NumberField,
        }
        for key, field_type in expected_field_types.items():
            assert isinstance(scheme_entry[key], field_type)

        # Parameters delegated to other components' own schemes.
        expected_classes = {
            'reader': BaseReader,
            'preprocessing': Preprocessor,
            'postprocessing': Postprocessor,
            'metrics': Metric,
            'annotation_conversion': BaseFormatConverter,
        }
        for key, expected in expected_classes.items():
            assert scheme_entry[key] == expected
示例#14
0
    def test_annotation_conversion_not_convert_twice(self, mocker):
        # When a stored annotation file is configured, it is read back
        # instead of running the converter again.
        config = copy_dataset_config(self.dataset_config)
        config.update({
            'annotation_conversion': {'converter': 'wider', 'annotation_file': Path('file')},
            'annotation': Path('custom')
        })
        converted_annotation = make_representation('0 0 0 5 5', True)
        annotation_reader_mock = mocker.patch(
            'accuracy_checker.dataset.read_annotation',
            return_value=converted_annotation
        )
        Dataset(config)

        annotation_reader_mock.assert_called_once_with(Path('custom'))
示例#15
0
 def test_annotation_conversion_subset_with_disabled_shuffle(self, mocker):
     # With shuffle disabled the subset is simply the leading entries.
     config = copy_dataset_config(self.dataset_config)
     config.update({
         'annotation_conversion': {'converter': 'wider', 'annotation_file': Path('file')},
         'subsample_size': 1,
         'shuffle': False
     })
     converted_annotation = make_representation(['0 0 0 5 5', '0 1 1 10 10'], True)
     mocker.patch(
         'accuracy_checker.annotation_converters.WiderFormatConverter.convert',
         return_value=ConverterReturn(converted_annotation, None, None)
     )
     annotation, _ = Dataset.load_annotation(config)
     assert annotation == [converted_annotation[0]]
示例#16
0
 def test_annotation_conversion_subset_more_than_dataset_size(self, mocker):
     # Requesting more samples than exist warns and returns everything.
     config = copy_dataset_config(self.dataset_config)
     config.update({
         'annotation_conversion': {'converter': 'wider', 'annotation_file': Path('file')},
         'subsample_size': 3,
         'subsample_seed': 1
     })
     converted_annotation = make_representation(['0 0 0 5 5', '0 1 1 10 10'], True)
     mocker.patch(
         'accuracy_checker.annotation_converters.WiderFormatConverter.convert',
         return_value=ConverterReturn(converted_annotation, None, None)
     )
     with pytest.warns(UserWarning):
         annotation, _ = Dataset.load_annotation(config)
         assert annotation == converted_annotation
示例#17
0
 def test_annotation_conversion_subset_size(self, mocker):
     # subsample_size=1 keeps only the last converted annotation entry.
     config = copy_dataset_config(self.dataset_config)
     config.update({
         'annotation_conversion': {
             'converter': 'wider',
             'annotation_file': 'file'
         },
         'subsample_size': 1
     })
     converted_annotation = make_representation(
         ['0 0 0 5 5', '0 1 1 10 10'], True)
     mocker.patch(
         'accuracy_checker.annotation_converters.WiderFormatConverter.convert',
         return_value=(converted_annotation, None))
     dataset = Dataset(config, MockPreprocessor())
     assert dataset.annotation == [converted_annotation[1]]
 def from_configs(cls, config):
     """Assemble the evaluator (dataset, reader, preprocessing, metrics,
     launcher and sequential model) from a configuration dictionary.

     Raises:
         ConfigError: when the reader entry is neither str nor dict.
     """
     dataset_config = config['datasets'][0]
     dataset = Dataset(dataset_config)
     reader_config = dataset_config.get('reader', 'opencv_imread')
     data_source = dataset_config['data_source']
     # Reader may be a plain type name or a dict carrying extra options.
     if isinstance(reader_config, str):
         reader = BaseReader.provide(reader_config, data_source)
     elif isinstance(reader_config, dict):
         reader = BaseReader.provide(reader_config['type'], data_source, reader_config)
     else:
         raise ConfigError('reader should be dict or string')
     preprocessing = PreprocessingExecutor(
         dataset_config.get('preprocessing', []), dataset.name)
     metrics_executor = MetricsExecutor(dataset_config['metrics'], dataset)
     launcher = create_launcher(config['launchers'][0], delayed_model_loading=True)
     model = SequentialModel(
         config.get('network_info', {}),
         launcher,
         config.get('_models', []),
         config.get('_model_is_blob'),
     )
     return cls(dataset, reader, preprocessing, metrics_executor, launcher, model)
示例#19
0
 def test_annotation_conversion_subset_with_seed(self, mocker):
     # A fixed subsample_seed makes the sampled subset deterministic.
     config = copy_dataset_config(self.dataset_config)
     config.update({
         'annotation_conversion': {
             'converter': 'wider',
             'annotation_file': Path('file')
         },
         'subsample_size': 1,
         'subsample_seed': 1
     })
     converted_annotation = make_representation(
         ['0 0 0 5 5', '0 1 1 10 10'], True)
     mocker.patch(
         'accuracy_checker.annotation_converters.WiderFormatConverter.convert',
         return_value=converted_annotation)
     dataset = Dataset(config)
     assert dataset.annotation == [converted_annotation[0]]
示例#20
0
 def test_annotation_conversion_subset_ratio(self, mocker):
     # A percentage subsample_size is translated to an absolute count.
     config = copy_dataset_config(self.dataset_config)
     config.update({
         'annotation_conversion': {
             'converter': 'wider',
             'annotation_file': 'file'
         },
         'subsample_size': '50%'
     })
     converted_annotation = make_representation(
         ['0 0 0 5 5', '0 1 1 10 10'], True)
     mocker.patch(
         'accuracy_checker.annotation_converters.WiderFormatConverter.convert',
         return_value=(converted_annotation, None))
     subset_maker_mock = mocker.patch('accuracy_checker.dataset.make_subset')
     Dataset(config, MockPreprocessor())
     subset_maker_mock.assert_called_once_with(converted_annotation, 1, 666)
示例#21
0
 def test_annotation_conversion_save_subset(self, mocker):
     # Only the sampled subset is handed to save_annotation.
     config = copy_dataset_config(self.dataset_config)
     config.update({
         'annotation_conversion': {'converter': 'wider', 'annotation_file': Path('file')},
         'annotation': Path('custom'),
         'subsample_size': 1,
     })
     converted_annotation = make_representation(['0 0 0 5 5', '0 1 1 10 10'], True)
     mocker.patch(
         'accuracy_checker.annotation_converters.WiderFormatConverter.convert',
         return_value=ConverterReturn(converted_annotation, None, None)
     )
     annotation_saver_mock = mocker.patch('accuracy_checker.dataset.save_annotation')
     mocker.patch('pathlib.Path.exists', return_value=False)
     Dataset(config)
     annotation_saver_mock.assert_called_once_with([converted_annotation[1]], None, Path('custom'), None, config)
示例#22
0
    def test_annotation_conversion_with_store_annotation(self, mocker):
        # With an 'annotation' path configured, the converted annotation
        # is persisted through save_annotation.
        config = copy_dataset_config(self.dataset_config)
        config.update({
            'annotation_conversion': {
                'converter': 'wider',
                'annotation_file': 'file'
            },
            'annotation': 'custom'
        })
        converted_annotation = make_representation('0 0 0 5 5', True)
        mocker.patch(
            'accuracy_checker.annotation_converters.WiderFormatConverter.convert',
            return_value=(converted_annotation, None))
        annotation_saver_mock = mocker.patch(
            'accuracy_checker.dataset.save_annotation')
        Dataset(config, MockPreprocessor())

        annotation_saver_mock.assert_called_once_with(
            converted_annotation, None, Path('custom'), None)
示例#23
0
    def from_configs(cls, config):
        """Create the evaluator from a configuration dictionary.

        Builds the dataset, data reader, preprocessing pipeline, metrics
        executor, launcher and the colorization test/check model pair.

        Raises:
            ConfigError: for an unsupported reader entry, an unsupported
                launcher framework, or missing network configuration.
        """
        dataset_config = config['datasets'][0]
        dataset = Dataset(dataset_config)
        # Reader may be a plain type name or a dict carrying extra options.
        data_reader_config = dataset_config.get('reader', 'opencv_imread')
        data_source = dataset_config['data_source']
        if isinstance(data_reader_config, str):
            reader = BaseReader.provide(data_reader_config, data_source)
        elif isinstance(data_reader_config, dict):
            reader = BaseReader.provide(data_reader_config['type'], data_source, data_reader_config)
        else:
            raise ConfigError('reader should be dict or string')
        preprocessing = PreprocessingExecutor(dataset_config.get('preprocessing', []), dataset.name)
        metrics_executor = MetricsExecutor(dataset_config['metrics'], dataset)
        launcher_settings = config['launchers'][0]
        # Only the dlsdk framework is accepted for this evaluator.
        supported_frameworks = ['dlsdk']
        if not launcher_settings['framework'] in supported_frameworks:
            raise ConfigError('{} framework not supported'.format(launcher_settings['framework']))
        # Model loading is delayed: the two networks are created explicitly below.
        launcher = create_launcher(launcher_settings, delayed_model_loading=True)
        network_info = config.get('network_info', {})
        colorization_network = network_info.get('colorization_network', {})
        verification_network = network_info.get('verification_network', {})
        model_args = config.get('_models', [])
        models_is_blob = config.get('_model_is_blob')
        # Fall back to command-line model arguments when the config does not
        # name the model files explicitly; the second argument (if present)
        # goes to the verification network.
        if 'model' not in colorization_network and model_args:
            colorization_network['model'] = model_args[0]
            colorization_network['_model_is_blob'] = models_is_blob
        if 'model' not in verification_network and model_args:
            verification_network['model'] = model_args[1 if len(model_args) > 1 else 0]
            verification_network['_model_is_blob'] = models_is_blob
        network_info.update({
            'colorization_network': colorization_network,
            'verification_network': verification_network
        })
        if not contains_all(network_info, ['colorization_network', 'verification_network']):
            raise ConfigError('configuration for colorization_network/verification_network does not exist')

        test_model = ColorizationTestModel(network_info['colorization_network'], launcher)
        check_model = ColorizationCheckModel(network_info['verification_network'], launcher)
        return cls(dataset, reader, preprocessing, metrics_executor, launcher, test_model, check_model)
示例#24
0
 def test_annotation_conversion_unknown_converter_raise_config_error(self):
     # An unregistered converter name is rejected with ValueError.
     config = copy_dataset_config(self.dataset_config)
     config.update({'annotation_conversion': {'converter': 'unknown'}})
     with pytest.raises(ValueError):
         Dataset(config, MockPreprocessor())
示例#25
0
 def test_setting_custom_dataset_with_missed_data_source_raises_config_error_exception(
         self):
     # Removing 'data_source' from an otherwise valid config must fail.
     local_dataset = copy_dataset_config(self.dataset_config)
     del local_dataset['data_source']
     with pytest.raises(ConfigError):
         Dataset(local_dataset, MockPreprocessor())
示例#26
0
    def test_missed_name_raises_config_error_exception(self):
        # Removing the mandatory 'name' key must fail validation.
        local_dataset = copy_dataset_config(self.dataset_config)
        del local_dataset['name']

        with pytest.raises(ConfigError):
            Dataset(local_dataset, MockPreprocessor())
示例#27
0
 def test_setting_custom_dataset_with_missed_annotation_raises_config_error_exception(
         self):
     # Removing 'annotation' from an otherwise valid config must fail.
     local_dataset = copy_dataset_config(self.dataset_config)
     del local_dataset['annotation']
     with pytest.raises(ConfigError):
         Dataset(local_dataset)