from unittest.mock import Mock, MagicMock

# NOTE: import paths are assumed from the package referenced by the
# mocker.patch target used in the tests below
# ('openvino.tools.accuracy_checker.evaluators.model_evaluator.get_path').
from openvino.tools.accuracy_checker.config import StringField
from openvino.tools.accuracy_checker.dataset import Dataset
from openvino.tools.accuracy_checker.evaluators import ModelEvaluator
from openvino.tools.accuracy_checker.launcher import Launcher
from openvino.tools.accuracy_checker.utils import contains_all


# NOTE: the class name is an assumption; these validate_config tests
# require a containing class and the original header was not preserved.
class TestConfigValidationAPI:
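    # Each error object returned by validate_config is checked through three
    # attributes: message (human-readable text), entry (the offending config
    # fragment; falsy when a whole section is absent) and field_uri (the
    # dot-separated path to the invalid field, e.g. 'models.launchers.0').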
    def test_adapter_dict_config(self):
        launcher_config = {
            'model': 'foo', 'framework': 'dlsdk', 'device': 'cpu',
            'adapter': {'type': 'classification'}
        }
        config_errors = ModelEvaluator.validate_config(
            {'models': [{'launchers': [launcher_config], 'datasets': []}]})
        assert len(config_errors) == 1
        assert config_errors[0].message == 'datasets section is not provided'
        assert not config_errors[0].entry
        assert config_errors[0].field_uri == 'models.datasets'

    def test_dataset_config_ignore_without_metrics(self):
        dataset_config = {'name': 'dataset', 'data_source': 'data', 'annotation': 'annotation'}
        config_errors = ModelEvaluator.validate_config(
            {'models': [{'datasets': [dataset_config]}]}, delayed_annotation_loading=True
        )
        assert len(config_errors) == 1
        assert config_errors[0].message == 'launchers section is not provided'
        assert not config_errors[0].entry
        assert config_errors[0].field_uri == 'models.launchers'

    def test_input_without_name(self):
        launcher_config = {
            'model': 'foo', 'framework': 'dlsdk', 'device': 'cpu',
            'inputs': [{'type': 'INPUT'}]
        }
        config_errors = ModelEvaluator.validate_config(
            {'models': [{'launchers': [launcher_config], 'datasets': []}]})
        assert len(config_errors) == 2
        assert config_errors[0].message.endswith('input name is not provided')
        assert config_errors[0].field_uri == 'models.launchers.0.inputs.0'
        assert config_errors[1].message == 'datasets section is not provided'
        assert not config_errors[1].entry
        assert config_errors[1].field_uri == 'models.datasets'

    def test_empty_launchers_and_datasets_config(self):
        config_errors = ModelEvaluator.validate_config(
            {'models': [{'launchers': [], 'datasets': []}]})
        assert len(config_errors) == 2
        assert config_errors[0].message == 'launchers section is not provided'
        assert not config_errors[0].entry
        assert config_errors[0].field_uri == 'models.launchers'
        assert config_errors[1].message == 'datasets section is not provided'
        assert not config_errors[1].entry
        assert config_errors[1].field_uri == 'models.datasets'

    def test_unregistered_launcher_config(self):
        launcher_config = {'framework': 'foo'}
        config_errors = ModelEvaluator.validate_config(
            {'models': [{'launchers': [launcher_config], 'datasets': []}]})
        assert len(config_errors) == 2
        # the expected text mirrors the validator's actual wording,
        # including its double negative ('is not unregistered')
        assert config_errors[0].message == 'launcher foo is not unregistered'
        assert config_errors[0].entry == launcher_config
        assert config_errors[0].field_uri == 'models.launchers.0'
        assert config_errors[1].message == 'datasets section is not provided'
        assert not config_errors[1].entry
        assert config_errors[1].field_uri == 'models.datasets'

    def test_postprocessing_config(self):
        dataset_config = {
            'name': 'dataset', 'metrics': [{'type': 'accuracy'}], 'data_source': 'data',
            'annotation': 'annotation', 'postprocessing': [{'type': 'resize_prediction_boxes'}]
        }
        config_errors = ModelEvaluator.validate_config({'datasets': [dataset_config]})
        assert len(config_errors) == 1
        assert config_errors[0].message == 'launchers section is not provided'
        assert not config_errors[0].entry
        assert config_errors[0].field_uri == 'launchers'

    def test_dataset_config_without_metrics(self):
        dataset_config = {'name': 'dataset', 'data_source': 'data', 'annotation': 'annotation'}
        config_errors = ModelEvaluator.validate_config(
            {'models': [{'datasets': [dataset_config]}]})
        assert len(config_errors) == 2
        assert config_errors[0].message == 'launchers section is not provided'
        assert not config_errors[0].entry
        assert config_errors[0].field_uri == 'models.launchers'
        assert config_errors[1].message == 'Metrics are not provided'
        assert not config_errors[1].entry
        assert config_errors[1].field_uri == 'models.datasets.0'

    def test_unregistered_adapter_config(self):
        launcher_config = {
            'model': 'foo', 'framework': 'dlsdk', 'device': 'cpu',
            'adapter': 'not_classification'
        }
        config_errors = ModelEvaluator.validate_config(
            {'models': [{'launchers': [launcher_config], 'datasets': []}]})
        assert len(config_errors) == 2
        assert config_errors[0].message.startswith('Invalid value "not_classification"')
        assert config_errors[0].entry == 'not_classification'
        assert config_errors[0].field_uri.startswith('models.launchers.0')
        assert config_errors[0].field_uri.endswith('adapter')
        assert config_errors[1].message == 'datasets section is not provided'
        assert not config_errors[1].entry
        assert config_errors[1].field_uri == 'models.datasets'

    def test_dict_data_reader(self):
        dataset_config = {
            'name': 'dataset', 'annotation': 'annotation', 'metrics': [{'type': 'accuracy'}],
            'reader': {'type': 'opencv_imread'}, 'data_source': 'data'
        }
        config_errors = ModelEvaluator.validate_config({'datasets': [dataset_config]})
        assert len(config_errors) == 1
        assert config_errors[0].message == 'launchers section is not provided'
        assert not config_errors[0].entry
        assert config_errors[0].field_uri == 'launchers'

    def test_data_source_does_not_exist(self):
        dataset_config = {'name': 'dataset', 'metrics': [{'type': 'accuracy'}], 'data_source': 'data_dir'}
        config_errors = ModelEvaluator.validate_config({'datasets': [dataset_config]})
        assert len(config_errors) == 3
        assert config_errors[0].message == 'launchers section is not provided'
        assert not config_errors[0].entry
        assert config_errors[0].field_uri == 'launchers'
        assert config_errors[-1].message == 'Invalid value "data_dir" for datasets.0.data_source: path does not exist'
        assert config_errors[-1].entry == 'data_dir'
        assert config_errors[-1].field_uri == 'datasets.0.data_source'

    def test_data_reader_ignore_without_data_source(self):
        dataset_config = {'name': 'dataset', 'annotation': 'annotation', 'metrics': [{'type': 'accuracy'}]}
        config_errors = ModelEvaluator.validate_config(
            {'models': [{'datasets': [dataset_config]}]})
        assert len(config_errors) == 2
        assert config_errors[0].message == 'launchers section is not provided'
        assert not config_errors[0].entry
        assert config_errors[0].field_uri == 'models.launchers'
        assert config_errors[1].message == (
            'Invalid value "None" for models.datasets.0.data_source: '
            'models.datasets.0.data_source is not allowed to be None'
        )
        assert not config_errors[1].entry
        assert config_errors[1].field_uri == 'models.datasets.0.data_source'

    def test_annotation_conversion_config(self):
        conversion_parameters = {'converter': 'imagenet', 'annotation_file': 'file'}
        dataset_config = {
            'name': 'dataset', 'metrics': [{'type': 'accuracy'}], 'data_source': 'data',
            'annotation_conversion': conversion_parameters
        }
        config_errors = ModelEvaluator.validate_config({'datasets': [dataset_config]})
        assert len(config_errors) == 1
        assert config_errors[0].message == 'launchers section is not provided'
        assert not config_errors[0].entry
        assert config_errors[0].field_uri == 'launchers'

    def test_annotation_is_not_provided(self):
        dataset_config = {
            'name': 'dataset', 'metrics': [{'type': 'accuracy'}], 'data_source': 'data'
        }
        config_errors = ModelEvaluator.validate_config({'datasets': [dataset_config]})
        assert len(config_errors) == 2
        assert config_errors[0].message == 'launchers section is not provided'
        assert not config_errors[0].entry
        assert config_errors[0].field_uri == 'launchers'
        assert config_errors[1].message == 'annotation_conversion or annotation field should be provided'
        assert config_errors[1].entry == dataset_config
        assert config_errors[1].field_uri == 'datasets.0'

    def test_data_source_is_file(self):
        dataset_config = {
            'name': 'dataset', 'metrics': [{'type': 'accuracy'}],
            'annotation': 'annotation', 'data_source': 'data'
        }
        config_errors = ModelEvaluator.validate_config({'datasets': [dataset_config]})
        assert len(config_errors) == 2
        assert config_errors[0].message == 'launchers section is not provided'
        assert not config_errors[0].entry
        assert config_errors[0].field_uri == 'launchers'
        assert config_errors[1].message == 'Invalid value "data" for datasets.0.data_source: path is not a directory'
        assert config_errors[1].entry == 'data'
        assert config_errors[1].field_uri == 'datasets.0.data_source'

    def test_unregistered_data_reader(self):
        dataset_config = {
            'name': 'dataset', 'annotation': 'annotation',
            'metrics': [{'type': 'accuracy'}], 'reader': 'unknown'
        }
        config_errors = ModelEvaluator.validate_config(
            {'models': [{'datasets': [dataset_config]}]})
        assert len(config_errors) == 2
        assert config_errors[0].message == 'launchers section is not provided'
        assert not config_errors[0].entry
        assert config_errors[0].field_uri == 'models.launchers'
        assert config_errors[1].message.startswith('Invalid value "unknown" for models.datasets.0.reader')
        assert config_errors[1].entry == 'unknown'
        assert config_errors[1].field_uri == 'models.datasets.0.reader'

    def test_annotation_conversion_without_converter(self):
        dataset_config = {
            'name': 'dataset', 'metrics': [{'type': 'accuracy'}],
            'data_source': 'data', 'annotation_conversion': {}
        }
        config_errors = ModelEvaluator.validate_config({'datasets': [dataset_config]})
        assert len(config_errors) == 2
        assert config_errors[0].message == 'launchers section is not provided'
        assert not config_errors[0].entry
        assert config_errors[0].field_uri == 'launchers'
        assert config_errors[1].message == 'converter is not found'
        assert config_errors[1].entry == {}
        assert config_errors[1].field_uri == 'datasets.0.annotation_conversion'

    def test_common_validation_scheme(self):
        validation_scheme = ModelEvaluator.validation_scheme()
        assert isinstance(validation_scheme, dict)
        assert len(validation_scheme) == 1
        assert 'models' in validation_scheme
        assert len(validation_scheme['models']) == 1
        model_validation_scheme = validation_scheme['models'][0]
        assert contains_all(model_validation_scheme, ['name', 'launchers', 'datasets'])
        assert isinstance(model_validation_scheme['name'], StringField)
        assert model_validation_scheme['launchers'].__name__ == Launcher.__name__
        assert model_validation_scheme['datasets'].__name__ == Dataset.__name__
        assert isinstance(model_validation_scheme['launchers'].validation_scheme(), list)
        assert isinstance(model_validation_scheme['datasets'].validation_scheme(), list)

    def test_postprocessing_config_extra_parameter(self):
        postprocessing_config = [{'type': 'resize_prediction_boxes', 'something_extra': True}]
        dataset_config = {
            'name': 'dataset', 'metrics': [{'type': 'accuracy'}], 'data_source': 'data',
            'annotation': 'annotation', 'postprocessing': postprocessing_config
        }
        config_errors = ModelEvaluator.validate_config({'datasets': [dataset_config]})
        assert len(config_errors) == 2
        assert config_errors[0].message == 'launchers section is not provided'
        assert not config_errors[0].entry
        assert config_errors[0].field_uri == 'launchers'
        assert config_errors[1].message == "datasets.0.postprocessing.0 specifies unknown options: ['something_extra']"
        assert config_errors[1].entry == postprocessing_config[0]
        assert config_errors[1].field_uri == 'datasets.0.postprocessing.0'

    def test_postprocessing_config_unknown_type(self):
        postprocessing_config = [{'type': 'bgr_to_rgb'}]
        dataset_config = {
            'name': 'dataset', 'metrics': [{'type': 'accuracy'}], 'data_source': 'data',
            'annotation': 'annotation', 'postprocessing': postprocessing_config
        }
        config_errors = ModelEvaluator.validate_config({'datasets': [dataset_config]})
        assert len(config_errors) == 2
        assert config_errors[0].message == 'launchers section is not provided'
        assert not config_errors[0].entry
        assert config_errors[0].field_uri == 'launchers'
        # bgr_to_rgb is a preprocessor, so it is unregistered as a postprocessor
        assert config_errors[1].message == 'postprocessor bgr_to_rgb unregistered'
        assert config_errors[1].entry == postprocessing_config[0]
        assert config_errors[1].field_uri == 'datasets.0.postprocessing.0'

    def test_annotation_conversion_extra_parameter(self):
        conversion_parameters = {'converter': 'imagenet', 'annotation_file': 'file', 'something_extra': 'extra'}
        dataset_config = {
            'name': 'dataset', 'metrics': [{'type': 'accuracy'}], 'data_source': 'data',
            'annotation_conversion': conversion_parameters
        }
        config_errors = ModelEvaluator.validate_config({'datasets': [dataset_config]})
        assert len(config_errors) == 2
        assert config_errors[0].message == 'launchers section is not provided'
        assert not config_errors[0].entry
        assert config_errors[0].field_uri == 'launchers'
        assert config_errors[1].message == "datasets.0.annotation_conversion specifies unknown options: ['something_extra']"
        assert config_errors[1].entry == conversion_parameters
        assert config_errors[1].field_uri == 'datasets.0.annotation_conversion'

    def test_annotation_conversion_missing_parameter(self):
        conversion_parameters = {'converter': 'imagenet'}
        dataset_config = {
            'name': 'dataset', 'metrics': [{'type': 'accuracy'}], 'data_source': 'data',
            'annotation_conversion': conversion_parameters
        }
        config_errors = ModelEvaluator.validate_config({'datasets': [dataset_config]})
        assert len(config_errors) == 2
        assert config_errors[0].message == 'launchers section is not provided'
        assert not config_errors[0].entry
        assert config_errors[0].field_uri == 'launchers'
        assert config_errors[1].message == 'Invalid config for datasets.0.annotation_conversion: missing required fields: annotation_file'
        assert config_errors[1].entry == conversion_parameters
        assert config_errors[1].field_uri == 'datasets.0.annotation_conversion'

    def test_preprocessing_config_unknown_type_with_postponed_annotation(self):
        preprocessing_config = [{'type': 'bgr_to_rgb'}, {'type': 'unknown', 'size': 224}]
        dataset_config = {
            'name': 'dataset', 'metrics': [{'type': 'accuracy'}], 'data_source': 'data',
            'annotation': 'annotation', 'preprocessing': preprocessing_config
        }
        config_errors = ModelEvaluator.validate_config(
            {'datasets': [dataset_config]}, delayed_annotation_loading=True
        )
        assert len(config_errors) == 2
        assert config_errors[0].message == 'launchers section is not provided'
        assert not config_errors[0].entry
        assert config_errors[0].field_uri == 'launchers'
        assert config_errors[1].message == 'preprocessor unknown unregistered'
        assert config_errors[1].entry == preprocessing_config[1]
        assert config_errors[1].field_uri == 'datasets.0.preprocessing.1'

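
# Async-mode evaluator tests: every pipeline stage (launcher, input feeder,
# adapter, pre-/postprocessor, metric executor) is replaced by a mock wired
# around a two-batch dataset, so each test can assert which execution path
# (infer queue, sync fallback, stored or loaded predictions) the evaluator
# takes without running real inference.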
class TestModelEvaluatorAsync:
    def setup_method(self):
        self.launcher = MagicMock()
        self.launcher.get_async_requests = Mock(return_value=[])
        data = MagicMock(data=MagicMock(), metadata=MagicMock(), identifier=0)
        self.preprocessor = Mock()
        self.preprocessor.process = Mock(return_value=data)
        self.postprocessor = Mock()
        self.adapter = MagicMock(return_value=[])
        self.input_feeder = MagicMock()
        self.input_feeder.lstm_inputs = []
        annotation_0 = MagicMock()
        annotation_0.identifier = 0
        annotation_0.metadata = {'data_source': MagicMock()}
        annotation_1 = MagicMock()
        annotation_1.identifier = 1
        annotation_1.metadata = {'data_source': MagicMock()}
        annotation_container_0 = MagicMock()
        annotation_container_0.values = MagicMock(return_value=[annotation_0])
        annotation_container_1 = MagicMock()
        annotation_container_1.values = MagicMock(return_value=[annotation_1])
        self.annotations = [[annotation_container_0], [annotation_container_1]]
        self.dataset = MagicMock()
        self.dataset.__iter__.return_value = [
            (range(1), self.annotations[0], data, [0]),
            (range(1), self.annotations[1], data, [1])
        ]
        self.dataset.multi_infer = False
        self.postprocessor.process_batch = Mock(side_effect=[
            ([annotation_container_0], [annotation_container_0]),
            ([annotation_container_1], [annotation_container_1])
        ])
        self.postprocessor.process_dataset = Mock(return_value=(
            ([annotation_container_0], [annotation_container_0]),
            ([annotation_container_1], [annotation_container_1])
        ))
        self.postprocessor.full_process = Mock(return_value=(
            ([annotation_container_0], [annotation_container_0]),
            ([annotation_container_1], [annotation_container_1])
        ))
        self.metric = Mock()
        self.metric.update_metrics_on_batch = Mock(return_value=[{}, {}])
        self.metric.profiler = None
        self.evaluator = ModelEvaluator(
            self.launcher, self.input_feeder, self.adapter, self.preprocessor,
            self.postprocessor, self.dataset, self.metric, True, {}
        )  # True -> async mode (cf. False in TestModelEvaluator below)
        self.evaluator.store_predictions = Mock()
        self.evaluator.load = Mock(return_value=(
            ([annotation_container_0], [annotation_container_0]),
            ([annotation_container_1], [annotation_container_1])
        ))

    def test_process_dataset_without_storing_predictions_and_dataset_processors(self):
        self.postprocessor.has_dataset_processors = False
        self.launcher.allow_reshape_input = False
        self.preprocessor.has_multi_infer_transformations = False
        self.launcher.dyn_input_layers = False
        self.evaluator.process_dataset(None, None)
        assert not self.evaluator.store_predictions.called
        assert not self.evaluator.load.called
        assert not self.launcher.predict.called
        assert self.launcher.get_infer_queue.called

    def test_process_dataset_with_storing_predictions_and_without_dataset_processors(self):
        self.postprocessor.has_dataset_processors = False
        self.launcher.allow_reshape_input = False
        self.preprocessor.has_multi_infer_transformations = False
        self.dataset.multi_infer = False
        self.launcher.dyn_input_layers = False
        self.evaluator.process_dataset('path', None)
        assert not self.evaluator.load.called
        assert not self.launcher.predict.called
        assert self.launcher.get_infer_queue.called

    def test_process_dataset_with_loading_predictions_and_without_dataset_processors(self, mocker):
        mocker.patch('openvino.tools.accuracy_checker.evaluators.model_evaluator.get_path')
        self.postprocessor.has_dataset_processors = False
        self.evaluator.process_dataset('path', None)
        assert self.evaluator.load.called
        assert not self.launcher.predict.called
        assert not self.launcher.predict_async.called
        assert not self.postprocessor.process_batch.called
        assert self.metric.update_metrics_on_batch.call_count == 1
        assert not self.postprocessor.process_dataset.called
        assert self.postprocessor.full_process.called

    def test_switch_to_sync_predict_if_need_reshaping(self):
        self.postprocessor.has_dataset_processors = False
        self.launcher.allow_reshape_input = True
        self.launcher.dynamic_shapes_policy = 'static'
        self.preprocessor.has_multi_infer_transformations = False
        self.evaluator.process_dataset(None, None)
        assert not self.evaluator.store_predictions.called
        assert not self.evaluator.load.called
        assert self.launcher.predict.called
        assert not self.launcher.predict_async.called
        assert self.metric.update_metrics_on_batch.call_count == len(self.annotations)

    def test_switch_to_sync_predict_if_need_multi_infer_after_preprocessing(self):
        self.postprocessor.has_dataset_processors = False
        self.launcher.allow_reshape_input = False
        self.preprocessor.has_multi_infer_transformations = True
        self.evaluator.process_dataset(None, None)
        assert not self.evaluator.store_predictions.called
        assert not self.evaluator.load.called
        assert self.launcher.predict.called
        assert not self.launcher.predict_async.called
        assert self.metric.update_metrics_on_batch.call_count == len(self.annotations)

    def test_switch_to_sync_predict_if_need_multi_infer(self):
        self.postprocessor.has_dataset_processors = False
        self.launcher.allow_reshape_input = False
        self.preprocessor.has_multi_infer_transformations = False
        self.dataset.multi_infer = True
        self.evaluator.process_dataset(None, None)
        assert not self.evaluator.store_predictions.called
        assert not self.evaluator.load.called
        assert self.launcher.predict.called
        assert not self.launcher.predict_async.called
        assert self.metric.update_metrics_on_batch.call_count == len(self.annotations)

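
# Synchronous counterpart of the class above: the launcher is a plain Mock
# whose predict() returns an empty list and the evaluator is built with the
# async flag set to False, exercising the batch-by-batch predict path.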
class TestModelEvaluator:
    def setup_method(self):
        self.launcher = Mock()
        self.launcher.predict.return_value = []
        data = MagicMock(data=MagicMock(), metadata=MagicMock(), identifier=0)
        self.preprocessor = Mock()
        self.preprocessor.process = Mock(return_value=data)
        self.postprocessor = Mock()
        self.adapter = MagicMock(return_value=[])
        self.input_feeder = Mock()
        annotation_0 = MagicMock()
        annotation_0.identifier = 0
        annotation_0.metadata = {'data_source': MagicMock()}
        annotation_1 = MagicMock()
        annotation_1.identifier = 1
        annotation_1.metadata = {'data_source': MagicMock()}
        annotation_container_0 = MagicMock()
        annotation_container_0.values = MagicMock(return_value=[annotation_0])
        annotation_container_1 = MagicMock()
        annotation_container_1.values = MagicMock(return_value=[annotation_1])
        self.annotations = [[annotation_container_0], [annotation_container_1]]
        self.dataset = MagicMock()
        self.dataset.__iter__.return_value = [
            (range(1), self.annotations[0], data, [0]),
            (range(1), self.annotations[1], data, [1])
        ]
        self.postprocessor.process_batch = Mock(side_effect=[
            ([annotation_container_0], [annotation_container_0]),
            ([annotation_container_1], [annotation_container_1])
        ])
        self.postprocessor.process_dataset = Mock(return_value=(
            ([annotation_container_0], [annotation_container_0]),
            ([annotation_container_1], [annotation_container_1])
        ))
        self.postprocessor.full_process = Mock(return_value=(
            ([annotation_container_0], [annotation_container_0]),
            ([annotation_container_1], [annotation_container_1])
        ))
        self.metric = Mock()
        self.metric.update_metrics_on_batch = Mock(return_value=[{}, {}])
        self.metric.profiler = None
        self.evaluator = ModelEvaluator(
            self.launcher, self.input_feeder, self.adapter, self.preprocessor,
            self.postprocessor, self.dataset, self.metric, False, {}
        )  # False -> synchronous mode
        self.evaluator.store_predictions = Mock()
        self.evaluator.load = Mock(return_value=(
            ([annotation_container_0], [annotation_container_0]),
            ([annotation_container_1], [annotation_container_1])
        ))

    def test_process_dataset_without_storing_predictions_and_dataset_processors(self):
        self.postprocessor.has_dataset_processors = False
        self.evaluator.process_dataset(None, None)
        assert not self.evaluator.store_predictions.called
        assert not self.evaluator.load.called
        assert self.launcher.predict.called
        assert self.postprocessor.process_batch.called
        assert self.metric.update_metrics_on_batch.call_count == len(self.annotations)
        assert not self.postprocessor.process_dataset.called
        assert not self.postprocessor.full_process.called

    def test_process_dataset_with_storing_predictions_and_without_dataset_processors(self):
        self.postprocessor.has_dataset_processors = False
        self.evaluator.process_dataset('path', None)
        assert self.evaluator.store_predictions.called
        assert not self.evaluator.load.called
        assert self.launcher.predict.called
        assert self.postprocessor.process_batch.called
        assert self.metric.update_metrics_on_batch.call_count == len(self.annotations)
        assert not self.postprocessor.process_dataset.called
        assert not self.postprocessor.full_process.called

    def test_process_dataset_store_only(self):
        self.postprocessor.has_dataset_processors = False
        self.evaluator.process_dataset('path', None, store_only=True)
        assert self.evaluator.store_predictions.called
        assert not self.evaluator.load.called
        assert self.launcher.predict.called
        assert not self.postprocessor.process_batch.called
        assert not self.metric.update_metrics_on_batch.called
        assert not self.postprocessor.process_dataset.called
        assert not self.postprocessor.full_process.called

    def test_process_dataset_with_loading_predictions_and_without_dataset_processors(self, mocker):
        mocker.patch('openvino.tools.accuracy_checker.evaluators.model_evaluator.get_path')
        self.postprocessor.has_dataset_processors = False
        self.evaluator.process_dataset('path', None)
        assert self.evaluator.load.called
        assert not self.launcher.predict.called
        assert not self.postprocessor.process_batch.called
        assert self.metric.update_metrics_on_batch.call_count == 1
        assert not self.postprocessor.process_dataset.called
        assert self.postprocessor.full_process.called

    def test_process_dataset_with_loading_predictions_and_with_dataset_processors(self, mocker):
        mocker.patch('openvino.tools.accuracy_checker.evaluators.model_evaluator.get_path')
        self.postprocessor.has_dataset_processors = True
        self.evaluator.process_dataset('path', None)
        assert not self.evaluator.store_predictions.called
        assert self.evaluator.load.called
        assert not self.launcher.predict.called
        assert not self.postprocessor.process_batch.called
        assert self.metric.update_metrics_on_batch.call_count == 1
        assert not self.postprocessor.process_dataset.called
        assert self.postprocessor.full_process.called

    def test_ignore_dataset_config(self):
        launcher_config = {'model': 'foo', 'framework': 'dlsdk', 'device': 'cpu'}
        config_errors = ModelEvaluator.validate_config(
            {'models': [{'launchers': [launcher_config], 'datasets': []}]},
            delayed_annotation_loading=True
        )
        assert not config_errors
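
# Minimal invocation sketch (assumes this module is named
# test_model_evaluator.py and that pytest plus pytest-mock are installed;
# the ``mocker`` fixture used above is provided by pytest-mock):
#   python -m pytest test_model_evaluator.py -v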