def setup_method(self):
        self.launcher = MagicMock()
        infer_request = MagicMock()
        infer_request.wait = Mock(return_value=0)
        infer_request.outputs = Mock()
        self.launcher.infer_requests = [infer_request]
        data = MagicMock(data=MagicMock(), metadata=MagicMock(), identifier=0)
        self.preprocessor = Mock()
        self.preprocessor.process = Mock(return_value=data)
        self.postprocessor = Mock()
        self.adapter = MagicMock(return_value=[])
        self.input_feeder = Mock()
        self.data_reader = Mock(return_value=data)
        self.data_reader.data_source = 'source'

        annotation_0 = MagicMock()
        annotation_0.identifier = 0
        annotation_0.metadata = {'data_source': MagicMock()}
        annotation_1 = MagicMock()
        annotation_1.identifier = 1
        annotation_1.metadata = {'data_source': MagicMock()}
        annotation_container_0 = MagicMock()
        annotation_container_0.values = MagicMock(return_value=[annotation_0])
        annotation_container_1 = MagicMock()
        annotation_container_1.values = MagicMock(return_value=[annotation_1])
        self.annotations = [[annotation_container_0], [annotation_container_1]]

        self.dataset = MagicMock()
        self.dataset.__iter__.return_value = self.annotations

        self.postprocessor.process_batch = Mock(side_effect=[
            ([annotation_container_0], [annotation_container_0]), ([annotation_container_1], [annotation_container_1])
        ])
        self.postprocessor.process_dataset = Mock(return_value=(
            ([annotation_container_0], [annotation_container_0]), ([annotation_container_1], [annotation_container_1])
        ))
        self.postprocessor.full_process = Mock(return_value=(
            ([annotation_container_0], [annotation_container_0]), ([annotation_container_1], [annotation_container_1])
        ))

        self.metric = Mock()
        self.metric.update_metrics_on_batch = Mock()

        self.evaluator = ModelEvaluator(
            self.launcher,
            self.input_feeder,
            self.adapter,
            self.data_reader,
            self.preprocessor,
            self.postprocessor,
            self.dataset,
            self.metric,
            True
        )
        self.evaluator.store_predictions = Mock()
        self.evaluator.load = Mock(return_value=(
            ([annotation_container_0], [annotation_container_0]), ([annotation_container_1], [annotation_container_1])
        ))
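
These snippets are shown without their module-level imports. A minimal import header they appear to assume is sketched below; the unittest.mock imports are certain from usage, while the accuracy_checker module paths are assumptions that may differ between package revisions.

from unittest.mock import Mock, MagicMock

# Classes referenced by the tests on this page (module paths are assumed):
from accuracy_checker.evaluators import ModelEvaluator
from accuracy_checker.launcher import Launcher
from accuracy_checker.dataset import Dataset
from accuracy_checker.config import StringField
from accuracy_checker.utils import contains_all
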
Example #2
def setup_method(self):
        self.launcher = MagicMock()
        self.launcher.get_async_requests = Mock(return_value=[])
        data = MagicMock(data=MagicMock(), metadata=MagicMock(), identifier=0)
        self.preprocessor = Mock()
        self.preprocessor.process = Mock(return_value=data)
        self.postprocessor = Mock()
        self.adapter = MagicMock(return_value=[])
        self.input_feeder = MagicMock()
        self.input_feeder.lstm_inputs = []

        annotation_0 = MagicMock()
        annotation_0.identifier = 0
        annotation_0.metadata = {'data_source': MagicMock()}
        annotation_1 = MagicMock()
        annotation_1.identifier = 1
        annotation_1.metadata = {'data_source': MagicMock()}
        annotation_container_0 = MagicMock()
        annotation_container_0.values = MagicMock(return_value=[annotation_0])
        annotation_container_1 = MagicMock()
        annotation_container_1.values = MagicMock(return_value=[annotation_1])
        self.annotations = [[annotation_container_0], [annotation_container_1]]

        self.dataset = MagicMock()
        self.dataset.__iter__.return_value = [
            (range(1), self.annotations[0], data, [0]),
            (range(1), self.annotations[1], data, [1])]
        self.dataset.multi_infer = False

        self.postprocessor.process_batch = Mock(side_effect=[
            ([annotation_container_0], [annotation_container_0]), ([annotation_container_1], [annotation_container_1])
        ])
        self.postprocessor.process_dataset = Mock(return_value=(
            ([annotation_container_0], [annotation_container_0]), ([annotation_container_1], [annotation_container_1])
        ))
        self.postprocessor.full_process = Mock(return_value=(
            ([annotation_container_0], [annotation_container_0]), ([annotation_container_1], [annotation_container_1])
        ))

        self.metric = Mock()
        self.metric.update_metrics_on_batch = Mock(return_value=[{}, {}])

        self.evaluator = ModelEvaluator(
            self.launcher,
            self.input_feeder,
            self.adapter,
            self.preprocessor,
            self.postprocessor,
            self.dataset,
            self.metric,
            True,
            {}
        )
        self.evaluator.store_predictions = Mock()
        self.evaluator.load = Mock(return_value=(
            ([annotation_container_0], [annotation_container_0]), ([annotation_container_1], [annotation_container_1])
        ))
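
Note that the two setup_method variants above construct ModelEvaluator with different positional arguments: the first passes a data_reader, while the second drops it and appends a trailing config dict. They apparently come from different revisions of the test suite, so they are not interchangeable against a single ModelEvaluator signature.
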
Example #3
 def test_preprocessing_config_extra_parameter(self):
     preprocessing_config = [{
         'type': 'bgr_to_rgb'
     }, {
         'type': 'resize',
         'size': 224,
         'something_extra': True
     }]
     dataset_config = {
         'name': 'dataset',
         'metrics': [{
             'type': 'accuracy'
         }],
         'data_source': 'data',
         'annotation': 'annotation',
         'preprocessing': preprocessing_config
     }
     config_errors = ModelEvaluator.validate_config(
         {'datasets': [dataset_config]})
     assert len(config_errors) == 2
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'launchers'
     assert config_errors[
         1].message == "datasets.0.preprocessing.1 specifies unknown options: ['something_extra']"
     assert config_errors[1].entry == preprocessing_config[1]
     assert config_errors[1].field_uri == 'datasets.0.preprocessing.1'
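
For reference, a standalone sketch of the API these tests exercise, reusing the config shape from the example above; 'foo', 'data' and 'annotation' are placeholder values, and (as the path-validation examples further down show) data_source is checked against the real filesystem, so a clean run needs existing paths.

config = {
    'models': [{
        'launchers': [{'framework': 'dlsdk', 'model': 'foo', 'device': 'cpu'}],
        'datasets': [{
            'name': 'dataset',
            'data_source': 'data',
            'annotation': 'annotation',
            'metrics': [{'type': 'accuracy'}]
        }]
    }]
}
# validate_config returns a list of error objects exposing message, entry and field_uri.
for error in ModelEvaluator.validate_config(config):
    print(error.field_uri, '-', error.message)
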
Example #4
 def test_preprocessing_config_unknown_parameter(self):
     preprocessing_config = [{
         'type': 'bgr_to_rgb'
     }, {
         'type': 'not_resize',
         'size': 224
     }]
     dataset_config = {
         'name': 'dataset',
         'metrics': [{
             'type': 'accuracy'
         }],
         'data_source': 'data',
         'annotation': 'annotation',
         'preprocessing': preprocessing_config
     }
     config_errors = ModelEvaluator.validate_config(
         {'datasets': [dataset_config]})
     assert len(config_errors) == 2
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'launchers'
     assert config_errors[
         1].message == "preprocessor not_resize unregistered"
     assert config_errors[1].entry == preprocessing_config[1]
     assert config_errors[1].field_uri == 'datasets.0.preprocessing.1'
Example #5
 def test_empty_config(self):
     config_errors = ModelEvaluator.validate_config({'models': [{}]})
     assert len(config_errors) == 2
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'models.launchers'
     assert config_errors[1].message == 'datasets section is not provided'
     assert not config_errors[1].entry
     assert config_errors[1].field_uri == 'models.datasets'
Example #6
 def test_unregistered_launcher_config(self):
     launcher_config = {'framework': 'foo'}
     config_errors = ModelEvaluator.validate_config(
         {'models': [{
             'launchers': [launcher_config],
             'datasets': []
         }]})
     assert len(config_errors) == 2
     assert config_errors[0].message == 'launcher foo is not unregistered'
     assert config_errors[0].entry == launcher_config
     assert config_errors[0].field_uri == 'models.launchers.0'
     assert config_errors[1].message == 'datasets section is not provided'
     assert not config_errors[1].entry
     assert config_errors[1].field_uri == 'models.datasets'
Example #7
 def test_valid_launcher_config(self):
     launcher_config = {
         'model': 'foo',
         'framework': 'dlsdk',
         'device': 'cpu'
     }
     config_errors = ModelEvaluator.validate_config(
         {'models': [{
             'launchers': [launcher_config],
             'datasets': []
         }]})
     assert len(config_errors) == 1
     assert config_errors[0].message == 'datasets section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'models.datasets'
Example #8
 def test_str_data_reader(self):
     dataset_config = {
         'name': 'dataset',
         'annotation': 'annotation',
         'metrics': [{
             'type': 'accuracy'
         }],
         'reader': 'opencv_imread',
         'data_source': 'data'
     }
     config_errors = ModelEvaluator.validate_config(
         {'datasets': [dataset_config]})
     assert len(config_errors) == 1
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'launchers'
Example #9
 def test_dataset_config_without_metrics(self):
     dataset_config = {
         'name': 'dataset',
         'data_source': 'data',
         'annotation': 'annotation'
     }
     config_errors = ModelEvaluator.validate_config(
         {'models': [{
             'datasets': [dataset_config]
         }]})
     assert len(config_errors) == 2
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'models.launchers'
     assert config_errors[1].message == 'Metrics are not provided'
     assert not config_errors[1].entry
     assert config_errors[1].field_uri == 'models.datasets.0'
Example #10
 def test_common_validation_scheme(self):
     validation_scheme = ModelEvaluator.validation_scheme()
     assert isinstance(validation_scheme, dict)
     assert len(validation_scheme) == 1
     assert 'models' in validation_scheme
     assert len(validation_scheme['models']) == 1
     assert contains_all(validation_scheme['models'][0],
                         ['name', 'launchers', 'datasets'])
     assert isinstance(validation_scheme['models'][0]['name'], StringField)
     model_validation_scheme = validation_scheme['models'][0]
     assert model_validation_scheme[
         'launchers'].__name__ == Launcher.__name__
     assert model_validation_scheme['datasets'].__name__ == Dataset.__name__
     assert isinstance(
         model_validation_scheme['launchers'].validation_scheme(), list)
     assert isinstance(
         model_validation_scheme['datasets'].validation_scheme(), list)
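
A quick interactive sketch of the same introspection, assuming the imports listed earlier:

scheme = ModelEvaluator.validation_scheme()
model_scheme = scheme['models'][0]
# 'name' is a StringField; 'launchers' and 'datasets' point at the Launcher and
# Dataset classes, whose validation_scheme() calls return lists of per-plugin schemes.
print(sorted(model_scheme.keys()))
print(isinstance(model_scheme['launchers'].validation_scheme(), list))
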
Example #11
 def test_data_source_does_not_exists(self):
     dataset_config = {
         'name': 'dataset',
         'metrics': [{
             'type': 'accuracy'
         }],
         'data_source': 'data_dir'
     }
     config_errors = ModelEvaluator.validate_config(
         {'datasets': [dataset_config]})
     assert len(config_errors) == 3
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'launchers'
     assert config_errors[
         -1].message == 'Invalid value "data_dir" for datasets.0.data_source: path does not exist'
     assert config_errors[-1].entry == 'data_dir'
     assert config_errors[-1].field_uri == 'datasets.0.data_source'
Example #12
 def test_annotation_conversion_without_converter(self):
     dataset_config = {
         'name': 'dataset',
         'metrics': [{
             'type': 'accuracy'
         }],
         'data_source': 'data',
         'annotation_conversion': {}
     }
     config_errors = ModelEvaluator.validate_config(
         {'datasets': [dataset_config]})
     assert len(config_errors) == 2
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'launchers'
     assert config_errors[1].message == 'converter is not found'
     assert config_errors[1].entry == {}
     assert config_errors[1].field_uri == 'datasets.0.annotation_conversion'
Example #13
 def test_postprocessing_config(self):
     dataset_config = {
         'name': 'dataset',
         'metrics': [{
             'type': 'accuracy'
         }],
         'data_source': 'data',
         'annotation': 'annotation',
         'postprocessing': [{
             'type': 'resize_prediction_boxes'
         }]
     }
     config_errors = ModelEvaluator.validate_config(
         {'datasets': [dataset_config]})
     assert len(config_errors) == 1
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'launchers'
Example #14
 def test_annotation_is_not_provided(self):
     dataset_config = {
         'name': 'dataset',
         'metrics': [{
             'type': 'accuracy'
         }],
         'data_source': 'data'
     }
     config_errors = ModelEvaluator.validate_config(
         {'datasets': [dataset_config]})
     assert len(config_errors) == 2
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'launchers'
     assert config_errors[
         1].message == 'annotation_conversion or annotation field should be provided'
     assert config_errors[1].entry == dataset_config
     assert config_errors[1].field_uri == 'datasets.0'
Example #15
 def test_annotation_conversion_config(self):
     conversion_parameters = {
         'converter': 'imagenet',
         'annotation_file': 'file'
     }
     dataset_config = {
         'name': 'dataset',
         'metrics': [{
             'type': 'accuracy'
         }],
         'data_source': 'data',
         'annotation_conversion': conversion_parameters
     }
     config_errors = ModelEvaluator.validate_config(
         {'datasets': [dataset_config]})
     assert len(config_errors) == 1
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'launchers'
Example #16
 def test_data_source_is_file(self):
     dataset_config = {
         'name': 'dataset',
         'metrics': [{
             'type': 'accuracy'
         }],
         'annotation': 'annotation',
         'data_source': 'data'
     }
     config_errors = ModelEvaluator.validate_config(
         {'datasets': [dataset_config]})
     assert len(config_errors) == 2
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'launchers'
     assert config_errors[
         1].message == 'Invalid value "data" for datasets.0.data_source: path is not a directory'
     assert config_errors[1].entry == 'data'
     assert config_errors[1].field_uri == 'datasets.0.data_source'
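
The two path-validation examples above imply that validate_config checks data_source against the real filesystem: a missing path is reported as 'path does not exist', while an existing path that is a file rather than a directory is reported as 'path is not a directory'. These tests therefore presumably rely on a prepared layout in which 'data' exists as a file and 'data_dir' does not exist at all.
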
Example #17
 def test_postprocessing_config_unknown_type(self):
     postprocessing_config = [{'type': 'unknown', 'size': 224}]
     dataset_config = {
         'name': 'dataset',
         'metrics': [{
             'type': 'accuracy'
         }],
         'data_source': 'data',
         'annotation': 'annotation',
         'postprocessing': postprocessing_config
     }
     config_errors = ModelEvaluator.validate_config(
         {'datasets': [dataset_config]})
     assert len(config_errors) == 2
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'launchers'
     assert config_errors[1].message == 'postprocessor unknown unregistered'
     assert config_errors[1].entry == postprocessing_config[0]
     assert config_errors[1].field_uri == 'datasets.0.postprocessing.0'
Example #18
 def test_input_without_name(self):
     launcher_config = {
         'model': 'foo',
         'framework': 'dlsdk',
         'device': 'cpu',
         'inputs': [{
             "type": 'INPUT'
         }]
     }
     config_errors = ModelEvaluator.validate_config(
         {'models': [{
             'launchers': [launcher_config],
             'datasets': []
         }]})
     assert len(config_errors) == 2
     assert config_errors[0].message.endswith('input name is not provided')
     assert config_errors[0].field_uri == 'models.launchers.0.inputs.0'
     assert config_errors[1].message == 'datasets section is not provided'
     assert not config_errors[1].entry
     assert config_errors[1].field_uri == 'models.datasets'
Example #19
 def test_annotation_conversion_missed_parameter(self):
     conversion_parameters = {'converter': 'imagenet'}
     dataset_config = {
         'name': 'dataset',
         'metrics': [{
             'type': 'accuracy'
         }],
         'data_source': 'data',
         'annotation_conversion': conversion_parameters
     }
     config_errors = ModelEvaluator.validate_config(
         {'datasets': [dataset_config]})
     assert len(config_errors) == 2
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'launchers'
     assert config_errors[
         1].message == 'Invalid config for datasets.0.annotation_conversion: missing required fields: annotation_file'
     assert config_errors[1].entry == conversion_parameters
     assert config_errors[1].field_uri == 'datasets.0.annotation_conversion'
Example #20
 def test_data_reader_without_data_source(self):
     dataset_config = {
         'name': 'dataset',
         'annotation': 'annotation',
         'metrics': [{
             'type': 'accuracy'
         }]
     }
     config_errors = ModelEvaluator.validate_config(
         {'models': [{
             'datasets': [dataset_config]
         }]})
     assert len(config_errors) == 2
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'models.launchers'
     assert config_errors[
         1].message == 'Invalid value "None" for models.datasets.0.data_source: models.datasets.0.data_source is not allowed to be None'
     assert not config_errors[1].entry
     assert config_errors[1].field_uri == 'models.datasets.0.data_source'
Example #21
 def test_unregistered_data_reader(self):
     dataset_config = {
         'name': 'dataset',
         'annotation': 'annotation',
         'metrics': [{
             'type': 'accuracy'
         }],
         'reader': 'unknown'
     }
     config_errors = ModelEvaluator.validate_config(
         {'models': [{
             'datasets': [dataset_config]
         }]})
     assert len(config_errors) == 2
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'models.launchers'
     assert config_errors[1].message.startswith(
         'Invalid value "unknown" for models.datasets.0.reader')
     assert config_errors[-1].entry == 'unknown'
     assert config_errors[-1].field_uri == 'models.datasets.0.reader'
Example #22
 def test_unregistered_adapter_config(self):
     launcher_config = {
         'model': 'foo',
         'framework': 'dlsdk',
         'device': 'cpu',
         'adapter': 'not_classification'
     }
     config_errors = ModelEvaluator.validate_config(
         {'models': [{
             'launchers': [launcher_config],
             'datasets': []
         }]})
     assert len(config_errors) == 2
     assert config_errors[0].message.startswith(
         'Invalid value "not_classification"')
     assert config_errors[0].entry == 'not_classification'
     assert config_errors[0].field_uri.startswith(
         'models.launchers.0') and config_errors[0].field_uri.endswith(
             'adapter')
     assert config_errors[1].message == 'datasets section is not provided'
     assert not config_errors[1].entry
     assert config_errors[1].field_uri == 'models.datasets'
Example #23
 def test_annotation_conversion_extra_parameter(self):
     conversion_parameters = {
         'converter': 'imagenet',
         'annotation_file': 'file',
         'something_extra': 'extra'
     }
     dataset_config = {
         'name': 'dataset',
         'metrics': [{
             'type': 'accuracy'
         }],
         'data_source': 'data',
         'annotation_conversion': conversion_parameters
     }
     config_errors = ModelEvaluator.validate_config(
         {'datasets': [dataset_config]})
     assert len(config_errors) == 2
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'launchers'
     assert config_errors[
         1].message == "datasets.0.annotation_conversion specifies unknown options: ['something_extra']"
     assert config_errors[1].entry == conversion_parameters
     assert config_errors[1].field_uri == 'datasets.0.annotation_conversion'
Example #24
class TestModelEvaluator:
    def setup_method(self):
        self.launcher = Mock()
        self.launcher.predict.return_value = []
        data = MagicMock(data=MagicMock(), metadata=MagicMock(), identifier=0)
        self.preprocessor = Mock()
        self.preprocessor.process = Mock(return_value=data)
        self.postprocessor = Mock()
        self.adapter = MagicMock(return_value=[])
        self.input_feeder = Mock()
        self.data_reader = Mock(return_value=data)
        self.data_reader.data_source = 'source'

        annotation_0 = MagicMock()
        annotation_0.identifier = 0
        annotation_0.metadata = {'data_source': MagicMock()}
        annotation_1 = MagicMock()
        annotation_1.identifier = 1
        annotation_1.metadata = {'data_source': MagicMock()}
        annotation_container_0 = MagicMock()
        annotation_container_0.values = MagicMock(return_value=[annotation_0])
        annotation_container_1 = MagicMock()
        annotation_container_1.values = MagicMock(return_value=[annotation_1])
        self.annotations = [[annotation_container_0], [annotation_container_1]]

        self.dataset = MagicMock()
        self.dataset.__iter__.return_value = [(range(1), self.annotations[0]), (range(1), self.annotations[1])]

        self.postprocessor.process_batch = Mock(side_effect=[
            ([annotation_container_0], [annotation_container_0]), ([annotation_container_1], [annotation_container_1])
        ])
        self.postprocessor.process_dataset = Mock(return_value=(
            ([annotation_container_0], [annotation_container_0]), ([annotation_container_1], [annotation_container_1])
        ))
        self.postprocessor.full_process = Mock(return_value=(
            ([annotation_container_0], [annotation_container_0]), ([annotation_container_1], [annotation_container_1])
        ))

        self.metric = Mock()
        self.metric.update_metrics_on_batch = Mock()

        self.evaluator = ModelEvaluator(
            self.launcher,
            self.input_feeder,
            self.adapter,
            self.data_reader,
            self.preprocessor,
            self.postprocessor,
            self.dataset,
            self.metric,
            False
        )
        self.evaluator.store_predictions = Mock()
        self.evaluator.load = Mock(return_value=(
            ([annotation_container_0], [annotation_container_0]), ([annotation_container_1], [annotation_container_1])
        ))

    def test_process_dataset_without_storing_predictions_and_dataset_processors(self):
        self.postprocessor.has_dataset_processors = False

        self.evaluator.process_dataset(None, None)

        assert not self.evaluator.store_predictions.called
        assert not self.evaluator.load.called
        assert self.launcher.predict.called
        assert self.postprocessor.process_batch.called
        assert self.metric.update_metrics_on_batch.call_count == len(self.annotations)
        assert not self.postprocessor.process_dataset.called
        assert not self.postprocessor.full_process.called

    def test_process_dataset_with_storing_predictions_and_without_dataset_processors(self):
        self.postprocessor.has_dataset_processors = False

        self.evaluator.process_dataset('path', None)

        assert self.evaluator.store_predictions.called
        assert not self.evaluator.load.called
        assert self.launcher.predict.called
        assert self.postprocessor.process_batch.called
        assert self.metric.update_metrics_on_batch.call_count == len(self.annotations)
        assert not self.postprocessor.process_dataset.called
        assert not self.postprocessor.full_process.called

    def test_process_dataset_with_loading_predictions_and_without_dataset_processors(self, mocker):
        mocker.patch('accuracy_checker.evaluators.model_evaluator.get_path')
        self.postprocessor.has_dataset_processors = False

        self.evaluator.process_dataset('path', None)

        assert self.evaluator.load.called
        assert not self.launcher.predict.called
        assert not self.postprocessor.process_batch.called
        assert self.metric.update_metrics_on_batch.call_count == 1
        assert not self.postprocessor.process_dataset.called
        assert self.postprocessor.full_process.called

    def test_process_dataset_with_loading_predictions_and_with_dataset_processors(self, mocker):
        mocker.patch('accuracy_checker.evaluators.model_evaluator.get_path')
        self.postprocessor.has_dataset_processors = True

        self.evaluator.process_dataset('path', None)

        assert not self.evaluator.store_predictions.called
        assert self.evaluator.load.called
        assert not self.launcher.predict.called
        assert not self.postprocessor.process_batch.called
        assert self.metric.update_metrics_on_batch.call_count == 1
        assert not self.postprocessor.process_dataset.called
        assert self.postprocessor.full_process.called
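
The loading-predictions tests above patch accuracy_checker.evaluators.model_evaluator.get_path through the mocker fixture, which is provided by the pytest-mock plugin, so running this class requires pytest-mock in addition to pytest.
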
Example #25
class TestModelEvaluatorAsync:
    def setup_method(self):
        self.launcher = MagicMock()
        self.launcher.get_async_requests = Mock(return_value=[])
        data = MagicMock(data=MagicMock(), metadata=MagicMock(), identifier=0)
        self.preprocessor = Mock()
        self.preprocessor.process = Mock(return_value=data)
        self.postprocessor = Mock()
        self.adapter = MagicMock(return_value=[])
        self.input_feeder = MagicMock()
        self.input_feeder.lstm_inputs = []

        annotation_0 = MagicMock()
        annotation_0.identifier = 0
        annotation_0.metadata = {'data_source': MagicMock()}
        annotation_1 = MagicMock()
        annotation_1.identifier = 1
        annotation_1.metadata = {'data_source': MagicMock()}
        annotation_container_0 = MagicMock()
        annotation_container_0.values = MagicMock(return_value=[annotation_0])
        annotation_container_1 = MagicMock()
        annotation_container_1.values = MagicMock(return_value=[annotation_1])
        self.annotations = [[annotation_container_0], [annotation_container_1]]

        self.dataset = MagicMock()
        self.dataset.__iter__.return_value = [
            (range(1), self.annotations[0], data, [0]),
            (range(1), self.annotations[1], data, [1])
        ]
        self.dataset.multi_infer = False

        self.postprocessor.process_batch = Mock(
            side_effect=[([annotation_container_0], [annotation_container_0]),
                         ([annotation_container_1], [annotation_container_1])])
        self.postprocessor.process_dataset = Mock(
            return_value=(([annotation_container_0], [annotation_container_0]),
                          ([annotation_container_1],
                           [annotation_container_1])))
        self.postprocessor.full_process = Mock(
            return_value=(([annotation_container_0], [annotation_container_0]),
                          ([annotation_container_1],
                           [annotation_container_1])))

        self.metric = Mock()
        self.metric.update_metrics_on_batch = Mock(return_value=[{}, {}])

        self.evaluator = ModelEvaluator(self.launcher, self.input_feeder,
                                        self.adapter, self.preprocessor,
                                        self.postprocessor, self.dataset,
                                        self.metric, True)
        self.evaluator.store_predictions = Mock()
        self.evaluator.load = Mock(return_value=(([annotation_container_0],
                                                  [annotation_container_0]),
                                                 ([annotation_container_1],
                                                  [annotation_container_1])))

    def test_process_dataset_without_storing_predictions_and_dataset_processors(
            self):
        self.postprocessor.has_dataset_processors = False
        self.launcher.allow_reshape_input = False
        self.preprocessor.has_multi_infer_transformations = False

        self.evaluator.process_dataset(None, None)

        assert not self.evaluator.store_predictions.called
        assert not self.evaluator.load.called
        assert not self.launcher.predict.called
        assert self.launcher.get_async_requests.called

    def test_process_dataset_with_storing_predictions_and_without_dataset_processors(
            self):
        self.postprocessor.has_dataset_processors = False
        self.launcher.allow_reshape_input = False
        self.preprocessor.has_multi_infer_transformations = False
        self.dataset.multi_infer = False

        self.evaluator.process_dataset('path', None)

        assert not self.evaluator.load.called
        assert not self.launcher.predict.called
        assert self.launcher.get_async_requests.called

    def test_process_dataset_with_loading_predictions_and_without_dataset_processors(
            self, mocker):
        mocker.patch('accuracy_checker.evaluators.model_evaluator.get_path')
        self.postprocessor.has_dataset_processors = False

        self.evaluator.process_dataset('path', None)

        assert self.evaluator.load.called
        assert not self.launcher.predict.called
        assert not self.launcher.predict_async.called
        assert not self.postprocessor.process_batch.called
        assert self.metric.update_metrics_on_batch.call_count == 1
        assert not self.postprocessor.process_dataset.called
        assert self.postprocessor.full_process.called

    def test_switch_to_sync_predict_if_need_reshaping(self):
        self.postprocessor.has_dataset_processors = False
        self.launcher.allow_reshape_input = True
        self.preprocessor.has_multi_infer_transformations = False

        self.evaluator.process_dataset(None, None)

        assert not self.evaluator.store_predictions.called
        assert not self.evaluator.load.called
        assert self.launcher.predict.called
        assert not self.launcher.predict_async.called
        assert self.metric.update_metrics_on_batch.call_count == len(
            self.annotations)

    def test_switch_to_sync_predict_if_need_multi_infer_after_preprocessing(
            self):
        self.postprocessor.has_dataset_processors = False
        self.launcher.allow_reshape_input = False
        self.preprocessor.has_multi_infer_transformations = True

        self.evaluator.process_dataset(None, None)

        assert not self.evaluator.store_predictions.called
        assert not self.evaluator.load.called
        assert self.launcher.predict.called
        assert not self.launcher.predict_async.called
        assert self.metric.update_metrics_on_batch.call_count == len(
            self.annotations)

    def test_switch_to_sync_predict_if_need_multi_infer(self):
        self.postprocessor.has_dataset_processors = False
        self.launcher.allow_reshape_input = False
        self.preprocessor.has_multi_infer_transformations = False
        self.dataset.multi_infer = True

        self.evaluator.process_dataset(None, None)

        assert not self.evaluator.store_predictions.called
        assert not self.evaluator.load.called
        assert self.launcher.predict.called
        assert not self.launcher.predict_async.called
        assert self.metric.update_metrics_on_batch.call_count == len(
            self.annotations)
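
Taken together, the tests in TestModelEvaluatorAsync document when process_dataset falls back from asynchronous to synchronous inference: whenever the launcher allows input reshaping, the preprocessor applies multi-infer transformations, or the dataset itself is marked multi_infer, the synchronous predict path is expected to be used instead of predict_async.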