Example #1
 def test_postprocessing_config(self):
     dataset_config = {
         'name': 'dataset',
         'metrics': [{
             'type': 'accuracy'
         }],
         'data_source': 'data',
         'annotation': 'annotation',
         'postprocessing': [{
             'type': 'resize_prediction_boxes'
         }]
     }
     config_errors = ModelEvaluator.validate_config(
         {'datasets': [dataset_config]})
     assert len(config_errors) == 1
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'launchers'
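These snippets are methods extracted from a pytest test class, so they omit the surrounding scaffolding. A minimal sketch of that scaffolding, assuming the OpenVINO Accuracy Checker package layout (the import path and the class and method names below are assumptions, not taken from the examples):

# Minimal scaffolding sketch for running the snippets on this page.
# The import path follows the OpenVINO Accuracy Checker package layout
# and is an assumption; adjust it to match your installation.
from accuracy_checker.evaluators import ModelEvaluator


class TestModelEvaluatorConfigValidation:
    # Paste the test_* methods from the examples into this class.

    def test_missing_launchers_reported(self):
        # Mirrors the shared pattern: a config without a 'launchers'
        # section reports that the section is missing.
        errors = ModelEvaluator.validate_config({'datasets': [{
            'name': 'dataset',
            'annotation': 'annotation',
            'metrics': [{'type': 'accuracy'}],
            'data_source': 'data'
        }]})
        assert any(error.field_uri == 'launchers' for error in errors)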
Example #2
 def test_annotation_conversion_without_converter(self):
     dataset_config = {
         'name': 'dataset',
         'metrics': [{
             'type': 'accuracy'
         }],
         'data_source': 'data',
         'annotation_conversion': {}
     }
     config_errors = ModelEvaluator.validate_config(
         {'datasets': [dataset_config]})
     assert len(config_errors) == 2
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'launchers'
     assert config_errors[1].message == 'converter is not found'
     assert config_errors[1].entry == {}
     assert config_errors[1].field_uri == 'datasets.0.annotation_conversion'
Example #3
 def test_dict_data_reader(self):
     dataset_config = {
         'name': 'dataset',
         'annotation': 'annotation',
         'metrics': [{
             'type': 'accuracy'
         }],
         'reader': {
             'type': 'opencv_imread'
         },
         'data_source': 'data'
     }
     config_errors = ModelEvaluator.validate_config(
         {'datasets': [dataset_config]})
     assert len(config_errors) == 1
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'launchers'
Example #4
 def test_annotation_is_not_provided(self):
     dataset_config = {
         'name': 'dataset',
         'metrics': [{
             'type': 'accuracy'
         }],
         'data_source': 'data'
     }
     config_errors = ModelEvaluator.validate_config(
         {'datasets': [dataset_config]})
     assert len(config_errors) == 2
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'launchers'
     assert (config_errors[1].message ==
             'annotation_conversion or annotation field should be provided')
     assert config_errors[1].entry == dataset_config
     assert config_errors[1].field_uri == 'datasets.0'
Example #5
 def test_data_source_does_not_exists(self):
     dataset_config = {
         'name': 'dataset',
         'metrics': [{
             'type': 'accuracy'
         }],
         'data_source': 'data_dir'
     }
     config_errors = ModelEvaluator.validate_config(
         {'datasets': [dataset_config]})
     assert len(config_errors) == 3
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'launchers'
     assert (config_errors[-1].message ==
             'Invalid value "data_dir" for datasets.0.data_source: path does not exist')
     assert config_errors[-1].entry == 'data_dir'
     assert config_errors[-1].field_uri == 'datasets.0.data_source'
Example #6
 def test_adapter_dict_config(self):
     launcher_config = {
         'model': 'foo',
         'framework': 'dlsdk',
         'device': 'cpu',
         'adapter': {
             'type': 'classification'
         }
     }
     config_errors = ModelEvaluator.validate_config(
         {'models': [{
             'launchers': [launcher_config],
             'datasets': []
         }]})
     assert len(config_errors) == 1
     assert config_errors[0].message == 'datasets section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'models.datasets'
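Note the change in error paths compared with the earlier examples: when the launchers and datasets sections are wrapped in a top-level 'models' list, every field_uri in the reported errors is prefixed with 'models.' ('models.datasets' here versus the bare 'launchers' and 'datasets.0' above).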
Example #7
 def test_annotation_conversion_config(self):
     conversion_parameters = {
         'converter': 'imagenet',
         'annotation_file': 'file'
     }
     dataset_config = {
         'name': 'dataset',
         'metrics': [{
             'type': 'accuracy'
         }],
         'data_source': 'data',
         'annotation_conversion': conversion_parameters
     }
     config_errors = ModelEvaluator.validate_config(
         {'datasets': [dataset_config]})
     assert len(config_errors) == 1
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'launchers'
Example #8
 def test_data_source_is_file(self):
     dataset_config = {
         'name': 'dataset',
         'metrics': [{
             'type': 'accuracy'
         }],
         'annotation': 'annotation',
         'data_source': 'data'
     }
     config_errors = ModelEvaluator.validate_config(
         {'datasets': [dataset_config]})
     assert len(config_errors) == 2
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'launchers'
     assert (config_errors[1].message ==
             'Invalid value "data" for datasets.0.data_source: path is not a directory')
     assert config_errors[1].entry == 'data'
     assert config_errors[1].field_uri == 'datasets.0.data_source'
Example #9
 def test_postprocessing_config_unknown_type(self):
     postprocessing_config = [{'type': 'unknown', 'size': 224}]
     dataset_config = {
         'name': 'dataset',
         'metrics': [{
             'type': 'accuracy'
         }],
         'data_source': 'data',
         'annotation': 'annotation',
         'postprocessing': postprocessing_config
     }
     config_errors = ModelEvaluator.validate_config(
         {'datasets': [dataset_config]})
     assert len(config_errors) == 2
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'launchers'
     assert config_errors[1].message == 'postprocessor unknown unregistered'
     assert config_errors[1].entry == postprocessing_config[0]
     assert config_errors[1].field_uri == 'datasets.0.postprocessing.0'
Example #10
 def test_annotation_conversion_missed_parameter(self):
     conversion_parameters = {'converter': 'imagenet'}
     dataset_config = {
         'name': 'dataset',
         'metrics': [{
             'type': 'accuracy'
         }],
         'data_source': 'data',
         'annotation_conversion': conversion_parameters
     }
     config_errors = ModelEvaluator.validate_config(
         {'datasets': [dataset_config]})
     assert len(config_errors) == 2
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'launchers'
     assert (config_errors[1].message ==
             'Invalid config for datasets.0.annotation_conversion: '
             'missing required fields: annotation_file')
     assert config_errors[1].entry == conversion_parameters
     assert config_errors[1].field_uri == 'datasets.0.annotation_conversion'
Example #11
 def test_data_reader_without_data_source(self):
     dataset_config = {
         'name': 'dataset',
         'annotation': 'annotation',
         'metrics': [{
             'type': 'accuracy'
         }]
     }
     config_errors = ModelEvaluator.validate_config(
         {'models': [{
             'datasets': [dataset_config]
         }]})
     assert len(config_errors) == 2
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'models.launchers'
     assert (config_errors[1].message ==
             'Invalid value "None" for models.datasets.0.data_source: '
             'models.datasets.0.data_source is not allowed to be None')
     assert not config_errors[1].entry
     assert config_errors[1].field_uri == 'models.datasets.0.data_source'
Example #12
 def test_input_without_name(self):
     launcher_config = {
         'model': 'foo',
         'framework': 'dlsdk',
         'device': 'cpu',
         'inputs': [{
             "type": 'INPUT'
         }]
     }
     config_errors = ModelEvaluator.validate_config(
         {'models': [{
             'launchers': [launcher_config],
             'datasets': []
         }]})
     assert len(config_errors) == 2
     assert config_errors[0].message.endswith('input name is not provided')
     assert config_errors[0].field_uri == 'models.launchers.0.inputs.0'
     assert config_errors[1].message == 'datasets section is not provided'
     assert not config_errors[1].entry
     assert config_errors[1].field_uri == 'models.datasets'
Example #13
 def test_unregistered_data_reader(self):
     dataset_config = {
         'name': 'dataset',
         'annotation': 'annotation',
         'metrics': [{
             'type': 'accuracy'
         }],
         'reader': 'unknown'
     }
     config_errors = ModelEvaluator.validate_config(
         {'models': [{
             'datasets': [dataset_config]
         }]})
     assert len(config_errors) == 2
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'models.launchers'
     assert config_errors[1].message.startswith(
         'Invalid value "unknown" for models.datasets.0.reader')
     assert config_errors[-1].entry == 'unknown'
     assert config_errors[-1].field_uri == 'models.datasets.0.reader'
Example #14
 def test_unregistered_adapter_config(self):
     launcher_config = {
         'model': 'foo',
         'framework': 'dlsdk',
         'device': 'cpu',
         'adapter': 'not_classification'
     }
     config_errors = ModelEvaluator.validate_config(
         {'models': [{
             'launchers': [launcher_config],
             'datasets': []
         }]})
     assert len(config_errors) == 2
     assert config_errors[0].message.startswith(
         'Invalid value "not_classification"')
     assert config_errors[0].entry == 'not_classification'
     assert config_errors[0].field_uri.startswith('models.launchers.0')
     assert config_errors[0].field_uri.endswith('adapter')
     assert config_errors[1].message == 'datasets section is not provided'
     assert not config_errors[1].entry
     assert config_errors[1].field_uri == 'models.datasets'
Example #15
 def test_postprocessing_config_extra_parameter(self):
     postprocessing_config = [{
         'type': 'resize_prediction_boxes',
         'something_extra': True
     }]
     dataset_config = {
         'name': 'dataset',
         'metrics': [{
             'type': 'accuracy'
         }],
         'data_source': 'data',
         'annotation': 'annotation',
         'postprocessing': postprocessing_config
     }
     config_errors = ModelEvaluator.validate_config(
         {'datasets': [dataset_config]})
     assert len(config_errors) == 2
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'launchers'
     assert (config_errors[1].message ==
             "datasets.0.postprocessing.0 specifies unknown options: ['something_extra']")
     assert config_errors[1].entry == postprocessing_config[0]
     assert config_errors[1].field_uri == 'datasets.0.postprocessing.0'
Example #16
 def test_annotation_conversion_extra_parameter(self):
     conversion_parameters = {
         'converter': 'imagenet',
         'annotation_file': 'file',
         'something_extra': 'extra'
     }
     dataset_config = {
         'name': 'dataset',
         'metrics': [{
             'type': 'accuracy'
         }],
         'data_source': 'data',
         'annotation_conversion': conversion_parameters
     }
     config_errors = ModelEvaluator.validate_config(
         {'datasets': [dataset_config]})
     assert len(config_errors) == 2
     assert config_errors[0].message == 'launchers section is not provided'
     assert not config_errors[0].entry
     assert config_errors[0].field_uri == 'launchers'
     assert (config_errors[1].message ==
             "datasets.0.annotation_conversion specifies unknown options: ['something_extra']")
     assert config_errors[1].entry == conversion_parameters
     assert config_errors[1].field_uri == 'datasets.0.annotation_conversion'
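All of the examples follow the same pattern: ModelEvaluator.validate_config takes a configuration dict and returns a list of error objects exposing message, entry, and field_uri. A small sketch of a debugging helper built on that pattern (report_config_errors is a hypothetical name, not part of the library):

# Hypothetical helper built on the behavior the examples demonstrate:
# validate_config returns error objects carrying .field_uri, .message,
# and .entry attributes. The import path is an assumption.
from accuracy_checker.evaluators import ModelEvaluator


def report_config_errors(config):
    errors = ModelEvaluator.validate_config(config)
    for error in errors:
        print(f'{error.field_uri}: {error.message} (entry={error.entry!r})')
    return errors


# Reusing the config from Example #4; depending on the local filesystem,
# path checks on 'data' may add further errors (compare Examples #5 and #8).
report_config_errors({'datasets': [{
    'name': 'dataset',
    'metrics': [{'type': 'accuracy'}],
    'data_source': 'data'
}]})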