def test_set_quantization_sampling_size_on_empty_config(self) -> None:
    """Test set_quantization_sampling_size on empty config."""
    config = Config()
    config.set_quantization_sampling_size("new sampling size")
    self.assertIsNone(config.quantization)
def get_predefined_configuration(
    data: Dict[str, Any],
) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
    """Get predefined configuration for a model."""
    from lpot.ux.utils.utils import get_framework_from_path, get_predefined_config_path
    from lpot.ux.utils.workload.config import Config

    model_path = data.get("model_path", "")
    if not os.path.isfile(model_path):
        raise ClientErrorException(
            f"Could not find model in specified path: {model_path}.",
        )
    model_name = Path(model_path).stem
    domain = data.get("domain", None)
    if not domain:
        raise ClientErrorException("Domain is not defined!")
    framework = get_framework_from_path(model_path)
    if framework is None:
        raise ClientErrorException(
            f"Could not find framework for specified model {model_name} in path {model_path}.",
        )
    config = Config()
    predefined_config_path = get_predefined_config_path(framework, domain)
    config.load(predefined_config_path)
    return {
        "config": config.serialize(),
        "framework": framework,
        "name": model_name,
        "domain": domain,
    }
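# Usage sketch for get_predefined_configuration (hypothetical values): the input
# dict needs a "model_path" pointing at an existing model file and a "domain"
# string; the path and domain below are illustrative placeholders only, so the
# example is left commented out rather than executed.
#
# configuration = get_predefined_configuration(
#     {
#         "model_path": "/path/to/model.pb",
#         "domain": "image_recognition",
#     },
# )
# configuration["framework"]  # framework name inferred from the model file
# configuration["config"]     # serialized predefined Config for that framework and domain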
def test_set_performance_batch_size_on_empty_config(self) -> None:
    """Test set_performance_batch_size."""
    config = Config()
    config.set_performance_batch_size(1234)
    self.assertIsNone(config.evaluation)
def test_set_model_path(self) -> None:
    """Test set_model_path."""
    config = Config()
    config.set_model_path("new/model/path")
    self.assertEqual("new/model/path", config.model_path)
@patch("lpot.ux.utils.workload.config.load_precisions_config")  # patch target assumed; adjust to where load_precisions_config is imported
def test_set_optimization_precision_on_empty_config(
    self,
    mocked_load_precisions_config: MagicMock,
) -> None:
    """Test set_optimization_precision."""
    mocked_load_precisions_config.return_value = {
        "framework_foo": [
            {
                "name": "precision1",
            },
            {
                "name": "precision2",
            },
            {
                "name": "precision3",
            },
        ],
        "framework_bar": [
            {
                "name": "precision1",
            },
        ],
    }
    config = Config()
    config.set_optimization_precision("framework_foo", "precision2")
    self.assertEqual("precision2", config.graph_optimization.precisions)
    mocked_load_precisions_config.assert_called_once()
@patch("lpot.ux.utils.workload.config.load_precisions_config")  # patch target assumed; adjust to where load_precisions_config is imported
def test_set_optimization_precision_to_unknown_framework(
    self,
    mocked_load_precisions_config: MagicMock,
) -> None:
    """Test set_optimization_precision."""
    mocked_load_precisions_config.return_value = {
        "framework_foo": [
            {
                "name": "precision1",
            },
            {
                "name": "precision2",
            },
            {
                "name": "precision3",
            },
        ],
        "framework_bar": [
            {
                "name": "precision1",
            },
        ],
    }
    config = Config(self.predefined_config)
    with self.assertRaisesRegex(
        ClientErrorException,
        "Precision precision1 is not supported "
        "in graph optimization for framework framework_baz.",
    ):
        config.set_optimization_precision("framework_baz", "precision1")
    mocked_load_precisions_config.assert_called_once()
def test_set_outputs(self) -> None:
    """Test set_outputs."""
    config = Config()
    config.set_outputs(["output1", "output2"])
    self.assertEqual(["output1", "output2"], config.model.outputs)
def test_set_transform_on_empty_config(self) -> None:
    """Test set_transform."""
    config = Config()
    config.set_transform(
        [
            {"name": "Some transform1", "params": {"param12": True}},
            {"name": "SquadV1", "params": {"param1": True}},
            {"name": "Some transform2", "params": {"param123": True}},
        ],
    )
    self.assertIsNone(config.evaluation)
    self.assertIsNone(config.quantization)
def test_set_performance_warmup(self) -> None:
    """Test set_performance_warmup."""
    config = Config(self.predefined_config)
    config.set_performance_warmup(1234)
    self.assertEqual(1234, config.evaluation.performance.warmup)
def test_set_accuracy_goal_on_empty_config(self) -> None:
    """Test set_accuracy_goal."""
    config = Config()
    config.set_accuracy_goal(1234)
    self.assertIsNone(config.tuning.accuracy_criterion.relative)
def test_set_accuracy_goal(self) -> None:
    """Test set_accuracy_goal."""
    config = Config(self.predefined_config)
    config.set_accuracy_goal(1234)
    self.assertEqual(1234, config.tuning.accuracy_criterion.relative)
def test_set_quantization_approach_on_empty_config(self) -> None:
    """Test set_quantization_approach."""
    config = Config()
    config.set_quantization_approach("Some quantization approach")
    self.assertIsNone(config.quantization)
def test_set_quantization_dataset_path_on_empty_config(self) -> None:
    """Test set_quantization_dataset_path on empty config."""
    config = Config()
    config.set_quantization_dataset_path("new dataset path")
    self.assertIsNone(config.quantization)
def test_set_workspace(self) -> None:
    """Test set_workspace."""
    config = Config(self.predefined_config)
    config.set_workspace("new/workspace/path")
    self.assertEqual("new/workspace/path", config.tuning.workspace.path)
def test_set_performance_iterations_on_empty_config(self) -> None:
    """Test set_performance_iterations."""
    config = Config()
    config.set_performance_iterations(1234)
    self.assertIsNone(config.evaluation)
def test_set_performance_iterations(self) -> None:
    """Test set_performance_iterations."""
    config = Config(self.predefined_config)
    config.set_performance_iterations(1234)
    self.assertEqual(1234, config.evaluation.performance.iteration)
def test_set_performance_num_of_instance_on_empty_config(self) -> None:
    """Test set_performance_num_of_instance on empty config."""
    config = Config()
    config.set_performance_num_of_instance(1234)
    self.assertIsNone(config.get_performance_num_of_instance())
def test_get_performance_configs(self) -> None:
    """Test get_performance_configs."""
    config = Config(self.predefined_config)
    self.assertIsNotNone(config.evaluation.performance.configs)
    self.assertEqual(
        config.evaluation.performance.configs,
        config.get_performance_configs(),
    )
def test_remove_accuracy_metric_on_empty_config(self) -> None:
    """Test remove_accuracy_metric on empty config."""
    config = Config()
    config.remove_accuracy_metric()
    self.assertIsNone(config.evaluation)
def test_set_quantization_sampling_size(self) -> None:
    """Test set_quantization_sampling_size."""
    config = Config(self.predefined_config)
    config.set_quantization_sampling_size("new sampling size")
    self.assertEqual("new sampling size", config.quantization.calibration.sampling_size)
def test_set_quantization_approach(self) -> None:
    """Test set_quantization_approach."""
    config = Config(self.predefined_config)
    config.set_quantization_approach("Some quantization approach")
    self.assertEqual("Some quantization approach", config.quantization.approach)
def test_set_performance_batch_size(self) -> None:
    """Test set_performance_batch_size."""
    config = Config(self.predefined_config)
    config.set_performance_batch_size(1234)
    self.assertEqual(1234, config.evaluation.performance.dataloader.batch_size)
def test_remove_dataloader_on_empty_config(self) -> None:
    """Test remove_dataloader on empty config."""
    config = Config()
    config.remove_dataloader()
    self.assertIsNone(config.evaluation)
    self.assertIsNone(config.quantization)
def test_remove_accuracy_metric(self) -> None:
    """Test remove_accuracy_metric."""
    config = Config(self.predefined_config)
    self.assertEqual("topk", config.evaluation.accuracy.metric.name)
    config.remove_accuracy_metric()
    self.assertIsNone(config.evaluation.accuracy)
def test_set_quantization_dataloader_on_empty_config(self) -> None:
    """Test set_quantization_dataloader on empty config."""
    config = Config()
    config.set_quantization_dataloader(
        {
            "name": "dataloader_name",
        },
    )
    self.assertIsNone(config.quantization)
def test_set_performance_iterations_to_negative_value(self) -> None:
    """Test set_performance_iterations."""
    config = Config(self.predefined_config)
    original_performance_iterations = config.evaluation.performance.iteration
    with self.assertRaises(ClientErrorException):
        config.set_performance_iterations(-1234)
    self.assertEqual(
        original_performance_iterations,
        config.evaluation.performance.iteration,
    )
def test_set_accuracy_goal_to_negative_value(self) -> None:
    """Test set_accuracy_goal."""
    config = Config(self.predefined_config)
    original_accuracy_goal = config.tuning.accuracy_criterion.relative
    with self.assertRaises(ClientErrorException):
        config.set_accuracy_goal(-1234)
    self.assertEqual(original_accuracy_goal, config.tuning.accuracy_criterion.relative)
def test_set_performance_warmup_to_negative_value(self) -> None:
    """Test set_performance_warmup."""
    config = Config(self.predefined_config)
    original_performance_warmup = config.evaluation.performance.warmup
    with self.assertRaises(ClientErrorException):
        config.set_performance_warmup(-1234)
    self.assertEqual(original_performance_warmup, config.evaluation.performance.warmup)
def test_set_quantization_dataloader(self) -> None:
    """Test set_quantization_dataloader."""
    config = Config(self.predefined_config)
    config.set_quantization_dataloader(
        {
            "name": "dataloader_name",
        },
    )
    self.assertEqual(
        "dataloader_name",
        config.quantization.calibration.dataloader.dataset.name,
    )
def test_set_accuracy_metric_on_empty_config(self) -> None:
    """Test set_accuracy_metric."""
    config = Config()
    config.set_accuracy_metric(
        {"metric": "new metric", "metric_param": {"param1": True}},
    )
    self.assertIsNone(config.evaluation)