Example #1
    def test_set_quantization_sampling_size_on_empty_config(self) -> None:
        """Test set_model_path."""
        config = Config()

        config.set_quantization_sampling_size("new sampling size")

        self.assertIsNone(config.quantization)
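
The "on empty config" tests in this listing all exercise the same guard pattern: on a freshly constructed Config the relevant section is None, so the setter returns without creating it. A minimal sketch of that pattern, assuming a Config shaped like the one under test (an illustration, not the actual lpot source):

    def set_quantization_sampling_size(self, sampling_size: str) -> None:
        """Set calibration sampling size, if a quantization section exists."""
        if self.quantization is None:
            # Empty config: nothing to update, quantization stays None.
            return
        self.quantization.calibration.sampling_size = sampling_size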
Example #2
import os
from pathlib import Path
from typing import Any, Dict, List, Union

# Import path assumed from the lpot.ux package layout.
from lpot.ux.utils.exceptions import ClientErrorException


def get_predefined_configuration(
    data: Dict[str, Any],
) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
    """Get predefined configuration for a model."""
    from lpot.ux.utils.utils import get_framework_from_path, get_predefined_config_path
    from lpot.ux.utils.workload.config import Config

    model_path = data.get("model_path", "")
    if not os.path.isfile(model_path):
        raise ClientErrorException(
            f"Could not find model in specified path: {model_path}.",
        )

    model_name = Path(model_path).stem

    domain = data.get("domain", None)

    if not domain:
        raise ClientErrorException("Domain is not defined!")

    framework = get_framework_from_path(model_path)
    if framework is None:
        raise ClientErrorException(
            f"Could not find framework for specified model {model_name} in path {model_path}.",
        )

    config = Config()
    predefined_config_path = get_predefined_config_path(framework, domain)
    config.load(predefined_config_path)

    return {
        "config": config.serialize(),
        "framework": framework,
        "name": model_name,
        "domain": domain,
    }
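
A minimal usage sketch for get_predefined_configuration (the model path and domain value below are hypothetical placeholders):

    data = {"model_path": "/path/to/model.pb", "domain": "image_recognition"}
    result = get_predefined_configuration(data)
    print(result["framework"], result["name"], result["domain"])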
Example #3
    def test_set_performance_batch_size_on_empty_config(self) -> None:
        """Test set_performance_batch_size."""
        config = Config()

        config.set_performance_batch_size(1234)

        self.assertIsNone(config.evaluation)
Example #4
    def test_set_model_path(self) -> None:
        """Test set_model_path."""
        config = Config()

        config.set_model_path("new/model/path")

        self.assertEqual("new/model/path", config.model_path)
Example #5
    def test_set_optimization_precision_on_empty_config(
        self,
        mocked_load_precisions_config: MagicMock,
    ) -> None:
        """Test set_optimization_precision."""
        mocked_load_precisions_config.return_value = {
            "framework_foo": [
                {"name": "precision1"},
                {"name": "precision2"},
                {"name": "precision3"},
            ],
            "framework_bar": [
                {"name": "precision1"},
            ],
        }

        config = Config()

        config.set_optimization_precision("framework_foo", "precision2")

        self.assertEqual("precision2", config.graph_optimization.precisions)
        mocked_load_precisions_config.assert_called_once()
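
This test and the next take mocked_load_precisions_config as a parameter, which implies a @patch decorator that this excerpt omits. A sketch of the mocking pattern, with a hypothetical patch target path:

    import unittest
    from unittest.mock import MagicMock, patch

    class ConfigTest(unittest.TestCase):
        @patch("lpot.ux.utils.workload.config.load_precisions_config")  # hypothetical target
        def test_precision_setting(self, mocked_load_precisions_config: MagicMock) -> None:
            mocked_load_precisions_config.return_value = {"framework_foo": [{"name": "precision1"}]}
            ...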
Example #6
    def test_set_optimization_precision_to_unknown_framework(
        self,
        mocked_load_precisions_config: MagicMock,
    ) -> None:
        """Test set_optimization_precision."""
        mocked_load_precisions_config.return_value = {
            "framework_foo": [
                {"name": "precision1"},
                {"name": "precision2"},
                {"name": "precision3"},
            ],
            "framework_bar": [
                {"name": "precision1"},
            ],
        }
        config = Config(self.predefined_config)

        with self.assertRaisesRegex(
                ClientErrorException,
                "Precision precision1 is not supported "
                "in graph optimization for framework framework_baz.",
        ):
            config.set_optimization_precision("framework_baz", "precision1")
        mocked_load_precisions_config.assert_called_once()
Example #7
    def test_set_outputs(self) -> None:
        """Test set_model_path."""
        config = Config()

        config.set_outputs(["output1", "output2"])

        self.assertEqual(["output1", "output2"], config.model.outputs)
Example #8
    def test_set_transform_on_empty_config(self) -> None:
        """Test set_transform."""
        config = Config()

        config.set_transform([
            {"name": "Some transform1", "params": {"param12": True}},
            {"name": "SquadV1", "params": {"param1": True}},
            {"name": "Some transform2", "params": {"param123": True}},
        ])

        self.assertIsNone(config.evaluation)
        self.assertIsNone(config.quantization)
Example #9
    def test_set_performance_warmup(self) -> None:
        """Test set_performance_warmup."""
        config = Config(self.predefined_config)

        config.set_performance_warmup(1234)

        self.assertEqual(1234, config.evaluation.performance.warmup)
Example #10
    def test_set_accuracy_goal_on_empty_config(self) -> None:
        """Test set_accuracy_goal."""
        config = Config()

        config.set_accuracy_goal(1234)

        self.assertIsNone(config.tuning.accuracy_criterion.relative)
Example #11
    def test_set_accuracy_goal(self) -> None:
        """Test set_accuracy_goal."""
        config = Config(self.predefined_config)

        config.set_accuracy_goal(1234)

        self.assertEqual(1234, config.tuning.accuracy_criterion.relative)
Example #12
    def test_set_quantization_approach_on_empty_config(self) -> None:
        """Test set_quantization_approach."""
        config = Config()

        config.set_quantization_approach("Some quantization approach")

        self.assertIsNone(config.quantization)
Example #13
    def test_set_quantization_dataset_path_on_empty_config(self) -> None:
        """Test set_quantization_dataset_path on empty config."""
        config = Config()

        config.set_quantization_dataset_path("new dataset path")

        self.assertIsNone(config.quantization)
Example #14
    def test_set_workspace(self) -> None:
        """Test set_workspace."""
        config = Config(self.predefined_config)

        config.set_workspace("new/workspace/path")

        self.assertEqual("new/workspace/path", config.tuning.workspace.path)
Example #15
    def test_set_performance_iterations_on_empty_config(self) -> None:
        """Test set_performance_iterations."""
        config = Config()

        config.set_performance_iterations(1234)

        self.assertIsNone(config.evaluation)
Example #16
    def test_set_performance_iterations(self) -> None:
        """Test set_performance_iterations."""
        config = Config(self.predefined_config)

        config.set_performance_iterations(1234)

        self.assertEqual(1234, config.evaluation.performance.iteration)
Example #17
    def test_set_performance_num_of_instance_on_empty_config(self) -> None:
        """Test set_performance_num_of_instance on empty config."""
        config = Config()

        config.set_performance_num_of_instance(1234)

        self.assertIsNone(config.get_performance_num_of_instance())
Example #18
    def test_get_performance_configs(self) -> None:
        """Test get_performance_configs."""
        config = Config(self.predefined_config)

        self.assertIsNotNone(config.evaluation.performance.configs)
        self.assertEqual(config.evaluation.performance.configs,
                         config.get_performance_configs())
Example #19
    def test_remove_accuracy_metric_on_empty_config(self) -> None:
        """Test remove_accuracy_metric on empty config."""
        config = Config()

        config.remove_accuracy_metric()

        self.assertIsNone(config.evaluation)
Example #20
    def test_set_quantization_sampling_size(self) -> None:
        """Test set_model_path."""
        config = Config(self.predefined_config)

        config.set_quantization_sampling_size("new sampling size")

        self.assertEqual("new sampling size",
                         config.quantization.calibration.sampling_size)
Example #21
    def test_set_quantization_approach(self) -> None:
        """Test set_quantization_approach."""
        config = Config(self.predefined_config)

        config.set_quantization_approach("Some quantization approach")

        self.assertEqual("Some quantization approach",
                         config.quantization.approach)
Example #22
    def test_set_performance_batch_size(self) -> None:
        """Test set_performance_batch_size."""
        config = Config(self.predefined_config)

        config.set_performance_batch_size(1234)

        self.assertEqual(1234,
                         config.evaluation.performance.dataloader.batch_size)
Example #23
    def test_remove_dataloader_on_empty_config(self) -> None:
        """Test remove_dataloader on empty config."""
        config = Config()

        config.remove_dataloader()

        self.assertIsNone(config.evaluation)
        self.assertIsNone(config.quantization)
Example #24
    def test_remove_accuracy_metric(self) -> None:
        """Test remove_accuracy_metric."""
        config = Config(self.predefined_config)

        self.assertEqual("topk", config.evaluation.accuracy.metric.name)

        config.remove_accuracy_metric()

        self.assertIsNone(config.evaluation.accuracy)
Example #25
    def test_set_quantization_dataloader_on_empty_config(self) -> None:
        """Test set_quantization_dataloader on empty config."""
        config = Config()

        config.set_quantization_dataloader({
            "name": "dataloader_name",
        })

        self.assertIsNone(config.quantization)
Example #26
    def test_set_performance_iterations_to_negative_value(self) -> None:
        """Test set_performance_iterations."""
        config = Config(self.predefined_config)

        original_performance_iterations = config.evaluation.performance.iteration

        with self.assertRaises(ClientErrorException):
            config.set_performance_iterations(-1234)
        self.assertEqual(original_performance_iterations,
                         config.evaluation.performance.iteration)
Example #27
    def test_set_accuracy_goal_to_negative_value(self) -> None:
        """Test set_accuracy_goal."""
        config = Config(self.predefined_config)

        original_accuracy_goal = config.tuning.accuracy_criterion.relative

        with self.assertRaises(ClientErrorException):
            config.set_accuracy_goal(-1234)
        self.assertEqual(original_accuracy_goal,
                         config.tuning.accuracy_criterion.relative)
Example #28
    def test_set_performance_warmup_to_negative_value(self) -> None:
        """Test set_performance_warmup."""
        config = Config(self.predefined_config)

        original_performance_warmup = config.evaluation.performance.warmup

        with self.assertRaises(ClientErrorException):
            config.set_performance_warmup(-1234)
        self.assertEqual(original_performance_warmup,
                         config.evaluation.performance.warmup)
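
The three negative-value tests above imply that the numeric setters validate their argument before touching any state, so a rejected call leaves the config unchanged. A minimal sketch of that validate-then-assign pattern (hypothetical, not the actual lpot implementation):

    def set_performance_warmup(self, warmup: int) -> None:
        """Set performance warmup iterations, rejecting negative values."""
        if warmup < 0:
            # Raise before assigning so the existing value is preserved.
            raise ClientErrorException("Performance warmup must not be negative.")
        if self.evaluation is not None and self.evaluation.performance is not None:
            self.evaluation.performance.warmup = warmup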
Example #29
    def test_set_quantization_dataloader(self) -> None:
        """Test set_quantization_dataloader."""
        config = Config(self.predefined_config)

        config.set_quantization_dataloader({
            "name": "dataloader_name",
        })

        self.assertEqual(
            "dataloader_name",
            config.quantization.calibration.dataloader.dataset.name,
        )
Example #30
    def test_set_accuracy_metric_on_empty_config(self) -> None:
        """Test set_accuracy_metric."""
        config = Config()

        config.set_accuracy_metric({
            "metric": "new metric",
            "metric_param": {
                "param1": True
            }
        })

        self.assertIsNone(config.evaluation)