def test_set_accuracy_metric_on_empty_config(self) -> None:
    """Test set_accuracy_metric."""
    config = Config()
    config.set_accuracy_metric({
        "metric": "new metric",
        "metric_param": {
            "param1": True,
        },
    })
    self.assertIsNone(config.evaluation)
def test_set_accuracy_metric(self) -> None:
    """Test set_accuracy_metric."""
    config = Config(self.predefined_config)
    config.set_accuracy_metric({
        "metric": "new metric",
        "metric_param": {
            "param1": True,
        },
    })
    self.assertEqual("new metric", config.evaluation.accuracy.metric.name)
    self.assertEqual({"param1": True}, config.evaluation.accuracy.metric.param)
def update_evaluation_data(config: Config, evaluation_data: Dict[str, Any]) -> None:
    """Update config with evaluation data."""
    config.set_accuracy_metric(evaluation_data)
    # Map evaluation data keys to the corresponding Config setter.
    map_key_to_action: Dict[str, Any] = {
        "warmup": config.set_performance_warmup,
        "iterations": config.set_performance_iterations,
        "dataloader": config.set_evaluation_dataloader,
        "dataset_path": config.set_evaluation_dataset_path,
        "cores_per_instance": config.set_performance_cores_per_instance,
        "num_of_instance": config.set_performance_num_of_instance,
        "batch_size": config.set_performance_batch_size,
    }
    for key, action in map_key_to_action.items():
        # Call the setter only when the key is present and its value is truthy.
        if evaluation_data.get(key, None):
            action(evaluation_data[key])
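# Example call (a minimal sketch; the key names mirror map_key_to_action above,
# while the concrete metric name and values are illustrative assumptions):
#
#     config = Config()
#     update_evaluation_data(config, {
#         "metric": "topk",
#         "metric_param": {"k": 1},
#         "batch_size": 32,
#         "cores_per_instance": 4,
#     })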