def test_random_composer(data_fixture, request):
    """Random-search composer builds a chain scoring ROC AUC > 0.6 on its own train data."""
    # Fix both RNGs so the random search is reproducible.
    random.seed(1)
    np.random.seed(1)
    data = request.getfixturevalue(data_fixture)
    # Composition and validation deliberately reuse the same dataset here.
    dataset_to_compose = data
    dataset_to_validate = data
    available_model_types, _ = ModelTypesRepository().suitable_model(
        task_type=TaskTypesEnum.classification)
    metric_function = MetricsRepository().metric_by_id(
        ClassificationMetricsEnum.ROCAUC)
    # Minimal budget: a single random-search iteration.
    random_composer = RandomSearchComposer(iter_num=1)
    req = ComposerRequirements(primary=available_model_types,
                               secondary=available_model_types)
    chain_random_composed = random_composer.compose_chain(
        data=dataset_to_compose,
        initial_chain=None,
        composer_requirements=req,
        metrics=metric_function)
    chain_random_composed.fit_from_scratch(input_data=dataset_to_compose)
    predicted_random_composed = chain_random_composed.predict(
        dataset_to_validate)
    roc_on_valid_random_composed = roc_auc(
        y_true=dataset_to_validate.target,
        y_score=predicted_random_composed.predict)
    assert roc_on_valid_random_composed > 0.6
def test_classification_quality_metric(data_setup):
    """Plain and penalised ROC AUC both lie in (0.5, 1.0) by magnitude,
    and the penalty never improves the (minimised) metric value."""
    train, _ = data_setup
    chain = default_valid_chain()
    chain.fit(input_data=train)

    repo = MetricsRepository()
    plain_metric = repo.metric_by_id(ClassificationMetricsEnum.ROCAUC)
    penalised_metric = repo.metric_by_id(
        ClassificationMetricsEnum.ROCAUC_penalty)

    plain_value = plain_metric(chain=chain, reference_data=train)
    penalised_value = penalised_metric(chain=chain, reference_data=train)

    assert 0.5 < abs(plain_value) < 1.0
    assert 0.5 < abs(penalised_value) < 1.0
    assert plain_value < penalised_value
def test_gp_composer_build_chain_correct(data_fixture, request):
    """GP composer assembles a chain whose train-set ROC AUC exceeds 0.6."""
    # Fix both RNGs so the evolutionary search is reproducible.
    random.seed(1)
    np.random.seed(1)
    data = request.getfixturevalue(data_fixture)
    # Composition and validation deliberately reuse the same dataset here.
    dataset_to_compose = data
    dataset_to_validate = data
    available_model_types, _ = ModelTypesRepository().suitable_model(
        task_type=TaskTypesEnum.classification)
    metric_function = MetricsRepository().metric_by_id(
        ClassificationMetricsEnum.ROCAUC)
    gp_composer = GPComposer()
    # Tiny search budget: population of 2, one generation, shallow chains.
    req = GPComposerRequirements(primary=available_model_types,
                                 secondary=available_model_types,
                                 max_arity=2,
                                 max_depth=2,
                                 pop_size=2,
                                 num_of_generations=1,
                                 crossover_prob=0.4,
                                 mutation_prob=0.5)
    chain_gp_composed = gp_composer.compose_chain(data=dataset_to_compose,
                                                  initial_chain=None,
                                                  composer_requirements=req,
                                                  metrics=metric_function)
    chain_gp_composed.fit_from_scratch(input_data=dataset_to_compose)
    predicted_gp_composed = chain_gp_composed.predict(dataset_to_validate)
    roc_on_valid_gp_composed = roc_auc(y_true=dataset_to_validate.target,
                                       y_score=predicted_gp_composed.predict)
    assert roc_on_valid_gp_composed > 0.6
def test_regression_quality_metric(data_setup):
    """RMSE and its penalised variant are both positive, and the
    penalty strictly worsens (increases) the metric value."""
    train, _ = data_setup
    chain = default_valid_chain()
    chain.fit(input_data=train)

    repo = MetricsRepository()
    rmse_metric = repo.metric_by_id(RegressionMetricsEnum.RMSE)
    penalised_metric = repo.metric_by_id(RegressionMetricsEnum.RMSE_penalty)

    rmse_value = rmse_metric(chain=chain, reference_data=train)
    penalised_value = penalised_metric(chain=chain, reference_data=train)

    assert rmse_value > 0
    assert penalised_value > 0
    assert rmse_value < penalised_value
def get_model(train_file_path: str,
              cur_lead_time: datetime.timedelta = timedelta(seconds=60)):
    """Compose and fit a classification chain for the CSV dataset at *train_file_path*.

    :param train_file_path: path to a CSV file readable by InputData.from_csv
    :param cur_lead_time: wall-clock budget for the GP composition phase
    :return: the fitted composed chain
    """
    task = Task(task_type=TaskTypesEnum.classification)
    dataset_to_compose = InputData.from_csv(train_file_path, task=task)

    # the search of the models provided by the framework
    # that can be used as nodes in a chain for the selected task
    models_repo = ModelTypesRepository()
    available_model_types, _ = models_repo.suitable_model(
        task_type=task.task_type)

    # Penalised ROC AUC discourages overly complex chains during search.
    metric_function = MetricsRepository(). \
        metric_by_id(ClassificationMetricsEnum.ROCAUC_penalty)

    composer_requirements = GPComposerRequirements(
        primary=available_model_types,
        secondary=available_model_types,
        max_lead_time=cur_lead_time)

    # Create the genetic programming-based composer, that allow to find
    # the optimal structure of the composite model
    composer = GPComposer()

    # run the search of best suitable model
    chain_evo_composed = composer.compose_chain(
        data=dataset_to_compose,
        initial_chain=None,
        composer_requirements=composer_requirements,
        metrics=metric_function,
        is_visualise=False)
    chain_evo_composed.fit(input_data=dataset_to_compose)

    return chain_evo_composed
def test_structural_quality_correct():
    """The structural complexity metric of the default valid chain equals 13."""
    expected_value = 13
    chain = default_valid_chain()
    structural_metric = MetricsRepository().metric_by_id(
        ComplexityMetricsEnum.structural)
    # Structural metric needs no reference data, hence None.
    assert structural_metric(chain, None) == expected_value
def test_classification_quality_metric(data_setup):
    """ROC AUC of the default valid chain on its own training data is a
    proper fraction (strictly between 0 and 1 in magnitude)."""
    train, _ = data_setup
    roc_metric = MetricsRepository().metric_by_id(
        ClassificationMetricsEnum.ROCAUC)
    chain = default_valid_chain()
    chain.fit(input_data=train)
    value = roc_metric(chain=chain, reference_data=train)
    assert 0.0 < abs(value) < 1.0
def run_credit_scoring_problem(
        train_file_path, test_file_path,
        max_lead_time: datetime.timedelta = datetime.timedelta(minutes=5),
        is_visualise=False):
    """Compose, tune and fit a scoring chain; return its validation ROC AUC.

    :param train_file_path: CSV used for composition and fitting
    :param test_file_path: CSV used only for final validation
    :param max_lead_time: wall-clock budget for the GP composition phase
    :param is_visualise: when True, render the composed chain
    :return: ROC AUC of the composed chain on the validation set
    """
    task = Task(TaskTypesEnum.classification)
    dataset_to_compose = InputData.from_csv(train_file_path, task=task)
    dataset_to_validate = InputData.from_csv(test_file_path, task=task)

    # the search of the models provided by the framework that can be used as nodes in a chain for the selected task
    available_model_types, _ = ModelTypesRepository().suitable_model(
        task_type=task.task_type)

    # the choice of the metric for the chain quality assessment during composition
    metric_function = MetricsRepository().metric_by_id(
        ClassificationMetricsEnum.ROCAUC_penalty)

    # the choice and initialisation of the GP search
    composer_requirements = GPComposerRequirements(
        primary=available_model_types,
        secondary=available_model_types, max_arity=3,
        max_depth=3, pop_size=20, num_of_generations=20,
        crossover_prob=0.8, mutation_prob=0.8, max_lead_time=max_lead_time)

    # Create GP-based composer
    composer = GPComposer()

    # the optimal chain generation by composition - the most time-consuming task
    chain_evo_composed = composer.compose_chain(
        data=dataset_to_compose,
        initial_chain=None,
        composer_requirements=composer_requirements,
        metrics=metric_function,
        is_visualise=False)
    # Local tuning of the primary nodes before the final fit.
    chain_evo_composed.fine_tune_primary_nodes(input_data=dataset_to_compose,
                                               iterations=50)
    chain_evo_composed.fit(input_data=dataset_to_compose, verbose=True)

    if is_visualise:
        ComposerVisualiser.visualise(chain_evo_composed)

    # the quality assessment for the obtained composite models
    roc_on_valid_evo_composed = calculate_validation_metric(
        chain_evo_composed, dataset_to_validate)

    print(f'Composed ROC AUC is {round(roc_on_valid_evo_composed, 3)}')

    return roc_on_valid_evo_composed
def compose_chain(data: InputData) -> Chain:
    """Build a fixed two-level chain (two k-means primaries feeding a
    logit secondary) with the hierarchical dummy composer."""
    requirements = ComposerRequirements(
        primary=[ModelTypesIdsEnum.kmeans, ModelTypesIdsEnum.kmeans],
        secondary=[ModelTypesIdsEnum.logit])
    quality_metric = MetricsRepository().metric_by_id(
        ClassificationMetricsEnum.ROCAUC)
    composer = DummyComposer(DummyChainTypeEnum.hierarchical)
    return composer.compose_chain(data=data,
                                  initial_chain=None,
                                  composer_requirements=requirements,
                                  metrics=quality_metric,
                                  is_visualise=False)
def compose_chain(data: InputData) -> Chain:
    """Build a fixed two-level regression chain (lasso and ridge primaries
    feeding a linear secondary) with the hierarchical dummy composer."""
    requirements = ComposerRequirements(
        primary=[ModelTypesIdsEnum.lasso, ModelTypesIdsEnum.ridge],
        secondary=[ModelTypesIdsEnum.linear])
    quality_metric = MetricsRepository().metric_by_id(
        RegressionMetricsEnum.RMSE)
    composer = DummyComposer(DummyChainTypeEnum.hierarchical)
    return composer.compose_chain(data=data,
                                  initial_chain=None,
                                  composer_requirements=requirements,
                                  metrics=quality_metric,
                                  is_visualise=False)
def test_gp_composer_quality(data_fixture, request):
    """GP composer either rediscovers the hand-made baseline chain or finds
    one of equivalent ROC AUC (within 0.01), on targets relabelled by the
    baseline itself."""
    random.seed(1)
    data = request.getfixturevalue(data_fixture)
    dataset_to_compose = data
    dataset_to_validate = data
    models_repo = ModelTypesRepository()
    available_model_types, _ = models_repo.search_models(
        desired_metainfo=ModelMetaInfoTemplate(
            input_type=NumericalDataTypesEnum.table,
            output_type=CategoricalDataTypesEnum.vector,
            task_type=MachineLearningTasksEnum.classification,
            can_be_initial=True,
            can_be_secondary=True))
    metric_function = MetricsRepository().metric_by_id(
        ClassificationMetricsEnum.ROCAUC)

    baseline = baseline_chain()
    baseline.fit_from_scratch(input_data=dataset_to_compose)

    predict_baseline = baseline.predict(dataset_to_validate).predict
    # Relabel the composition targets with the baseline's rounded predictions,
    # so the baseline is by construction a near-perfect model for this data.
    # NOTE: dataset_to_validate aliases the same object, so its target changes too.
    dataset_to_compose.target = np.array(
        [int(round(i)) for i in predict_baseline])

    composer_requirements = GPComposerRequirements(
        primary=available_model_types,
        secondary=available_model_types,
        max_arity=2,
        max_depth=3,
        pop_size=5,
        num_of_generations=5,
        crossover_prob=0.8,
        mutation_prob=0.8)

    # Create GP-based composer
    composer = GPComposer()
    composed_chain = composer.compose_chain(
        data=dataset_to_compose,
        initial_chain=None,
        composer_requirements=composer_requirements,
        metrics=metric_function)
    composed_chain.fit_from_scratch(input_data=dataset_to_compose)

    predict_composed = composed_chain.predict(dataset_to_validate).predict

    roc_auc_chain_created_by_hand = roc_auc(y_true=dataset_to_validate.target,
                                            y_score=predict_baseline)
    roc_auc_chain_evo_alg = roc_auc(y_true=dataset_to_validate.target,
                                    y_score=predict_composed)
    print("model created by hand prediction:", roc_auc_chain_created_by_hand)
    print("gp composed model prediction:", roc_auc_chain_evo_alg)

    # Either the same chain was found, or a different one of comparable quality.
    assert composed_chain == baseline or composed_chain != baseline and abs(
        roc_auc_chain_created_by_hand - roc_auc_chain_evo_alg) < 0.01
def run_credit_scoring_problem(train_file_path, test_file_path,
                               max_lead_time: datetime.timedelta = datetime.timedelta(minutes=5),
                               gp_optimiser_params: Optional[GPChainOptimiserParameters] = None,
                               pop_size=None, generations=None):
    """Compose and fit a scoring chain with configurable GP optimiser settings.

    :param train_file_path: CSV used for composition and fitting
    :param test_file_path: CSV used only for final validation
    :param max_lead_time: wall-clock budget for the GP composition phase
    :param gp_optimiser_params: explicit optimiser settings; when None a
        default tournament/subtree/decremental configuration is used
    :param pop_size: GP population size (passed through to the requirements)
    :param generations: number of GP generations (passed through)
    :return: (validation ROC AUC, composed chain, composer)
    """
    dataset_to_compose = InputData.from_csv(train_file_path)
    dataset_to_validate = InputData.from_csv(test_file_path)

    models_repo = ModelTypesRepository()
    available_model_types, _ = models_repo.search_models(
        desired_metainfo=ModelMetaInfoTemplate(input_type=NumericalDataTypesEnum.table,
                                               output_type=CategoricalDataTypesEnum.vector,
                                               task_type=[MachineLearningTasksEnum.classification,
                                                          MachineLearningTasksEnum.clustering],
                                               can_be_initial=True,
                                               can_be_secondary=True))

    # the choice of the metric for the chain quality assessment during composition
    metric_function = MetricsRepository().metric_by_id(ClassificationMetricsEnum.ROCAUC)

    if gp_optimiser_params:
        optimiser_parameters = gp_optimiser_params
    else:
        # Default evolutionary operators used when no explicit params given.
        selection_types = [SelectionTypesEnum.tournament]
        crossover_types = [CrossoverTypesEnum.subtree]
        mutation_types = [MutationTypesEnum.simple, MutationTypesEnum.growth, MutationTypesEnum.reduce]
        regularization_type = RegularizationTypesEnum.decremental
        optimiser_parameters = GPChainOptimiserParameters(selection_types=selection_types,
                                                          crossover_types=crossover_types,
                                                          mutation_types=mutation_types,
                                                          regularization_type=regularization_type)

    composer_requirements = GPComposerRequirements(
        primary=available_model_types,
        secondary=available_model_types, max_arity=4,
        max_depth=3, pop_size=pop_size, num_of_generations=generations,
        crossover_prob=0.8, mutation_prob=0.8, max_lead_time=max_lead_time)

    # Create GP-based composer
    composer = GPComposer()

    chain_evo_composed = composer.compose_chain(data=dataset_to_compose,
                                                initial_chain=None,
                                                composer_requirements=composer_requirements,
                                                metrics=metric_function,
                                                optimiser_parameters=optimiser_parameters,
                                                is_visualise=False)
    chain_evo_composed.fit(input_data=dataset_to_compose, verbose=True)

    roc_on_valid_evo_composed = calculate_validation_metric(chain_evo_composed, dataset_to_validate)

    print(f'Composed ROC AUC is {round(roc_on_valid_evo_composed, 3)}')

    return roc_on_valid_evo_composed, chain_evo_composed, composer
def test_composition_time(data_fixture, request):
    """A near-zero lead time terminates evolution early, yet both composers
    leave the same history length."""
    random.seed(1)
    np.random.seed(1)
    data = request.getfixturevalue(data_fixture)
    models_impl = [ModelTypesIdsEnum.mlp, ModelTypesIdsEnum.knn]
    metric_function = MetricsRepository().metric_by_id(
        ClassificationMetricsEnum.ROCAUC)

    gp_composer_terminated_evolution = GPComposer()

    # max_lead_time of ~0.6s forces termination before 5 generations finish.
    req_terminated_evolution = GPComposerRequirements(
        primary=models_impl,
        secondary=models_impl, max_arity=2,
        max_depth=2,
        pop_size=2, num_of_generations=5, crossover_prob=0.9,
        mutation_prob=0.9, max_lead_time=datetime.timedelta(minutes=0.01))

    chain_terminated_evolution = gp_composer_terminated_evolution.compose_chain(
        data=data,
        initial_chain=None,
        composer_requirements=req_terminated_evolution,
        metrics=metric_function)

    gp_composer_completed_evolution = GPComposer()

    req_completed_evolution = GPComposerRequirements(primary=models_impl,
                                                     secondary=models_impl,
                                                     max_arity=2,
                                                     max_depth=2,
                                                     pop_size=2,
                                                     num_of_generations=2,
                                                     crossover_prob=0.4,
                                                     mutation_prob=0.5)

    chain_completed_evolution = gp_composer_completed_evolution.compose_chain(
        data=data,
        initial_chain=None,
        composer_requirements=req_completed_evolution,
        metrics=metric_function)

    # NOTE(review): both runs are expected to record exactly 4 history entries
    # (pop_size * num_of_generations for the completed run; presumably the
    # terminated run stops after the same count) — confirm against GPComposer.
    assert len(gp_composer_terminated_evolution.history) == 4
    assert len(gp_composer_completed_evolution.history) == 4
def test_gp_composer_build_chain_correct(data_fixture, request):
    """GP composer (models selected via metainfo search) builds a chain with
    train-set ROC AUC > 0.6."""
    # Fix both RNGs so the evolutionary search is reproducible.
    random.seed(1)
    np.random.seed(1)
    data = request.getfixturevalue(data_fixture)
    # Composition and validation deliberately reuse the same dataset here.
    dataset_to_compose = data
    dataset_to_validate = data
    models_repo = ModelTypesRepository()
    available_model_types, _ = models_repo.search_models(
        desired_metainfo=ModelMetaInfoTemplate(
            input_type=NumericalDataTypesEnum.table,
            output_type=CategoricalDataTypesEnum.vector,
            task_type=MachineLearningTasksEnum.classification,
            can_be_initial=True,
            can_be_secondary=True))
    metric_function = MetricsRepository().metric_by_id(
        ClassificationMetricsEnum.ROCAUC)
    gp_composer = GPComposer()
    # Tiny search budget: population of 2, one generation, shallow chains.
    req = GPComposerRequirements(primary=available_model_types,
                                 secondary=available_model_types,
                                 max_arity=2, max_depth=2, pop_size=2,
                                 num_of_generations=1,
                                 crossover_prob=0.4, mutation_prob=0.5)
    chain_gp_composed = gp_composer.compose_chain(data=dataset_to_compose,
                                                  initial_chain=None,
                                                  composer_requirements=req,
                                                  metrics=metric_function)
    chain_gp_composed.fit_from_scratch(input_data=dataset_to_compose)
    predicted_gp_composed = chain_gp_composed.predict(dataset_to_validate)
    roc_on_valid_gp_composed = roc_auc(y_true=dataset_to_validate.target,
                                       y_score=predicted_gp_composed.predict)
    assert roc_on_valid_gp_composed > 0.6
def test_fixed_structure_composer(data_fixture, request):
    """Fixed-structure composer keeps the reference chain's depth and length
    while reaching ROC AUC > 0.6."""
    random.seed(1)
    np.random.seed(1)
    data = request.getfixturevalue(data_fixture)
    # Composition and validation deliberately reuse the same dataset here.
    dataset_to_compose = data
    dataset_to_validate = data
    available_model_types = ['logit', 'lda', 'knn']
    metric_function = MetricsRepository().metric_by_id(
        ClassificationMetricsEnum.ROCAUC)
    composer = FixedStructureComposer()
    req = GPComposerRequirements(primary=available_model_types,
                                 secondary=available_model_types,
                                 pop_size=2, num_of_generations=1,
                                 crossover_prob=0.4, mutation_prob=0.5,
                                 add_single_model_chains=False)
    # The composer may only swap models inside this reference topology.
    reference_chain = get_class_chain()
    chain_composed = composer.compose_chain(data=dataset_to_compose,
                                            initial_chain=reference_chain,
                                            composer_requirements=req,
                                            metrics=metric_function)
    chain_composed.fit_from_scratch(input_data=dataset_to_compose)
    predicted_random_composed = chain_composed.predict(dataset_to_validate)
    roc_on_valid_random_composed = roc_auc(
        y_true=dataset_to_validate.target,
        y_score=predicted_random_composed.predict)
    assert roc_on_valid_random_composed > 0.6
    # Structure must be preserved exactly.
    assert chain_composed.depth == reference_chain.depth
    assert chain_composed.length == reference_chain.length
def test_random_composer(data_fixture, request):
    """Random-search composer (models selected via metainfo search) builds a
    chain scoring ROC AUC > 0.6 on its own train data."""
    # Fix both RNGs so the random search is reproducible.
    random.seed(1)
    np.random.seed(1)
    data = request.getfixturevalue(data_fixture)
    # Composition and validation deliberately reuse the same dataset here.
    dataset_to_compose = data
    dataset_to_validate = data
    models_repo = ModelTypesRepository()
    available_model_types, _ = models_repo.search_models(
        desired_metainfo=ModelMetaInfoTemplate(
            input_type=NumericalDataTypesEnum.table,
            output_type=CategoricalDataTypesEnum.vector,
            task_type=MachineLearningTasksEnum.classification,
            can_be_initial=True,
            can_be_secondary=True))
    metric_function = MetricsRepository().metric_by_id(
        ClassificationMetricsEnum.ROCAUC)
    # Minimal budget: a single random-search iteration.
    random_composer = RandomSearchComposer(iter_num=1)
    req = ComposerRequirements(primary=available_model_types,
                               secondary=available_model_types)
    chain_random_composed = random_composer.compose_chain(
        data=dataset_to_compose,
        initial_chain=None,
        composer_requirements=req,
        metrics=metric_function)
    chain_random_composed.fit_from_scratch(input_data=dataset_to_compose)
    predicted_random_composed = chain_random_composed.predict(
        dataset_to_validate)
    roc_on_valid_random_composed = roc_auc(
        y_true=dataset_to_validate.target,
        y_score=predicted_random_composed.predict)
    assert roc_on_valid_random_composed > 0.6
def run_fedot(params: 'ExecutionParams'):
    """Run the FEDOT benchmark case described by *params*.

    Loads a previously saved model when one exists for this case/metric/budget
    combination; otherwise composes, tunes, fits and saves a new chain.

    :param params: execution description with train/test file paths,
        case label and task type
    :return: (validation targets, chain predictions) for metric calculation
    :raises NotImplementedError: for task types other than classification
        and regression
    """
    train_file_path = params.train_file
    test_file_path = params.test_file
    case_label = params.case_label
    task_type = params.task

    # Pick the composition metric matching the task.
    if task_type == TaskTypesEnum.classification:
        metric = ClassificationMetricsEnum.ROCAUC
    elif task_type == TaskTypesEnum.regression:
        metric = RegressionMetricsEnum.RMSE
    else:
        raise NotImplementedError()

    task = Task(task_type)
    dataset_to_compose = InputData.from_csv(train_file_path, task=task)
    dataset_to_validate = InputData.from_csv(test_file_path, task=task)

    models_hyperparameters = get_models_hyperparameters()['FEDOT']
    cur_lead_time = models_hyperparameters['MAX_RUNTIME_MINS']

    # Cache key encodes everything that affects the composed model.
    saved_model_name = f'fedot_{case_label}_{task_type}_{cur_lead_time}_{metric}'
    loaded_model = load_fedot_model(saved_model_name)

    if not loaded_model:
        generations = models_hyperparameters['GENERATIONS']
        population_size = models_hyperparameters['POPULATION_SIZE']

        # the search of the models provided by the framework that can be used as nodes in a chain'
        models_repo = ModelTypesRepository()
        available_model_types, _ = models_repo.suitable_model(task.task_type)

        metric_function = MetricsRepository().metric_by_id(metric)

        composer_requirements = GPComposerRequirements(
            primary=available_model_types, secondary=available_model_types,
            max_arity=3,
            max_depth=3, pop_size=population_size, num_of_generations=generations,
            crossover_prob=0.8, mutation_prob=0.8,
            max_lead_time=datetime.timedelta(minutes=cur_lead_time))

        # Create GP-based composer
        composer = GPComposer()

        # the optimal chain generation by composition - the most time-consuming task
        chain_evo_composed = composer.compose_chain(
            data=dataset_to_compose,
            initial_chain=None,
            composer_requirements=composer_requirements,
            metrics=metric_function, is_visualise=False)
        chain_evo_composed.fine_tune_primary_nodes(
            input_data=dataset_to_compose, iterations=50)
        chain_evo_composed.fit(input_data=dataset_to_compose, verbose=False)
        save_fedot_model(chain_evo_composed, saved_model_name)
    else:
        chain_evo_composed = loaded_model

    evo_predicted = chain_evo_composed.predict(dataset_to_validate)

    return dataset_to_validate.target, evo_predicted.predict
def run_metocean_forecasting_problem(train_file_path, test_file_path,
                                     forecast_length=1, max_window_size=64,
                                     with_visualisation=True):
    """Compose and fit a fixed-structure composite chain for time-series
    forecasting and return its RMSE on the validation set.

    :param train_file_path: project-relative CSV with the training series
    :param test_file_path: project-relative CSV with the validation series
    :param forecast_length: number of steps ahead to predict
    :param max_window_size: maximum lag window used by the models
    :param with_visualisation: when True, render the chain and the forecast plot
    :return: RMSE of the composed chain on the validation series
    """
    # specify the task to solve
    task_to_solve = Task(
        TaskTypesEnum.ts_forecasting,
        TsForecastingParams(forecast_length=forecast_length,
                            max_window_size=max_window_size))

    full_path_train = os.path.join(str(project_root()), train_file_path)
    dataset_to_train = InputData.from_csv(full_path_train,
                                          task=task_to_solve,
                                          data_type=DataTypesEnum.ts)

    # a dataset for a final validation of the composed model
    full_path_test = os.path.join(str(project_root()), test_file_path)
    dataset_to_validate = InputData.from_csv(full_path_test,
                                             task=task_to_solve,
                                             data_type=DataTypesEnum.ts)

    metric_function = MetricsRepository().metric_by_id(
        RegressionMetricsEnum.RMSE)

    # The composed chain must keep this reference topology.
    ref_chain = get_composite_lstm_chain()

    available_model_types_primary = ['trend_data_model', 'residual_data_model']
    available_model_types_secondary = [
        'rfr', 'linear', 'ridge', 'lasso', 'additive_data_model'
    ]

    composer = FixedStructureComposer()
    # crossover_prob=0: structure is fixed, only mutation varies the models.
    composer_requirements = GPComposerRequirements(
        primary=available_model_types_primary,
        secondary=available_model_types_secondary,
        max_arity=2, max_depth=4, pop_size=10, num_of_generations=10,
        crossover_prob=0, mutation_prob=0.8,
        max_lead_time=datetime.timedelta(minutes=20))

    chain = composer.compose_chain(data=dataset_to_train,
                                   initial_chain=ref_chain,
                                   composer_requirements=composer_requirements,
                                   metrics=metric_function,
                                   is_visualise=False)

    if with_visualisation:
        ComposerVisualiser.visualise(chain)

    chain.fit(input_data=dataset_to_train, verbose=False)
    rmse_on_valid = calculate_validation_metric(
        chain.predict(dataset_to_validate), dataset_to_validate,
        f'full-composite_{forecast_length}',
        is_visualise=with_visualisation)

    print(f'RMSE composite: {rmse_on_valid}')

    return rmse_on_valid
full_path_test = os.path.join(str(project_root()), file_path_test) dataset_to_validate = InputData.from_csv(full_path_test, task_type=problem_class) # the search of the models provided by the framework that can be used as nodes in a chain for the selected task models_repo = ModelTypesRepository() available_model_types, _ = models_repo.search_models( desired_metainfo=ModelMetaInfoTemplate( input_type=NumericalDataTypesEnum.table, output_type=CategoricalDataTypesEnum.vector, task_type=problem_class, can_be_initial=True, can_be_secondary=True)) # the choice of the metric for the chain quality assessment during composition metric_function = MetricsRepository().metric_by_id(RegressionMetricsEnum.RMSE) # the choice and initialisation single_composer_requirements = ComposerRequirements( primary=[ModelTypesIdsEnum.ar], secondary=[]) chain_single = DummyComposer(DummyChainTypeEnum.flat).compose_chain( data=dataset_to_compose, initial_chain=None, composer_requirements=single_composer_requirements, metrics=metric_function) train_prediction = chain_single.fit(input_data=dataset_to_compose, verbose=True) print("Composition finished") compare_plot(train_prediction, dataset_to_compose)
def run_credit_scoring_problem(train_file_path, test_file_path,
                               max_lead_time: datetime.timedelta = datetime.timedelta(minutes=20),
                               gp_optimiser_params: Optional[GPChainOptimiserParameters] = None):
    """Compose three scoring chains (GP-evolved, static hierarchical dummy,
    single-model) and report the validation ROC AUC of each.

    :param train_file_path: CSV used for composition and fitting
    :param test_file_path: CSV used only for final validation
    :param max_lead_time: wall-clock budget for the GP composition phase
    :param gp_optimiser_params: explicit optimiser settings; when None a
        default configuration with a custom chain-generation function is used
    :return: ((evo ROC AUC, evo chain), (static chain, static ROC AUC),
             (single chain, single ROC AUC))
    """
    dataset_to_compose = InputData.from_csv(train_file_path)
    dataset_to_validate = InputData.from_csv(test_file_path)

    # the search of the models provided by the framework that can be used as nodes in a chain for the selected task
    models_repo = ModelTypesRepository()
    available_model_types, _ = models_repo.search_models(
        desired_metainfo=ModelMetaInfoTemplate(input_type=NumericalDataTypesEnum.table,
                                               output_type=CategoricalDataTypesEnum.vector,
                                               task_type=[MachineLearningTasksEnum.classification,
                                                          MachineLearningTasksEnum.clustering],
                                               can_be_initial=True,
                                               can_be_secondary=True))

    # the choice of the metric for the chain quality assessment during composition
    metric_function = MetricsRepository().metric_by_id(ClassificationMetricsEnum.ROCAUC)

    if gp_optimiser_params:
        optimiser_parameters = gp_optimiser_params
    else:
        # Default optimiser uses custom chain-generation and operator tables.
        optimiser_parameters = GPChainOptimiserParameters(selection_types=[SelectionTypesEnum.tournament],
                                                          crossover_types=[CrossoverTypesEnum.subtree],
                                                          mutation_types=[MutationTypesEnum.growth],
                                                          regularization_type=RegularizationTypesEnum.decremental,
                                                          chain_generation_function=random_ml_chain,
                                                          crossover_types_dict=crossover_by_type,
                                                          mutation_types_dict=mutation_by_type)

    composer_requirements = GPComposerRequirements(
        primary=available_model_types,
        secondary=available_model_types, max_arity=4,
        max_depth=3, pop_size=5, num_of_generations=5,
        crossover_prob=0.8, mutation_prob=0.8, max_lead_time=max_lead_time)

    # Create GP-based composer
    composer = GPComposer()

    # the optimal chain generation by composition - the most time-consuming task
    chain_evo_composed = composer.compose_chain(data=dataset_to_compose,
                                                initial_chain=None,
                                                composer_requirements=composer_requirements,
                                                metrics=metric_function,
                                                optimiser_parameters=optimiser_parameters,
                                                is_visualise=False)
    chain_evo_composed.fit(input_data=dataset_to_compose, verbose=True)

    # the choice and initialisation of the dummy_composer
    dummy_composer = DummyComposer(DummyChainTypeEnum.hierarchical)
    chain_static = dummy_composer.compose_chain(data=dataset_to_compose,
                                                initial_chain=None,
                                                composer_requirements=composer_requirements,
                                                metrics=metric_function, is_visualise=True)
    chain_static.fit(input_data=dataset_to_compose, verbose=True)

    # the single-model variant of optimal chain
    single_composer_requirements = ComposerRequirements(primary=[ModelTypesIdsEnum.xgboost],
                                                        secondary=[])
    chain_single = DummyComposer(DummyChainTypeEnum.flat).compose_chain(data=dataset_to_compose,
                                                                        initial_chain=None,
                                                                        composer_requirements=single_composer_requirements,
                                                                        metrics=metric_function)
    chain_single.fit(input_data=dataset_to_compose, verbose=True)
    print("Composition finished")

    ComposerVisualiser.visualise(chain_static)
    ComposerVisualiser.visualise(chain_evo_composed)

    # the quality assessment for the obtained composite models
    roc_on_valid_static = calculate_validation_metric(chain_static, dataset_to_validate)
    roc_on_valid_single = calculate_validation_metric(chain_single, dataset_to_validate)
    roc_on_valid_evo_composed = calculate_validation_metric(chain_evo_composed, dataset_to_validate)

    print(f'Composed ROC AUC is {round(roc_on_valid_evo_composed, 3)}')
    print(f'Static ROC AUC is {round(roc_on_valid_static, 3)}')
    print(f'Single-model ROC AUC is {round(roc_on_valid_single, 3)}')

    return (roc_on_valid_evo_composed, chain_evo_composed), (chain_static, roc_on_valid_static), (
        chain_single, roc_on_valid_single)