def get_model(train_file_path: str,
              cur_lead_time: datetime.timedelta = datetime.timedelta(seconds=60)):
    task = Task(task_type=TaskTypesEnum.classification)
    dataset_to_compose = InputData.from_csv(train_file_path, task=task)

    # the search of the models provided by the framework
    # that can be used as nodes in a chain for the selected task
    models_repo = ModelTypesRepository()
    available_model_types, _ = models_repo.suitable_model(
        task_type=task.task_type)

    metric_function = MetricsRepository(). \
        metric_by_id(ClassificationMetricsEnum.ROCAUC_penalty)

    composer_requirements = GPComposerRequirements(
        primary=available_model_types, secondary=available_model_types,
        max_lead_time=cur_lead_time)

    # Create the genetic programming-based composer that allows finding
    # the optimal structure of the composite model
    composer = GPComposer()

    # run the search for the best suitable chain
    chain_evo_composed = composer.compose_chain(
        data=dataset_to_compose, initial_chain=None,
        composer_requirements=composer_requirements, metrics=metric_function,
        is_visualise=False)
    chain_evo_composed.fit(input_data=dataset_to_compose)

    return chain_evo_composed
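# A minimal usage sketch for get_model. The CSV paths are hypothetical, and
# roc_auc is assumed to be sklearn.metrics.roc_auc_score (as in the tests
# below); neither is part of the example above.
def example_get_model_usage():
    chain = get_model('data/scoring_train.csv')  # hypothetical path
    test_data = InputData.from_csv('data/scoring_test.csv',  # hypothetical path
                                   task=Task(TaskTypesEnum.classification))
    predicted = chain.predict(test_data)
    print(roc_auc(y_true=test_data.target, y_score=predicted.predict))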
def test_gp_composer_build_chain_correct(data_fixture, request):
    random.seed(1)
    np.random.seed(1)
    data = request.getfixturevalue(data_fixture)
    dataset_to_compose = data
    dataset_to_validate = data

    available_model_types, _ = ModelTypesRepository().suitable_model(
        task_type=TaskTypesEnum.classification)

    metric_function = MetricsRepository().metric_by_id(
        ClassificationMetricsEnum.ROCAUC)

    gp_composer = GPComposer()
    req = GPComposerRequirements(primary=available_model_types,
                                 secondary=available_model_types,
                                 max_arity=2, max_depth=2,
                                 pop_size=2, num_of_generations=1,
                                 crossover_prob=0.4, mutation_prob=0.5)
    chain_gp_composed = gp_composer.compose_chain(data=dataset_to_compose,
                                                  initial_chain=None,
                                                  composer_requirements=req,
                                                  metrics=metric_function)

    chain_gp_composed.fit_from_scratch(input_data=dataset_to_compose)
    predicted_gp_composed = chain_gp_composed.predict(dataset_to_validate)

    roc_on_valid_gp_composed = roc_auc(y_true=dataset_to_validate.target,
                                       y_score=predicted_gp_composed.predict)

    assert roc_on_valid_gp_composed > 0.6
def run_credit_scoring_problem(train_file_path, test_file_path,
                               max_lead_time: datetime.timedelta = datetime.timedelta(minutes=5),
                               is_visualise=False):
    task = Task(TaskTypesEnum.classification)
    dataset_to_compose = InputData.from_csv(train_file_path, task=task)
    dataset_to_validate = InputData.from_csv(test_file_path, task=task)

    # the search of the models provided by the framework
    # that can be used as nodes in a chain for the selected task
    available_model_types, _ = ModelTypesRepository().suitable_model(
        task_type=task.task_type)

    # the choice of the metric for the chain quality assessment during composition
    metric_function = MetricsRepository().metric_by_id(
        ClassificationMetricsEnum.ROCAUC_penalty)

    # the choice and initialisation of the GP search
    composer_requirements = GPComposerRequirements(
        primary=available_model_types, secondary=available_model_types,
        max_arity=3, max_depth=3, pop_size=20, num_of_generations=20,
        crossover_prob=0.8, mutation_prob=0.8, max_lead_time=max_lead_time)

    # Create GP-based composer
    composer = GPComposer()

    # the optimal chain generation by composition - the most time-consuming task
    chain_evo_composed = composer.compose_chain(
        data=dataset_to_compose, initial_chain=None,
        composer_requirements=composer_requirements, metrics=metric_function,
        is_visualise=False)

    chain_evo_composed.fine_tune_primary_nodes(input_data=dataset_to_compose,
                                               iterations=50)
    chain_evo_composed.fit(input_data=dataset_to_compose, verbose=True)

    if is_visualise:
        ComposerVisualiser.visualise(chain_evo_composed)

    # the quality assessment for the obtained composite model
    roc_on_valid_evo_composed = calculate_validation_metric(
        chain_evo_composed, dataset_to_validate)

    print(f'Composed ROC AUC is {round(roc_on_valid_evo_composed, 3)}')

    return roc_on_valid_evo_composed
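# calculate_validation_metric is called above but not defined in these
# snippets. A minimal sketch consistent with how it is used (the assumption
# is that it computes ROC AUC of the chain's prediction on validation data,
# matching the "Composed ROC AUC" print-outs):
def calculate_validation_metric(chain, dataset_to_validate) -> float:
    predicted = chain.predict(dataset_to_validate)
    return roc_auc(y_true=dataset_to_validate.target,
                   y_score=predicted.predict)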
def test_gp_composer_quality(data_fixture, request):
    random.seed(1)
    data = request.getfixturevalue(data_fixture)
    dataset_to_compose = data
    dataset_to_validate = data

    models_repo = ModelTypesRepository()
    available_model_types, _ = models_repo.search_models(
        desired_metainfo=ModelMetaInfoTemplate(
            input_type=NumericalDataTypesEnum.table,
            output_type=CategoricalDataTypesEnum.vector,
            task_type=MachineLearningTasksEnum.classification,
            can_be_initial=True,
            can_be_secondary=True))

    metric_function = MetricsRepository().metric_by_id(
        ClassificationMetricsEnum.ROCAUC)

    baseline = baseline_chain()
    baseline.fit_from_scratch(input_data=dataset_to_compose)
    predict_baseline = baseline.predict(dataset_to_validate).predict

    # replace the target with the rounded baseline prediction, so the
    # composer has to reproduce the behaviour of the hand-made chain
    dataset_to_compose.target = np.array(
        [int(round(i)) for i in predict_baseline])

    composer_requirements = GPComposerRequirements(
        primary=available_model_types, secondary=available_model_types,
        max_arity=2, max_depth=3, pop_size=5, num_of_generations=5,
        crossover_prob=0.8, mutation_prob=0.8)

    # Create GP-based composer
    composer = GPComposer()
    composed_chain = composer.compose_chain(
        data=dataset_to_compose, initial_chain=None,
        composer_requirements=composer_requirements, metrics=metric_function)

    composed_chain.fit_from_scratch(input_data=dataset_to_compose)
    predict_composed = composed_chain.predict(dataset_to_validate).predict

    roc_auc_chain_created_by_hand = roc_auc(y_true=dataset_to_validate.target,
                                            y_score=predict_baseline)
    roc_auc_chain_evo_alg = roc_auc(y_true=dataset_to_validate.target,
                                    y_score=predict_composed)
    print('model created by hand prediction:', roc_auc_chain_created_by_hand)
    print('gp composed model prediction:', roc_auc_chain_evo_alg)

    assert composed_chain == baseline or (
        composed_chain != baseline and
        abs(roc_auc_chain_created_by_hand - roc_auc_chain_evo_alg) < 0.01)
def run_credit_scoring_problem(train_file_path, test_file_path,
                               max_lead_time: datetime.timedelta = datetime.timedelta(minutes=5),
                               gp_optimiser_params: Optional[GPChainOptimiserParameters] = None,
                               pop_size=None, generations=None):
    dataset_to_compose = InputData.from_csv(train_file_path)
    dataset_to_validate = InputData.from_csv(test_file_path)

    models_repo = ModelTypesRepository()
    available_model_types, _ = models_repo.search_models(
        desired_metainfo=ModelMetaInfoTemplate(
            input_type=NumericalDataTypesEnum.table,
            output_type=CategoricalDataTypesEnum.vector,
            task_type=[MachineLearningTasksEnum.classification,
                       MachineLearningTasksEnum.clustering],
            can_be_initial=True,
            can_be_secondary=True))

    # the choice of the metric for the chain quality assessment during composition
    metric_function = MetricsRepository().metric_by_id(
        ClassificationMetricsEnum.ROCAUC)

    if gp_optimiser_params:
        optimiser_parameters = gp_optimiser_params
    else:
        selection_types = [SelectionTypesEnum.tournament]
        crossover_types = [CrossoverTypesEnum.subtree]
        mutation_types = [MutationTypesEnum.simple, MutationTypesEnum.growth,
                          MutationTypesEnum.reduce]
        regularization_type = RegularizationTypesEnum.decremental
        optimiser_parameters = GPChainOptimiserParameters(
            selection_types=selection_types,
            crossover_types=crossover_types,
            mutation_types=mutation_types,
            regularization_type=regularization_type)

    composer_requirements = GPComposerRequirements(
        primary=available_model_types, secondary=available_model_types,
        max_arity=4, max_depth=3, pop_size=pop_size,
        num_of_generations=generations,
        crossover_prob=0.8, mutation_prob=0.8, max_lead_time=max_lead_time)

    # Create GP-based composer
    composer = GPComposer()
    chain_evo_composed = composer.compose_chain(
        data=dataset_to_compose, initial_chain=None,
        composer_requirements=composer_requirements, metrics=metric_function,
        optimiser_parameters=optimiser_parameters, is_visualise=False)
    chain_evo_composed.fit(input_data=dataset_to_compose, verbose=True)

    roc_on_valid_evo_composed = calculate_validation_metric(chain_evo_composed,
                                                            dataset_to_validate)

    print(f'Composed ROC AUC is {round(roc_on_valid_evo_composed, 3)}')

    return roc_on_valid_evo_composed, chain_evo_composed, composer
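# A hypothetical invocation with custom optimiser parameters. The file paths
# and the specific parameter choices are illustrative; the enums are the same
# ones used in the function above.
custom_params = GPChainOptimiserParameters(
    selection_types=[SelectionTypesEnum.tournament],
    crossover_types=[CrossoverTypesEnum.subtree],
    mutation_types=[MutationTypesEnum.simple],
    regularization_type=RegularizationTypesEnum.decremental)
roc, chain, composer = run_credit_scoring_problem(
    'data/scoring_train.csv', 'data/scoring_test.csv',  # hypothetical paths
    gp_optimiser_params=custom_params, pop_size=10, generations=10)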
def test_gp_composer_build_chain_correct(data_fixture, request):
    random.seed(1)
    np.random.seed(1)
    data = request.getfixturevalue(data_fixture)
    dataset_to_compose = data
    dataset_to_validate = data

    models_repo = ModelTypesRepository()
    available_model_types, _ = models_repo.search_models(
        desired_metainfo=ModelMetaInfoTemplate(
            input_type=NumericalDataTypesEnum.table,
            output_type=CategoricalDataTypesEnum.vector,
            task_type=MachineLearningTasksEnum.classification,
            can_be_initial=True,
            can_be_secondary=True))

    metric_function = MetricsRepository().metric_by_id(
        ClassificationMetricsEnum.ROCAUC)

    gp_composer = GPComposer()
    req = GPComposerRequirements(primary=available_model_types,
                                 secondary=available_model_types,
                                 max_arity=2, max_depth=2,
                                 pop_size=2, num_of_generations=1,
                                 crossover_prob=0.4, mutation_prob=0.5)
    chain_gp_composed = gp_composer.compose_chain(data=dataset_to_compose,
                                                  initial_chain=None,
                                                  composer_requirements=req,
                                                  metrics=metric_function)

    chain_gp_composed.fit_from_scratch(input_data=dataset_to_compose)
    predicted_gp_composed = chain_gp_composed.predict(dataset_to_validate)

    roc_on_valid_gp_composed = roc_auc(y_true=dataset_to_validate.target,
                                       y_score=predicted_gp_composed.predict)

    assert roc_on_valid_gp_composed > 0.6
def test_composition_time(data_fixture, request):
    random.seed(1)
    np.random.seed(1)
    data = request.getfixturevalue(data_fixture)

    models_impl = [ModelTypesIdsEnum.mlp, ModelTypesIdsEnum.knn]
    metric_function = MetricsRepository().metric_by_id(
        ClassificationMetricsEnum.ROCAUC)

    # a composition that should be cut short by the max_lead_time limit
    gp_composer_terminated_evolution = GPComposer()
    req_terminated_evolution = GPComposerRequirements(
        primary=models_impl, secondary=models_impl,
        max_arity=2, max_depth=2, pop_size=2, num_of_generations=5,
        crossover_prob=0.9, mutation_prob=0.9,
        max_lead_time=datetime.timedelta(minutes=0.01))
    chain_terminated_evolution = gp_composer_terminated_evolution.compose_chain(
        data=data, initial_chain=None,
        composer_requirements=req_terminated_evolution,
        metrics=metric_function)

    # a composition that is allowed to run all its generations
    gp_composer_completed_evolution = GPComposer()
    req_completed_evolution = GPComposerRequirements(
        primary=models_impl, secondary=models_impl,
        max_arity=2, max_depth=2, pop_size=2, num_of_generations=2,
        crossover_prob=0.4, mutation_prob=0.5)
    chain_completed_evolution = gp_composer_completed_evolution.compose_chain(
        data=data, initial_chain=None,
        composer_requirements=req_completed_evolution,
        metrics=metric_function)

    # the early-terminated run should leave the same history length
    # as the short run that completed naturally
    assert len(gp_composer_terminated_evolution.history) == 4
    assert len(gp_composer_completed_evolution.history) == 4
def run_fedot(params: 'ExecutionParams'):
    train_file_path = params.train_file
    test_file_path = params.test_file
    case_label = params.case_label
    task_type = params.task

    if task_type == TaskTypesEnum.classification:
        metric = ClassificationMetricsEnum.ROCAUC
    elif task_type == TaskTypesEnum.regression:
        metric = RegressionMetricsEnum.RMSE
    else:
        raise NotImplementedError()

    task = Task(task_type)
    dataset_to_compose = InputData.from_csv(train_file_path, task=task)
    dataset_to_validate = InputData.from_csv(test_file_path, task=task)

    models_hyperparameters = get_models_hyperparameters()['FEDOT']
    cur_lead_time = models_hyperparameters['MAX_RUNTIME_MINS']

    saved_model_name = f'fedot_{case_label}_{task_type}_{cur_lead_time}_{metric}'
    loaded_model = load_fedot_model(saved_model_name)

    if not loaded_model:
        generations = models_hyperparameters['GENERATIONS']
        population_size = models_hyperparameters['POPULATION_SIZE']

        # the search of the models provided by the framework
        # that can be used as nodes in a chain
        models_repo = ModelTypesRepository()
        available_model_types, _ = models_repo.suitable_model(task.task_type)

        metric_function = MetricsRepository().metric_by_id(metric)

        composer_requirements = GPComposerRequirements(
            primary=available_model_types, secondary=available_model_types,
            max_arity=3, max_depth=3, pop_size=population_size,
            num_of_generations=generations,
            crossover_prob=0.8, mutation_prob=0.8,
            max_lead_time=datetime.timedelta(minutes=cur_lead_time))

        # Create GP-based composer
        composer = GPComposer()

        # the optimal chain generation by composition - the most time-consuming task
        chain_evo_composed = composer.compose_chain(
            data=dataset_to_compose, initial_chain=None,
            composer_requirements=composer_requirements,
            metrics=metric_function, is_visualise=False)
        chain_evo_composed.fine_tune_primary_nodes(
            input_data=dataset_to_compose, iterations=50)
        chain_evo_composed.fit(input_data=dataset_to_compose, verbose=False)
        save_fedot_model(chain_evo_composed, saved_model_name)
    else:
        chain_evo_composed = loaded_model

    evo_predicted = chain_evo_composed.predict(dataset_to_validate)

    return dataset_to_validate.target, evo_predicted.predict
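# A hypothetical sketch of consuming run_fedot's output. The ExecutionParams
# constructor and field values are assumptions inferred from the attribute
# accesses above; roc_auc is assumed to be sklearn's roc_auc_score.
target, predicted = run_fedot(ExecutionParams(
    train_file='data/train.csv',  # hypothetical path
    test_file='data/test.csv',  # hypothetical path
    case_label='scoring',
    task=TaskTypesEnum.classification))
print(f'ROC AUC on validation: {roc_auc(y_true=target, y_score=predicted):.3f}')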
    # (the beginning of this GPComposerRequirements definition is elided
    # in the source; only the trailing arguments survive)
    crossover_prob=0.8, mutation_prob=0.8,
    max_lead_time=datetime.timedelta(minutes=3))

single_composer_requirements = ComposerRequirements(
    primary=[ModelTypesIdsEnum.lasso, ModelTypesIdsEnum.ridge],
    secondary=[ModelTypesIdsEnum.linear])
chain_static = DummyComposer(DummyChainTypeEnum.hierarchical).compose_chain(
    data=dataset_to_compose, initial_chain=None,
    composer_requirements=single_composer_requirements,
    metrics=metric_function)
chain_static.fit(input_data=dataset_to_compose, verbose=False)

# Create GP-based composer
composer = GPComposer()

# the optimal chain generation by composition - the most time-consuming task
chain_evo_composed = composer.compose_chain(
    data=dataset_to_compose, initial_chain=None,
    composer_requirements=composer_requirements, metrics=metric_function,
    is_visualise=False)

# fit trains the chain and returns its prediction on the training data
train_prediction = chain_evo_composed.fit(input_data=dataset_to_compose,
                                          verbose=True)
print('Composition finished')

compare_plot(train_prediction, dataset_to_compose)
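# compare_plot is not defined in this snippet; a minimal sketch using
# matplotlib, under the assumption that the helper simply overlays the
# chain's training prediction on the actual target values:
import matplotlib.pyplot as plt

def compare_plot(predicted, dataset):
    plt.plot(dataset.target, label='actual')
    plt.plot(predicted.predict, label='predicted')
    plt.legend()
    plt.show()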
def run_credit_scoring_problem(train_file_path, test_file_path,
                               max_lead_time: datetime.timedelta = datetime.timedelta(minutes=20),
                               gp_optimiser_params: Optional[GPChainOptimiserParameters] = None):
    dataset_to_compose = InputData.from_csv(train_file_path)
    dataset_to_validate = InputData.from_csv(test_file_path)

    # the search of the models provided by the framework
    # that can be used as nodes in a chain for the selected task
    models_repo = ModelTypesRepository()
    available_model_types, _ = models_repo.search_models(
        desired_metainfo=ModelMetaInfoTemplate(
            input_type=NumericalDataTypesEnum.table,
            output_type=CategoricalDataTypesEnum.vector,
            task_type=[MachineLearningTasksEnum.classification,
                       MachineLearningTasksEnum.clustering],
            can_be_initial=True,
            can_be_secondary=True))

    # the choice of the metric for the chain quality assessment during composition
    metric_function = MetricsRepository().metric_by_id(
        ClassificationMetricsEnum.ROCAUC)

    if gp_optimiser_params:
        optimiser_parameters = gp_optimiser_params
    else:
        optimiser_parameters = GPChainOptimiserParameters(
            selection_types=[SelectionTypesEnum.tournament],
            crossover_types=[CrossoverTypesEnum.subtree],
            mutation_types=[MutationTypesEnum.growth],
            regularization_type=RegularizationTypesEnum.decremental,
            chain_generation_function=random_ml_chain,
            crossover_types_dict=crossover_by_type,
            mutation_types_dict=mutation_by_type)

    composer_requirements = GPComposerRequirements(
        primary=available_model_types, secondary=available_model_types,
        max_arity=4, max_depth=3, pop_size=5, num_of_generations=5,
        crossover_prob=0.8, mutation_prob=0.8, max_lead_time=max_lead_time)

    # Create GP-based composer
    composer = GPComposer()

    # the optimal chain generation by composition - the most time-consuming task
    chain_evo_composed = composer.compose_chain(
        data=dataset_to_compose, initial_chain=None,
        composer_requirements=composer_requirements, metrics=metric_function,
        optimiser_parameters=optimiser_parameters, is_visualise=False)
    chain_evo_composed.fit(input_data=dataset_to_compose, verbose=True)

    # the choice and initialisation of the dummy composer
    dummy_composer = DummyComposer(DummyChainTypeEnum.hierarchical)
    chain_static = dummy_composer.compose_chain(
        data=dataset_to_compose, initial_chain=None,
        composer_requirements=composer_requirements, metrics=metric_function,
        is_visualise=True)
    chain_static.fit(input_data=dataset_to_compose, verbose=True)

    # the single-model variant of the optimal chain
    single_composer_requirements = ComposerRequirements(
        primary=[ModelTypesIdsEnum.xgboost], secondary=[])
    chain_single = DummyComposer(DummyChainTypeEnum.flat).compose_chain(
        data=dataset_to_compose, initial_chain=None,
        composer_requirements=single_composer_requirements,
        metrics=metric_function)
    chain_single.fit(input_data=dataset_to_compose, verbose=True)

    print('Composition finished')

    ComposerVisualiser.visualise(chain_static)
    ComposerVisualiser.visualise(chain_evo_composed)

    # the quality assessment for the obtained composite models
    roc_on_valid_static = calculate_validation_metric(chain_static,
                                                      dataset_to_validate)
    roc_on_valid_single = calculate_validation_metric(chain_single,
                                                      dataset_to_validate)
    roc_on_valid_evo_composed = calculate_validation_metric(chain_evo_composed,
                                                            dataset_to_validate)

    print(f'Composed ROC AUC is {round(roc_on_valid_evo_composed, 3)}')
    print(f'Static ROC AUC is {round(roc_on_valid_static, 3)}')
    print(f'Single-model ROC AUC is {round(roc_on_valid_single, 3)}')

    return (roc_on_valid_evo_composed, chain_evo_composed), \
           (chain_static, roc_on_valid_static), \
           (chain_single, roc_on_valid_single)
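# A hypothetical invocation of the function above (file paths are
# placeholders; the unpacking mirrors the return structure):
(roc_evo, chain_evo), (chain_stat, roc_stat), (chain_sgl, roc_sgl) = \
    run_credit_scoring_problem('data/scoring_train.csv',
                               'data/scoring_test.csv')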