Code example #1
def get_model(train_file_path: str,
              cur_lead_time: datetime.timedelta = timedelta(seconds=60)):
    task = Task(task_type=TaskTypesEnum.classification)
    dataset_to_compose = InputData.from_csv(train_file_path, task=task)

    # search for the models provided by the framework
    # that can be used as nodes in a chain for the selected task
    models_repo = ModelTypesRepository()
    available_model_types, _ = models_repo.suitable_model(
        task_type=task.task_type)

    metric_function = MetricsRepository(). \
        metric_by_id(ClassificationMetricsEnum.ROCAUC_penalty)

    composer_requirements = GPComposerRequirements(
        primary=available_model_types,
        secondary=available_model_types,
        max_lead_time=cur_lead_time)

    # Create the genetic programming-based composer that allows finding
    # the optimal structure of the composite model
    composer = GPComposer()

    # run the search for the best suitable model
    chain_evo_composed = composer.compose_chain(
        data=dataset_to_compose,
        initial_chain=None,
        composer_requirements=composer_requirements,
        metrics=metric_function,
        is_visualise=False)
    chain_evo_composed.fit(input_data=dataset_to_compose)

    return chain_evo_composed
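
For orientation, here is a minimal usage sketch of the helper above; the CSV path, the time budget, and the import line are assumptions added for illustration and are not part of the original example.

# usage sketch (hypothetical path and time budget)
import datetime

chain = get_model('train.csv',
                  cur_lead_time=datetime.timedelta(minutes=2))
prediction = chain.predict(
    InputData.from_csv('train.csv',
                       task=Task(task_type=TaskTypesEnum.classification)))
print(prediction.predict)  # predictions produced by the composed chain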
Code example #2
File: test_repository.py  Project: timur9831/FEDOT
def test_search_in_repository_by_id_correct():
    repo = ModelTypesRepository(mocked_path())

    model = repo.model_info_by_id(id='tpot')

    assert model.id == 'tpot'
    assert 'automl' in model.tags
Code example #3
def _eval_strategy_for_task(model_type: str,
                            task_type_for_data: TaskTypesEnum):
    models_repo = ModelTypesRepository()
    model_info = models_repo.model_info_by_id(model_type)

    task_type_for_model = task_type_for_data
    task_types_acceptable_for_model = model_info.task_type

    # if the model can't be used directly for the task type from data
    if task_type_for_model not in task_types_acceptable_for_model:
        # search for supplementary task types that can be included in a chain which solves the original task
        globally_compatible_task_types = compatible_task_types(
            task_type_for_model)
        compatible_task_types_acceptable_for_model = list(
            set(task_types_acceptable_for_model).intersection(
                set(globally_compatible_task_types)))
        if len(compatible_task_types_acceptable_for_model) == 0:
            raise ValueError(
                f'Model {model_type} can not be used as a part of {task_type_for_model}.'
            )
        task_type_for_model = compatible_task_types_acceptable_for_model[0]

    strategy = models_repo.model_info_by_id(model_type).current_strategy(
        task_type_for_model)
    return strategy
Code example #4
def test_search_in_repository_by_model_id_correct(mock_init_tree):
    repo = ModelTypesRepository()

    model_names, _ = repo.search_models(desired_ids=[ModelGroupsIdsEnum.all])

    assert ModelTypesIdsEnum.xgboost in model_names
    assert len(model_names) == 3
Code example #5
File: test_repository.py  Project: timur9831/FEDOT
def test_search_in_repository_by_tag_and_metainfo_correct():
    repo = ModelTypesRepository(mocked_path())

    model_names, _ = repo.suitable_model(task_type=TaskTypesEnum.regression,
                                         tags=['ml'])

    assert 'linear' in model_names
    assert len(model_names) == 3
Code example #6
File: model.py  Project: ITMO-NSS-team/nas-fedot
def _eval_strategy_for_task(model_type: ModelTypesIdsEnum,
                            task_type_for_data: TaskTypesEnum):
    strategies_for_tasks = {
        MachineLearningTasksEnum.classification:
        [SkLearnClassificationStrategy, AutoMLEvaluationStrategy],
        MachineLearningTasksEnum.regression: [SkLearnRegressionStrategy],
        MachineLearningTasksEnum.auto_regression:
        [StatsModelsAutoRegressionStrategy],
        MachineLearningTasksEnum.clustering: [SkLearnClusteringStrategy]
    }

    models_for_strategies = {
        SkLearnClassificationStrategy: [
            ModelTypesIdsEnum.xgboost, ModelTypesIdsEnum.knn,
            ModelTypesIdsEnum.logit, ModelTypesIdsEnum.dt,
            ModelTypesIdsEnum.rf, ModelTypesIdsEnum.mlp, ModelTypesIdsEnum.lda,
            ModelTypesIdsEnum.qda
        ],
        AutoMLEvaluationStrategy:
        [ModelTypesIdsEnum.tpot, ModelTypesIdsEnum.h2o],
        SkLearnClusteringStrategy: [ModelTypesIdsEnum.kmeans],
        SkLearnRegressionStrategy: [
            ModelTypesIdsEnum.linear, ModelTypesIdsEnum.ridge,
            ModelTypesIdsEnum.lasso
        ],
        StatsModelsAutoRegressionStrategy:
        [ModelTypesIdsEnum.ar, ModelTypesIdsEnum.arima]
    }

    models_repo = ModelTypesRepository()
    _, model_info = models_repo.search_models(desired_ids=[model_type])

    task_type_for_model = task_type_for_data
    task_types_acceptable_for_model = model_info[0].task_type

    # if the model can't be used directly for the task type from data
    if task_type_for_model not in task_types_acceptable_for_model:
        # search for supplementary task types that can be included in a chain which solves the original task
        globally_compatible_task_types = compatible_task_types(
            task_type_for_model)
        compatible_task_types_acceptable_for_model = list(
            set(task_types_acceptable_for_model).intersection(
                set(globally_compatible_task_types)))
        if len(compatible_task_types_acceptable_for_model) == 0:
            raise ValueError(
                f'Model {model_type} can not be used as a part of {task_type_for_model}.'
            )
        task_type_for_model = compatible_task_types_acceptable_for_model[0]

    eval_strategies = strategies_for_tasks[task_type_for_model]

    for strategy in eval_strategies:
        if model_type in models_for_strategies[strategy]:
            eval_strategy = strategy(model_type)
            return eval_strategy

    return None
Code example #7
def test_search_in_repository_by_metainfo_correct(mock_init_tree):
    repo = ModelTypesRepository()

    model_names, _ = repo.search_models(desired_metainfo=ModelMetaInfoTemplate(
        input_type=NumericalDataTypesEnum.table,
        output_type=CategoricalDataTypesEnum.vector,
        task_type=MachineLearningTasksEnum.classification))

    assert ModelTypesIdsEnum.knn in model_names
    assert len(model_names) == 3
Code example #8
def test_search_in_repository_by_id_and_metainfo_correct(mock_init_tree):
    repo = ModelTypesRepository()

    model_names, _ = repo.search_models(
        desired_ids=[ModelGroupsIdsEnum.ml],
        desired_metainfo=ModelMetaInfoTemplate(
            task_type=MachineLearningTasksEnum.regression))

    assert ModelTypesIdsEnum.xgboost in model_names
    assert len(model_names) == 1
Code example #9
def test_gp_composer_quality(data_fixture, request):
    random.seed(1)
    data = request.getfixturevalue(data_fixture)
    dataset_to_compose = data
    dataset_to_validate = data
    models_repo = ModelTypesRepository()
    available_model_types, _ = models_repo.search_models(
        desired_metainfo=ModelMetaInfoTemplate(
            input_type=NumericalDataTypesEnum.table,
            output_type=CategoricalDataTypesEnum.vector,
            task_type=MachineLearningTasksEnum.classification,
            can_be_initial=True,
            can_be_secondary=True))
    metric_function = MetricsRepository().metric_by_id(
        ClassificationMetricsEnum.ROCAUC)

    baseline = baseline_chain()
    baseline.fit_from_scratch(input_data=dataset_to_compose)

    predict_baseline = baseline.predict(dataset_to_validate).predict
    dataset_to_compose.target = np.array(
        [int(round(i)) for i in predict_baseline])

    composer_requirements = GPComposerRequirements(
        primary=available_model_types,
        secondary=available_model_types,
        max_arity=2,
        max_depth=3,
        pop_size=5,
        num_of_generations=5,
        crossover_prob=0.8,
        mutation_prob=0.8)

    # Create GP-based composer
    composer = GPComposer()
    composed_chain = composer.compose_chain(
        data=dataset_to_compose,
        initial_chain=None,
        composer_requirements=composer_requirements,
        metrics=metric_function)
    composed_chain.fit_from_scratch(input_data=dataset_to_compose)

    predict_composed = composed_chain.predict(dataset_to_validate).predict

    roc_auc_chain_created_by_hand = roc_auc(y_true=dataset_to_validate.target,
                                            y_score=predict_baseline)
    roc_auc_chain_evo_alg = roc_auc(y_true=dataset_to_validate.target,
                                    y_score=predict_composed)
    print("model created by hand prediction:", roc_auc_chain_created_by_hand)
    print("gp composed model prediction:", roc_auc_chain_evo_alg)

    assert composed_chain == baseline or composed_chain != baseline and abs(
        roc_auc_chain_created_by_hand - roc_auc_chain_evo_alg) < 0.01
Code example #10
def run_credit_scoring_problem(train_file_path, test_file_path,
                               max_lead_time: datetime.timedelta = datetime.timedelta(minutes=5),
                               gp_optimiser_params: Optional[GPChainOptimiserParameters] = None, pop_size=None,
                               generations=None):
    dataset_to_compose = InputData.from_csv(train_file_path)
    dataset_to_validate = InputData.from_csv(test_file_path)

    models_repo = ModelTypesRepository()
    available_model_types, _ = models_repo.search_models(
        desired_metainfo=ModelMetaInfoTemplate(input_type=NumericalDataTypesEnum.table,
                                               output_type=CategoricalDataTypesEnum.vector,
                                               task_type=[MachineLearningTasksEnum.classification,
                                                          MachineLearningTasksEnum.clustering],
                                               can_be_initial=True,
                                               can_be_secondary=True))

    # the choice of the metric for the chain quality assessment during composition
    metric_function = MetricsRepository().metric_by_id(ClassificationMetricsEnum.ROCAUC)

    if gp_optimiser_params:
        optimiser_parameters = gp_optimiser_params
    else:
        selection_types = [SelectionTypesEnum.tournament]
        crossover_types = [CrossoverTypesEnum.subtree]
        mutation_types = [MutationTypesEnum.simple, MutationTypesEnum.growth, MutationTypesEnum.reduce]
        regularization_type = RegularizationTypesEnum.decremental
        optimiser_parameters = GPChainOptimiserParameters(selection_types=selection_types,
                                                          crossover_types=crossover_types,
                                                          mutation_types=mutation_types,
                                                          regularization_type=regularization_type)
    composer_requirements = GPComposerRequirements(
        primary=available_model_types,
        secondary=available_model_types, max_arity=4,
        max_depth=3, pop_size=pop_size, num_of_generations=generations,
        crossover_prob=0.8, mutation_prob=0.8, max_lead_time=max_lead_time)

    # Create GP-based composer
    composer = GPComposer()

    chain_evo_composed = composer.compose_chain(data=dataset_to_compose,
                                                initial_chain=None,
                                                composer_requirements=composer_requirements,
                                                metrics=metric_function, optimiser_parameters=optimiser_parameters,
                                                is_visualise=False)
    chain_evo_composed.fit(input_data=dataset_to_compose, verbose=True)

    roc_on_valid_evo_composed = calculate_validation_metric(chain_evo_composed, dataset_to_validate)

    print(f'Composed ROC AUC is {round(roc_on_valid_evo_composed, 3)}')

    return roc_on_valid_evo_composed, chain_evo_composed, composer
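
A hedged invocation sketch for the routine above; the file paths and the search budget values below are placeholders, not paths taken from the FEDOT repository.

# hypothetical call to the credit scoring pipeline
roc_auc_value, composed_chain, used_composer = run_credit_scoring_problem(
    'scoring_train.csv',
    'scoring_test.csv',
    max_lead_time=datetime.timedelta(minutes=1),
    pop_size=5,
    generations=3)
print(f'Validation ROC AUC: {round(roc_auc_value, 3)}')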
Code example #11
File: test_composer.py  Project: timur9831/FEDOT
def test_random_composer(data_fixture, request):
    random.seed(1)
    np.random.seed(1)
    data = request.getfixturevalue(data_fixture)
    dataset_to_compose = data
    dataset_to_validate = data

    available_model_types, _ = ModelTypesRepository().suitable_model(
        task_type=TaskTypesEnum.classification)

    metric_function = MetricsRepository().metric_by_id(
        ClassificationMetricsEnum.ROCAUC)

    random_composer = RandomSearchComposer(iter_num=1)
    req = ComposerRequirements(primary=available_model_types,
                               secondary=available_model_types)
    chain_random_composed = random_composer.compose_chain(
        data=dataset_to_compose,
        initial_chain=None,
        composer_requirements=req,
        metrics=metric_function)
    chain_random_composed.fit_from_scratch(input_data=dataset_to_compose)

    predicted_random_composed = chain_random_composed.predict(
        dataset_to_validate)

    roc_on_valid_random_composed = roc_auc(
        y_true=dataset_to_validate.target,
        y_score=predicted_random_composed.predict)

    assert roc_on_valid_random_composed > 0.6
Code example #12
File: test_composer.py  Project: timur9831/FEDOT
def test_gp_composer_build_chain_correct(data_fixture, request):
    random.seed(1)
    np.random.seed(1)
    data = request.getfixturevalue(data_fixture)
    dataset_to_compose = data
    dataset_to_validate = data

    available_model_types, _ = ModelTypesRepository().suitable_model(
        task_type=TaskTypesEnum.classification)

    metric_function = MetricsRepository().metric_by_id(
        ClassificationMetricsEnum.ROCAUC)

    gp_composer = GPComposer()
    req = GPComposerRequirements(primary=available_model_types,
                                 secondary=available_model_types,
                                 max_arity=2,
                                 max_depth=2,
                                 pop_size=2,
                                 num_of_generations=1,
                                 crossover_prob=0.4,
                                 mutation_prob=0.5)
    chain_gp_composed = gp_composer.compose_chain(data=dataset_to_compose,
                                                  initial_chain=None,
                                                  composer_requirements=req,
                                                  metrics=metric_function)

    chain_gp_composed.fit_from_scratch(input_data=dataset_to_compose)
    predicted_gp_composed = chain_gp_composed.predict(dataset_to_validate)

    roc_on_valid_gp_composed = roc_auc(y_true=dataset_to_validate.target,
                                       y_score=predicted_gp_composed.predict)

    assert roc_on_valid_gp_composed > 0.6
Code example #13
def run_credit_scoring_problem(
        train_file_path,
        test_file_path,
        max_lead_time: datetime.timedelta = datetime.timedelta(minutes=5),
        is_visualise=False):
    task = Task(TaskTypesEnum.classification)
    dataset_to_compose = InputData.from_csv(train_file_path, task=task)
    dataset_to_validate = InputData.from_csv(test_file_path, task=task)

    # search for the models provided by the framework that can be used as nodes in a chain for the selected task
    available_model_types, _ = ModelTypesRepository().suitable_model(
        task_type=task.task_type)

    # the choice of the metric for the chain quality assessment during composition
    metric_function = MetricsRepository().metric_by_id(
        ClassificationMetricsEnum.ROCAUC_penalty)

    # the choice and initialisation of the GP search
    composer_requirements = GPComposerRequirements(
        primary=available_model_types,
        secondary=available_model_types,
        max_arity=3,
        max_depth=3,
        pop_size=20,
        num_of_generations=20,
        crossover_prob=0.8,
        mutation_prob=0.8,
        max_lead_time=max_lead_time)

    # Create GP-based composer
    composer = GPComposer()

    # the optimal chain generation by composition - the most time-consuming task
    chain_evo_composed = composer.compose_chain(
        data=dataset_to_compose,
        initial_chain=None,
        composer_requirements=composer_requirements,
        metrics=metric_function,
        is_visualise=False)

    chain_evo_composed.fine_tune_primary_nodes(input_data=dataset_to_compose,
                                               iterations=50)

    chain_evo_composed.fit(input_data=dataset_to_compose, verbose=True)

    if is_visualise:
        ComposerVisualiser.visualise(chain_evo_composed)

    # the quality assessment for the obtained composite models
    roc_on_valid_evo_composed = calculate_validation_metric(
        chain_evo_composed, dataset_to_validate)

    print(f'Composed ROC AUC is {round(roc_on_valid_evo_composed, 3)}')

    return roc_on_valid_evo_composed
Code example #14
def test_gp_composer_build_chain_correct(data_fixture, request):
    random.seed(1)
    np.random.seed(1)
    data = request.getfixturevalue(data_fixture)
    dataset_to_compose = data
    dataset_to_validate = data

    models_repo = ModelTypesRepository()

    available_model_types, _ = models_repo.search_models(
        desired_metainfo=ModelMetaInfoTemplate(
            input_type=NumericalDataTypesEnum.table,
            output_type=CategoricalDataTypesEnum.vector,
            task_type=MachineLearningTasksEnum.classification,
            can_be_initial=True,
            can_be_secondary=True))
    metric_function = MetricsRepository().metric_by_id(
        ClassificationMetricsEnum.ROCAUC)

    gp_composer = GPComposer()
    req = GPComposerRequirements(primary=available_model_types,
                                 secondary=available_model_types,
                                 max_arity=2,
                                 max_depth=2,
                                 pop_size=2,
                                 num_of_generations=1,
                                 crossover_prob=0.4,
                                 mutation_prob=0.5)
    chain_gp_composed = gp_composer.compose_chain(data=dataset_to_compose,
                                                  initial_chain=None,
                                                  composer_requirements=req,
                                                  metrics=metric_function)

    chain_gp_composed.fit_from_scratch(input_data=dataset_to_compose)
    predicted_gp_composed = chain_gp_composed.predict(dataset_to_validate)

    roc_on_valid_gp_composed = roc_auc(y_true=dataset_to_validate.target,
                                       y_score=predicted_gp_composed.predict)

    assert roc_on_valid_gp_composed > 0.6
Code example #15
def test_random_composer(data_fixture, request):
    random.seed(1)
    np.random.seed(1)
    data = request.getfixturevalue(data_fixture)
    dataset_to_compose = data
    dataset_to_validate = data

    models_repo = ModelTypesRepository()
    available_model_types, _ = models_repo.search_models(
        desired_metainfo=ModelMetaInfoTemplate(
            input_type=NumericalDataTypesEnum.table,
            output_type=CategoricalDataTypesEnum.vector,
            task_type=MachineLearningTasksEnum.classification,
            can_be_initial=True,
            can_be_secondary=True))

    metric_function = MetricsRepository().metric_by_id(
        ClassificationMetricsEnum.ROCAUC)

    random_composer = RandomSearchComposer(iter_num=1)
    req = ComposerRequirements(primary=available_model_types,
                               secondary=available_model_types)
    chain_random_composed = random_composer.compose_chain(
        data=dataset_to_compose,
        initial_chain=None,
        composer_requirements=req,
        metrics=metric_function)
    chain_random_composed.fit_from_scratch(input_data=dataset_to_compose)

    predicted_random_composed = chain_random_composed.predict(
        dataset_to_validate)

    roc_on_valid_random_composed = roc_auc(
        y_true=dataset_to_validate.target,
        y_score=predicted_random_composed.predict)

    assert roc_on_valid_random_composed > 0.6
Code example #16
File: test_repository.py  Project: timur9831/FEDOT
def test_search_in_repository_by_tag_correct():
    repo = ModelTypesRepository(mocked_path())

    model_names, _ = repo.models_with_tag(tags=['automl'])
    assert 'tpot' in model_names
    assert len(model_names) == 1

    model_names, _ = repo.models_with_tag(tags=['simple', 'linear'],
                                          is_full_match=True)
    assert {'linear', 'logit', 'lasso', 'ridge'}.issubset(model_names)
    assert len(model_names) == 4

    model_names, _ = repo.models_with_tag(tags=['simple', 'linear'])
    assert {'linear', 'logit', 'knn', 'lda', 'lasso',
            'ridge'}.issubset(model_names)
    assert len(model_names) == 6

    model_names, _ = repo.models_with_tag(tags=['non_real_tag'])
    assert len(model_names) == 0
Code example #17
 def acceptable_task_types(self):
     model_info = ModelTypesRepository().model_info_by_id(self.model_type)
     return model_info.task_type
Code example #18
 def metadata(self) -> ModelMetaInfo:
     model_info = ModelTypesRepository().model_info_by_id(self.model_type)
     if not model_info:
         raise ValueError(f'Model {self.model_type} not found')
     return model_info
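
The property-style helpers in code examples #17 and #18 reduce to a repository lookup; a minimal sketch of that lookup follows, assuming 'xgboost' is registered under that id in the default repository (as the other examples suggest).

# direct lookup sketch, mirroring acceptable_task_types / metadata above
model_info = ModelTypesRepository().model_info_by_id('xgboost')
if not model_info:
    raise ValueError('Model xgboost not found')
print(model_info.task_type)         # task types the model can participate in
print('automl' in model_info.tags)  # tag metadata, as in code example #2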
Code example #19
# the excerpt begins mid-signature; the function name and 'file_path' parameter
# below are assumed so that the snippet is self-contained
def create_examples_from_excel(file_path: str,
                               return_df: bool = False):
    df = pd.read_excel(file_path)
    train, test = split_data(df)
    file_dir_name = file_path.replace('.', '/').split('/')[-2]
    file_csv_name = f'{file_dir_name}.csv'
    directory_names = ['examples', 'data', file_dir_name]
    ensure_directory_exists(directory_names)
    if return_df:
        path = os.path.join(directory_names[0], directory_names[1],
                            directory_names[2], file_csv_name)
        full_file_path = os.path.join(str(project_root()), path)
        save_file_to_csv(df, full_file_path)
        return df, full_file_path
    else:
        full_train_file_path, full_test_file_path = get_split_data_paths(
            directory_names)
        save_file_to_csv(train, full_train_file_path)
        save_file_to_csv(test, full_test_file_path)
        return full_train_file_path, full_test_file_path


def print_models_info(repository: ModelTypesRepository,
                      task=TaskTypesEnum.classification):
    for model in repository.models:
        print(f'{model.id}, {model.current_strategy(task)}, '
              f'{model.current_strategy(task)(model.id).implementation_info}')


if __name__ == '__main__':
    print_models_info(ModelTypesRepository())
Code example #20
File: b_fedot.py  Project: timur9831/FEDOT
def run_fedot(params: 'ExecutionParams'):
    train_file_path = params.train_file
    test_file_path = params.test_file
    case_label = params.case_label
    task_type = params.task

    if task_type == TaskTypesEnum.classification:
        metric = ClassificationMetricsEnum.ROCAUC
    elif task_type == TaskTypesEnum.regression:
        metric = RegressionMetricsEnum.RMSE
    else:
        raise NotImplementedError()

    task = Task(task_type)
    dataset_to_compose = InputData.from_csv(train_file_path, task=task)
    dataset_to_validate = InputData.from_csv(test_file_path, task=task)

    models_hyperparameters = get_models_hyperparameters()['FEDOT']
    cur_lead_time = models_hyperparameters['MAX_RUNTIME_MINS']

    saved_model_name = f'fedot_{case_label}_{task_type}_{cur_lead_time}_{metric}'
    loaded_model = load_fedot_model(saved_model_name)

    if not loaded_model:
        generations = models_hyperparameters['GENERATIONS']
        population_size = models_hyperparameters['POPULATION_SIZE']

        # search for the models provided by the framework that can be used as nodes in a chain
        models_repo = ModelTypesRepository()
        available_model_types, _ = models_repo.suitable_model(task.task_type)

        metric_function = MetricsRepository().metric_by_id(metric)

        composer_requirements = GPComposerRequirements(
            primary=available_model_types,
            secondary=available_model_types,
            max_arity=3,
            max_depth=3,
            pop_size=population_size,
            num_of_generations=generations,
            crossover_prob=0.8,
            mutation_prob=0.8,
            max_lead_time=datetime.timedelta(minutes=cur_lead_time))

        # Create GP-based composer
        composer = GPComposer()

        # the optimal chain generation by composition - the most time-consuming task
        chain_evo_composed = composer.compose_chain(
            data=dataset_to_compose,
            initial_chain=None,
            composer_requirements=composer_requirements,
            metrics=metric_function,
            is_visualise=False)
        chain_evo_composed.fine_tune_primary_nodes(
            input_data=dataset_to_compose, iterations=50)
        chain_evo_composed.fit(input_data=dataset_to_compose, verbose=False)
        save_fedot_model(chain_evo_composed, saved_model_name)
    else:
        chain_evo_composed = loaded_model

    evo_predicted = chain_evo_composed.predict(dataset_to_validate)

    return dataset_to_validate.target, evo_predicted.predict
Code example #21
File: test_repository.py  Project: timur9831/FEDOT
def test_lazy_load():
    repo = ModelTypesRepository(mocked_path())
    repo_second = ModelTypesRepository()

    assert repo._repo == repo_second._repo
Code example #22
problem_class = MachineLearningTasksEnum.auto_regression

# a dataset that will be used as a train and test set during composition
file_path_train = 'cases/data/ts/metocean_data_train.csv'
full_path_train = os.path.join(str(project_root()), file_path_train)
dataset_to_compose = InputData.from_csv(full_path_train,
                                        task_type=problem_class)

# a dataset for a final validation of the composed model
file_path_test = 'cases/data/ts/metocean_data_test.csv'
full_path_test = os.path.join(str(project_root()), file_path_test)
dataset_to_validate = InputData.from_csv(full_path_test,
                                         task_type=problem_class)

# search for the models provided by the framework that can be used as nodes in a chain for the selected task
models_repo = ModelTypesRepository()
available_model_types, _ = models_repo.search_models(
    desired_metainfo=ModelMetaInfoTemplate(
        input_type=NumericalDataTypesEnum.table,
        output_type=CategoricalDataTypesEnum.vector,
        task_type=problem_class,
        can_be_initial=True,
        can_be_secondary=True))

# the choice of the metric for the chain quality assessment during composition
metric_function = MetricsRepository().metric_by_id(RegressionMetricsEnum.RMSE)

# the choice and initialisation of the composer requirements for the single-model chain

single_composer_requirements = ComposerRequirements(
    primary=[ModelTypesIdsEnum.ar], secondary=[])
Code example #23
def run_credit_scoring_problem(train_file_path, test_file_path,
                               max_lead_time: datetime.timedelta = datetime.timedelta(minutes=20),
                               gp_optimiser_params: Optional[GPChainOptimiserParameters] = None):
    dataset_to_compose = InputData.from_csv(train_file_path)
    dataset_to_validate = InputData.from_csv(test_file_path)
    # search for the models provided by the framework that can be used as nodes in a chain for the selected task
    models_repo = ModelTypesRepository()
    available_model_types, _ = models_repo.search_models(
        desired_metainfo=ModelMetaInfoTemplate(input_type=NumericalDataTypesEnum.table,
                                               output_type=CategoricalDataTypesEnum.vector,
                                               task_type=[MachineLearningTasksEnum.classification,
                                                          MachineLearningTasksEnum.clustering],
                                               can_be_initial=True,
                                               can_be_secondary=True))

    # the choice of the metric for the chain quality assessment during composition
    metric_function = MetricsRepository().metric_by_id(ClassificationMetricsEnum.ROCAUC)

    if gp_optimiser_params:
        optimiser_parameters = gp_optimiser_params
    else:
        optimiser_parameters = GPChainOptimiserParameters(selection_types=[SelectionTypesEnum.tournament],
                                                          crossover_types=[CrossoverTypesEnum.subtree],
                                                          mutation_types=[MutationTypesEnum.growth],
                                                          regularization_type=RegularizationTypesEnum.decremental,
                                                          chain_generation_function=random_ml_chain,
                                                          crossover_types_dict=crossover_by_type,
                                                          mutation_types_dict=mutation_by_type)
    composer_requirements = GPComposerRequirements(
        primary=available_model_types,
        secondary=available_model_types, max_arity=4,
        max_depth=3, pop_size=5, num_of_generations=5,
        crossover_prob=0.8, mutation_prob=0.8, max_lead_time=max_lead_time)

    # Create GP-based composer
    composer = GPComposer()

    # the optimal chain generation by composition - the most time-consuming task
    chain_evo_composed = composer.compose_chain(data=dataset_to_compose,
                                                initial_chain=None,
                                                composer_requirements=composer_requirements,
                                                metrics=metric_function, optimiser_parameters=optimiser_parameters,
                                                is_visualise=False)
    chain_evo_composed.fit(input_data=dataset_to_compose, verbose=True)

    # the choice and initialisation of the dummy_composer
    dummy_composer = DummyComposer(DummyChainTypeEnum.hierarchical)

    chain_static = dummy_composer.compose_chain(data=dataset_to_compose,
                                                initial_chain=None,
                                                composer_requirements=composer_requirements,
                                                metrics=metric_function, is_visualise=True)
    chain_static.fit(input_data=dataset_to_compose, verbose=True)
    # the single-model variant of optimal chain
    single_composer_requirements = ComposerRequirements(primary=[ModelTypesIdsEnum.xgboost],
                                                        secondary=[])
    chain_single = DummyComposer(DummyChainTypeEnum.flat).compose_chain(data=dataset_to_compose,
                                                                        initial_chain=None,
                                                                        composer_requirements=single_composer_requirements,
                                                                        metrics=metric_function)
    chain_single.fit(input_data=dataset_to_compose, verbose=True)
    print("Composition finished")

    ComposerVisualiser.visualise(chain_static)
    ComposerVisualiser.visualise(chain_evo_composed)

    # the quality assessment for the obtained composite models
    roc_on_valid_static = calculate_validation_metric(chain_static, dataset_to_validate)
    roc_on_valid_single = calculate_validation_metric(chain_single, dataset_to_validate)
    roc_on_valid_evo_composed = calculate_validation_metric(chain_evo_composed, dataset_to_validate)

    print(f'Composed ROC AUC is {round(roc_on_valid_evo_composed, 3)}')
    print(f'Static ROC AUC is {round(roc_on_valid_static, 3)}')
    print(f'Single-model ROC AUC is {round(roc_on_valid_single, 3)}')

    return (roc_on_valid_evo_composed, chain_evo_composed), (chain_static, roc_on_valid_static), (
        chain_single, roc_on_valid_single)