Example #1
    def fit(self, pipeline_config, data_manager):
        # Pick the AutoNet variant that matches the data manager's problem type
        if (data_manager.problem_type == ProblemType.FeatureRegression):
            autonet_type = AutoNetRegression
        elif (data_manager.problem_type == ProblemType.FeatureMultilabel):
            autonet_type = AutoNetMultilabel
        elif (data_manager.problem_type == ProblemType.FeatureClassification):
            autonet_type = AutoNetClassification
        else:
            raise ValueError('Problem type ' + str(data_manager.problem_type) + ' is not defined')

        # Wrap the AutoNet in an ensemble if requested and register the matching
        # test logger, which scores on the test data during training
        autonet = autonet_type() if not pipeline_config["enable_ensemble"] else AutoNetEnsemble(autonet_type)
        test_logger = test_result if not pipeline_config["enable_ensemble"] else test_predictions_for_ensemble
        autonet.pipeline[autonet_nodes.LogFunctionsSelector.get_name()].add_log_function(
            test_logger.__name__, test_logger(autonet, data_manager.X_test, data_manager.Y_test))

        metrics = autonet.pipeline[autonet_nodes.MetricSelector.get_name()]  # metric selector node (not used further in this snippet)

        return { 'autonet': autonet }
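
Both examples on this page construct a test logger with (autonet, X_test, Y_test) and register it via add_log_function, so the current model can be evaluated on held-out test data during training. The implementations of test_result and test_predictions_for_ensemble are not shown here; a minimal sketch, assuming a callable object whose __call__ interface is guessed rather than taken from the library, could look like this:

class test_result():
    # Hypothetical sketch of a test-set log function; the real implementation
    # lives in autoPyTorch's benchmarking utilities and may differ.
    def __init__(self, autonet, X_test, Y_test):
        self.autonet = autonet
        self.X_test = X_test
        self.Y_test = Y_test

    def __call__(self, network, epoch):
        # Return NaN when no test data is available, otherwise score the
        # current AutoNet on the held-out test set.
        if self.X_test is None or self.Y_test is None:
            return float("nan")
        return self.autonet.score(X_test=self.X_test, Y_test=self.Y_test)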
Example #2
    if any(cat_feats):
        autonet_config["categorical_features"] = cat_feats
    autonet_config["embeddings"] = ['none', 'learned']

    # Test logging
    autonet_config["additional_logs"] = [
        test_predictions_for_ensemble.__name__, test_result_ens.__name__
    ]

    # Initialize (ensemble)
    if args.ensemble_setting == "ensemble":
        print("Using ensembles!")
        ensemble_config = get_ensemble_config()
        autonet_config = {**autonet_config, **ensemble_config}
        autonet = AutoNetEnsemble(AutoNetClassification,
                                  config_preset="full_cs",
                                  **autonet_config)
    elif args.ensemble_setting == "normal":
        autonet = AutoNetClassification(config_preset="full_cs",
                                        **autonet_config)
    else:
        raise ValueError("Unknown ensemble_setting: " + str(args.ensemble_setting))

    # Test logging cont.
    autonet.pipeline[LogFunctionsSelector.get_name()].add_log_function(
        name=test_predictions_for_ensemble.__name__,
        log_function=test_predictions_for_ensemble(autonet, X_test, y_test),
        loss_transform=False)
    autonet.pipeline[LogFunctionsSelector.get_name()].add_log_function(
        name=test_result_ens.__name__,
        log_function=test_result_ens(autonet, X_test, y_test))

    autonet.pipeline[BaselineTrainer.get_name()].add_test_data(X_test)
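
This fragment relies on an args object and a get_ensemble_config() helper that are defined elsewhere in the original script. A minimal, assumed setup for the args part (illustration only, not the script's actual options) could be:

import argparse

# Hypothetical argument parsing matching the fragment above.
parser = argparse.ArgumentParser()
parser.add_argument("--ensemble_setting", choices=["normal", "ensemble"], default="normal")
args = parser.parse_args()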
Example #3
__author__ = "Max Dippel, Michael Burkart and Matthias Urban"
__version__ = "0.0.1"
__license__ = "BSD"

import os, sys
sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
from autoPyTorch import AutoNetClassification, AutoNetEnsemble
from autoPyTorch.data_management.data_manager import DataManager

# Note: You can write your own DataManager! Call fit with the respective train/validation data (NumPy matrices).
dm = DataManager()
dm.generate_classification(num_classes=3, num_features=21, num_samples=1500)
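# Sketch (hypothetical): instead of the generated data you could pass your own
# NumPy arrays directly to fit, e.g.
#   autonet.fit(X_train=my_X, Y_train=my_y, validation_split=0.2)
# using the same keyword argument names as in the fit call below.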

# Note: every parameter has a default value; you do not have to specify anything. The given parameters allow a fast test.
autonet = AutoNetEnsemble(AutoNetClassification,
                          budget_type='epochs',
                          min_budget=1,
                          max_budget=9,
                          num_iterations=1,
                          log_level='debug')
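# Because every parameter has a default, a minimal instantiation would simply be
#   autonet = AutoNetEnsemble(AutoNetClassification)
# (sketch only; the arguments above just keep this test run short).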

res = autonet.fit(X_train=dm.X,
                  Y_train=dm.Y,
                  cross_validator="k_fold",
                  cross_validator_args={"n_splits": 3},
                  validation_split=0.2,
                  ensemble_only_consider_n_best=3)

print(res)
print("Score:", autonet.score(X_test=dm.X_train, Y_test=dm.Y_train))