# Example #1
def get_fit_dictionary(X, y, validator, backend):
    """Build the fit dictionary a pipeline expects from raw data.

    Wraps ``X``/``y`` in a ``TabularDataset`` (reusing the same arrays as
    the test split), derives the dataset properties from the dataset's
    requirements, persists the datamanager through ``backend`` and returns
    the dictionary of fit arguments.
    """
    datamanager = TabularDataset(
        X=X,
        Y=y,
        validator=validator,
        X_test=X,
        Y_test=y,
    )

    info = datamanager.get_required_dataset_info()
    dataset_properties = datamanager.get_dataset_properties(
        get_dataset_requirements(info))

    # Unpack once instead of indexing repeatedly below.
    X_train, y_train = datamanager.train_tensors
    train_indices, val_indices = datamanager.splits[0]

    fit_dictionary = {
        'X_train': X_train,
        'y_train': y_train,
        'train_indices': train_indices,
        'val_indices': val_indices,
        'dataset_properties': dataset_properties,
        'num_run': np.random.randint(50),
        'device': 'cpu',
        'budget_type': 'epochs',
        'epochs': 5,
        'torch_num_threads': 1,
        'early_stopping': 10,
        'working_dir': '/tmp',
        'use_tensorboard_logger': True,
        'metrics_during_training': True,
        'split_id': 0,
        'backend': backend,
        'logger_port': logging.handlers.DEFAULT_TCP_LOGGING_PORT,
    }
    backend.save_datamanager(datamanager)
    return fit_dictionary
# Example #2
def get_500_classes_datamanager(resampling_strategy=HoldoutValTypes.holdout_validation):
    """Create a synthetic 500-class classification TabularDataset.

    Generates 1000 samples via ``make_classification`` (475 classes at
    weight 0.002 and 25 at 0.001), fits a classification input validator
    on the full data, and wraps the first 700 rows as train and the
    remaining 300 as test.
    """
    weights = ([0.002] * 475) + ([0.001] * 25)
    X, Y = sklearn.datasets.make_classification(n_samples=1000,
                                                n_features=20,
                                                n_classes=500,
                                                n_clusters_per_class=1,
                                                n_informative=15,
                                                n_redundant=5,
                                                n_repeated=0,
                                                weights=weights,
                                                flip_y=0,
                                                class_sep=1.0,
                                                hypercube=True,
                                                shift=None,
                                                scale=1.0,
                                                shuffle=True,
                                                random_state=1)

    validator = TabularInputValidator(is_classification=True).fit(X, Y)
    dataset = TabularDataset(
        # BUG FIX: Y_test previously sliced Y[710:], which misaligned the
        # test labels with X_test=X[700:] and dropped 10 of them.
        X=X[:700], Y=Y[:700],
        X_test=X[700:], Y_test=Y[700:],
        validator=validator,
        resampling_strategy=resampling_strategy
    )

    return dataset
# Example #3
    def test_get_dataset_properties(self):
        """dataset_properties must be a dict covering every dataset requirement."""
        # Get data to train
        fit_dictionary = get_data_to_train()

        # Build a repository with random fitted models. If a previous run
        # left the directories behind, skip the test instead of failing.
        try:
            backend = create(
                temporary_directory='/tmp/autoPyTorch_ensemble_test_tmp',
                output_directory='/tmp/autoPyTorch_ensemble_test_out',
                delete_tmp_folder_after_terminate=False)
        except FileExistsError:
            # BUG FIX: the original caught bare Exception, then called
            # self.assertRaises(FileExistsError) with no callable (a no-op
            # context manager) and returned unittest.skip(...) — a decorator,
            # which never actually skips. skipTest() is the correct way to
            # skip from inside a test method.
            self.skipTest("File already exists")

        fit_dictionary['backend'] = backend

        # Create the directory structure
        backend._make_internals_directory()

        # Create a datamanager for this toy problem
        datamanager = TabularDataset(
            X=fit_dictionary['X_train'],
            Y=fit_dictionary['y_train'],
            X_test=fit_dictionary['X_test'],
            Y_test=fit_dictionary['y_test'],
        )
        backend.save_datamanager(datamanager)

        # Round-trip through the backend so we test the persisted object.
        datamanager = backend.load_datamanager()
        info = {
            'task_type': datamanager.task_type,
            'output_type': datamanager.output_type,
            'issparse': datamanager.issparse,
            'numerical_columns': datamanager.numerical_columns,
            'categorical_columns': datamanager.categorical_columns
        }
        dataset_requirements = get_dataset_requirements(info)

        dataset_properties = datamanager.get_dataset_properties(
            dataset_requirements)

        self.assertIsInstance(dataset_properties, dict)
        for dataset_requirement in dataset_requirements:
            self.assertIn(dataset_requirement.name, dataset_properties.keys())
            self.assertIsInstance(dataset_properties[dataset_requirement.name],
                                  dataset_requirement.supported_types)
# Example #4
    def runTest(self):
        """Structured numpy input: column typing, value/index maps and nan mask."""
        matrix = np.array([(0, 0.1, 1), (1, np.nan, 3)], dtype='f4, f4, i4')
        target_df = pd.Series([1, 2])
        ds = TabularDataset(matrix, target_df)
        self.assertEqual(
            ds.data_types,
            [DataTypes.Canonical, DataTypes.Float, DataTypes.Canonical])
        self.assertEqual(set(ds.itovs[2]), {np.nan, 1, 3})

        self.assertEqual(ds.vtois[0][1], 2)
        # All missing-value flavours must map to index 0.
        self.assertEqual(ds.vtois[0][np.nan], 0)
        self.assertEqual(ds.vtois[0][pd._libs.NaT], 0)
        self.assertEqual(ds.vtois[0][pd._libs.missing.NAType()], 0)
        # BUG FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin bool is the supported spelling for a boolean dtype.
        self.assertTrue((ds.nan_mask == np.array([[0, 0, 0], [0, 1, 0]],
                                                 dtype=bool)).all())
# Example #5
def get_regression_datamanager(resampling_strategy=HoldoutValTypes.holdout_validation):
    """Create a TabularDataset for the boston regression task.

    Shuffles the training rows with a fixed seed (reproducible) before
    fitting the input validator and building the dataset.
    """
    X_train, Y_train, X_test, Y_test = get_dataset('boston')
    indices = list(range(X_train.shape[0]))
    np.random.seed(1)
    np.random.shuffle(indices)
    X_train = X_train[indices]
    Y_train = Y_train[indices]

    # BUG FIX: this is a regression dataset, so the validator must be built
    # with is_classification=False (the original passed True).
    validator = TabularInputValidator(is_classification=False).fit(X_train, Y_train)
    dataset = TabularDataset(
        X=X_train, Y=Y_train,
        X_test=X_test, Y_test=Y_test,
        validator=validator,
        resampling_strategy=resampling_strategy
    )
    return dataset
# Example #6
def get_abalone_datamanager(resampling_strategy=HoldoutValTypes.holdout_validation):
    """Fetch the abalone dataset (https://www.openml.org/d/183) and wrap it
    in a TabularDataset with a fitted classification input validator."""
    X, y = sklearn.datasets.fetch_openml(data_id=183, return_X_y=True, as_frame=False)
    # Targets arrive as strings; encode them to integer class labels.
    y = preprocessing.LabelEncoder().fit_transform(y)

    split = sklearn.model_selection.train_test_split(X, y, random_state=1)
    X_train, X_test, y_train, y_test = split

    validator = TabularInputValidator(is_classification=True).fit(X_train, y_train)
    return TabularDataset(
        X=X_train, Y=y_train,
        validator=validator,
        X_test=X_test, Y_test=y_test,
        resampling_strategy=resampling_strategy
    )
# Example #7
    def runTest(self):
        """DataFrame input: column typing, value/index maps and nan mask."""
        df = pd.DataFrame([['a', 0.1, 1], ['b', 0.2, np.nan]])
        target_df = pd.Series([1, 2])
        ds = TabularDataset(df, target_df)
        self.assertEqual(
            ds.data_types,
            [DataTypes.String, DataTypes.Float, DataTypes.Canonical])
        self.assertEqual(set(ds.itovs[2]), {np.nan, 1})
        self.assertEqual(set(ds.itovs[0]), {np.nan, 'a', 'b'})

        self.assertEqual(ds.vtois[0]['a'], 1)
        # All missing-value flavours must map to index 0.
        self.assertEqual(ds.vtois[0][np.nan], 0)
        self.assertEqual(ds.vtois[0][pd._libs.NaT], 0)
        self.assertEqual(ds.vtois[0][pd._libs.missing.NAType()], 0)
        # BUG FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin bool is the supported spelling for a boolean dtype.
        self.assertTrue((ds.nan_mask == np.array([[0, 0, 0], [0, 0, 1]],
                                                 dtype=bool)).all())
# Example #8
    def setUp(self):
        """Create a toy binary-classification task and a backend holding it."""
        self.num_features = 4
        self.num_classes = 2
        self.X, self.y = make_classification(
            n_samples=200,
            n_features=self.num_features,
            n_informative=3,
            n_redundant=1,
            n_repeated=0,
            n_classes=self.num_classes,
            n_clusters_per_class=2,
            shuffle=True,
            random_state=0,
        )
        self.dataset_properties = {
            'task_type': 'tabular_classification',
            'output_type': 'binary',
            'numerical_columns': list(range(4)),
            'categorical_columns': [],
        }

        # Start every test from clean run directories.
        tmp_dir = '/tmp/autoPyTorch_ensemble_test_tmp'
        output_dir = '/tmp/autoPyTorch_ensemble_test_out'
        for directory in (tmp_dir, output_dir):
            if os.path.exists(directory):
                shutil.rmtree(directory)

        self.backend = create(
            temporary_directory=tmp_dir,
            output_directory=output_dir,
            delete_tmp_folder_after_terminate=False,
        )

        # Create the directory structure the backend expects.
        self.backend._make_internals_directory()

        # Persist a datamanager for this toy problem (train == test here).
        datamanager = TabularDataset(
            X=self.X,
            Y=self.y,
            X_test=self.X,
            Y_test=self.y,
        )
        self.backend.save_datamanager(datamanager)
# Example #9
# Column layout of the dataset (presumably OpenML credit-approval style
# 'A1'..'A14' features — TODO confirm against the loading code above).
categorical_columns = ['A1', 'A4', 'A5', 'A6', 'A8', 'A9', 'A11', 'A12']
numerical_columns = ['A2', 'A3', 'A7', 'A10', 'A13', 'A14']
# Per-column category values, used later to configure the encoders.
categories = [np.unique(X[a]) for a in categorical_columns]

# Create a proof of concept pipeline!
dataset_properties = {
    'task_type': 'tabular_classification',
    'categorical_columns': categorical_columns,
    'numerical_columns': numerical_columns,
    'output_type': output_type,
}

# Save data via backend to fit the pipeline
datamanager = TabularDataset(
    X=X_train,
    Y=y_train,
    X_test=X_test,
    Y_test=y_test,
)

backend = create(
    temporary_directory='./tmp/autoPyTorch_tabular_classification_tmp',
    output_directory='./tmp/autoPyTorch_tabular_classification_out',
    delete_tmp_folder_after_terminate=False)
backend.save_datamanager(datamanager)

pipeline = TabularClassificationPipeline(dataset_properties=dataset_properties)

# Create a fit dictionary
# NOTE(review): this excerpt is truncated here — the dict literal below is
# not closed in the visible source.
fit_dictionary = {
    'categorical_columns': categorical_columns,
    'numerical_columns': numerical_columns,
# Example #10
    def search(
        self,
        optimize_metric: str,
        X_train: Optional[Union[List, pd.DataFrame, np.ndarray]] = None,
        y_train: Optional[Union[List, pd.DataFrame, np.ndarray]] = None,
        X_test: Optional[Union[List, pd.DataFrame, np.ndarray]] = None,
        y_test: Optional[Union[List, pd.DataFrame, np.ndarray]] = None,
        dataset_name: Optional[str] = None,
        budget_type: str = 'epochs',
        min_budget: int = 5,
        max_budget: int = 50,
        total_walltime_limit: int = 100,
        func_eval_time_limit_secs: Optional[int] = None,
        enable_traditional_pipeline: bool = True,
        memory_limit: Optional[int] = 4096,
        smac_scenario_args: Optional[Dict[str, Any]] = None,
        get_smac_object_callback: Optional[Callable] = None,
        all_supported_metrics: bool = True,
        precision: int = 32,
        disable_file_output: List = [],
        load_models: bool = True,
        portfolio_selection: Optional[str] = None,
    ) -> 'BaseTask':
        """
        Search for the best pipeline configuration for the given dataset.

        This method both optimizes the machine learning models and builds an
        ensemble out of them. To disable ensembling, set ensemble_size==0.

        Args:
            X_train, y_train, X_test, y_test: Union[np.ndarray, List, pd.DataFrame]
                A pair of features (X_train) and targets (y_train) used to fit a
                pipeline. Additionally, a holdout of this pairs (X_test, y_test) can
                be provided to track the generalization performance of each stage.
            optimize_metric (str):
                name of the metric that is used to evaluate a pipeline.
            dataset_name (Optional[str]):
                Name under which the dataset is registered; a UUID-based name
                is generated when None.
            budget_type (str):
                Type of budget to be used when fitting the pipeline.
                It can be one of:

                + `epochs`: The training of each pipeline will be terminated after
                    a number of epochs have passed. This number of epochs is determined by the
                    budget argument of this method.
                + `runtime`: The training of each pipeline will be terminated after
                    a number of seconds have passed. This number of seconds is determined by the
                    budget argument of this method. The overall fitting time of a pipeline is
                    controlled by func_eval_time_limit_secs. 'runtime' only controls the allocated
                    time to train a pipeline, but it does not consider the overall time it takes
                    to create a pipeline (data loading and preprocessing, other i/o operations, etc.).
                    budget_type will determine the units of min_budget/max_budget. If budget_type=='epochs'
                    is used, min_budget will refer to epochs whereas if budget_type=='runtime' then
                    min_budget will refer to seconds.
            min_budget (int):
                Auto-PyTorch uses `Hyperband <https://arxiv.org/abs/1603.06560>`_ to
                trade-off resources between running many pipelines at min_budget and
                running the top performing pipelines on max_budget.
                min_budget states the minimum resource allocation a pipeline should have
                so that we can compare and quickly discard bad performing models.
                For example, if the budget_type is epochs, and min_budget=5, then we will
                run every pipeline to a minimum of 5 epochs before performance comparison.
            max_budget (int):
                Auto-PyTorch uses `Hyperband <https://arxiv.org/abs/1603.06560>`_ to
                trade-off resources between running many pipelines at min_budget and
                running the top performing pipelines on max_budget.
                max_budget states the maximum resource allocation a pipeline is going to
                be ran. For example, if the budget_type is epochs, and max_budget=50,
                then the pipeline training will be terminated after 50 epochs.
            total_walltime_limit (int: default=100):
                Time limit in seconds for the search of appropriate models.
                By increasing this value, autopytorch has a higher
                chance of finding better models.
            func_eval_time_limit_secs (Optional[int]):
                Time limit for a single call to the machine learning model.
                Model fitting will be terminated if the machine
                learning algorithm runs over the time limit. Set
                this value high enough so that typical machine
                learning algorithms can be fit on the training
                data.
                When set to None, this time will automatically be set to
                total_walltime_limit // 2 to allow enough time to fit
                at least 2 individual machine learning algorithms.
                Set to np.inf in case no time limit is desired.
            enable_traditional_pipeline (bool: default=True):
                We fit traditional machine learning algorithms
                (LightGBM, CatBoost, RandomForest, ExtraTrees, KNN, SVM)
                prior building PyTorch Neural Networks. You can disable this
                feature by turning this flag to False. All machine learning
                algorithms that are fitted during search() are considered for
                ensemble building.
            memory_limit (Optional[int]: default=4096):
                Memory limit in MB for the machine learning algorithm.
                Autopytorch will stop fitting the machine learning algorithm
                if it tries to allocate more than memory_limit MB. If None
                is provided, no memory limit is set. In case of multi-processing,
                memory_limit will be per job. This memory limit also applies to
                the ensemble creation process.
            smac_scenario_args (Optional[Dict]):
                Additional arguments inserted into the scenario of SMAC. See the
                `SMAC documentation <https://automl.github.io/SMAC3/master/options.html?highlight=scenario#scenario>`_
                for a list of available arguments.
            get_smac_object_callback (Optional[Callable]):
                Callback function to create an object of class
                `smac.optimizer.smbo.SMBO <https://automl.github.io/SMAC3/master/apidoc/smac.optimizer.smbo.html>`_.
                The function must accept the arguments scenario_dict,
                instances, num_params, runhistory, seed and ta. This is
                an advanced feature. Use only if you are familiar with
                `SMAC <https://automl.github.io/SMAC3/master/index.html>`_.
            all_supported_metrics (bool: default=True):
                If True, all metrics supporting current task will be calculated
                for each pipeline and results will be available via cv_results
            precision (int: default=32):
                Numeric precision used when loading ensemble data.
                Can be either '16', '32' or '64'.
            disable_file_output (Union[bool, List]):
                If True, disable model and prediction output.
                Can also be used as a list to pass more fine-grained
                information on what to save. Allowed elements in the list are:

                + `y_optimization`:
                    do not save the predictions for the optimization set,
                    which would later on be used to build an ensemble. Note that SMAC
                    optimizes a metric evaluated on the optimization set.
                + `pipeline`:
                    do not save any individual pipeline files
                + `pipelines`:
                    In case of cross validation, disables saving the joint model of the
                    pipelines fit on each fold.
                + `y_test`:
                    do not save the predictions for the test set.
            load_models (bool: default=True):
                Whether to load the models after fitting AutoPyTorch.
            portfolio_selection (Optional[str]):
                This argument controls the initial configurations that
                AutoPyTorch uses to warm start SMAC for hyperparameter
                optimization. By default, no warm-starting happens.
                The user can provide a path to a json file containing
                configurations, similar to (...herepathtogreedy...).
                Additionally, the keyword 'greedy' is supported,
                which would use the default portfolio from
                `AutoPyTorch Tabular <https://arxiv.org/abs/2006.13799>`_.

        Returns:
            self

        """
        if dataset_name is None:
            # Process-id-seeded UUID keeps names unique across parallel runs.
            dataset_name = str(uuid.uuid1(clock_seq=os.getpid()))

        # We have to create a logger at this point for the validator
        self._logger = self._get_logger(dataset_name)

        # Create a validator object to make sure that the data provided by
        # the user matches the autopytorch requirements
        self.InputValidator = TabularInputValidator(
            is_classification=True,
            logger_port=self._logger_port,
        )

        # Fit an input validator to check the provided data
        # Also, an encoder is fit to both train and test data,
        # to prevent unseen categories during inference
        self.InputValidator.fit(X_train=X_train,
                                y_train=y_train,
                                X_test=X_test,
                                y_test=y_test)

        self.dataset = TabularDataset(
            X=X_train,
            Y=y_train,
            X_test=X_test,
            Y_test=y_test,
            validator=self.InputValidator,
            dataset_name=dataset_name,
            resampling_strategy=self.resampling_strategy,
            resampling_strategy_args=self.resampling_strategy_args,
        )

        # Delegate the actual optimization to the base implementation.
        return self._search(
            dataset=self.dataset,
            optimize_metric=optimize_metric,
            budget_type=budget_type,
            min_budget=min_budget,
            max_budget=max_budget,
            total_walltime_limit=total_walltime_limit,
            func_eval_time_limit_secs=func_eval_time_limit_secs,
            enable_traditional_pipeline=enable_traditional_pipeline,
            memory_limit=memory_limit,
            smac_scenario_args=smac_scenario_args,
            get_smac_object_callback=get_smac_object_callback,
            all_supported_metrics=all_supported_metrics,
            precision=precision,
            disable_file_output=disable_file_output,
            load_models=load_models,
            portfolio_selection=portfolio_selection,
        )
# Example #11
def test_not_supported():
    """TabularDataset must refuse to build without a feature validator."""
    expected_message = r".*A feature validator is required to build.*"
    with pytest.raises(ValueError, match=expected_message):
        TabularDataset(np.ones(10), np.ones(10))
# Example #12
    # Get data to train
    fit_dictionary = get_data_to_train()

    # Build a repository with random fitted models
    backend = create(temporary_directory='./tmp/autoPyTorch_ensemble_test_tmp',
                     output_directory='./tmp/autoPyTorch_ensemble_test_out',
                     delete_tmp_folder_after_terminate=False)
    fit_dictionary['backend'] = backend

    # Create the directory structure
    backend._make_internals_directory()

    # Create a datamanager for this toy problem
    datamanager = TabularDataset(
        X=fit_dictionary['X_train'],
        Y=fit_dictionary['y_train'],
        X_test=fit_dictionary['X_test'],
        Y_test=fit_dictionary['y_test'],
    )
    backend.save_datamanager(datamanager)

    # Create some random models for the ensemble
    random_search_and_save(fit_dictionary, backend, num_models=1)

    # Build an ensemble from the above components.
    # Use a dask client here to make sure this works properly,
    # as with SMAC we will have to use a client.
    dask.config.set({'distributed.worker.daemon': False})
    dask_client = dask.distributed.Client(
        dask.distributed.LocalCluster(
            n_workers=2,
            processes=True,