Example #1
def test_split_data_raises_deprecated_random_state_warning(X_y_binary):
    X, y = X_y_binary
    with warnings.catch_warnings(record=True) as warn:
        warnings.simplefilter("always")
        split_data(X, y, test_size=0.2, problem_type="binary", random_state=0)
        assert str(warn[0].message).startswith(
            "Argument 'random_state' has been deprecated in favor of 'random_seed'"
        )

    with warnings.catch_warnings(record=True) as warn:
        warnings.simplefilter("always")
        split_data(X, y, test_size=0.2, problem_type="binary", random_seed=0)
        assert not warn
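
The warning asserted above points at a random_state -> random_seed deprecation shim inside split_data; a deprecate_arg helper with exactly this argument order also appears in Example #15. A minimal, self-contained sketch of that pattern (the helper body here is assumed, not copied from the library):

import warnings


def deprecate_arg(old_arg, new_arg, old_value, new_value):
    # Sketch of the helper pattern: if the deprecated keyword was supplied,
    # warn and keep its value; otherwise use the replacement keyword's value.
    if old_value is not None:
        warnings.warn(
            f"Argument '{old_arg}' has been deprecated in favor of '{new_arg}'. "
            f"Passing '{old_arg}' in future releases will result in an error.",
            DeprecationWarning,
        )
        return old_value
    return new_value


# Mirrors the test above: passing the deprecated keyword warns, the new one does not.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    seed = deprecate_arg("random_state", "random_seed", 0, 0)
assert seed == 0
assert str(caught[0].message).startswith(
    "Argument 'random_state' has been deprecated in favor of 'random_seed'"
)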
Example #2
    def train_pipeline(pipeline, X, y, optimize_thresholds, objective):
        """Train a pipeline and tune the threshold if necessary.

        Arguments:
            pipeline (PipelineBase): Pipeline to train.
            X (ww.DataTable, pd.DataFrame): Features to train on.
            y (ww.DataColumn, pd.Series): Target to train on.
            optimize_thresholds (bool): Whether to tune the threshold (if pipeline supports it).
            objective (ObjectiveBase): Objective used in threshold tuning.

        Returns:
            pipeline (PipelineBase) - trained pipeline.
        """
        X_threshold_tuning = None
        y_threshold_tuning = None
        if optimize_thresholds and pipeline.can_tune_threshold_with_objective(
                objective):
            X, X_threshold_tuning, y, y_threshold_tuning = split_data(
                X,
                y,
                pipeline.problem_type,
                test_size=0.2,
                random_seed=pipeline.random_seed)
        cv_pipeline = pipeline.clone()
        cv_pipeline.fit(X, y)
        tune_binary_threshold(cv_pipeline, objective, cv_pipeline.problem_type,
                              X_threshold_tuning, y_threshold_tuning)
        return cv_pipeline
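
A hedged usage sketch for train_pipeline. The import paths and the component names in the pipeline class are assumptions modeled on Examples #5 and #8, not taken verbatim from the library:

import pandas as pd
from evalml.automl.engine import EngineBase   # assumed import path
from evalml.objectives import F1
from evalml.pipelines import BinaryClassificationPipeline


class SimpleBinaryPipeline(BinaryClassificationPipeline):
    # Hypothetical pipeline definition, following the pattern in Example #8.
    component_graph = ["Imputer", "Random Forest Classifier"]
    custom_name = "Simple Binary Pipeline"


X = pd.DataFrame({"a": range(100), "b": [i % 3 for i in range(100)]})
y = pd.Series([i % 2 for i in range(100)])

# With a threshold-tunable objective such as F1, train_pipeline splits off 20% of
# the data for threshold tuning, fits a clone on the remainder, and then tunes the
# binary decision threshold on the held-out slice.
fitted = EngineBase.train_pipeline(
    SimpleBinaryPipeline({}), X, y, optimize_thresholds=True, objective=F1()
)
print(fitted.threshold)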
Example #3
def test_pipeline_metrics(X_y_binary, get_test_params):
    parameters, components, metrics, random_state = get_test_params
    X, y = X_y_binary
    X_train, X_test, y_train, y_test = split_data(X,
                                                  y,
                                                  random_state=random_state)
    pipeline = KeystoneXL(
        parameters=parameters,  # noqa: F841
        components=components,
        random_state=random_state)
    pipeline.fit(X_train, y_train)
    y_pred = pipeline.predict(X_test)
    result = pipeline.metrics(y_predicted=y_pred,
                              y_true=y_test,
                              metrics=metrics)
    np.testing.assert_allclose(
        y_pred.tolist(),
        [1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1])
    assert result == {
        'AUC': 0.9166666666666667,
        'AccuracyBinary': 0.9,
        'AccuracyMulticlass': 0.9,
        'F1': 0.9090909090909091,
        'MSE': 0.1,
        'Precision': 1.0,
        'R2': 0.5833333333333334,
        'Recall': 0.8333333333333334
    }
Example #4
def test_automl_time_series_classification_threshold(mock_binary_fit, mock_binary_score, mock_predict_proba, mock_optimize_threshold, mock_split_data,
                                                     optimize, objective, X_y_binary):
    X, y = X_y_binary
    mock_binary_score.return_value = {objective: 0.4}
    problem_type = 'time series binary'

    configuration = {"gap": 0, "max_delay": 0, 'delay_target': False, 'delay_features': True}

    mock_optimize_threshold.return_value = 0.62
    mock_split_data.return_value = split_data(X, y, problem_type, test_size=0.2, random_state=0)
    automl = AutoMLSearch(X_train=X, y_train=y, problem_type=problem_type,
                          problem_configuration=configuration, objective=objective, optimize_thresholds=optimize,
                          max_batches=2)
    automl.search()
    assert isinstance(automl.data_splitter, TimeSeriesSplit)
    if objective == 'Log Loss Binary':
        mock_optimize_threshold.assert_not_called()
        assert automl.best_pipeline.threshold is None
        mock_split_data.assert_not_called()
    elif optimize and objective == 'F1':
        mock_optimize_threshold.assert_called()
        assert automl.best_pipeline.threshold == 0.62
        mock_split_data.assert_called()
        assert str(mock_split_data.call_args[0][2]) == problem_type
    elif not optimize and objective == 'F1':
        mock_optimize_threshold.assert_not_called()
        assert automl.best_pipeline.threshold == 0.5
        mock_split_data.assert_not_called()
Example #5
def test_train_pipeline_trains_and_tunes_threshold(
        mock_split_data, mock_pipeline_fit, mock_predict_proba,
        mock_encode_targets, mock_optimize, X_y_binary,
        dummy_binary_pipeline_class):
    X, y = X_y_binary
    mock_split_data.return_value = split_data(X,
                                              y,
                                              "binary",
                                              test_size=0.2,
                                              random_seed=0)

    _ = EngineBase.train_pipeline(dummy_binary_pipeline_class({}),
                                  X,
                                  y,
                                  optimize_thresholds=True,
                                  objective=LogLossBinary())

    mock_pipeline_fit.assert_called_once()
    mock_optimize.assert_not_called()
    mock_split_data.assert_not_called()

    mock_pipeline_fit.reset_mock()
    mock_optimize.reset_mock()
    mock_split_data.reset_mock()

    _ = EngineBase.train_pipeline(dummy_binary_pipeline_class({}),
                                  X,
                                  y,
                                  optimize_thresholds=True,
                                  objective=F1())
    mock_pipeline_fit.assert_called_once()
    mock_optimize.assert_called_once()
    mock_split_data.assert_called_once()
Example #6
 def _find_best_pipeline(self):
     """Finds the best pipeline in the rankings
     If self._best_pipeline already exists, check to make sure it is different from the current best pipeline before training and thresholding"""
     if len(self.rankings) == 0:
         return
     best_pipeline = self.rankings.iloc[0]
     if not (self._best_pipeline and self._best_pipeline
             == self.get_pipeline(best_pipeline['id'])):
         self._best_pipeline = self.get_pipeline(best_pipeline['id'])
         if self._train_best_pipeline:
             X_threshold_tuning = None
             y_threshold_tuning = None
             X_train, y_train = self.X_train, self.y_train
             if is_binary(self.problem_type) and self.objective.is_defined_for_problem_type(self.problem_type) \
                and self.optimize_thresholds and self.objective.can_optimize_threshold:
                 X_train, X_threshold_tuning, y_train, y_threshold_tuning = split_data(
                     X_train,
                     y_train,
                     self.problem_type,
                     test_size=0.2,
                     random_seed=self.random_seed)
             self._best_pipeline.fit(X_train, y_train)
             tune_binary_threshold(self._best_pipeline, self.objective,
                                   self.problem_type, X_threshold_tuning,
                                   y_threshold_tuning)
Example #7
def test_split_data(problem_type, data_type, X_y_binary, X_y_multi,
                    X_y_regression, make_data_type):
    if is_binary(problem_type):
        X, y = X_y_binary
    if is_multiclass(problem_type):
        X, y = X_y_multi
    if is_regression(problem_type):
        X, y = X_y_regression
    problem_configuration = None
    if is_time_series(problem_type):
        problem_configuration = {'gap': 1, 'max_delay': 7}

    X = make_data_type(data_type, X)
    y = make_data_type(data_type, y)

    test_pct = 0.25
    X_train, X_test, y_train, y_test = split_data(
        X,
        y,
        test_size=test_pct,
        problem_type=problem_type,
        problem_configuration=problem_configuration)
    test_size = len(X) * test_pct
    train_size = len(X) - test_size
    assert len(X_train) == train_size
    assert len(X_test) == test_size
    assert len(y_train) == train_size
    assert len(y_test) == test_size
    assert isinstance(X_train, ww.DataTable)
    assert isinstance(X_test, ww.DataTable)
    assert isinstance(y_train, ww.DataColumn)
    assert isinstance(y_test, ww.DataColumn)
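
The tests above pin down split_data's observable behavior rather than its internals. A rough, library-free approximation of that behavior, assuming a shuffled (optionally stratified) split for classification and a chronological tail split for time series, with problem_type passed as a plain string:

import pandas as pd
from sklearn.model_selection import train_test_split


def split_data_sketch(X, y, problem_type, test_size=0.2, random_seed=0):
    # Sketch only: evalml's split_data also handles Woodwork types and
    # problem_configuration, which are omitted here.
    if "time series" in problem_type:
        # Preserve order: the tail of the data becomes the holdout set.
        split_at = int(len(X) * (1 - test_size))
        return X.iloc[:split_at], X.iloc[split_at:], y.iloc[:split_at], y.iloc[split_at:]
    stratify = y if problem_type in ("binary", "multiclass") else None
    return train_test_split(
        X, y, test_size=test_size, random_state=random_seed, stratify=stratify
    )


X = pd.DataFrame({"a": range(100)})
y = pd.Series([i % 2 for i in range(100)])
X_train, X_test, y_train, y_test = split_data_sketch(X, y, "binary", test_size=0.25)
assert len(X_train) == 75 and len(X_test) == 25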
Example #8
def test_custom_indices():
    class MyPipeline(RegressionPipeline):
        component_graph = ['Imputer', 'One Hot Encoder', 'Linear Regressor']
        custom_name = "My Pipeline"

    X = pd.DataFrame({"a": ["a", "b", "a", "a", "a", "c", "c", "c"], "b": [0, 1, 1, 1, 1, 1, 0, 1]})
    y = pd.Series([0, 0, 0, 1, 0, 1, 0, 0], index=[7, 2, 1, 4, 5, 3, 6, 8])

    x1, x2, y1, y2 = split_data(X, y, problem_type='regression')
    pipeline = MyPipeline({})
    pipeline.fit(x2, y2)
    assert not pd.isnull(pipeline.predict(X).to_series()).any()
Example #9
def test_ensemble_data(mock_fit, mock_score, dummy_binary_pipeline_class,
                       stackable_classifiers):
    X = pd.DataFrame({"a": [i for i in range(100)]})
    y = pd.Series([i % 2 for i in range(100)])
    automl = AutoMLSearch(X_train=X,
                          y_train=y,
                          problem_type='binary',
                          max_batches=19,
                          ensembling=True,
                          _ensembling_split_size=0.25)
    mock_should_continue_callback = MagicMock(return_value=True)
    mock_pre_evaluation_callback = MagicMock()
    mock_post_evaluation_callback = MagicMock()

    training_indices, ensembling_indices, _, _ = split_data(
        ww.DataTable(np.arange(X.shape[0])),
        y,
        problem_type='binary',
        test_size=0.25,
        random_seed=0)
    training_indices, ensembling_indices = training_indices.to_dataframe(
    )[0].tolist(), ensembling_indices.to_dataframe()[0].tolist()

    engine = SequentialEngine(
        X_train=infer_feature_types(X),
        y_train=infer_feature_types(y),
        ensembling_indices=ensembling_indices,
        automl=automl,
        should_continue_callback=mock_should_continue_callback,
        pre_evaluation_callback=mock_pre_evaluation_callback,
        post_evaluation_callback=mock_post_evaluation_callback)
    pipeline1 = [dummy_binary_pipeline_class({'Mock Classifier': {'a': 1}})]
    engine.evaluate_batch(pipeline1)
    # check the fit length is correct, taking into account the data splits
    assert len(mock_fit.call_args[0][0]) == int(2 / 3 * len(training_indices))

    input_pipelines = [
        make_pipeline_from_components([classifier], problem_type='binary')
        for classifier in stackable_classifiers
    ]
    pipeline2 = [
        make_pipeline_from_components(
            [StackedEnsembleClassifier(input_pipelines, n_jobs=1)],
            problem_type='binary',
            custom_name="Stacked Ensemble Classification Pipeline")
    ]
    engine.evaluate_batch(pipeline2)
    assert len(mock_fit.call_args[0][0]) == int(2 / 3 *
                                                len(ensembling_indices))
Example #10
def test_pipeline_predict(X_y_binary, get_test_params):
    parameters, components, metrics, random_state = get_test_params
    X, y = X_y_binary
    X_train, X_test, y_train, y_test = split_data(X,
                                                  y,
                                                  random_state=random_state)
    pipeline = KeystoneXL(
        parameters=parameters,  # noqa: F841
        components=components,
        random_state=random_state)
    pipeline.fit(X_train, y_train)
    y_pred = pipeline.predict(X_test)
    np.testing.assert_allclose(
        y_pred.tolist(),
        [1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1])
Example #11
def test_custom_indices():
    # custom regression pipeline
    class MyTargetPipeline(RegressionPipeline):
        component_graph = ['Imputer', 'Target Encoder', 'Linear Regressor']
        custom_name = "Target Pipeline"

    X = pd.DataFrame({
        "a": ["a", "b", "a", "a", "a", "c", "c", "c"],
        "b": [0, 1, 1, 1, 1, 1, 0, 1]
    })
    y = pd.Series([0, 0, 0, 1, 0, 1, 0, 0], index=[7, 2, 1, 4, 5, 3, 6, 8])

    x1, x2, y1, y2 = split_data(X, y, problem_type='binary')
    tp = MyTargetPipeline({})
    tp.fit(x2, y2)
Example #12
def test_evaluate_pipeline_handles_ensembling_indices(
        mock_fit, mock_score, dummy_binary_pipeline_class,
        stackable_classifiers):
    X = ww.DataTable(pd.DataFrame({"a": [i for i in range(100)]}))
    y = ww.DataColumn(pd.Series([i % 2 for i in range(100)]))

    automl = AutoMLSearch(X_train=X,
                          y_train=y,
                          problem_type='binary',
                          max_batches=19,
                          ensembling=True,
                          _ensembling_split_size=0.25)

    training_indices, ensembling_indices, _, _ = split_data(
        ww.DataTable(np.arange(X.shape[0])),
        y,
        problem_type='binary',
        test_size=0.25,
        random_seed=0)
    training_indices, ensembling_indices = training_indices.to_dataframe(
    )[0].tolist(), ensembling_indices.to_dataframe()[0].tolist()

    pipeline1 = dummy_binary_pipeline_class({'Mock Classifier': {'a': 1}})

    _ = evaluate_pipeline(pipeline1, automl, X, y, logger=MagicMock())
    # check the fit length is correct, taking into account the data splits
    assert len(mock_fit.call_args[0][0]) == int(2 / 3 * len(training_indices))

    input_pipelines = [
        BinaryClassificationPipeline([classifier])
        for classifier in stackable_classifiers
    ]
    ensemble = BinaryClassificationPipeline(
        [StackedEnsembleClassifier],
        parameters={
            "Stacked Ensemble Classifier": {
                "input_pipelines": input_pipelines,
                "n_jobs": 1
            }
        })

    _ = evaluate_pipeline(ensemble, automl, X, y, logger=MagicMock())
    assert len(mock_fit.call_args[0][0]) == int(2 / 3 *
                                                len(ensembling_indices))
Example #13
def test_pipeline_fit(X_y_binary, get_test_params):
    parameters, components, metrics, random_state = get_test_params
    X, y = X_y_binary
    X_train, X_test, y_train, y_test = split_data(X,
                                                  y,
                                                  random_state=random_state)
    pipeline = KeystoneXL(
        parameters=parameters,  # noqa: F841
        components=components,
        random_state=random_state)

    # Check that pipeline fit function is called for now.
    assert not pipeline.fit.has_been_called
    pipeline.fit(X_train, y_train)
    assert pipeline.fit.has_been_called

    # Use prediction to infer that fitting done properly
    y_pred = pipeline.predict(X_test)
    np.testing.assert_allclose(
        y_pred.tolist(),
        [1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1])
Example #14
def test_add_ensemble_data():
    X = pd.DataFrame({"a": [i for i in range(100)]})
    y = pd.Series([i % 2 for i in range(100)])
    engine = SequentialEngine(X_train=X, y_train=y, automl=None)
    pd.testing.assert_frame_equal(engine.X_train, X)
    assert engine.ensembling_indices is None

    training_indices, ensembling_indices, _, _ = split_data(
        ww.DataTable(np.arange(X.shape[0])),
        y,
        problem_type='binary',
        test_size=0.2,
        random_seed=0)
    training_indices, ensembling_indices = training_indices.to_dataframe(
    )[0].tolist(), ensembling_indices.to_dataframe()[0].tolist()
    engine = SequentialEngine(X_train=X,
                              y_train=y,
                              ensembling_indices=ensembling_indices,
                              automl=None)
    pd.testing.assert_frame_equal(engine.X_train, X)
    assert engine.ensembling_indices == ensembling_indices
Example #15
    def __init__(self,
                 X_train=None,
                 y_train=None,
                 problem_type=None,
                 objective='auto',
                 max_iterations=None,
                 max_time=None,
                 patience=None,
                 tolerance=None,
                 data_splitter=None,
                 allowed_pipelines=None,
                 allowed_model_families=None,
                 start_iteration_callback=None,
                 add_result_callback=None,
                 error_callback=None,
                 additional_objectives=None,
                 random_state=None,
                 random_seed=0,
                 n_jobs=-1,
                 tuner_class=None,
                 optimize_thresholds=False,
                 ensembling=False,
                 max_batches=None,
                 problem_configuration=None,
                 train_best_pipeline=True,
                 pipeline_parameters=None,
                 _ensembling_split_size=0.2,
                 _pipelines_per_batch=5):
        """Automated pipeline search

        Arguments:
            X_train (pd.DataFrame, ww.DataTable): The input training data of shape [n_samples, n_features]. Required.

            y_train (pd.Series, ww.DataColumn): The target training data of length [n_samples]. Required for supervised learning tasks.

            problem_type (str or ProblemTypes): type of supervised learning problem. See evalml.problem_types.ProblemType.all_problem_types for a full list.

            objective (str, ObjectiveBase): The objective to optimize for. Used to propose and rank pipelines, but not for optimizing each pipeline during fit-time.
                When set to 'auto', chooses:

                - LogLossBinary for binary classification problems,
                - LogLossMulticlass for multiclass classification problems, and
                - R2 for regression problems.

            max_iterations (int): Maximum number of iterations to search. If max_iterations, max_time,
                and max_batches are all unset, the search defaults to max_batches=1.

            max_time (int, str): Maximum time to search for pipelines.
                This will not start a new pipeline search after the duration
                has elapsed. If it is an integer, then the time will be in seconds.
                For strings, time can be specified as seconds, minutes, or hours.

            patience (int): Number of iterations without improvement to stop search early. Must be positive.
                If None, early stopping is disabled. Defaults to None.

            tolerance (float): Minimum percentage difference to qualify as score improvement for early stopping.
                Only applicable if patience is not None. Defaults to None.

            allowed_pipelines (list(class)): A list of PipelineBase subclasses indicating the pipelines allowed in the search.
                The default of None indicates all pipelines for this problem type are allowed. Setting this field will cause
                allowed_model_families to be ignored.

            allowed_model_families (list(str, ModelFamily)): The model families to search. The default of None searches over all
                model families. Run evalml.pipelines.components.utils.allowed_model_families("binary") to see options. Change `binary`
                to `multiclass` or `regression` depending on the problem type. Note that if allowed_pipelines is provided,
                this parameter will be ignored.

            data_splitter (sklearn.model_selection.BaseCrossValidator): Data splitting method to use. Defaults to StratifiedKFold.

            tuner_class: The tuner class to use. Defaults to SKOptTuner.

            start_iteration_callback (callable): Function called before each pipeline training iteration.
                Callback function takes three positional parameters: The pipeline class, the pipeline parameters, and the AutoMLSearch object.

            add_result_callback (callable): Function called after each pipeline training iteration.
                Callback function takes three positional parameters: A dictionary containing the training results for the new pipeline, an untrained_pipeline containing the parameters used during training, and the AutoMLSearch object.

            error_callback (callable): Function called when `search()` errors and raises an Exception.
                Callback function takes three positional parameters: the Exception raised, the traceback, and the AutoMLSearch object.
                Must also accept kwargs, so AutoMLSearch is able to pass along other appropriate parameters by default.
                Defaults to None, which will call `log_error_callback`.

            additional_objectives (list): Custom set of objectives to score on.
                Will override default objectives for problem type if not empty.

            random_state (int): Deprecated - use random_seed instead.

            random_seed (int): Seed for the random number generator. Defaults to 0.

            n_jobs (int or None): Non-negative integer describing level of parallelism used for pipelines.
                None and 1 are equivalent. If set to -1, all CPUs are used. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.

            ensembling (boolean): If True, runs ensembling in a separate batch after every allowed pipeline class has been iterated over.
                If the number of unique pipelines to search over per batch is one, ensembling will not run. Defaults to False.

            max_batches (int): The maximum number of batches of pipelines to search. Parameters max_time and
                max_iterations take precedence over max_batches when stopping the search.

            problem_configuration (dict, None): Additional parameters needed to configure the search. For example,
                in time series problems, values should be passed in for the gap and max_delay variables.

            train_best_pipeline (boolean): Whether or not to train the best pipeline before returning it. Defaults to True.

            _ensembling_split_size (float): The amount of the training data we'll set aside for training ensemble metalearners. Only used when ensembling is True.
                Must be between 0 and 1, exclusive. Defaults to 0.2.

            _pipelines_per_batch (int): The number of pipelines to train for every batch after the first one.
                The first batch will train a baseline pipeline + one of each pipeline family allowed in the search.
        """
        if X_train is None:
            raise ValueError('Must specify training data as a 2d array using the X_train argument')
        if y_train is None:
            raise ValueError('Must specify training data target values as a 1d vector using the y_train argument')
        try:
            self.problem_type = handle_problem_types(problem_type)
        except ValueError:
            raise ValueError('choose one of (binary, multiclass, regression) as problem_type')

        self.tuner_class = tuner_class or SKOptTuner
        self.start_iteration_callback = start_iteration_callback
        self.add_result_callback = add_result_callback
        self.error_callback = error_callback or log_error_callback
        self.data_splitter = data_splitter
        self.optimize_thresholds = optimize_thresholds
        self.ensembling = ensembling
        if objective == 'auto':
            objective = get_default_primary_search_objective(self.problem_type.value)
        objective = get_objective(objective, return_instance=False)
        self.objective = self._validate_objective(objective)
        if self.data_splitter is not None and not issubclass(self.data_splitter.__class__, BaseCrossValidator):
            raise ValueError("Not a valid data splitter")
        if not objective.is_defined_for_problem_type(self.problem_type):
            raise ValueError("Given objective {} is not compatible with a {} problem.".format(self.objective.name, self.problem_type.value))
        if additional_objectives is None:
            additional_objectives = get_core_objectives(self.problem_type)
            # if our main objective is part of default set of objectives for problem_type, remove it
            existing_main_objective = next((obj for obj in additional_objectives if obj.name == self.objective.name), None)
            if existing_main_objective is not None:
                additional_objectives.remove(existing_main_objective)
        else:
            additional_objectives = [get_objective(o) for o in additional_objectives]
        additional_objectives = [self._validate_objective(obj) for obj in additional_objectives]
        self.additional_objectives = additional_objectives
        self.objective_name_to_class = {o.name: o for o in [self.objective] + self.additional_objectives}

        if not isinstance(max_time, (int, float, str, type(None))):
            raise TypeError(f"Parameter max_time must be a float, int, string or None. Received {type(max_time)} with value {str(max_time)}..")
        if isinstance(max_time, (int, float)) and max_time < 0:
            raise ValueError(f"Parameter max_time must be None or non-negative. Received {max_time}.")
        if max_batches is not None and max_batches < 0:
            raise ValueError(f"Parameter max_batches must be None or non-negative. Received {max_batches}.")
        if max_iterations is not None and max_iterations < 0:
            raise ValueError(f"Parameter max_iterations must be None or non-negative. Received {max_iterations}.")
        self.max_time = convert_to_seconds(max_time) if isinstance(max_time, str) else max_time
        self.max_iterations = max_iterations
        self.max_batches = max_batches
        self._pipelines_per_batch = _pipelines_per_batch
        if not self.max_iterations and not self.max_time and not self.max_batches:
            self.max_batches = 1
            logger.info("Using default limit of max_batches=1.\n")

        if patience and (not isinstance(patience, int) or patience < 0):
            raise ValueError("patience value must be a positive integer. Received {} instead".format(patience))

        if tolerance and (tolerance > 1.0 or tolerance < 0.0):
            raise ValueError("tolerance value must be a float between 0.0 and 1.0 inclusive. Received {} instead".format(tolerance))

        self.patience = patience
        self.tolerance = tolerance or 0.0

        self._results = {
            'pipeline_results': {},
            'search_order': [],
            'errors': []
        }
        self.random_seed = deprecate_arg("random_state", "random_seed", random_state, random_seed)
        self.n_jobs = n_jobs

        self.plot = None
        try:
            self.plot = PipelineSearchPlots(self)
        except ImportError:
            logger.warning("Unable to import plotly; skipping pipeline search plotting\n")

        self._data_check_results = None

        self.allowed_pipelines = allowed_pipelines
        self.allowed_model_families = allowed_model_families
        self._automl_algorithm = None
        self._start = 0.0
        self._baseline_cv_scores = {}
        self.show_batch_output = False

        self._validate_problem_type()
        self.problem_configuration = self._validate_problem_configuration(problem_configuration)
        self._train_best_pipeline = train_best_pipeline
        self._best_pipeline = None
        self._searched = False

        self.X_train = infer_feature_types(X_train)
        self.y_train = infer_feature_types(y_train)
        self.ensembling_indices = None

        default_data_splitter = make_data_splitter(self.X_train, self.y_train, self.problem_type, self.problem_configuration,
                                                   n_splits=3, shuffle=True, random_seed=self.random_seed)
        self.data_splitter = self.data_splitter or default_data_splitter
        self.pipeline_parameters = pipeline_parameters if pipeline_parameters is not None else {}
        self.search_iteration_plot = None
        self._interrupted = False

        if self.allowed_pipelines is None:
            logger.info("Generating pipelines to search over...")
            allowed_estimators = get_estimators(self.problem_type, self.allowed_model_families)
            logger.debug(f"allowed_estimators set to {[estimator.name for estimator in allowed_estimators]}")
            self.allowed_pipelines = [make_pipeline(self.X_train, self.y_train, estimator, self.problem_type, custom_hyperparameters=self.pipeline_parameters) for estimator in allowed_estimators]

        if self.allowed_pipelines == []:
            raise ValueError("No allowed pipelines to search")

        run_ensembling = self.ensembling
        if run_ensembling and len(self.allowed_pipelines) == 1:
            logger.warning("Ensembling is set to True, but the number of unique pipelines is one, so ensembling will not run.")
            run_ensembling = False

        if run_ensembling and self.max_iterations is not None:
            # Baseline + first batch + each pipeline iteration + 1
            first_ensembling_iteration = (1 + len(self.allowed_pipelines) + len(self.allowed_pipelines) * self._pipelines_per_batch + 1)
            if self.max_iterations < first_ensembling_iteration:
                run_ensembling = False
                logger.warning(f"Ensembling is set to True, but max_iterations is too small, so ensembling will not run. Set max_iterations >= {first_ensembling_iteration} to run ensembling.")
            else:
                logger.info(f"Ensembling will run at the {first_ensembling_iteration} iteration and every {len(self.allowed_pipelines) * self._pipelines_per_batch} iterations after that.")

        if self.max_batches and self.max_iterations is None:
            self.show_batch_output = True
            if run_ensembling:
                ensemble_nth_batch = len(self.allowed_pipelines) + 1
                num_ensemble_batches = (self.max_batches - 1) // ensemble_nth_batch
                if num_ensemble_batches == 0:
                    run_ensembling = False
                    logger.warning(f"Ensembling is set to True, but max_batches is too small, so ensembling will not run. Set max_batches >= {ensemble_nth_batch + 1} to run ensembling.")
                else:
                    logger.info(f"Ensembling will run every {ensemble_nth_batch} batches.")

                self.max_iterations = (1 + len(self.allowed_pipelines) +
                                       self._pipelines_per_batch * (self.max_batches - 1 - num_ensemble_batches) +
                                       num_ensemble_batches)
            else:
                self.max_iterations = 1 + len(self.allowed_pipelines) + (self._pipelines_per_batch * (self.max_batches - 1))
        if run_ensembling:
            if not (0 < _ensembling_split_size < 1):
                raise ValueError(f"Ensembling split size must be between 0 and 1 exclusive, received {_ensembling_split_size}")
            X_shape = ww.DataTable(np.arange(self.X_train.shape[0]))
            _, ensembling_indices, _, _ = split_data(X_shape, self.y_train, problem_type=self.problem_type, test_size=_ensembling_split_size, random_seed=self.random_seed)
            self.ensembling_indices = ensembling_indices.to_dataframe()[0].tolist()

        self._engine = SequentialEngine(self.X_train,
                                        self.y_train,
                                        self.ensembling_indices,
                                        self,
                                        should_continue_callback=self._should_continue,
                                        pre_evaluation_callback=self._pre_evaluation_callback,
                                        post_evaluation_callback=self._post_evaluation_callback)

        self.allowed_model_families = list(set([p.model_family for p in (self.allowed_pipelines)]))

        logger.debug(f"allowed_pipelines set to {[pipeline.name for pipeline in self.allowed_pipelines]}")
        logger.debug(f"allowed_model_families set to {self.allowed_model_families}")
        if len(self.problem_configuration):
            pipeline_params = {**{'pipeline': self.problem_configuration}, **self.pipeline_parameters}
        else:
            pipeline_params = self.pipeline_parameters

        self._automl_algorithm = IterativeAlgorithm(
            max_iterations=self.max_iterations,
            allowed_pipelines=self.allowed_pipelines,
            tuner_class=self.tuner_class,
            random_seed=self.random_seed,
            n_jobs=self.n_jobs,
            number_features=self.X_train.shape[1],
            pipelines_per_batch=self._pipelines_per_batch,
            ensembling=run_ensembling,
            pipeline_params=pipeline_params
        )
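
A minimal usage sketch for the constructor documented above. The demo data loader is an assumption about the surrounding package; the parameter choices mirror the defaults described in the docstring:

from evalml.automl import AutoMLSearch
from evalml.demos import load_breast_cancer  # assumed demo dataset loader

X, y = load_breast_cancer()
automl = AutoMLSearch(
    X_train=X,
    y_train=y,
    problem_type="binary",
    objective="auto",          # resolves to the default primary objective for binary problems
    max_batches=1,             # the default limit when no other stopping criterion is given
    optimize_thresholds=False,
)
automl.search()
print(automl.rankings.head())  # rankings is what _find_best_pipeline in Example #6 consumes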
Example #16
    def train_and_score_pipeline(pipeline, automl, full_X_train, full_y_train):
        """Given a pipeline, config and data, train and score the pipeline and return the CV or TV scores

        Arguments:
            pipeline (PipelineBase): The pipeline to score
            automl (AutoMLSearch): The AutoML search, used to access config and for the error callback
            full_X_train (ww.DataTable): Training features
            full_y_train (ww.DataColumn): Training target

        Returns:
            dict: A dict containing cv_score_mean, cv_scores, training_time and a cv_data structure with details.
        """
        start = time.time()
        cv_data = []
        logger.info("\tStarting cross validation")
        X_pd = _convert_woodwork_types_wrapper(full_X_train.to_dataframe())
        y_pd = _convert_woodwork_types_wrapper(full_y_train.to_series())
        for i, (train,
                valid) in enumerate(automl.data_splitter.split(X_pd, y_pd)):
            if pipeline.model_family == ModelFamily.ENSEMBLE and i > 0:
                # Stacked ensembles do CV internally, so we do not run CV here for performance reasons.
                logger.debug(
                    f"Skipping fold {i} because CV for stacked ensembles is not supported."
                )
                break
            logger.debug(f"\t\tTraining and scoring on fold {i}")
            X_train, X_valid = full_X_train.iloc[train], full_X_train.iloc[
                valid]
            y_train, y_valid = full_y_train.iloc[train], full_y_train.iloc[
                valid]
            if is_binary(automl.problem_type) or is_multiclass(
                    automl.problem_type):
                diff_train = set(
                    np.setdiff1d(full_y_train.to_series(),
                                 y_train.to_series()))
                diff_valid = set(
                    np.setdiff1d(full_y_train.to_series(),
                                 y_valid.to_series()))
                diff_string = f"Missing target values in the training set after data split: {diff_train}. " if diff_train else ""
                diff_string += f"Missing target values in the validation set after data split: {diff_valid}." if diff_valid else ""
                if diff_string:
                    raise Exception(diff_string)
            objectives_to_score = [automl.objective
                                   ] + automl.additional_objectives
            cv_pipeline = None
            try:
                X_threshold_tuning = None
                y_threshold_tuning = None
                if automl.optimize_thresholds and automl.objective.is_defined_for_problem_type(automl.problem_type) and \
                   automl.objective.can_optimize_threshold and is_binary(automl.problem_type):
                    X_train, X_threshold_tuning, y_train, y_threshold_tuning = split_data(
                        X_train,
                        y_train,
                        automl.problem_type,
                        test_size=0.2,
                        random_seed=automl.random_seed)
                cv_pipeline = pipeline.clone()
                logger.debug(f"\t\t\tFold {i}: starting training")
                cv_pipeline.fit(X_train, y_train)
                logger.debug(f"\t\t\tFold {i}: finished training")
                tune_binary_threshold(cv_pipeline, automl.objective,
                                      automl.problem_type, X_threshold_tuning,
                                      y_threshold_tuning)
                if X_threshold_tuning:
                    logger.debug(
                        f"\t\t\tFold {i}: Optimal threshold found ({cv_pipeline.threshold:.3f})"
                    )
                logger.debug(f"\t\t\tFold {i}: Scoring trained pipeline")
                scores = cv_pipeline.score(X_valid,
                                           y_valid,
                                           objectives=objectives_to_score)
                logger.debug(
                    f"\t\t\tFold {i}: {automl.objective.name} score: {scores[automl.objective.name]:.3f}"
                )
                score = scores[automl.objective.name]
            except Exception as e:
                if automl.error_callback is not None:
                    automl.error_callback(exception=e,
                                          traceback=traceback.format_tb(
                                              sys.exc_info()[2]),
                                          automl=automl,
                                          fold_num=i,
                                          pipeline=pipeline)
                if isinstance(e, PipelineScoreError):
                    nan_scores = {
                        objective: np.nan
                        for objective in e.exceptions
                    }
                    scores = {**nan_scores, **e.scored_successfully}
                    scores = OrderedDict({
                        o.name: scores[o.name]
                        for o in [automl.objective] +
                        automl.additional_objectives
                    })
                    score = scores[automl.objective.name]
                else:
                    score = np.nan
                    scores = OrderedDict(
                        zip([n.name for n in automl.additional_objectives],
                            [np.nan] * len(automl.additional_objectives)))

            ordered_scores = OrderedDict()
            ordered_scores.update({automl.objective.name: score})
            ordered_scores.update(scores)
            ordered_scores.update({"# Training": y_train.shape[0]})
            ordered_scores.update({"# Validation": y_valid.shape[0]})

            evaluation_entry = {
                "all_objective_scores": ordered_scores,
                "score": score,
                'binary_classification_threshold': None
            }
            if is_binary(
                    automl.problem_type
            ) and cv_pipeline is not None and cv_pipeline.threshold is not None:
                evaluation_entry[
                    'binary_classification_threshold'] = cv_pipeline.threshold
            cv_data.append(evaluation_entry)
        training_time = time.time() - start
        cv_scores = pd.Series([fold['score'] for fold in cv_data])
        cv_score_mean = cv_scores.mean()
        logger.info(
            f"\tFinished cross validation - mean {automl.objective.name}: {cv_score_mean:.3f}"
        )
        return {
            'cv_data': cv_data,
            'training_time': training_time,
            'cv_scores': cv_scores,
            'cv_score_mean': cv_score_mean
        }
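
A hedged sketch of consuming the dictionary this function returns: flatten the per-fold cv_data entries into a DataFrame for inspection. The result argument is a placeholder for an actual return value; every key used below is one the function constructs above.

import pandas as pd


def summarize_cv_result(result):
    # result is the dict returned above: cv_data, training_time, cv_scores, cv_score_mean.
    rows = []
    for fold_num, fold in enumerate(result["cv_data"]):
        scores = fold["all_objective_scores"]
        rows.append({
            "fold": fold_num,
            "score": fold["score"],
            "threshold": fold["binary_classification_threshold"],
            "n_training": scores["# Training"],
            "n_validation": scores["# Validation"],
        })
    summary = pd.DataFrame(rows)
    summary.attrs["cv_score_mean"] = result["cv_score_mean"]
    summary.attrs["training_time"] = result["training_time"]
    return summary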