Example #1
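# Checks that a frozen Undersampler parameter (sampling_ratio=0.5) is applied in the first batch and kept fixed in later batches.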
def test_iterative_algorithm_sampling_params(problem_type,
                                             mock_imbalanced_data_X_y):
    X, y = mock_imbalanced_data_X_y(problem_type, "some", 'small')
    estimators = get_estimators(problem_type, None)
    pipelines = [
        make_pipeline(X, y, e, problem_type, sampler_name='Undersampler')
        for e in estimators
    ]
    algo = IterativeAlgorithm(
        allowed_pipelines=pipelines,
        random_seed=0,
        _frozen_pipeline_parameters={"Undersampler": {
            "sampling_ratio": 0.5
        }})

    next_batch = algo.next_batch()
    for p in next_batch:
        for component in p._component_graph:
            if "sampler" in component.name:
                assert component.parameters["sampling_ratio"] == 0.5

    scores = np.arange(0, len(next_batch))
    for score, pipeline in zip(scores, next_batch):
        algo.add_result(score, pipeline, {"id": algo.pipeline_number})

    # make sure that future batches remain in the hyperparam range
    for i in range(1, 5):
        next_batch = algo.next_batch()
        for p in next_batch:
            for component in p._component_graph:
                if "sampler" in component.name:
                    assert component.parameters["sampling_ratio"] == 0.5
Example #2
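# Checks that _best_pipeline_info tracks the id of the best-scoring pipeline per model family and updates it as better scores are reported.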
def test_iterative_algorithm_results_best_pipeline_info_id(
        dummy_binary_pipeline_classes,
        logistic_regression_binary_pipeline_class):
    allowed_pipelines = [
        dummy_binary_pipeline_classes()[0],
        logistic_regression_binary_pipeline_class({})
    ]
    algo = IterativeAlgorithm(allowed_pipelines=allowed_pipelines)

    # initial batch contains one of each pipeline, with default parameters
    next_batch = algo.next_batch()
    scores = np.arange(0, len(next_batch))
    for pipeline_num, (score, pipeline) in enumerate(zip(scores, next_batch)):
        algo.add_result(score, pipeline,
                        {"id": algo.pipeline_number + pipeline_num})
    assert algo._best_pipeline_info[ModelFamily.RANDOM_FOREST]['id'] == 3
    assert algo._best_pipeline_info[ModelFamily.LINEAR_MODEL]['id'] == 2

    for i in range(1, 3):
        next_batch = algo.next_batch()
        scores = -np.arange(
            1, len(next_batch))  # Score always gets better with each pipeline
        for pipeline_num, (score,
                           pipeline) in enumerate(zip(scores, next_batch)):
            algo.add_result(score, pipeline,
                            {"id": algo.pipeline_number + pipeline_num})
            assert algo._best_pipeline_info[pipeline.model_family][
                'id'] == algo.pipeline_number + pipeline_num
Example #3
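# Checks that skopt spaces (Real, Integer, Categorical) passed via pipeline_params are sampled for the first batch and that later proposals stay inside the given ranges.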
def test_iterative_algorithm_pipeline_params_skopt(parameters, dummy_binary_pipeline_classes):
    dummy_binary_pipeline_classes = dummy_binary_pipeline_classes(parameters)
    algo = IterativeAlgorithm(allowed_pipelines=dummy_binary_pipeline_classes,
                              pipeline_params={'pipeline': {"gap": 2, "max_delay": 10},
                                               'Mock Classifier': {'dummy_parameter': parameters}},
                              random_state=0)

    next_batch = algo.next_batch()
    if isinstance(parameters, (Real, Integer)):
        parameter = parameters.rvs(random_state=0)[0]
    else:
        parameter = parameters.rvs(random_state=0)
    assert all([p.parameters['pipeline'] == {"gap": 2, "max_delay": 10} for p in next_batch])
    assert all([p.parameters['Mock Classifier'] == {"dummy_parameter": parameter, "n_jobs": -1} for p in next_batch])

    scores = np.arange(0, len(next_batch))
    for score, pipeline in zip(scores, next_batch):
        algo.add_result(score, pipeline)

    # make sure that future batches remain in the hyperparam range
    for i in range(1, 5):
        next_batch = algo.next_batch()
        for p in next_batch:
            if isinstance(parameters, Categorical):
                assert p.parameters['Mock Classifier']['dummy_parameter'] in ["random", "dummy", "test"]
            elif isinstance(parameters, Real):
                assert 0 < p.parameters['Mock Classifier']['dummy_parameter'] <= 1
            else:
                assert 1 <= p.parameters['Mock Classifier']['dummy_parameter'] <= 10
Example #4
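# Checks that plain values, lists, and tuples passed via pipeline_params are used in the first batch and respected as hyperparameter ranges afterwards.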
def test_iterative_algorithm_pipeline_params(parameters, dummy_binary_pipeline_classes):
    dummy_binary_pipeline_classes = dummy_binary_pipeline_classes(parameters)
    algo = IterativeAlgorithm(allowed_pipelines=dummy_binary_pipeline_classes,
                              pipeline_params={'pipeline': {"gap": 2, "max_delay": 10},
                                               'Mock Classifier': {'dummy_parameter': parameters}})

    next_batch = algo.next_batch()
    parameter = parameters
    if isinstance(parameter, (list, tuple)):
        parameter = parameters[0]
    assert all([p.parameters['pipeline'] == {"gap": 2, "max_delay": 10} for p in next_batch])
    assert all([p.parameters['Mock Classifier'] == {"dummy_parameter": parameter, "n_jobs": -1} for p in next_batch])

    scores = np.arange(0, len(next_batch))
    for score, pipeline in zip(scores, next_batch):
        algo.add_result(score, pipeline)

    # make sure that future batches remain in the hyperparam range
    for i in range(1, 5):
        next_batch = algo.next_batch()
        for p in next_batch:
            if isinstance(parameters, (tuple, list)):
                assert p.parameters['Mock Classifier']['dummy_parameter'] in parameters
            else:
                assert p.parameters['Mock Classifier']['dummy_parameter'] == parameter
Example #5
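# Checks that pipeline-level parameters (gap, max_delay) reach every pipeline in every batch, including the ensemble's input pipelines when ensembling is enabled.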
def test_iterative_algorithm_passes_pipeline_params(mock_stack, ensembling_value, dummy_binary_pipeline_classes):
    dummy_binary_pipeline_classes = dummy_binary_pipeline_classes()
    algo = IterativeAlgorithm(allowed_pipelines=dummy_binary_pipeline_classes, ensembling=ensembling_value,
                              pipeline_params={'pipeline': {"gap": 2, "max_delay": 10}})

    next_batch = algo.next_batch()
    assert all([p.parameters['pipeline'] == {"gap": 2, "max_delay": 10} for p in next_batch])

    # the "best" score will be the 1st dummy pipeline
    scores = np.arange(0, len(next_batch))
    for score, pipeline in zip(scores, next_batch):
        algo.add_result(score, pipeline)

    for i in range(1, 5):
        for _ in range(len(dummy_binary_pipeline_classes)):
            next_batch = algo.next_batch()
            assert all([p.parameters['pipeline'] == {"gap": 2, "max_delay": 10} for p in next_batch])
            scores = -np.arange(0, len(next_batch))
            for score, pipeline in zip(scores, next_batch):
                algo.add_result(score, pipeline)

        if ensembling_value:
            next_batch = algo.next_batch()
            input_pipelines = next_batch[0].parameters['Stacked Ensemble Classifier']['input_pipelines']
            assert all([pl.parameters['pipeline'] == {"gap": 2, "max_delay": 10} for pl in input_pipelines])
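Example #6
# Checks pipeline/batch counters, default parameters in the first batch, and parameter variation in later batches when only one pipeline is allowed.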
def test_iterative_algorithm_one_allowed_pipeline(
        ensembling_value, logistic_regression_binary_pipeline_class):
    # Checks that when len(allowed_pipelines) == 1, ensembling is not run, even if set to True
    algo = IterativeAlgorithm(
        allowed_pipelines=[logistic_regression_binary_pipeline_class],
        ensembling=ensembling_value)
    assert algo.pipeline_number == 0
    assert algo.batch_number == 0
    assert algo.allowed_pipelines == [
        logistic_regression_binary_pipeline_class
    ]

    # initial batch contains one of each pipeline, with default parameters
    next_batch = algo.next_batch()
    assert len(next_batch) == 1
    assert [p.__class__ for p in next_batch
            ] == [logistic_regression_binary_pipeline_class] * len(next_batch)
    assert algo.pipeline_number == 1
    assert algo.batch_number == 1
    assert all(
        [p.parameters == p.__class__.default_parameters for p in next_batch])
    # the "best" score will be the 1st dummy pipeline
    scores = np.arange(0, len(next_batch))
    for score, pipeline in zip(scores, next_batch):
        algo.add_result(score, pipeline, {"id": algo.pipeline_number})

    # subsequent batches contain pipelines_per_batch copies of one pipeline, moving from best to worst from the first batch
    last_batch_number = algo.batch_number
    last_pipeline_number = algo.pipeline_number
    all_parameters = []
    for i in range(1, 5):
        next_batch = algo.next_batch()
        assert len(next_batch) == algo.pipelines_per_batch
        assert all((p.random_seed == algo.random_seed) for p in next_batch)
        assert [
            p.__class__ for p in next_batch
        ] == [logistic_regression_binary_pipeline_class] * len(next_batch)
        assert algo.pipeline_number == last_pipeline_number + len(next_batch)
        last_pipeline_number = algo.pipeline_number
        assert algo.batch_number == last_batch_number + 1
        last_batch_number = algo.batch_number
        all_parameters.extend([p.parameters for p in next_batch])
        scores = -np.arange(0, len(next_batch))
        for score, pipeline in zip(scores, next_batch):
            algo.add_result(score, pipeline, {"id": algo.pipeline_number})

        assert any([
            p != logistic_regression_binary_pipeline_class.default_parameters
            for p in all_parameters
        ])
Example #7
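# Checks that n_jobs is forwarded to the Stacked Ensemble Regressor once an ensembling batch is produced.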
def test_iterative_algorithm_stacked_ensemble_n_jobs_regression(n_jobs, linear_regression_pipeline_class):
    algo = IterativeAlgorithm(allowed_pipelines=[linear_regression_pipeline_class, linear_regression_pipeline_class], ensembling=True, n_jobs=n_jobs)
    next_batch = algo.next_batch()
    seen_ensemble = False
    scores = range(0, len(next_batch))
    for score, pipeline in zip(scores, next_batch):
        algo.add_result(score, pipeline)
    for i in range(5):
        next_batch = algo.next_batch()
        for pipeline in next_batch:
            if isinstance(pipeline.estimator, StackedEnsembleRegressor):
                seen_ensemble = True
                assert pipeline.parameters['Stacked Ensemble Regressor']['n_jobs'] == n_jobs
    assert seen_ensemble
Example #8
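# Checks the empty case: no allowed pipelines yields an empty first batch, and requesting another batch without any reported results raises AutoMLAlgorithmException.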
def test_iterative_algorithm_empty(dummy_binary_pipeline_classes):
    algo = IterativeAlgorithm()
    assert algo.pipeline_number == 0
    assert algo.batch_number == 0
    assert algo.allowed_pipelines == []

    next_batch = algo.next_batch()
    assert [p.__class__ for p in next_batch] == []
    assert algo.pipeline_number == 0
    assert algo.batch_number == 1

    with pytest.raises(AutoMLAlgorithmException, match='No results were reported from the first batch'):
        algo.next_batch()
    assert algo.batch_number == 1
    assert algo.pipeline_number == 0
Example #9
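# Checks that extra keys in pipeline_params (here fake_param) are passed through to the component's parameters.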
def test_iterative_algorithm_pipeline_params_kwargs(dummy_binary_pipeline_classes):
    dummy_binary_pipeline_classes = dummy_binary_pipeline_classes()
    algo = IterativeAlgorithm(allowed_pipelines=dummy_binary_pipeline_classes,
                              pipeline_params={'Mock Classifier': {'dummy_parameter': "dummy", 'fake_param': 'fake'}},
                              random_state=0)

    next_batch = algo.next_batch()
    assert all([p.parameters['Mock Classifier'] == {"dummy_parameter": "dummy", "n_jobs": -1, "fake_param": "fake"} for p in next_batch])
Example #10
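# Checks that n_jobs=2 is propagated to the Mock Classifier's parameters in every tuned batch.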
def test_iterative_algorithm_passes_njobs(dummy_binary_pipeline_classes):
    dummy_binary_pipeline_classes = dummy_binary_pipeline_classes()
    algo = IterativeAlgorithm(allowed_pipelines=dummy_binary_pipeline_classes, n_jobs=2, ensembling=False)
    next_batch = algo.next_batch()

    # the "best" score will be the 1st dummy pipeline
    scores = np.arange(0, len(next_batch))
    for score, pipeline in zip(scores, next_batch):
        algo.add_result(score, pipeline)

    for i in range(1, 3):
        for _ in range(len(dummy_binary_pipeline_classes)):
            next_batch = algo.next_batch()
            assert all([p.parameters['Mock Classifier']['n_jobs'] == 2 for p in next_batch])
            scores = -np.arange(0, len(next_batch))
            for score, pipeline in zip(scores, next_batch):
                algo.add_result(score, pipeline)
Example #11
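# Checks that text_columns is forwarded to the Text Featurization Component and stored on the instantiated TextFeaturizer.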
def test_iterative_algorithm_instantiates_text(dummy_classifier_estimator_class):
    class MockTextClassificationPipeline(BinaryClassificationPipeline):
        component_graph = [TextFeaturizer, dummy_classifier_estimator_class]

    algo = IterativeAlgorithm(allowed_pipelines=[MockTextClassificationPipeline], text_columns=['text_col_1', 'text_col_2'])
    pipeline = algo.next_batch()[0]
    expected_params = {'text_columns': ['text_col_1', 'text_col_2']}
    assert pipeline.parameters['Text Featurization Component'] == expected_params
    assert isinstance(pipeline[0], TextFeaturizer)
    assert pipeline[0]._all_text_columns == ['text_col_1', 'text_col_2']
Example #12
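# Binary counterpart of Example #7: checks that n_jobs is forwarded to the Stacked Ensemble Classifier.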
def test_iterative_algorithm_stacked_ensemble_n_jobs_binary(
        n_jobs, dummy_binary_pipeline_classes):
    dummy_binary_pipeline_classes = dummy_binary_pipeline_classes()
    algo = IterativeAlgorithm(allowed_pipelines=dummy_binary_pipeline_classes,
                              ensembling=True,
                              n_jobs=n_jobs)
    next_batch = algo.next_batch()
    seen_ensemble = False
    scores = range(0, len(next_batch))
    for score, pipeline in zip(scores, next_batch):
        algo.add_result(score, pipeline, {"id": algo.pipeline_number})

    for i in range(5):
        next_batch = algo.next_batch()
        for pipeline in next_batch:
            if isinstance(pipeline.estimator, StackedEnsembleClassifier):
                seen_ensemble = True
                assert pipeline.parameters['Stacked Ensemble Classifier'][
                    'n_jobs'] == n_jobs
    assert seen_ensemble
Example #13
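# Checks the estimator ordering of the first batch for each problem type; the XGBoost/LightGBM/CatBoost estimators appear only when the optional dependencies are installed.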
def test_iterative_algorithm_first_batch_order(problem_type, X_y_binary,
                                               has_minimal_dependencies):
    X, y = X_y_binary
    estimators = get_estimators(problem_type, None)
    pipelines = [make_pipeline(X, y, e, problem_type) for e in estimators]
    algo = IterativeAlgorithm(allowed_pipelines=pipelines)

    # initial batch contains one of each pipeline, with default parameters
    next_batch = algo.next_batch()
    estimators_in_first_batch = [p.estimator.name for p in next_batch]

    if problem_type == ProblemTypes.REGRESSION:
        final_estimators = [
            'XGBoost Regressor', 'LightGBM Regressor', 'CatBoost Regressor'
        ]
    else:
        final_estimators = [
            'XGBoost Classifier', 'LightGBM Classifier', 'CatBoost Classifier'
        ]
    if has_minimal_dependencies:
        final_estimators = []
    if problem_type == ProblemTypes.REGRESSION:
        assert estimators_in_first_batch == [
            'Linear Regressor', 'Elastic Net Regressor',
            'Decision Tree Regressor', 'Extra Trees Regressor',
            'Random Forest Regressor'
        ] + final_estimators
    if problem_type == ProblemTypes.BINARY:
        assert estimators_in_first_batch == [
            'Elastic Net Classifier', 'Logistic Regression Classifier',
            'Decision Tree Classifier', 'Extra Trees Classifier',
            'Random Forest Classifier'
        ] + final_estimators
    if problem_type == ProblemTypes.MULTICLASS:
        assert estimators_in_first_batch == [
            'Elastic Net Classifier', 'Logistic Regression Classifier',
            'Decision Tree Classifier', 'Extra Trees Classifier',
            'Random Forest Classifier'
        ] + final_estimators
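The examples above all drive IterativeAlgorithm through the same propose/score/report cycle. The snippet below is a minimal sketch of that cycle, not code taken from evalml: the import path is an assumption (it has moved between evalml versions), and run_iterative_search and score_pipeline are hypothetical names introduced for illustration. Scores handed to add_result are values to minimize, matching how AutoMLSearch negates greater-is-better objectives in _post_evaluation_callback (Example #14).

# Minimal sketch of the loop the tests above exercise; illustrative only.
from evalml.automl.automl_algorithm import IterativeAlgorithm  # assumed import path; may differ by evalml version


def run_iterative_search(pipelines, score_pipeline, n_batches=3):
    """Hypothetical driver: propose a batch, score it, and report results so the next batch can be tuned."""
    algo = IterativeAlgorithm(allowed_pipelines=pipelines, random_seed=0)
    for _ in range(n_batches):
        batch = algo.next_batch()              # one batch of instantiated pipelines
        for pipeline in batch:
            score = score_pipeline(pipeline)   # caller-supplied scorer; lower is better
            algo.add_result(score, pipeline, {"id": algo.pipeline_number})
    return algo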
Example #14
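# AutoMLSearch wires these settings into an IterativeAlgorithm (see __init__ below) and drives the batch-by-batch loop in search().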
class AutoMLSearch:
    """Automated Pipeline search."""
    _MAX_NAME_LEN = 40

    # Necessary for "Plotting" documentation, since Sphinx does not work well with instance attributes.
    plot = PipelineSearchPlots

    def __init__(self,
                 X_train=None,
                 y_train=None,
                 problem_type=None,
                 objective='auto',
                 max_iterations=None,
                 max_time=None,
                 patience=None,
                 tolerance=None,
                 data_splitter=None,
                 allowed_pipelines=None,
                 allowed_model_families=None,
                 start_iteration_callback=None,
                 add_result_callback=None,
                 error_callback=None,
                 additional_objectives=None,
                 random_state=None,
                 random_seed=0,
                 n_jobs=-1,
                 tuner_class=None,
                 optimize_thresholds=False,
                 ensembling=False,
                 max_batches=None,
                 problem_configuration=None,
                 train_best_pipeline=True,
                 pipeline_parameters=None,
                 _pipelines_per_batch=5):
        """Automated pipeline search

        Arguments:
            X_train (pd.DataFrame, ww.DataTable): The input training data of shape [n_samples, n_features]. Required.

            y_train (pd.Series, ww.DataColumn): The target training data of length [n_samples]. Required for supervised learning tasks.

            problem_type (str or ProblemTypes): type of supervised learning problem. See evalml.problem_types.ProblemType.all_problem_types for a full list.

            objective (str, ObjectiveBase): The objective to optimize for. Used to propose and rank pipelines, but not for optimizing each pipeline during fit-time.
                When set to 'auto', chooses:

                - LogLossBinary for binary classification problems,
                - LogLossMulticlass for multiclass classification problems, and
                - R2 for regression problems.

            max_iterations (int): Maximum number of iterations to search. If neither max_iterations nor
                max_time is set, then max_iterations will default to 5.

            max_time (int, str): Maximum time to search for pipelines.
                This will not start a new pipeline search after the duration
                has elapsed. If it is an integer, then the time will be in seconds.
                For strings, time can be specified as seconds, minutes, or hours.

            patience (int): Number of iterations without improvement to stop search early. Must be positive.
                If None, early stopping is disabled. Defaults to None.

            tolerance (float): Minimum percentage difference to qualify as score improvement for early stopping.
                Only applicable if patience is not None. Defaults to None.

            allowed_pipelines (list(class)): A list of PipelineBase subclasses indicating the pipelines allowed in the search.
                The default of None indicates all pipelines for this problem type are allowed. Setting this field will cause
                allowed_model_families to be ignored.

            allowed_model_families (list(str, ModelFamily)): The model families to search. The default of None searches over all
                model families. Run evalml.pipelines.components.utils.allowed_model_families("binary") to see options. Change `binary`
                to `multiclass` or `regression` depending on the problem type. Note that if allowed_pipelines is provided,
                this parameter will be ignored.

            data_splitter (sklearn.model_selection.BaseCrossValidator): Data splitting method to use. Defaults to StratifiedKFold.

            tuner_class: The tuner class to use. Defaults to SKOptTuner.

            start_iteration_callback (callable): Function called before each pipeline training iteration.
                Callback function takes three positional parameters: The pipeline class, the pipeline parameters, and the AutoMLSearch object.

            add_result_callback (callable): Function called after each pipeline training iteration.
                Callback function takes three positional parameters: A dictionary containing the training results for the new pipeline, an untrained_pipeline containing the parameters used during training, and the AutoMLSearch object.

            error_callback (callable): Function called when `search()` errors and raises an Exception.
                Callback function takes three positional parameters: the Exception raised, the traceback, and the AutoMLSearch object.
                Must also accept kwargs, so AutoMLSearch is able to pass along other appropriate parameters by default.
                Defaults to None, which will call `log_error_callback`.

            additional_objectives (list): Custom set of objectives to score on.
                Will override default objectives for problem type if not empty.

            random_state (int): Deprecated - use random_seed instead.

            random_seed (int): Seed for the random number generator. Defaults to 0.

            n_jobs (int or None): Integer describing the level of parallelism used for pipelines.
                None and 1 are equivalent. If set to -1, all CPUs are used. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.

            ensembling (boolean): If True, runs ensembling in a separate batch after every allowed pipeline class has been iterated over.
                If the number of unique pipelines to search over per batch is one, ensembling will not run. Defaults to False.

            max_batches (int): The maximum number of batches of pipelines to search. Parameters max_time and
                max_iterations have precedence over stopping the search.

            problem_configuration (dict, None): Additional parameters needed to configure the search. For example,
                in time series problems, values should be passed in for the gap and max_delay variables.

            train_best_pipeline (boolean): Whether or not to train the best pipeline before returning it. Defaults to True.

            _pipelines_per_batch (int): The number of pipelines to train for every batch after the first one.
                The first batch will train a baseline pipeline + one of each pipeline family allowed in the search.
        """
        if X_train is None:
            raise ValueError(
                'Must specify training data as a 2d array using the X_train argument'
            )
        if y_train is None:
            raise ValueError(
                'Must specify training data target values as a 1d vector using the y_train argument'
            )
        try:
            self.problem_type = handle_problem_types(problem_type)
        except ValueError:
            raise ValueError(
                'choose one of (binary, multiclass, regression) as problem_type'
            )

        self.tuner_class = tuner_class or SKOptTuner
        self.start_iteration_callback = start_iteration_callback
        self.add_result_callback = add_result_callback
        self.error_callback = error_callback or log_error_callback
        self.data_splitter = data_splitter
        self.optimize_thresholds = optimize_thresholds
        self.ensembling = ensembling
        if objective == 'auto':
            objective = get_default_primary_search_objective(
                self.problem_type.value)
        objective = get_objective(objective, return_instance=False)
        self.objective = self._validate_objective(objective)
        if self.data_splitter is not None and not issubclass(
                self.data_splitter.__class__, BaseCrossValidator):
            raise ValueError("Not a valid data splitter")
        if not objective.is_defined_for_problem_type(self.problem_type):
            raise ValueError(
                "Given objective {} is not compatible with a {} problem.".
                format(self.objective.name, self.problem_type.value))
        if additional_objectives is None:
            additional_objectives = get_core_objectives(self.problem_type)
            # if our main objective is part of default set of objectives for problem_type, remove it
            existing_main_objective = next(
                (obj for obj in additional_objectives
                 if obj.name == self.objective.name), None)
            if existing_main_objective is not None:
                additional_objectives.remove(existing_main_objective)
        else:
            additional_objectives = [
                get_objective(o) for o in additional_objectives
            ]
        additional_objectives = [
            self._validate_objective(obj) for obj in additional_objectives
        ]
        self.additional_objectives = additional_objectives
        self.objective_name_to_class = {
            o.name: o
            for o in [self.objective] + self.additional_objectives
        }

        if not isinstance(max_time, (int, float, str, type(None))):
            raise TypeError(
                f"Parameter max_time must be a float, int, string or None. Received {type(max_time)} with value {str(max_time)}.."
            )
        if isinstance(max_time, (int, float)) and max_time < 0:
            raise ValueError(
                f"Parameter max_time must be None or non-negative. Received {max_time}."
            )
        if max_batches is not None and max_batches < 0:
            raise ValueError(
                f"Parameter max_batches must be None or non-negative. Received {max_batches}."
            )
        if max_iterations is not None and max_iterations < 0:
            raise ValueError(
                f"Parameter max_iterations must be None or non-negative. Received {max_iterations}."
            )
        self.max_time = convert_to_seconds(max_time) if isinstance(
            max_time, str) else max_time
        self.max_iterations = max_iterations
        self.max_batches = max_batches
        self._pipelines_per_batch = _pipelines_per_batch
        if not self.max_iterations and not self.max_time and not self.max_batches:
            self.max_batches = 1
            logger.info("Using default limit of max_batches=1.\n")

        if patience and (not isinstance(patience, int) or patience < 0):
            raise ValueError(
                "patience value must be a positive integer. Received {} instead"
                .format(patience))

        if tolerance and (tolerance > 1.0 or tolerance < 0.0):
            raise ValueError(
                "tolerance value must be a float between 0.0 and 1.0 inclusive. Received {} instead"
                .format(tolerance))

        self.patience = patience
        self.tolerance = tolerance or 0.0

        self._results = {
            'pipeline_results': {},
            'search_order': [],
            'errors': []
        }
        self.random_seed = deprecate_arg("random_state", "random_seed",
                                         random_state, random_seed)
        self.n_jobs = n_jobs

        self.plot = None
        try:
            self.plot = PipelineSearchPlots(self)
        except ImportError:
            logger.warning(
                "Unable to import plotly; skipping pipeline search plotting\n")

        self._data_check_results = None

        self.allowed_pipelines = allowed_pipelines
        self.allowed_model_families = allowed_model_families
        self._automl_algorithm = None
        self._start = 0.0
        self._baseline_cv_scores = {}
        self.show_batch_output = False

        self._validate_problem_type()
        self.problem_configuration = self._validate_problem_configuration(
            problem_configuration)
        self._train_best_pipeline = train_best_pipeline
        self._best_pipeline = None
        self._searched = False

        self.X_train = infer_feature_types(X_train)
        self.y_train = infer_feature_types(y_train)

        default_data_splitter = make_data_splitter(
            self.X_train,
            self.y_train,
            self.problem_type,
            self.problem_configuration,
            n_splits=3,
            shuffle=True,
            random_seed=self.random_seed)
        self.data_splitter = self.data_splitter or default_data_splitter
        self.pipeline_parameters = pipeline_parameters if pipeline_parameters is not None else {}
        self.search_iteration_plot = None
        self._interrupted = False

        self._engine = SequentialEngine(
            self.X_train,
            self.y_train,
            self,
            should_continue_callback=self._should_continue,
            pre_evaluation_callback=self._pre_evaluation_callback,
            post_evaluation_callback=self._post_evaluation_callback)

        if self.allowed_pipelines is None:
            logger.info("Generating pipelines to search over...")
            allowed_estimators = get_estimators(self.problem_type,
                                                self.allowed_model_families)
            logger.debug(
                f"allowed_estimators set to {[estimator.name for estimator in allowed_estimators]}"
            )
            self.allowed_pipelines = [
                make_pipeline(self.X_train,
                              self.y_train,
                              estimator,
                              self.problem_type,
                              custom_hyperparameters=self.pipeline_parameters)
                for estimator in allowed_estimators
            ]

        if self.allowed_pipelines == []:
            raise ValueError("No allowed pipelines to search")

        run_ensembling = self.ensembling
        if run_ensembling and len(self.allowed_pipelines) == 1:
            logger.warning(
                "Ensembling is set to True, but the number of unique pipelines is one, so ensembling will not run."
            )
            run_ensembling = False

        if run_ensembling and self.max_iterations is not None:
            # Baseline (1) + first batch (one per allowed pipeline) + one full round of per-pipeline batches + the ensemble itself (1)
            first_ensembling_iteration = (
                1 + len(self.allowed_pipelines) +
                len(self.allowed_pipelines) * self._pipelines_per_batch + 1)
            if self.max_iterations < first_ensembling_iteration:
                run_ensembling = False
                logger.warning(
                    f"Ensembling is set to True, but max_iterations is too small, so ensembling will not run. Set max_iterations >= {first_ensembling_iteration} to run ensembling."
                )
            else:
                logger.info(
                    f"Ensembling will run at the {first_ensembling_iteration} iteration and every {len(self.allowed_pipelines) * self._pipelines_per_batch} iterations after that."
                )

        if self.max_batches and self.max_iterations is None:
            self.show_batch_output = True
            if run_ensembling:
                ensemble_nth_batch = len(self.allowed_pipelines) + 1
                num_ensemble_batches = (self.max_batches -
                                        1) // ensemble_nth_batch
                if num_ensemble_batches == 0:
                    logger.warning(
                        f"Ensembling is set to True, but max_batches is too small, so ensembling will not run. Set max_batches >= {ensemble_nth_batch + 1} to run ensembling."
                    )
                else:
                    logger.info(
                        f"Ensembling will run every {ensemble_nth_batch} batches."
                    )

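                # Total iterations with ensembling: 1 baseline + one pipeline per allowed pipeline in the first
                # batch + pipelines_per_batch for each regular batch + one iteration per ensembling batch.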
                self.max_iterations = (
                    1 + len(self.allowed_pipelines) +
                    self._pipelines_per_batch *
                    (self.max_batches - 1 - num_ensemble_batches) +
                    num_ensemble_batches)
            else:
                self.max_iterations = 1 + len(
                    self.allowed_pipelines) + (self._pipelines_per_batch *
                                               (self.max_batches - 1))
        self.allowed_model_families = list(
            set([p.model_family for p in (self.allowed_pipelines)]))

        logger.debug(
            f"allowed_pipelines set to {[pipeline.name for pipeline in self.allowed_pipelines]}"
        )
        logger.debug(
            f"allowed_model_families set to {self.allowed_model_families}")
        if len(self.problem_configuration):
            pipeline_params = {
                **{
                    'pipeline': self.problem_configuration
                },
                **self.pipeline_parameters
            }
        else:
            pipeline_params = self.pipeline_parameters

        self._automl_algorithm = IterativeAlgorithm(
            max_iterations=self.max_iterations,
            allowed_pipelines=self.allowed_pipelines,
            tuner_class=self.tuner_class,
            random_seed=self.random_seed,
            n_jobs=self.n_jobs,
            number_features=self.X_train.shape[1],
            pipelines_per_batch=self._pipelines_per_batch,
            ensembling=run_ensembling,
            pipeline_params=pipeline_params)

    def _pre_evaluation_callback(self, pipeline):
        if self.start_iteration_callback:
            self.start_iteration_callback(pipeline.__class__,
                                          pipeline.parameters, self)
        desc = f"{pipeline.name}"
        if len(desc) > AutoMLSearch._MAX_NAME_LEN:
            desc = desc[:AutoMLSearch._MAX_NAME_LEN - 3] + "..."
        desc = desc.ljust(AutoMLSearch._MAX_NAME_LEN)
        batch_number = 1
        if self._automl_algorithm is not None and self._automl_algorithm.batch_number > 0:
            batch_number = self._automl_algorithm.batch_number
        update_pipeline(logger, desc,
                        len(self._results['pipeline_results']) + 1,
                        self.max_iterations, self._start, batch_number,
                        self.show_batch_output)

    def _validate_objective(self, objective):
        non_core_objectives = get_non_core_objectives()
        if isinstance(objective, type):
            if objective in non_core_objectives:
                raise ValueError(
                    f"{objective.name.lower()} is not allowed in AutoML! "
                    "Use evalml.objectives.utils.get_core_objective_names() "
                    "to get all objective names allowed in automl.")
            return objective()
        return objective

    @property
    def data_check_results(self):
        """If there are data checks, return any error messages that are found"""
        return self._data_check_results

    def __str__(self):
        def _print_list(obj_list):
            lines = sorted(['\t{}'.format(o.name) for o in obj_list])
            return '\n'.join(lines)

        def _get_funct_name(function):
            if callable(function):
                return function.__name__
            else:
                return None

        search_desc = (
            f"{handle_problem_types(self.problem_type).name} Search\n\n"
            f"Parameters: \n{'='*20}\n"
            f"Objective: {get_objective(self.objective).name}\n"
            f"Max Time: {self.max_time}\n"
            f"Max Iterations: {self.max_iterations}\n"
            f"Max Batches: {self.max_batches}\n"
            f"Allowed Pipelines: \n{_print_list(self.allowed_pipelines or [])}\n"
            f"Patience: {self.patience}\n"
            f"Tolerance: {self.tolerance}\n"
            f"Data Splitting: {self.data_splitter}\n"
            f"Tuner: {self.tuner_class.__name__}\n"
            f"Start Iteration Callback: {_get_funct_name(self.start_iteration_callback)}\n"
            f"Add Result Callback: {_get_funct_name(self.add_result_callback)}\n"
            f"Additional Objectives: {_print_list(self.additional_objectives or [])}\n"
            f"Random Seed: {self.random_seed}\n"
            f"n_jobs: {self.n_jobs}\n"
            f"Optimize Thresholds: {self.optimize_thresholds}\n")

        rankings_desc = ""
        if not self.rankings.empty:
            rankings_str = self.rankings.drop(['parameters'],
                                              axis='columns').to_string()
            rankings_desc = f"\nSearch Results: \n{'='*20}\n{rankings_str}"

        return search_desc + rankings_desc

    def _validate_problem_configuration(self, problem_configuration=None):
        if self.problem_type in [ProblemTypes.TIME_SERIES_REGRESSION]:
            required_parameters = {'gap', 'max_delay'}
            if not problem_configuration or not all(
                    p in problem_configuration for p in required_parameters):
                raise ValueError(
                    "user_parameters must be a dict containing values for at least the gap and max_delay "
                    f"parameters. Received {problem_configuration}.")
        return problem_configuration or {}

    def _validate_data_checks(self, data_checks):
        """Validate data_checks parameter.

        Arguments:
            data_checks (DataChecks, list(Datacheck), str, None): Input to validate. If not of the right type,
                raise an exception.

        Returns:
            An instance of DataChecks used to perform checks before search.
        """
        if isinstance(data_checks, DataChecks):
            return data_checks
        elif isinstance(data_checks, list):
            return AutoMLDataChecks(data_checks)
        elif isinstance(data_checks, str):
            if data_checks == "auto":
                return DefaultDataChecks(
                    problem_type=self.problem_type,
                    objective=self.objective,
                    n_splits=self.data_splitter.get_n_splits())
            elif data_checks == "disabled":
                return EmptyDataChecks()
            else:
                raise ValueError(
                    "If data_checks is a string, it must be either 'auto' or 'disabled'. "
                    f"Received '{data_checks}'.")
        elif data_checks is None:
            return EmptyDataChecks()
        else:
            return DataChecks(data_checks)

    def _handle_keyboard_interrupt(self):
        """Presents a prompt to the user asking if they want to stop the search.

        Returns:
            bool: If True, search should terminate early
        """
        leading_char = "\n"
        start_of_loop = time.time()
        while True:
            choice = input(
                leading_char +
                "Do you really want to exit search (y/n)? ").strip().lower()
            if choice == "y":
                logger.info("Exiting AutoMLSearch.")
                return True
            elif choice == "n":
                # So that the time in this loop does not count towards the time budget (if set)
                time_in_loop = time.time() - start_of_loop
                self._start += time_in_loop
                return False
            else:
                leading_char = ""

    def search(self, data_checks="auto", show_iteration_plot=True):
        """Find the best pipeline for the data set.

        Arguments:
            data_checks (DataChecks, list(Datacheck), str, None): A collection of data checks to run before
                automl search. If data checks produce any errors, an exception will be thrown before the
                search begins. If "disabled" or None, no data checks will be done.
                If set to "auto", DefaultDataChecks will be done. Default value is set to "auto".

            feature_types (list, optional): list of feature types, either numerical or categorical.
                Categorical features will automatically be encoded

            show_iteration_plot (boolean, True): Shows an iteration vs. score plot in Jupyter notebook.
                Disabled by default in non-Jupyter environments.
        """
        if self._searched:
            logger.info(
                "AutoMLSearch.search() has already been run and will not run again on the same instance. Re-initialize AutoMLSearch to search again."
            )
            return

        # don't show iteration plot outside of a jupyter notebook
        if show_iteration_plot:
            try:
                get_ipython
            except NameError:
                show_iteration_plot = False

        data_checks = self._validate_data_checks(data_checks)
        self._data_check_results = data_checks.validate(
            _convert_woodwork_types_wrapper(self.X_train.to_dataframe()),
            _convert_woodwork_types_wrapper(self.y_train.to_series()))
        for result in self._data_check_results["warnings"]:
            logger.warning(result["message"])
        for result in self._data_check_results["errors"]:
            logger.error(result["message"])
        if self._data_check_results["errors"]:
            raise ValueError(
                "Data checks raised some warnings and/or errors. Please see `self.data_check_results` for more information or pass data_checks='disabled' to search() to disable data checking."
            )

        log_title(logger, "Beginning pipeline search")
        logger.info("Optimizing for %s. " % self.objective.name)
        logger.info("{} score is better.\n".format(
            'Greater' if self.objective.greater_is_better else 'Lower'))
        logger.info(
            f"Using {self._engine.__class__.__name__} to train and score pipelines."
        )

        if self.max_batches is not None:
            logger.info(
                f"Searching up to {self.max_batches} batches for a total of {self.max_iterations} pipelines. "
            )
        elif self.max_iterations is not None:
            logger.info("Searching up to %s pipelines. " % self.max_iterations)
        if self.max_time is not None:
            logger.info(
                "Will stop searching for new pipelines after %d seconds.\n" %
                self.max_time)
        logger.info(
            "Allowed model families: %s\n" %
            ", ".join([model.value for model in self.allowed_model_families]))
        self.search_iteration_plot = None
        if self.plot:
            self.search_iteration_plot = self.plot.search_iteration_plot(
                interactive_plot=show_iteration_plot)

        self._start = time.time()

        try:
            self._add_baseline_pipelines()
        except KeyboardInterrupt:
            if self._handle_keyboard_interrupt():
                self._interrupted = True

        current_batch_pipelines = []
        current_batch_pipeline_scores = []
        new_pipeline_ids = []
        loop_interrupted = False
        while self._should_continue():
            try:
                if not loop_interrupted:
                    current_batch_pipelines = self._automl_algorithm.next_batch(
                    )
            except StopIteration:
                logger.info('AutoML Algorithm out of recommendations, ending')
                break
            try:
                new_pipeline_ids = self._engine.evaluate_batch(
                    current_batch_pipelines)
                loop_interrupted = False
            except KeyboardInterrupt:
                loop_interrupted = True
                if self._handle_keyboard_interrupt():
                    break
            full_rankings = self.full_rankings
            current_batch_idx = full_rankings['id'].isin(new_pipeline_ids)
            current_batch_pipeline_scores = full_rankings[current_batch_idx][
                'score']
            if len(current_batch_pipeline_scores
                   ) and current_batch_pipeline_scores.isna().all():
                raise AutoMLSearchException(
                    f"All pipelines in the current AutoML batch produced a score of np.nan on the primary objective {self.objective}."
                )

        self.search_duration = time.time() - self._start
        elapsed_time = time_elapsed(self._start)
        desc = f"\nSearch finished after {elapsed_time}"
        desc = desc.ljust(self._MAX_NAME_LEN)
        logger.info(desc)

        self._find_best_pipeline()
        if self._best_pipeline is not None:
            best_pipeline = self.rankings.iloc[0]
            best_pipeline_name = best_pipeline["pipeline_name"]
            logger.info(f"Best pipeline: {best_pipeline_name}")
            logger.info(
                f"Best pipeline {self.objective.name}: {best_pipeline['score']:3f}"
            )
        self._searched = True

    def _find_best_pipeline(self):
        """Finds the best pipeline in the rankings
        If self._best_pipeline already exists, check to make sure it is different from the current best pipeline before training and thresholding"""
        if len(self.rankings) == 0:
            return
        best_pipeline = self.rankings.iloc[0]
        if not (self._best_pipeline and self._best_pipeline
                == self.get_pipeline(best_pipeline['id'])):
            self._best_pipeline = self.get_pipeline(best_pipeline['id'])
            if self._train_best_pipeline:
                X_threshold_tuning = None
                y_threshold_tuning = None
                X_train, y_train = self.X_train, self.y_train
                if is_binary(self.problem_type) and self.objective.is_defined_for_problem_type(self.problem_type) \
                   and self.optimize_thresholds and self.objective.can_optimize_threshold:
                    X_train, X_threshold_tuning, y_train, y_threshold_tuning = split_data(
                        X_train,
                        y_train,
                        self.problem_type,
                        test_size=0.2,
                        random_seed=self.random_seed)
                self._best_pipeline.fit(X_train, y_train)
                tune_binary_threshold(self._best_pipeline, self.objective,
                                      self.problem_type, X_threshold_tuning,
                                      y_threshold_tuning)

    def _num_pipelines(self):
        """Return the number of pipeline evaluations which have been made

        Returns:
            int: the number of pipeline evaluations made in the search
        """
        return len(self._results['pipeline_results'])

    def _should_continue(self):
        """Given the original stopping criterion and current state, should the search continue?

        Returns:
            bool: True if yes, False if no.
        """
        if self._interrupted:
            return False

        # for add_to_rankings
        if self._searched:
            return True

        # Run at least one pipeline for every search
        num_pipelines = self._num_pipelines()
        if num_pipelines == 0:
            return True

        # check max_time and max_iterations
        elapsed = time.time() - self._start
        if self.max_time and elapsed >= self.max_time:
            return False
        elif self.max_iterations and num_pipelines >= self.max_iterations:
            return False

        # check for early stopping
        if self.patience is None or self.tolerance is None:
            return True

        first_id = self._results['search_order'][0]
        best_score = self._results['pipeline_results'][first_id]['score']
        num_without_improvement = 0
        for id in self._results['search_order'][1:]:
            curr_score = self._results['pipeline_results'][id]['score']
            significant_change = abs(
                (curr_score - best_score) / best_score) > self.tolerance
            score_improved = curr_score > best_score if self.objective.greater_is_better else curr_score < best_score
            if score_improved and significant_change:
                best_score = curr_score
                num_without_improvement = 0
            else:
                num_without_improvement += 1
            if num_without_improvement >= self.patience:
                logger.info(
                    "\n\n{} iterations without improvement. Stopping search early..."
                    .format(self.patience))
                return False
        return True

    def _validate_problem_type(self):
        for obj in self.additional_objectives:
            if not obj.is_defined_for_problem_type(self.problem_type):
                raise ValueError(
                    "Additional objective {} is not compatible with a {} problem."
                    .format(obj.name, self.problem_type.value))

        for pipeline in self.allowed_pipelines or []:
            if pipeline.problem_type != self.problem_type:
                raise ValueError(
                    "Given pipeline {} is not compatible with problem_type {}."
                    .format(pipeline.name, self.problem_type.value))

    def _add_baseline_pipelines(self):
        """Fits a baseline pipeline to the data.

        This is the first pipeline fit during search.
        """
        if self.problem_type == ProblemTypes.BINARY:
            baseline = ModeBaselineBinaryPipeline(parameters={})
        elif self.problem_type == ProblemTypes.MULTICLASS:
            baseline = ModeBaselineMulticlassPipeline(parameters={})
        elif self.problem_type == ProblemTypes.REGRESSION:
            baseline = MeanBaselineRegressionPipeline(parameters={})
        else:
            pipeline_class = {
                ProblemTypes.TIME_SERIES_REGRESSION:
                TimeSeriesBaselineRegressionPipeline,
                ProblemTypes.TIME_SERIES_MULTICLASS:
                TimeSeriesBaselineMulticlassPipeline,
                ProblemTypes.TIME_SERIES_BINARY:
                TimeSeriesBaselineBinaryPipeline
            }[self.problem_type]
            gap = self.problem_configuration['gap']
            max_delay = self.problem_configuration['max_delay']
            baseline = pipeline_class(
                parameters={
                    "pipeline": {
                        "gap": gap,
                        "max_delay": max_delay
                    },
                    "Time Series Baseline Estimator": {
                        "gap": gap,
                        "max_delay": max_delay
                    }
                })
        self._engine.evaluate_batch([baseline])

    @staticmethod
    def _get_mean_cv_scores_for_all_objectives(cv_data,
                                               objective_name_to_class):
        scores = defaultdict(int)
        n_folds = len(cv_data)
        for fold_data in cv_data:
            for field, value in fold_data['all_objective_scores'].items():
                # The 'all_objective_scores' field contains scores for all objectives
                # but also fields like "# Training" and "# Testing", so we want to exclude them since
                # they are not scores
                if field in objective_name_to_class:
                    scores[field] += value
        return {
            objective: float(score) / n_folds
            for objective, score in scores.items()
        }

    def _post_evaluation_callback(self, pipeline, evaluation_results):
        training_time = evaluation_results['training_time']
        cv_data = evaluation_results['cv_data']
        cv_scores = evaluation_results['cv_scores']
        is_baseline = pipeline.model_family == ModelFamily.BASELINE
        cv_score = cv_scores.mean()

        percent_better_than_baseline = {}
        mean_cv_all_objectives = self._get_mean_cv_scores_for_all_objectives(
            cv_data, self.objective_name_to_class)
        if is_baseline:
            self._baseline_cv_scores = mean_cv_all_objectives
        for obj_name in mean_cv_all_objectives:
            objective_class = self.objective_name_to_class[obj_name]

            # In the event add_to_rankings is called before search, _baseline_cv_scores will be empty, so we
            # will return nan for the base score.
            percent_better = objective_class.calculate_percent_difference(
                mean_cv_all_objectives[obj_name],
                self._baseline_cv_scores.get(obj_name, np.nan))
            percent_better_than_baseline[obj_name] = percent_better

        pipeline_name = pipeline.name
        high_variance_cv_check = HighVarianceCVDataCheck(threshold=0.2)
        high_variance_cv_check_results = high_variance_cv_check.validate(
            pipeline_name=pipeline_name, cv_scores=cv_scores)
        high_variance_cv = False
        if high_variance_cv_check_results["warnings"]:
            logger.warning(
                high_variance_cv_check_results["warnings"][0]["message"])
            high_variance_cv = True

        pipeline_id = len(self._results['pipeline_results'])
        self._results['pipeline_results'][pipeline_id] = {
            "id":
            pipeline_id,
            "pipeline_name":
            pipeline_name,
            "pipeline_class":
            type(pipeline),
            "pipeline_summary":
            pipeline.summary,
            "parameters":
            pipeline.parameters,
            "score":
            cv_score,
            "high_variance_cv":
            high_variance_cv,
            "training_time":
            training_time,
            "cv_data":
            cv_data,
            "percent_better_than_baseline_all_objectives":
            percent_better_than_baseline,
            "percent_better_than_baseline":
            percent_better_than_baseline[self.objective.name],
            "validation_score":
            cv_scores[0]
        }
        self._results['search_order'].append(pipeline_id)

        if not is_baseline:
            score_to_minimize = -cv_score if self.objective.greater_is_better else cv_score
            try:
                self._automl_algorithm.add_result(
                    score_to_minimize, pipeline,
                    self._results['pipeline_results'][pipeline_id])
            except PipelineNotFoundError:
                pass

        if self.search_iteration_plot:
            self.search_iteration_plot.update()

        if self.add_result_callback:
            self.add_result_callback(
                self._results['pipeline_results'][pipeline_id], pipeline, self)
        return pipeline_id

    def get_pipeline(self, pipeline_id):
        """Given the ID of a pipeline training result, returns an untrained instance of the specified pipeline
        initialized with the parameters used to train that pipeline during automl search.

        Arguments:
            pipeline_id (int): pipeline to retrieve

        Returns:
            PipelineBase: untrained pipeline instance associated with the provided ID
        """
        pipeline_results = self.results['pipeline_results'].get(pipeline_id)
        if pipeline_results is None:
            raise PipelineNotFoundError("Pipeline not found in automl results")
        pipeline_class = pipeline_results.get('pipeline_class')
        parameters = pipeline_results.get('parameters')
        if pipeline_class is None or parameters is None:
            raise PipelineNotFoundError(
                "Pipeline class or parameters not found in automl results")
        pipeline = get_generated_pipeline_class(self.problem_type)
        pipeline.custom_hyperparameters = pipeline_class.custom_hyperparameters
        pipeline.custom_name = pipeline_class.name
        pipeline.component_graph = pipeline_class.component_graph
        return pipeline(parameters, random_seed=self.random_seed)

    def describe_pipeline(self, pipeline_id, return_dict=False):
        """Describe a pipeline

        Arguments:
            pipeline_id (int): pipeline to describe
            return_dict (bool): If True, return dictionary of information
                about pipeline. Defaults to False.

        Returns:
            Description of specified pipeline. Includes information such as
            type of pipeline components, problem, training time, cross validation, etc.
        """
        if pipeline_id not in self._results['pipeline_results']:
            raise PipelineNotFoundError("Pipeline not found")

        pipeline = self.get_pipeline(pipeline_id)
        pipeline_results = self._results['pipeline_results'][pipeline_id]

        pipeline.describe()
        log_subtitle(logger, "Training")
        logger.info("Training for {} problems.".format(pipeline.problem_type))

        if self.optimize_thresholds and self.objective.is_defined_for_problem_type(
                ProblemTypes.BINARY) and self.objective.can_optimize_threshold:
            logger.info(
                "Objective to optimize binary classification pipeline thresholds for: {}"
                .format(self.objective))

        logger.info("Total training time (including CV): %.1f seconds" %
                    pipeline_results["training_time"])
        log_subtitle(logger, "Cross Validation", underline="-")

        all_objective_scores = [
            fold["all_objective_scores"]
            for fold in pipeline_results["cv_data"]
        ]
        all_objective_scores = pd.DataFrame(all_objective_scores)

        for c in all_objective_scores:
            if c in ["# Training", "# Validation"]:
                all_objective_scores[c] = all_objective_scores[c].astype(
                    "object")
                continue

            mean = all_objective_scores[c].mean(axis=0)
            std = all_objective_scores[c].std(axis=0)
            all_objective_scores.loc["mean", c] = mean
            all_objective_scores.loc["std", c] = std
            all_objective_scores.loc[
                "coef of var", c] = std / mean if abs(mean) > 0 else np.inf

        all_objective_scores = all_objective_scores.fillna("-")

        with pd.option_context('display.float_format', '{:.3f}'.format,
                               'expand_frame_repr', False):
            logger.info(all_objective_scores)

        if return_dict:
            return pipeline_results

    def add_to_rankings(self, pipeline):
        """Fits and evaluates a given pipeline then adds the results to the automl rankings with the requirement that automl search has been run.

        Arguments:
            pipeline (PipelineBase): pipeline to train and evaluate.
        """
        pipeline_rows = self.full_rankings[self.full_rankings['pipeline_name']
                                           == pipeline.name]
        for parameter in pipeline_rows['parameters']:
            if pipeline.parameters == parameter:
                return

        self._engine.evaluate_batch([pipeline])
        self._find_best_pipeline()

    @property
    def results(self):
        """Class that allows access to a copy of the results from `automl_search`.

           Returns: dict containing `pipeline_results`: a dict with results from each pipeline,
                    and `search_order`: a list describing the order the pipelines were searched.
           """
        return copy.deepcopy(self._results)

    @property
    def rankings(self):
        """Returns a pandas.DataFrame with scoring results from the highest-scoring set of parameters used with each pipeline."""
        return self.full_rankings.drop_duplicates(subset="pipeline_name",
                                                  keep="first")

    @property
    def full_rankings(self):
        """Returns a pandas.DataFrame with scoring results from all pipelines searched"""
        ascending = True
        if self.objective.greater_is_better:
            ascending = False
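        # Higher-is-better objectives sort descending so the best score comes first.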

        full_rankings_cols = [
            "id", "pipeline_name", "score", "validation_score",
            "percent_better_than_baseline", "high_variance_cv", "parameters"
        ]
        if not self._results['pipeline_results']:
            return pd.DataFrame(columns=full_rankings_cols)

        rankings_df = pd.DataFrame(self._results['pipeline_results'].values())
        rankings_df = rankings_df[full_rankings_cols]
        rankings_df.sort_values("score", ascending=ascending, inplace=True)
        rankings_df.reset_index(drop=True, inplace=True)
        return rankings_df

    @property
    def best_pipeline(self):
        """Returns a trained instance of the best pipeline and parameters found during automl search. If `train_best_pipeline` is set to False, returns an untrained pipeline instance.

        Returns:
            PipelineBase: the best pipeline and parameters found during automl search; trained unless `train_best_pipeline` was set to False.
        """
        if not self._best_pipeline:
            raise PipelineNotFoundError(
                "automl search must be run before selecting `best_pipeline`.")

        return self._best_pipeline

    def save(self, file_path, pickle_protocol=cloudpickle.DEFAULT_PROTOCOL):
        """Saves AutoML object at file path

        Arguments:
            file_path (str): location to save file
            pickle_protocol (int): the pickle data stream format.

        Returns:
            None
        """
        with open(file_path, 'wb') as f:
            cloudpickle.dump(self, f, protocol=pickle_protocol)

    @staticmethod
    def load(file_path):
        """Loads AutoML object at file path

        Arguments:
            file_path (str): location to find file to load

        Returns:
            AutoSearchBase object
        """
        with open(file_path, 'rb') as f:
            return cloudpickle.load(f)
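
# --- Usage sketch (added; not part of the original example) ---
# A minimal, hypothetical helper showing how the search object defined above is
# typically consumed once a search has finished: inspect rankings, pull a
# pipeline back out by id, describe it, and persist/restore the whole object.
# The `automl` argument is assumed to be an already-searched instance of the
# class above; the pipeline id and file path are illustrative only.
def inspect_and_save(automl, pipeline_id=1, file_path="automl.pkl"):
    print(automl.rankings)                       # best result per pipeline name
    print(automl.full_rankings)                  # every evaluated pipeline, sorted by score
    pipeline = automl.get_pipeline(pipeline_id)  # untrained pipeline with the searched parameters
    automl.describe_pipeline(pipeline_id)        # logs training time and cross-validation scores
    automl.save(file_path)                       # cloudpickle the whole search object
    restored = automl.load(file_path)            # static method; returns the saved object
    return pipeline, restored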
Example #15
def test_iterative_algorithm_results(mock_stack, ensembling_value,
                                     dummy_binary_pipeline_classes):
    dummy_binary_pipeline_classes = dummy_binary_pipeline_classes()
    algo = IterativeAlgorithm(allowed_pipelines=dummy_binary_pipeline_classes,
                              ensembling=ensembling_value)
    assert algo.pipeline_number == 0
    assert algo.batch_number == 0
    assert algo.allowed_pipelines == dummy_binary_pipeline_classes

    # initial batch contains one of each pipeline, with default parameters
    next_batch = algo.next_batch()
    assert len(next_batch) == len(dummy_binary_pipeline_classes)
    assert [p.__class__ for p in next_batch] == dummy_binary_pipeline_classes
    assert algo.pipeline_number == len(dummy_binary_pipeline_classes)
    assert algo.batch_number == 1
    assert all(
        [p.parameters == p.__class__.default_parameters for p in next_batch])
    # the "best" score will be the 1st dummy pipeline
    scores = np.arange(0, len(next_batch))
    for score, pipeline in zip(scores, next_batch):
        algo.add_result(score, pipeline)

    # subsequent batches contain pipelines_per_batch copies of one pipeline, moving from best to worst from the first batch
    last_batch_number = algo.batch_number
    last_pipeline_number = algo.pipeline_number
    all_parameters = []

    for i in range(1, 5):
        for _ in range(len(dummy_binary_pipeline_classes)):
            next_batch = algo.next_batch()
            assert len(next_batch) == algo.pipelines_per_batch
            num_pipelines_classes = (
                len(dummy_binary_pipeline_classes) +
                1) if ensembling_value else len(dummy_binary_pipeline_classes)
            cls = dummy_binary_pipeline_classes[(algo.batch_number - 2) %
                                                num_pipelines_classes]
            assert [p.__class__ for p in next_batch] == [cls] * len(next_batch)
            assert all([
                p.parameters['Mock Classifier']['n_jobs'] == -1
                for p in next_batch
            ])
            assert all((p.random_seed == algo.random_seed) for p in next_batch)
            assert algo.pipeline_number == last_pipeline_number + len(
                next_batch)
            last_pipeline_number = algo.pipeline_number
            assert algo.batch_number == last_batch_number + 1
            last_batch_number = algo.batch_number
            all_parameters.extend([p.parameters for p in next_batch])
            scores = -np.arange(0, len(next_batch))
            for score, pipeline in zip(scores, next_batch):
                algo.add_result(score, pipeline)
        assert any([
            p != dummy_binary_pipeline_classes[0]({}).parameters
            for p in all_parameters
        ])

        if ensembling_value:
            # check next batch is stacking ensemble batch
            assert algo.batch_number == (len(dummy_binary_pipeline_classes) +
                                         1) * i
            next_batch = algo.next_batch()
            assert len(next_batch) == 1
            assert algo.batch_number == last_batch_number + 1
            last_batch_number = algo.batch_number
            assert algo.pipeline_number == last_pipeline_number + 1
            last_pipeline_number = algo.pipeline_number
            scores = np.arange(0, len(next_batch))
            for score, pipeline in zip(scores, next_batch):
                algo.add_result(score, pipeline)
            assert pipeline.model_family == ModelFamily.ENSEMBLE
            assert pipeline.random_seed == algo.random_seed
            stack_args = mock_stack.call_args[1]['estimators']
            estimators_used_in_ensemble = [args[1] for args in stack_args]
            random_seeds_the_same = [
                (estimator.pipeline.random_seed == algo.random_seed)
                for estimator in estimators_used_in_ensemble
            ]
            assert all(random_seeds_the_same)
Example #16
def test_iterative_algorithm_frozen_parameters():
    class MockEstimator(Estimator):
        name = "Mock Classifier"
        model_family = ModelFamily.RANDOM_FOREST
        supported_problem_types = [
            ProblemTypes.BINARY, ProblemTypes.MULTICLASS
        ]
        hyperparameter_ranges = {
            'dummy_int_parameter': Integer(1, 10),
            'dummy_categorical_parameter':
            Categorical(["random", "dummy", "test"]),
            'dummy_real_parameter': Real(0, 1)
        }

        def __init__(self,
                     dummy_int_parameter=0,
                     dummy_categorical_parameter='dummy',
                     dummy_real_parameter=1.0,
                     n_jobs=-1,
                     random_seed=0,
                     **kwargs):
            super().__init__(parameters={
                'dummy_int_parameter': dummy_int_parameter,
                'dummy_categorical_parameter': dummy_categorical_parameter,
                'dummy_real_parameter': dummy_real_parameter,
                **kwargs, 'n_jobs': n_jobs
            },
                             component_obj=None,
                             random_seed=random_seed)

    pipeline = BinaryClassificationPipeline([MockEstimator])
    algo = IterativeAlgorithm(allowed_pipelines=[pipeline, pipeline, pipeline],
                              pipeline_params={
                                  'pipeline': {
                                      'date_index': "Date",
                                      "gap": 2,
                                      "max_delay": 10
                                  }
                              },
                              random_seed=0,
                              _frozen_pipeline_parameters={
                                  "Mock Classifier": {
                                      'dummy_int_parameter': 6,
                                      'dummy_categorical_parameter': "random",
                                      'dummy_real_parameter': 0.1
                                  }
                              })
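    # The frozen "Mock Classifier" values should come back unchanged in every
    # batch the tuner proposes, even though they sit inside the hyperparameter
    # ranges being searched.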

    next_batch = algo.next_batch()
    assert all([
        p.parameters['pipeline'] == {
            'date_index': "Date",
            "gap": 2,
            "max_delay": 10
        } for p in next_batch
    ])
    assert all([
        p.parameters['Mock Classifier'] == {
            'dummy_int_parameter': 6,
            'dummy_categorical_parameter': "random",
            'dummy_real_parameter': 0.1,
            "n_jobs": -1
        } for p in next_batch
    ])

    scores = np.arange(0, len(next_batch))
    for score, pipeline in zip(scores, next_batch):
        algo.add_result(score, pipeline, {"id": algo.pipeline_number})

    # make sure that future batches remain in the hyperparam range
    for i in range(1, 5):
        next_batch = algo.next_batch()
        assert all([
            p.parameters['Mock Classifier'] == {
                'dummy_int_parameter': 6,
                'dummy_categorical_parameter': "random",
                'dummy_real_parameter': 0.1,
                "n_jobs": -1
            } for p in next_batch
        ])
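
# --- Usage sketch (added; not part of the original example) ---
# The tests above all follow the same next_batch()/add_result() loop. This is a
# minimal, hypothetical driver for that pattern; it assumes `algo` is an
# already-constructed IterativeAlgorithm and `score_pipeline` is any callable
# that returns a score-to-minimize for a given pipeline.
def run_batches(algo, score_pipeline, n_batches=3):
    for _ in range(n_batches):
        batch = algo.next_batch()             # propose the next set of pipelines
        for pipeline in batch:
            score = score_pipeline(pipeline)  # caller-supplied evaluation
            # mirror the three-argument form used in the test above
            algo.add_result(score, pipeline, {"id": algo.pipeline_number})
    return algo.batch_number, algo.pipeline_number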