def test_iterative_algorithm_sampling_params(problem_type, mock_imbalanced_data_X_y):
    X, y = mock_imbalanced_data_X_y(problem_type, "some", 'small')
    estimators = get_estimators(problem_type, None)
    pipelines = [make_pipeline(X, y, e, problem_type, sampler_name='Undersampler') for e in estimators]
    algo = IterativeAlgorithm(allowed_pipelines=pipelines,
                              random_seed=0,
                              _frozen_pipeline_parameters={"Undersampler": {"sampling_ratio": 0.5}})

    next_batch = algo.next_batch()
    for p in next_batch:
        for component in p._component_graph:
            if "sampler" in component.name:
                assert component.parameters["sampling_ratio"] == 0.5

    scores = np.arange(0, len(next_batch))
    for score, pipeline in zip(scores, next_batch):
        algo.add_result(score, pipeline, {"id": algo.pipeline_number})

    # make sure that future batches remain in the hyperparameter range
    for i in range(1, 5):
        next_batch = algo.next_batch()
        for p in next_batch:
            for component in p._component_graph:
                if "sampler" in component.name:
                    assert component.parameters["sampling_ratio"] == 0.5

def test_callback(X_y_regression):
    X, y = X_y_regression

    counts = {
        "start_iteration_callback": 0,
        "add_result_callback": 0,
    }

    def start_iteration_callback(pipeline_class, parameters, automl_obj, counts=counts):
        counts["start_iteration_callback"] += 1

    def add_result_callback(results, trained_pipeline, automl_obj, counts=counts):
        counts["add_result_callback"] += 1

    max_iterations = 3
    automl = AutoMLSearch(X_train=X, y_train=y, problem_type='regression',
                          objective="R2",
                          max_iterations=max_iterations,
                          start_iteration_callback=start_iteration_callback,
                          add_result_callback=add_result_callback,
                          n_jobs=1)
    automl.search()

    assert counts["start_iteration_callback"] == len(get_estimators("regression")) + 1
    assert counts["add_result_callback"] == max_iterations

def test_automl_allowed_pipelines_init_allowed_both_not_specified_multi(mock_fit, mock_score, X_y_multi,
                                                                         assert_allowed_pipelines_equal_helper):
    X, y = X_y_multi
    automl = AutoMLSearch(X_train=X, y_train=y, problem_type='multiclass',
                          allowed_pipelines=None, allowed_model_families=None)
    mock_score.return_value = {automl.objective.name: 1.0}
    expected_pipelines = [make_pipeline(X, y, estimator, ProblemTypes.MULTICLASS)
                          for estimator in get_estimators(ProblemTypes.MULTICLASS, model_families=None)]
    assert_allowed_pipelines_equal_helper(automl.allowed_pipelines, expected_pipelines)

    automl.search()
    assert_allowed_pipelines_equal_helper(automl.allowed_pipelines, expected_pipelines)
    assert set(automl.allowed_model_families) == set([p.model_family for p in expected_pipelines])
    mock_fit.assert_called()
    mock_score.assert_called()

def test_automl_allowed_pipelines_specified_allowed_model_families_binary(mock_fit, mock_score, X_y_binary,
                                                                           assert_allowed_pipelines_equal_helper):
    X, y = X_y_binary
    automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary',
                          allowed_pipelines=None, allowed_model_families=[ModelFamily.RANDOM_FOREST])
    mock_score.return_value = {automl.objective.name: 1.0}
    expected_pipelines = [make_pipeline(X, y, estimator, ProblemTypes.BINARY)
                          for estimator in get_estimators(ProblemTypes.BINARY,
                                                          model_families=[ModelFamily.RANDOM_FOREST])]
    assert_allowed_pipelines_equal_helper(automl.allowed_pipelines, expected_pipelines)

    automl.search()
    assert_allowed_pipelines_equal_helper(automl.allowed_pipelines, expected_pipelines)
    assert set(automl.allowed_model_families) == set([ModelFamily.RANDOM_FOREST])
    mock_fit.assert_called()
    mock_score.assert_called()

    mock_fit.reset_mock()
    mock_score.reset_mock()

    # passing model families as strings should behave the same as passing ModelFamily enums
    automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary',
                          allowed_pipelines=None, allowed_model_families=['random_forest'])
    expected_pipelines = [make_pipeline(X, y, estimator, ProblemTypes.BINARY)
                          for estimator in get_estimators(ProblemTypes.BINARY,
                                                          model_families=[ModelFamily.RANDOM_FOREST])]
    assert_allowed_pipelines_equal_helper(automl.allowed_pipelines, expected_pipelines)

    automl.search()
    assert_allowed_pipelines_equal_helper(automl.allowed_pipelines, expected_pipelines)
    assert set(automl.allowed_model_families) == set([ModelFamily.RANDOM_FOREST])
    mock_fit.assert_called()
    mock_score.assert_called()

def test_all_estimators_check_fit_input_type_regression(data_type, X_y_regression, make_data_type, helper_functions):
    X, y = X_y_regression
    X = make_data_type(data_type, X)
    y = make_data_type(data_type, y)

    estimators_to_check = [estimator for estimator in get_estimators('regression')]
    for component_class in estimators_to_check:
        component = helper_functions.safe_init_component_with_njobs_1(component_class)
        component.fit(X, y)
        component.predict(X)

def make_pipeline(X, y, estimator, problem_type, custom_hyperparameters=None, text_columns=None):
    """Given input data, target data, an estimator class and the problem type,
    generates a pipeline class with a preprocessing chain which was recommended based on the inputs.
    The pipeline will be a subclass of the appropriate pipeline base class for the specified problem_type.

    Arguments:
        X (pd.DataFrame, ww.DataTable): The input data of shape [n_samples, n_features]
        y (pd.Series, ww.DataColumn): The target data of length [n_samples]
        estimator (Estimator): Estimator for pipeline
        problem_type (ProblemTypes or str): Problem type for pipeline to generate
        custom_hyperparameters (dictionary): Dictionary of custom hyperparameters,
            with component name as key and dictionary of parameters as the value
        text_columns (list): Feature names which should be treated as text features. Defaults to None.

    Returns:
        class: PipelineBase subclass with dynamically generated preprocessing components and specified estimator
    """
    X = _convert_to_woodwork_structure(X)
    y = _convert_to_woodwork_structure(y)
    problem_type = handle_problem_types(problem_type)
    if estimator not in get_estimators(problem_type):
        raise ValueError(f"{estimator.name} is not a valid estimator for problem type")
    preprocessing_components = _get_preprocessing_components(X, y, problem_type, text_columns, estimator)
    complete_component_graph = preprocessing_components + [estimator]

    if custom_hyperparameters and not isinstance(custom_hyperparameters, dict):
        raise ValueError(f"if custom_hyperparameters provided, must be dictionary. Received {type(custom_hyperparameters)}")

    hyperparameters = custom_hyperparameters
    base_class = _get_pipeline_base_class(problem_type)

    class GeneratedPipeline(base_class):
        custom_name = f"{estimator.name} w/ {' + '.join([component.name for component in preprocessing_components])}"
        component_graph = complete_component_graph
        custom_hyperparameters = hyperparameters

    return GeneratedPipeline

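
# Illustrative usage sketch for the class-returning make_pipeline above; not part of the original
# module. The data variables and the choice of the first available binary estimator are
# assumptions made only for demonstration, and the generated class is instantiated with default
# component parameters before fitting.
def _example_generated_pipeline_usage(X, y):
    estimator = get_estimators(ProblemTypes.BINARY)[0]
    pipeline_class = make_pipeline(X, y, estimator, ProblemTypes.BINARY)
    pipeline = pipeline_class(parameters={})  # instantiate the dynamically generated subclass
    pipeline.fit(X, y)
    return pipeline.predict(X)
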
def test_iterative_algorithm_first_batch_order(problem_type, X_y_binary, has_minimal_dependencies):
    X, y = X_y_binary
    estimators = get_estimators(problem_type, None)
    pipelines = [make_pipeline(X, y, e, problem_type) for e in estimators]
    algo = IterativeAlgorithm(allowed_pipelines=pipelines)

    # initial batch contains one of each pipeline, with default parameters
    next_batch = algo.next_batch()
    estimators_in_first_batch = [p.estimator.name for p in next_batch]

    if problem_type == ProblemTypes.REGRESSION:
        final_estimators = ['XGBoost Regressor', 'LightGBM Regressor', 'CatBoost Regressor']
    else:
        final_estimators = ['XGBoost Classifier', 'LightGBM Classifier', 'CatBoost Classifier']
    if has_minimal_dependencies:
        final_estimators = []

    if problem_type == ProblemTypes.REGRESSION:
        assert estimators_in_first_batch == ['Linear Regressor', 'Elastic Net Regressor', 'Decision Tree Regressor',
                                             'Extra Trees Regressor', 'Random Forest Regressor'] + final_estimators
    if problem_type == ProblemTypes.BINARY:
        assert estimators_in_first_batch == ['Elastic Net Classifier', 'Logistic Regression Classifier',
                                             'Decision Tree Classifier', 'Extra Trees Classifier',
                                             'Random Forest Classifier'] + final_estimators
    if problem_type == ProblemTypes.MULTICLASS:
        assert estimators_in_first_batch == ['Elastic Net Classifier', 'Logistic Regression Classifier',
                                             'Decision Tree Classifier', 'Extra Trees Classifier',
                                             'Random Forest Classifier'] + final_estimators

def test_automl_pickle_generated_pipeline(mock_regression_fit, mock_regression_score, X_y_regression):
    mock_regression_score.return_value = {"R2": 1.0}

    class RegressionPipelineCustom(RegressionPipeline):
        custom_name = "Custom Regression Name"
        component_graph = ["Imputer", "Linear Regressor"]
        custom_hyperparameters = {"Imputer": {"numeric_impute_strategy": "most_frequent"}}

    X, y = X_y_regression
    pipeline = GeneratedPipelineRegression
    allowed_estimators = get_estimators('regression')
    allowed_pipelines = [make_pipeline(X, y, estimator, problem_type='regression')
                         for estimator in allowed_estimators]
    allowed_pipelines.append(RegressionPipelineCustom)

    a = AutoMLSearch(X_train=X, y_train=y, problem_type='regression', allowed_pipelines=allowed_pipelines)
    a.search()
    a.add_to_rankings(RegressionPipelineCustom({}))

    seen_name = False
    for i, row in a.rankings.iterrows():
        automl_pipeline = a.get_pipeline(row['id'])
        assert automl_pipeline.__class__ == pipeline
        assert pickle.loads(pickle.dumps(automl_pipeline))
        if automl_pipeline.custom_name == RegressionPipelineCustom.custom_name:
            seen_name = True
            assert automl_pipeline.custom_hyperparameters == RegressionPipelineCustom.custom_hyperparameters
            assert automl_pipeline.component_graph == RegressionPipelineCustom.component_graph
    assert seen_name

def make_pipeline(X, y, estimator, problem_type, parameters=None, custom_hyperparameters=None, sampler_name=None):
    """Given input data, target data, an estimator class and the problem type,
    generates a pipeline instance with a preprocessing chain which was recommended based on the inputs.
    The pipeline will be an instance of the appropriate pipeline base class for the specified problem_type.

    Arguments:
        X (pd.DataFrame, ww.DataTable): The input data of shape [n_samples, n_features]
        y (pd.Series, ww.DataColumn): The target data of length [n_samples]
        estimator (Estimator): Estimator for pipeline
        problem_type (ProblemTypes or str): Problem type for pipeline to generate
        parameters (dict): Dictionary with component names as keys and dictionary of that component's parameters as values.
            An empty dictionary or None implies using all default values for component parameters.
        custom_hyperparameters (dictionary): Dictionary of custom hyperparameters,
            with component name as key and dictionary of parameters as the value
        sampler_name (str): The name of the sampler component to add to the pipeline.
            Only used in classification problems. Defaults to None.

    Returns:
        PipelineBase object: PipelineBase instance with dynamically generated preprocessing components and specified estimator
    """
    X = infer_feature_types(X)
    y = infer_feature_types(y)
    problem_type = handle_problem_types(problem_type)
    if estimator not in get_estimators(problem_type):
        raise ValueError(f"{estimator.name} is not a valid estimator for problem type")
    if not is_classification(problem_type) and sampler_name is not None:
        raise ValueError(f"Sampling is unsupported for problem_type {str(problem_type)}")

    preprocessing_components = _get_preprocessing_components(X, y, problem_type, estimator, sampler_name)
    complete_component_graph = preprocessing_components + [estimator]

    if custom_hyperparameters and not isinstance(custom_hyperparameters, dict):
        raise ValueError(f"if custom_hyperparameters provided, must be dictionary. Received {type(custom_hyperparameters)}")

    base_class = _get_pipeline_base_class(problem_type)
    return base_class(complete_component_graph, parameters=parameters, custom_hyperparameters=custom_hyperparameters)

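
# Illustrative usage sketch for the instance-returning make_pipeline above; not part of the
# original module. The Undersampler name and 0.5 sampling ratio mirror the values used in
# test_iterative_algorithm_sampling_params earlier in this section; the data variables and the
# choice of the first binary estimator are assumptions for demonstration only.
def _example_make_pipeline_with_sampler(X, y):
    estimator = get_estimators(ProblemTypes.BINARY)[0]
    pipeline = make_pipeline(X, y, estimator, ProblemTypes.BINARY,
                             parameters={"Undersampler": {"sampling_ratio": 0.5}},
                             sampler_name="Undersampler")
    pipeline.fit(X, y)
    return pipeline.predict(X)
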
def __init__(self,
             X_train=None,
             y_train=None,
             problem_type=None,
             objective='auto',
             max_iterations=None,
             max_time=None,
             patience=None,
             tolerance=None,
             data_splitter=None,
             allowed_pipelines=None,
             allowed_model_families=None,
             start_iteration_callback=None,
             add_result_callback=None,
             error_callback=None,
             additional_objectives=None,
             random_state=None,
             random_seed=0,
             n_jobs=-1,
             tuner_class=None,
             optimize_thresholds=False,
             ensembling=False,
             max_batches=None,
             problem_configuration=None,
             train_best_pipeline=True,
             pipeline_parameters=None,
             _pipelines_per_batch=5):
    """Automated pipeline search.

    Arguments:
        X_train (pd.DataFrame, ww.DataTable): The input training data of shape [n_samples, n_features]. Required.
        y_train (pd.Series, ww.DataColumn): The target training data of length [n_samples]. Required for supervised learning tasks.
        problem_type (str or ProblemTypes): Type of supervised learning problem. See evalml.problem_types.ProblemType.all_problem_types for a full list.
        objective (str, ObjectiveBase): The objective to optimize for. Used to propose and rank pipelines, but not for optimizing each pipeline during fit-time.
            When set to 'auto', chooses:
            - LogLossBinary for binary classification problems,
            - LogLossMulticlass for multiclass classification problems, and
            - R2 for regression problems.
        max_iterations (int): Maximum number of iterations to search. If neither max_iterations nor max_time is set, max_iterations will default to 5.
        max_time (int, str): Maximum time to search for pipelines. This will not start a new pipeline search after the duration has elapsed.
            If it is an integer, then the time will be in seconds. For strings, time can be specified as seconds, minutes, or hours.
        patience (int): Number of iterations without improvement to stop search early. Must be positive. If None, early stopping is disabled. Defaults to None.
        tolerance (float): Minimum percentage difference to qualify as score improvement for early stopping. Only applicable if patience is not None. Defaults to None.
        allowed_pipelines (list(class)): A list of PipelineBase subclasses indicating the pipelines allowed in the search.
            The default of None indicates all pipelines for this problem type are allowed. Setting this field will cause allowed_model_families to be ignored.
        allowed_model_families (list(str, ModelFamily)): The model families to search. The default of None searches over all model families.
            Run evalml.pipelines.components.utils.allowed_model_families("binary") to see options. Change `binary` to `multiclass` or `regression`
            depending on the problem type. Note that if allowed_pipelines is provided, this parameter will be ignored.
        data_splitter (sklearn.model_selection.BaseCrossValidator): Data splitting method to use. Defaults to StratifiedKFold.
        tuner_class: The tuner class to use. Defaults to SKOptTuner.
        start_iteration_callback (callable): Function called before each pipeline training iteration.
            Callback function takes three positional parameters: the pipeline class, the pipeline parameters, and the AutoMLSearch object.
        add_result_callback (callable): Function called after each pipeline training iteration.
            Callback function takes three positional parameters: a dictionary containing the training results for the new pipeline,
            an untrained_pipeline containing the parameters used during training, and the AutoMLSearch object.
        error_callback (callable): Function called when `search()` errors and raises an Exception.
            Callback function takes three positional parameters: the Exception raised, the traceback, and the AutoMLSearch object.
            Must also accept kwargs, so AutoMLSearch is able to pass along other appropriate parameters by default.
            Defaults to None, which will call `log_error_callback`.
        additional_objectives (list): Custom set of objectives to score on. Will override default objectives for problem type if not empty.
        random_state (int): Deprecated - use random_seed instead.
        random_seed (int): Seed for the random number generator. Defaults to 0.
        n_jobs (int or None): Non-negative integer describing level of parallelism used for pipelines. None and 1 are equivalent.
            If set to -1, all CPUs are used. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
        ensembling (boolean): If True, runs ensembling in a separate batch after every allowed pipeline class has been iterated over.
            If the number of unique pipelines to search over per batch is one, ensembling will not run. Defaults to False.
        max_batches (int): The maximum number of batches of pipelines to search. Parameters max_time and max_iterations have precedence over stopping the search.
        problem_configuration (dict, None): Additional parameters needed to configure the search.
            For example, in time series problems, values should be passed in for the gap and max_delay variables.
        train_best_pipeline (boolean): Whether or not to train the best pipeline before returning it. Defaults to True.
        _pipelines_per_batch (int): The number of pipelines to train for every batch after the first one.
            The first batch will train a baseline pipeline + one of each pipeline family allowed in the search.
    """
    if X_train is None:
        raise ValueError('Must specify training data as a 2d array using the X_train argument')
    if y_train is None:
        raise ValueError('Must specify training data target values as a 1d vector using the y_train argument')
    try:
        self.problem_type = handle_problem_types(problem_type)
    except ValueError:
        raise ValueError('choose one of (binary, multiclass, regression) as problem_type')

    self.tuner_class = tuner_class or SKOptTuner
    self.start_iteration_callback = start_iteration_callback
    self.add_result_callback = add_result_callback
    self.error_callback = error_callback or log_error_callback
    self.data_splitter = data_splitter
    self.optimize_thresholds = optimize_thresholds
    self.ensembling = ensembling
    if objective == 'auto':
        objective = get_default_primary_search_objective(self.problem_type.value)
    objective = get_objective(objective, return_instance=False)
    self.objective = self._validate_objective(objective)
    if self.data_splitter is not None and not issubclass(self.data_splitter.__class__, BaseCrossValidator):
        raise ValueError("Not a valid data splitter")
    if not objective.is_defined_for_problem_type(self.problem_type):
        raise ValueError("Given objective {} is not compatible with a {} problem.".format(self.objective.name, self.problem_type.value))
    if additional_objectives is None:
        additional_objectives = get_core_objectives(self.problem_type)
        # if our main objective is part of default set of objectives for problem_type, remove it
        existing_main_objective = next((obj for obj in additional_objectives if obj.name == self.objective.name), None)
        if existing_main_objective is not None:
            additional_objectives.remove(existing_main_objective)
    else:
        additional_objectives = [get_objective(o) for o in additional_objectives]
    additional_objectives = [self._validate_objective(obj) for obj in additional_objectives]
    self.additional_objectives = additional_objectives
    self.objective_name_to_class = {o.name: o for o in [self.objective] + self.additional_objectives}

    if not isinstance(max_time, (int, float, str, type(None))):
        raise TypeError(f"Parameter max_time must be a float, int, string or None. Received {type(max_time)} with value {str(max_time)}.")
    if isinstance(max_time, (int, float)) and max_time < 0:
        raise ValueError(f"Parameter max_time must be None or non-negative. Received {max_time}.")
    if max_batches is not None and max_batches < 0:
        raise ValueError(f"Parameter max_batches must be None or non-negative. Received {max_batches}.")
    if max_iterations is not None and max_iterations < 0:
        raise ValueError(f"Parameter max_iterations must be None or non-negative. Received {max_iterations}.")
    self.max_time = convert_to_seconds(max_time) if isinstance(max_time, str) else max_time
    self.max_iterations = max_iterations
    self.max_batches = max_batches
    self._pipelines_per_batch = _pipelines_per_batch
    if not self.max_iterations and not self.max_time and not self.max_batches:
        self.max_batches = 1
        logger.info("Using default limit of max_batches=1.\n")

    if patience and (not isinstance(patience, int) or patience < 0):
        raise ValueError("patience value must be a positive integer. Received {} instead".format(patience))
    if tolerance and (tolerance > 1.0 or tolerance < 0.0):
        raise ValueError("tolerance value must be a float between 0.0 and 1.0 inclusive. Received {} instead".format(tolerance))
    self.patience = patience
    self.tolerance = tolerance or 0.0

    self._results = {
        'pipeline_results': {},
        'search_order': [],
        'errors': []
    }
    self.random_seed = deprecate_arg("random_state", "random_seed", random_state, random_seed)
    self.n_jobs = n_jobs

    self.plot = None
    try:
        self.plot = PipelineSearchPlots(self)
    except ImportError:
        logger.warning("Unable to import plotly; skipping pipeline search plotting\n")

    self._data_check_results = None
    self.allowed_pipelines = allowed_pipelines
    self.allowed_model_families = allowed_model_families
    self._automl_algorithm = None
    self._start = 0.0
    self._baseline_cv_scores = {}
    self.show_batch_output = False

    self._validate_problem_type()
    self.problem_configuration = self._validate_problem_configuration(problem_configuration)
    self._train_best_pipeline = train_best_pipeline
    self._best_pipeline = None
    self._searched = False

    self.X_train = infer_feature_types(X_train)
    self.y_train = infer_feature_types(y_train)

    default_data_splitter = make_data_splitter(self.X_train, self.y_train, self.problem_type, self.problem_configuration,
                                               n_splits=3, shuffle=True, random_seed=self.random_seed)
    self.data_splitter = self.data_splitter or default_data_splitter
    self.pipeline_parameters = pipeline_parameters if pipeline_parameters is not None else {}
    self.search_iteration_plot = None
    self._interrupted = False

    self._engine = SequentialEngine(self.X_train,
                                    self.y_train,
                                    self,
                                    should_continue_callback=self._should_continue,
                                    pre_evaluation_callback=self._pre_evaluation_callback,
                                    post_evaluation_callback=self._post_evaluation_callback)

    # generate the default set of allowed pipelines when none are provided
    if self.allowed_pipelines is None:
        logger.info("Generating pipelines to search over...")
        allowed_estimators = get_estimators(self.problem_type, self.allowed_model_families)
        logger.debug(f"allowed_estimators set to {[estimator.name for estimator in allowed_estimators]}")
        self.allowed_pipelines = [make_pipeline(self.X_train, self.y_train, estimator, self.problem_type,
                                                custom_hyperparameters=self.pipeline_parameters)
                                  for estimator in allowed_estimators]

    if self.allowed_pipelines == []:
        raise ValueError("No allowed pipelines to search")

    run_ensembling = self.ensembling
    if run_ensembling and len(self.allowed_pipelines) == 1:
        logger.warning("Ensembling is set to True, but the number of unique pipelines is one, so ensembling will not run.")
        run_ensembling = False
    if run_ensembling and self.max_iterations is not None:
        # Baseline + first batch + each pipeline iteration + 1
        first_ensembling_iteration = (1 + len(self.allowed_pipelines) + len(self.allowed_pipelines) * self._pipelines_per_batch + 1)
        if self.max_iterations < first_ensembling_iteration:
            run_ensembling = False
            logger.warning(f"Ensembling is set to True, but max_iterations is too small, so ensembling will not run. Set max_iterations >= {first_ensembling_iteration} to run ensembling.")
        else:
            logger.info(f"Ensembling will run at the {first_ensembling_iteration} iteration and every {len(self.allowed_pipelines) * self._pipelines_per_batch} iterations after that.")

    # when only max_batches is set, derive max_iterations from the batch count
    if self.max_batches and self.max_iterations is None:
        self.show_batch_output = True
        if run_ensembling:
            ensemble_nth_batch = len(self.allowed_pipelines) + 1
            num_ensemble_batches = (self.max_batches - 1) // ensemble_nth_batch
            if num_ensemble_batches == 0:
                logger.warning(f"Ensembling is set to True, but max_batches is too small, so ensembling will not run. Set max_batches >= {ensemble_nth_batch + 1} to run ensembling.")
            else:
                logger.info(f"Ensembling will run every {ensemble_nth_batch} batches.")
            self.max_iterations = (1 + len(self.allowed_pipelines) +
                                   self._pipelines_per_batch * (self.max_batches - 1 - num_ensemble_batches) +
                                   num_ensemble_batches)
        else:
            self.max_iterations = 1 + len(self.allowed_pipelines) + (self._pipelines_per_batch * (self.max_batches - 1))

    self.allowed_model_families = list(set([p.model_family for p in self.allowed_pipelines]))
    logger.debug(f"allowed_pipelines set to {[pipeline.name for pipeline in self.allowed_pipelines]}")
    logger.debug(f"allowed_model_families set to {self.allowed_model_families}")

    if len(self.problem_configuration):
        pipeline_params = {**{'pipeline': self.problem_configuration}, **self.pipeline_parameters}
    else:
        pipeline_params = self.pipeline_parameters

    self._automl_algorithm = IterativeAlgorithm(
        max_iterations=self.max_iterations,
        allowed_pipelines=self.allowed_pipelines,
        tuner_class=self.tuner_class,
        random_seed=self.random_seed,
        n_jobs=self.n_jobs,
        number_features=self.X_train.shape[1],
        pipelines_per_batch=self._pipelines_per_batch,
        ensembling=run_ensembling,
        pipeline_params=pipeline_params)
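
# Illustrative usage sketch for AutoMLSearch as configured by the __init__ above; not part of
# the original module. The binary problem type, the random forest restriction, and max_batches=2
# are assumptions chosen only to exercise the constructor arguments documented above.
def _example_automl_search(X, y):
    automl = AutoMLSearch(X_train=X, y_train=y,
                          problem_type='binary',
                          allowed_model_families=[ModelFamily.RANDOM_FOREST],
                          max_batches=2,
                          n_jobs=1)
    automl.search()
    # rankings/get_pipeline mirror the access pattern used in test_automl_pickle_generated_pipeline
    best_id = automl.rankings.iloc[0]['id']
    return automl.get_pipeline(best_id)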