def test_scikit_learn_wrapper_invalid_problem_type():
    """Wrapping fails with ValueError when a pipeline's problem type is unset."""
    pipeline = make_pipeline_from_components([RandomForestClassifier()], ProblemTypes.MULTICLASS)
    # Simulate a malformed pipeline: the wrapper needs a known problem type
    # to decide between classifier and regressor interfaces.
    pipeline.problem_type = None
    expected_message = "Could not wrap EvalML object in scikit-learn wrapper."
    with pytest.raises(ValueError, match=expected_message):
        scikit_learn_wrapped_estimator(pipeline)
def __init__(self, input_pipelines=None, final_estimator=None, cv=None, n_jobs=None, random_state=None, random_seed=0, **kwargs):
    """Stacked ensemble base class.

    Arguments:
        input_pipelines (list(PipelineBase or subclass obj)): List of pipeline instances to use as the base estimators.
            This must not be None or an empty list or else EnsembleMissingPipelinesError will be raised.
        final_estimator (Estimator or subclass): The estimator used to combine the base estimators.
        cv (int, cross-validation generator or an iterable): Determines the cross-validation splitting strategy used to
            train final_estimator. For int/None inputs, if the estimator is a classifier and y is either binary or
            multiclass, StratifiedKFold is used. In all other cases, KFold is used. Possible inputs for cv are:

            - None: 5-fold cross validation
            - int: the number of folds in a (Stratified) KFold
            - An scikit-learn cross-validation generator object
            - An iterable yielding (train, test) splits
        n_jobs (int or None): Non-negative integer describing level of parallelism used for pipelines.
            None and 1 are equivalent. If set to -1, all CPUs are used. For n_jobs below -1,
            (n_cpus + 1 + n_jobs) are used. Defaults to None.

            - Note: there could be some multi-process errors thrown for values of `n_jobs != 1`. If this is the case,
              please use `n_jobs = 1`.
        random_state (None, int): Deprecated - use random_seed instead.
        random_seed (int): Seed for the random number generator. Defaults to 0.

    Raises:
        EnsembleMissingPipelinesError: If input_pipelines is None or empty.
        ValueError: If any base pipeline's model family is non-stackable, or
            if the base pipelines do not all share one problem type.
    """
    # Validate the base pipelines before anything else; both checks must
    # happen in this order so callers see the same exceptions as before.
    if not input_pipelines:
        raise EnsembleMissingPipelinesError("`input_pipelines` must not be None or an empty list.")
    unstackable = [pl for pl in input_pipelines if pl.model_family in _nonstackable_model_families]
    if unstackable:
        raise ValueError("Pipelines with any of the following model families cannot be used as base pipelines: {}".format(_nonstackable_model_families))

    # User-facing parameters record the arguments as given (cv may be None here).
    parameters = {
        "input_pipelines": input_pipelines,
        "final_estimator": final_estimator,
        "cv": cv,
        "n_jobs": n_jobs,
    }
    parameters.update(kwargs)

    if len({pl.problem_type for pl in input_pipelines}) > 1:
        raise ValueError("All pipelines must have the same problem type.")

    random_seed = deprecate_arg("random_state", "random_seed", random_state, random_seed)
    # Resolve the actual CV splitter only after seed resolution so the
    # default splitter is seeded consistently.
    cv = cv or self._default_cv(n_splits=3, random_state=random_seed, shuffle=True)

    # Wrap each EvalML pipeline (and the final estimator) so scikit-learn's
    # stacking implementation can drive them.
    wrapped_base = [scikit_learn_wrapped_estimator(pl) for pl in input_pipelines]
    wrapped_final = scikit_learn_wrapped_estimator(final_estimator or self._default_final_estimator())
    sklearn_parameters = {
        "estimators": [(f"({position})", wrapped) for position, wrapped in enumerate(wrapped_base)],
        "final_estimator": wrapped_final,
        "cv": cv,
        "n_jobs": n_jobs,
    }
    sklearn_parameters.update(kwargs)
    super().__init__(parameters=parameters, component_obj=self._stacking_estimator_class(**sklearn_parameters), random_seed=random_seed)
def test_scikit_learn_wrapper(X_y_binary, X_y_multi, X_y_regression, ts_data):
    """Wrapped EvalML pipelines expose a working scikit-learn estimator API.

    For every non-ensemble estimator and each problem type it supports,
    fit/predict (and predict_proba for classification) are exercised through
    the scikit-learn wrapper. Time-series problem types are skipped.
    """
    candidates = [est for est in _all_estimators() if est.model_family != ModelFamily.ENSEMBLE]
    for estimator in candidates:
        for problem_type in estimator.supported_problem_types:
            num_classes = None
            if problem_type == ProblemTypes.BINARY:
                X, y = X_y_binary
                num_classes = 2
                pipeline_class = BinaryClassificationPipeline
            elif problem_type == ProblemTypes.MULTICLASS:
                X, y = X_y_multi
                num_classes = 3
                pipeline_class = MulticlassClassificationPipeline
            elif problem_type == ProblemTypes.REGRESSION:
                X, y = X_y_regression
                pipeline_class = RegressionPipeline
            elif problem_type in [ProblemTypes.TIME_SERIES_REGRESSION, ProblemTypes.TIME_SERIES_MULTICLASS, ProblemTypes.TIME_SERIES_BINARY]:
                continue

            evalml_pipeline = pipeline_class([estimator])
            wrapped = scikit_learn_wrapped_estimator(evalml_pipeline)
            wrapped.fit(X, y)

            predictions = wrapped.predict(X)
            assert len(predictions) == len(y)
            assert not np.isnan(predictions).all()

            if problem_type in [ProblemTypes.BINARY, ProblemTypes.MULTICLASS]:
                probabilities = wrapped.predict_proba(X)
                assert probabilities.shape == (len(y), num_classes)
                assert not np.isnan(probabilities).all().all()
def test_scikit_learn_wrapper(X_y_binary, X_y_multi, X_y_regression):
    """Wrapped EvalML pipelines expose a working scikit-learn estimator API.

    Builds a one-component pipeline for every non-ensemble estimator and each
    supported problem type, then checks fit/predict (and predict_proba for
    classification) through the scikit-learn wrapper.
    """
    candidates = [est for est in _all_estimators() if est.model_family != ModelFamily.ENSEMBLE]
    for estimator in candidates:
        for problem_type in estimator.supported_problem_types:
            num_classes = None
            if problem_type == ProblemTypes.BINARY:
                X, y = X_y_binary
                num_classes = 2
            elif problem_type == ProblemTypes.MULTICLASS:
                X, y = X_y_multi
                num_classes = 3
            elif problem_type == ProblemTypes.REGRESSION:
                X, y = X_y_regression
            elif problem_type in [ProblemTypes.TIME_SERIES_REGRESSION, ProblemTypes.TIME_SERIES_MULTICLASS, ProblemTypes.TIME_SERIES_BINARY]:
                # Skipping because make_pipeline_from_components does not yet work for time series.
                continue

            evalml_pipeline = make_pipeline_from_components([estimator()], problem_type)
            wrapped = scikit_learn_wrapped_estimator(evalml_pipeline)
            wrapped.fit(X, y)

            predictions = wrapped.predict(X)
            assert len(predictions) == len(y)
            assert not np.isnan(predictions).all()

            if problem_type in [ProblemTypes.BINARY, ProblemTypes.MULTICLASS]:
                probabilities = wrapped.predict_proba(X)
                assert probabilities.shape == (len(y), num_classes)
                assert not np.isnan(probabilities).all().all()
def test_scikit_learn_wrapper_invalid_problem_type():
    """Wrapping fails with ValueError when a pipeline's problem type is unset."""
    pipeline = MulticlassClassificationPipeline([RandomForestClassifier])
    # Clear the problem type to simulate a malformed pipeline: the wrapper
    # cannot choose a scikit-learn interface without it.
    pipeline.problem_type = None
    expected_message = "Could not wrap EvalML object in scikit-learn wrapper."
    with pytest.raises(ValueError, match=expected_message):
        scikit_learn_wrapped_estimator(pipeline)