Example #1
 def __init__(self,
              n_estimators=100,
              max_features="auto",
              max_depth=6,
              min_samples_split=2,
              min_weight_fraction_leaf=0.0,
              n_jobs=-1,
              random_state=None,
              random_seed=0,
              **kwargs):
     parameters = {
         "n_estimators": n_estimators,
         "max_features": max_features,
         "max_depth": max_depth,
         "min_samples_split": min_samples_split,
         "min_weight_fraction_leaf": min_weight_fraction_leaf,
         "n_jobs": n_jobs
     }
     parameters.update(kwargs)
     random_seed = deprecate_arg("random_state", "random_seed",
                                 random_state, random_seed)
     et_classifier = SKExtraTreesClassifier(random_state=random_seed,
                                            **parameters)
     super().__init__(parameters=parameters,
                      component_obj=et_classifier,
                      random_seed=random_seed)
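Every example on this page routes the deprecated random_state argument through the same deprecate_arg helper before seeding the wrapped estimator. For reference, here is a minimal self-contained sketch of what such a helper could look like; the real library function may differ in signature and warning text.

    import warnings

    def deprecate_arg(old_arg, new_arg, old_value, new_value):
        """Return new_value unless the deprecated old_value was explicitly passed."""
        if old_value is not None:
            warnings.warn(f"Argument '{old_arg}' has been deprecated in favor of "
                          f"'{new_arg}' and will be removed in a future release.",
                          DeprecationWarning)
            return old_value
        return new_value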
Example #2
 def __init__(self,
              alpha=0.5,
              l1_ratio=0.5,
              n_jobs=-1,
              max_iter=1000,
              random_state=None,
              random_seed=0,
              penalty='elasticnet',
              **kwargs):
     parameters = {
         'alpha': alpha,
         'l1_ratio': l1_ratio,
         'n_jobs': n_jobs,
         'max_iter': max_iter,
         'penalty': penalty
     }
     if kwargs.get('loss', 'log') != 'log':
         warnings.warn(
             "Parameter loss is being set to 'log' so that ElasticNetClassifier can predict probabilities"
             f". Originally received '{kwargs['loss']}'.")
     kwargs["loss"] = "log"
     parameters.update(kwargs)
     random_seed = deprecate_arg("random_state", "random_seed",
                                 random_state, random_seed)
     en_classifier = SKElasticNetClassifier(random_state=random_seed,
                                            **parameters)
     super().__init__(parameters=parameters,
                      component_obj=en_classifier,
                      random_seed=random_seed)
Example #3
    def __init__(self,
                 variance=0.95,
                 n_components=None,
                 random_state=None,
                 random_seed=0,
                 **kwargs):
        """Initalizes an transformer that reduces the number of features using PCA."

        Arguments:
            variance (float): The percentage of the original data variance that should be preserved when reducing the
                              number of features.
            n_components (int): The number of features to maintain after computing SVD. Defaults to None; if set, it
                                overrides the variance argument.
        """
        parameters = {"variance": variance, "n_components": n_components}
        parameters.update(kwargs)
        random_seed = deprecate_arg("random_state", "random_seed",
                                    random_state, random_seed)
        if n_components:
            pca = SkPCA(n_components=n_components,
                        random_state=random_seed,
                        **kwargs)
        else:
            pca = SkPCA(n_components=variance,
                        random_state=random_seed,
                        **kwargs)
        super().__init__(parameters=parameters,
                         component_obj=pca,
                         random_seed=random_seed)
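The variance/n_components switch above maps directly onto scikit-learn's PCA, which accepts either a float in (0, 1) (keep enough components to explain that share of variance) or an int (keep exactly that many components). A short standalone sketch with illustrative data:

    import numpy as np
    from sklearn.decomposition import PCA

    X = np.random.RandomState(0).rand(100, 10)

    # Variance mode: keep enough components to explain 95% of the variance.
    pca_by_variance = PCA(n_components=0.95).fit(X)

    # Count mode: keep exactly 3 components, overriding any variance target.
    pca_by_count = PCA(n_components=3).fit(X)

    print(pca_by_variance.n_components_, pca_by_count.n_components_)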
Example #4
    def __init__(self, parameters, random_state=None, random_seed=0):
        """Machine learning pipeline made out of transformers and a estimator.

        Required Class Variables:
            component_graph (list): List of components in order. Accepts strings or ComponentBase subclasses in the list

        Arguments:
            parameters (dict): Dictionary with component names as keys and dictionary of that component's parameters as values.
                 An empty dictionary {} implies using all default values for component parameters.
            random_state (int): Deprecated. Use random_seed instead.
            random_seed (int): Seed for the random number generator. Defaults to 0.
        """
        self.random_seed = deprecate_arg("random_state", "random_seed",
                                         random_state, random_seed)
        if isinstance(self.component_graph, list):  # Backwards compatibility
            self._component_graph = ComponentGraph().from_list(
                self.component_graph, random_seed=self.random_seed)
        else:
            self._component_graph = ComponentGraph(
                component_dict=self.component_graph,
                random_seed=self.random_seed)
        self._component_graph.instantiate(parameters)

        self.input_feature_names = {}
        self.input_target_name = None

        final_component = self._component_graph.get_last_component()
        self.estimator = final_component if isinstance(final_component,
                                                       Estimator) else None
        self._estimator_name = self._component_graph.compute_order[
            -1] if self.estimator is not None else None

        self._validate_estimator_problem_type()
        self._is_fitted = False
        self._pipeline_params = parameters.get("pipeline", {})
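A hedged usage sketch for a pipeline base class like the one above, assuming the evalml API of this era in which component_graph is a required class variable and an empty parameters dict means all component defaults:

    from evalml.pipelines import BinaryClassificationPipeline

    class MyPipeline(BinaryClassificationPipeline):
        component_graph = ["Imputer", "Random Forest Classifier"]

    # {} uses the default parameters for every component, per the docstring above.
    pipeline = MyPipeline({}, random_seed=0)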
Example #5
    def __init__(self, boosting_type="gbdt", learning_rate=0.1, n_estimators=100, max_depth=0, num_leaves=31,
                 min_child_samples=20, n_jobs=-1, random_state=None, random_seed=0,
                 bagging_fraction=0.9, bagging_freq=0, **kwargs):
        random_seed = deprecate_arg("random_state", "random_seed", random_state, random_seed)
        parameters = {"boosting_type": boosting_type,
                      "learning_rate": learning_rate,
                      "n_estimators": n_estimators,
                      "max_depth": max_depth,
                      "num_leaves": num_leaves,
                      "min_child_samples": min_child_samples,
                      "n_jobs": n_jobs,
                      "bagging_freq": bagging_freq,
                      "bagging_fraction": bagging_fraction}
        parameters.update(kwargs)
        lg_parameters = copy.copy(parameters)
        # when boosting type is random forest (rf), LightGBM requires bagging_freq == 1 and 0 < bagging_fraction < 1.0
        if boosting_type == "rf":
            lg_parameters['bagging_freq'] = 1
        # when boosting type is goss, LightGBM requires bagging_fraction == 1
        elif boosting_type == "goss":
            lg_parameters['bagging_fraction'] = 1
        # avoid lightgbm warnings having to do with parameter aliases
        if lg_parameters['bagging_freq'] is not None or lg_parameters['bagging_fraction'] is not None:
            lg_parameters.update({'subsample': None, 'subsample_freq': None})

        lgbm_error_msg = "LightGBM is not installed. Please install using `pip install lightgbm`."
        lgbm = import_or_raise("lightgbm", error_msg=lgbm_error_msg)
        self._ordinal_encoder = None
        self._label_encoder = None

        lgbm_classifier = lgbm.sklearn.LGBMClassifier(random_state=random_seed, **lg_parameters)

        super().__init__(parameters=parameters,
                         component_obj=lgbm_classifier,
                         random_seed=random_seed)
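The boosting-type fix-ups above can be read in isolation: LightGBM constrains its bagging parameters differently per boosting type, so the wrapper patches a copy of the parameters before constructing the estimator. A standalone sketch of just that adjustment logic:

    import copy

    def adjust_for_boosting_type(params):
        lg = copy.copy(params)
        if lg["boosting_type"] == "rf":       # rf requires bagging_freq == 1
            lg["bagging_freq"] = 1
        elif lg["boosting_type"] == "goss":   # goss requires bagging_fraction == 1
            lg["bagging_fraction"] = 1
        return lg

    print(adjust_for_boosting_type(
        {"boosting_type": "rf", "bagging_freq": 0, "bagging_fraction": 0.9}))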
Example #6
    def __init__(self,
                 n_estimators=10,
                 eta=0.03,
                 max_depth=6,
                 bootstrap_type=None,
                 silent=False,
                 allow_writing_files=False,
                 random_state=None,
                 random_seed=0,
                 **kwargs):
        random_seed = deprecate_arg("random_state", "random_seed",
                                    random_state, random_seed)
        parameters = {
            "n_estimators": n_estimators,
            "eta": eta,
            "max_depth": max_depth,
            'bootstrap_type': bootstrap_type,
            'silent': silent,
            'allow_writing_files': allow_writing_files
        }
        parameters.update(kwargs)

        cb_error_msg = "catboost is not installed. Please install using `pip install catboost.`"
        catboost = import_or_raise("catboost", error_msg=cb_error_msg)
        # catboost will choose an intelligent default for bootstrap_type, so only set if provided
        cb_parameters = copy.copy(parameters)
        if bootstrap_type is None:
            cb_parameters.pop('bootstrap_type')
        cb_regressor = catboost.CatBoostRegressor(**cb_parameters,
                                                  random_seed=random_seed)
        super().__init__(parameters=parameters,
                         component_obj=cb_regressor,
                         random_seed=random_seed)
Example #7
    def __init__(self, pipeline_hyperparameter_ranges, random_state=None, random_seed=0):
        """Init SkOptTuner

        Arguments:
            pipeline_hyperparameter_ranges (dict): A set of hyperparameter ranges corresponding to a pipeline's parameters
            random_state (int): The random state. Defaults to 0.
        """
        random_seed = deprecate_arg("random_state", "random_seed", random_state, random_seed)
        super().__init__(pipeline_hyperparameter_ranges, random_seed=random_seed)
        self.opt = Optimizer(self._search_space_ranges, "ET", acq_optimizer="sampling", random_state=random_seed)
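The tuner above delegates to scikit-optimize's Optimizer, whose ask/tell loop proposes a point and then receives the observed score. A minimal sketch of that loop, assuming the scikit-optimize API; the search space and score are illustrative:

    from skopt import Optimizer

    opt = Optimizer([(0.01, 1.0), (2, 10)], "ET",
                    acq_optimizer="sampling", random_state=0)
    proposed = opt.ask()       # next hyperparameter point to evaluate
    opt.tell(proposed, 0.42)   # report the score back to the optimizer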
Example #8
 def __init__(self,
              parameters=None,
              component_obj=None,
              random_state=None,
              random_seed=0,
              **kwargs):
     self.random_seed = deprecate_arg("random_state", "random_seed",
                                      random_state, random_seed)
     self._component_obj = component_obj
     self._parameters = parameters or {}
     self._is_fitted = False
Example #9
    def __init__(self, n_estimators=100, max_depth=6, n_jobs=-1, random_state=None, random_seed=0, **kwargs):
        parameters = {"n_estimators": n_estimators,
                      "max_depth": max_depth,
                      "n_jobs": n_jobs}
        parameters.update(kwargs)
        random_seed = deprecate_arg("random_state", "random_seed", random_state, random_seed)

        rf_regressor = SKRandomForestRegressor(random_state=random_seed,
                                               **parameters)
        super().__init__(parameters=parameters,
                         component_obj=rf_regressor,
                         random_seed=random_seed)
Example #10
def make_data_splitter(X,
                       y,
                       problem_type,
                       problem_configuration=None,
                       n_splits=3,
                       shuffle=True,
                       random_state=None,
                       random_seed=0):
    """Given the training data and ML problem parameters, compute a data splitting method to use during AutoML search.

    Arguments:
        X (ww.DataTable, pd.DataFrame): The input training data of shape [n_samples, n_features].
        y (ww.DataColumn, pd.Series): The target training data of length [n_samples].
        problem_type (ProblemType): The type of machine learning problem.
        problem_configuration (dict, None): Additional parameters needed to configure the search. For example,
            in time series problems, values should be passed in for the gap and max_delay variables. Defaults to None.
        n_splits (int, None): The number of CV splits, if applicable. Defaults to 3.
        shuffle (bool): Whether or not to shuffle the data before splitting, if applicable. Defaults to True.
        random_state (None, int): Deprecated - use random_seed instead.
        random_seed (int): Seed for the random number generator. Defaults to 0.

    Returns:
        sklearn.model_selection.BaseCrossValidator: Data splitting method.
    """
    random_seed = deprecate_arg("random_state", "random_seed", random_state,
                                random_seed)
    problem_type = handle_problem_types(problem_type)
    if is_time_series(problem_type):
        if not problem_configuration:
            raise ValueError(
                "problem_configuration is required for time series problem types"
            )
        return TimeSeriesSplit(
            n_splits=n_splits,
            gap=problem_configuration.get('gap'),
            max_delay=problem_configuration.get('max_delay'))
    if X.shape[0] > _LARGE_DATA_ROW_THRESHOLD:
        if problem_type == ProblemTypes.REGRESSION:
            return TrainingValidationSplit(
                test_size=_LARGE_DATA_PERCENT_VALIDATION, shuffle=shuffle)
        elif problem_type in [ProblemTypes.BINARY, ProblemTypes.MULTICLASS]:
            return BalancedClassificationDataTVSplit(
                test_size=_LARGE_DATA_PERCENT_VALIDATION,
                shuffle=shuffle,
                random_seed=random_seed)
    if problem_type == ProblemTypes.REGRESSION:
        return KFold(n_splits=n_splits,
                     random_state=random_seed,
                     shuffle=shuffle)
    elif problem_type in [ProblemTypes.BINARY, ProblemTypes.MULTICLASS]:
        return BalancedClassificationDataCVSplit(n_splits=n_splits,
                                                 random_seed=random_seed,
                                                 shuffle=shuffle)
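A hedged usage sketch for make_data_splitter; the import path is an assumption based on evalml's layout at the time. Per the branching above, a small binary problem gets the balanced classification CV splitter:

    import pandas as pd
    from evalml.automl.utils import make_data_splitter  # assumed import path

    X = pd.DataFrame({"a": range(100)})
    y = pd.Series([0, 1] * 50)
    splitter = make_data_splitter(X, y, "binary", n_splits=3, random_seed=0)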
Example #11
    def __init__(self, input_pipelines=None, final_estimator=None, cv=None, n_jobs=None,
                 random_state=None, random_seed=0, **kwargs):
        """Stacked ensemble base class.

        Arguments:
            input_pipelines (list(PipelineBase or subclass obj)): List of pipeline instances to use as the base estimators.
                This must not be None or an empty list or else EnsembleMissingPipelinesError will be raised.
            final_estimator (Estimator or subclass): The estimator used to combine the base estimators.
            cv (int, cross-validation generator or an iterable): Determines the cross-validation splitting strategy used to train final_estimator.
                For int/None inputs, if the estimator is a classifier and y is either binary or multiclass, StratifiedKFold is used. In all other cases, KFold is used.
                Possible inputs for cv are:

                - None: 5-fold cross validation
                - int: the number of folds in a (Stratified) KFold
                - A scikit-learn cross-validation generator object
                - An iterable yielding (train, test) splits
            n_jobs (int or None): Non-negative integer describing level of parallelism used for pipelines.
                None and 1 are equivalent. If set to -1, all CPUs are used. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
                Defaults to None.
                - Note: there could be some multi-process errors thrown for values of `n_jobs != 1`. If this is the case, please use `n_jobs = 1`.
            random_state (None, int): Deprecated - use random_seed instead.
            random_seed (int): Seed for the random number generator. Defaults to 0.
        """
        if not input_pipelines:
            raise EnsembleMissingPipelinesError("`input_pipelines` must not be None or an empty list.")
        if [pipeline for pipeline in input_pipelines if pipeline.model_family in _nonstackable_model_families]:
            raise ValueError("Pipelines with any of the following model families cannot be used as base pipelines: {}".format(_nonstackable_model_families))

        parameters = {
            "input_pipelines": input_pipelines,
            "final_estimator": final_estimator,
            "cv": cv,
            "n_jobs": n_jobs
        }
        parameters.update(kwargs)

        if len(set([pipeline.problem_type for pipeline in input_pipelines])) > 1:
            raise ValueError("All pipelines must have the same problem type.")

        random_seed = deprecate_arg("random_state", "random_seed", random_state, random_seed)
        cv = cv or self._default_cv(n_splits=3, random_state=random_seed, shuffle=True)
        estimators = [scikit_learn_wrapped_estimator(pipeline) for pipeline in input_pipelines]
        final_estimator = scikit_learn_wrapped_estimator(final_estimator or self._default_final_estimator())
        sklearn_parameters = {
            "estimators": [(f"({idx})", estimator) for idx, estimator in enumerate(estimators)],
            "final_estimator": final_estimator,
            "cv": cv,
            "n_jobs": n_jobs
        }
        sklearn_parameters.update(kwargs)
        super().__init__(parameters=parameters,
                         component_obj=self._stacking_estimator_class(**sklearn_parameters),
                         random_seed=random_seed)
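The sklearn_parameters dict above ultimately feeds scikit-learn's stacking estimator. A minimal standalone sketch with plain scikit-learn estimators, showing the same shape of arguments:

    from sklearn.ensemble import RandomForestClassifier, StackingClassifier
    from sklearn.linear_model import LogisticRegression

    stack = StackingClassifier(
        estimators=[("(0)", RandomForestClassifier(random_state=0))],
        final_estimator=LogisticRegression(),
        cv=3,
        n_jobs=None)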
Example #12
def make_pipeline_from_components(component_instances,
                                  problem_type,
                                  custom_name=None,
                                  random_state=None,
                                  random_seed=0):
    """Given a list of component instances and the problem type, an pipeline instance is generated with the component instances.
    The pipeline will be a subclass of the appropriate pipeline base class for the specified problem_type. The pipeline will be
    untrained, even if the input components are already trained. A custom name for the pipeline can optionally be specified;
    otherwise the default pipeline name will be 'Templated Pipeline'.

   Arguments:
        component_instances (list): a list of all of the components to include in the pipeline
        problem_type (str or ProblemTypes): problem type for the pipeline to generate
        custom_name (string): a name for the new pipeline
        random_state(int): Deprecated. Use random_seed instead.
        random_seed (int): Random seed used to intialize the pipeline.

    Returns:
        Pipeline instance with component instances and specified estimator created from given random state.

    Example:
        >>> components = [Imputer(), StandardScaler(), RandomForestClassifier()]
        >>> pipeline = make_pipeline_from_components(components, problem_type="binary")
        >>> pipeline.describe()

    """
    random_seed = deprecate_arg("random_state", "random_seed", random_state,
                                random_seed)
    for i, component in enumerate(component_instances):
        if not isinstance(component, ComponentBase):
            raise TypeError(
                "Every element of `component_instances` must be an instance of ComponentBase"
            )
        if i == len(component_instances) - 1 and not isinstance(
                component, Estimator):
            raise ValueError(
                "Pipeline needs to have an estimator at the last position of the component list"
            )

    if custom_name and not isinstance(custom_name, str):
        raise TypeError("Custom pipeline name must be a string")
    pipeline_name = custom_name
    problem_type = handle_problem_types(problem_type)

    class TemplatedPipeline(_get_pipeline_base_class(problem_type)):
        custom_name = pipeline_name
        component_graph = [c.__class__ for c in component_instances]

    return TemplatedPipeline(
        {c.name: c.parameters
         for c in component_instances},
        random_seed=random_seed)
Example #13
    def __init__(self, random_state=None, random_seed=0, **kwargs):
        """Creates a transformer to perform TF-IDF transformation and Singular Value Decomposition for text columns.

        Arguments:
            random_state (None, int): Deprecated - use random_seed instead.
            random_seed (int): Seed for the random number generator. Defaults to 0.
        """
        random_seed = deprecate_arg("random_state", "random_seed",
                                    random_state, random_seed)
        self._lsa_pipeline = make_pipeline(
            TfidfVectorizer(), TruncatedSVD(random_state=random_seed))
        self._provenance = {}
        super().__init__(random_seed=random_seed, **kwargs)
Example #14
 def __init__(self, alpha=0.5, l1_ratio=0.5, max_iter=1000, normalize=False,
              random_state=None, random_seed=0, **kwargs):
     parameters = {'alpha': alpha,
                   'l1_ratio': l1_ratio,
                   'max_iter': max_iter,
                   'normalize': normalize}
     parameters.update(kwargs)
     random_seed = deprecate_arg("random_state", "random_seed", random_state, random_seed)
     en_regressor = SKElasticNet(random_state=random_seed,
                                 **parameters)
     super().__init__(parameters=parameters,
                      component_obj=en_regressor,
                      random_seed=random_seed)
Example #15
    def __init__(self,
                 pipeline_hyperparameter_ranges,
                 n_points=10,
                 random_state=None,
                 random_seed=0):
        """ Generate all of the possible points to search for in the grid

        Arguments:
            pipeline_hyperparameter_ranges (dict): a set of hyperparameter ranges corresponding to a pipeline's parameters
            n_points (int): The number of points to sample from along each dimension
                defined in the ``space`` argument.
            random_state (int): Deprecated. Use random_seed instead.
            random_seed (int): Seed for random number generator. Unused in this class, defaults to 0.
        """
        random_seed = deprecate_arg("random_state", "random_seed",
                                    random_state, random_seed)
        super().__init__(pipeline_hyperparameter_ranges,
                         random_seed=random_seed)
        raw_dimensions = list()
        for dimension in self._search_space_ranges:
            # Categorical dimension
            if isinstance(dimension, list):
                range_values = dimension
            elif isinstance(dimension, (Real, Integer, tuple)):
                if isinstance(dimension, (tuple)) and isinstance(
                        dimension[0],
                    (int, float)) and isinstance(dimension[1], (int, float)):
                    if dimension[1] > dimension[0]:
                        low = dimension[0]
                        high = dimension[1]
                    else:
                        error_text = "Upper bound must be greater than lower bound. Parameter lower bound is {0} and upper bound is {1}"
                        error_text = error_text.format(dimension[0],
                                                       dimension[1])
                        raise ValueError(error_text)
                else:
                    low = dimension.low
                    high = dimension.high
                delta = (high - low) / (n_points - 1)
                if isinstance(dimension, Integer):
                    range_values = [
                        int((x * delta) + low) for x in range(n_points)
                    ]
                else:
                    range_values = [(x * delta) + low for x in range(n_points)]
            raw_dimensions.append(range_values)
        self._grid_points = itertools.product(*raw_dimensions)
        self.curr_params = None
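Worked example of the grid construction above: a numeric (low, high) bound is expanded into n_points evenly spaced values via delta = (high - low) / (n_points - 1):

    low, high, n_points = 0.0, 1.0, 5
    delta = (high - low) / (n_points - 1)
    print([(x * delta) + low for x in range(n_points)])
    # [0.0, 0.25, 0.5, 0.75, 1.0]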
Example #16
    def __init__(self, random_state=None, random_seed=0, **kwargs):
        """Extracts features from text columns using featuretools' nlp_primitives

        Arguments:
            random_state (None, int): Deprecated - use random_seed instead.
            random_seed (int): Seed for the random number generator. Defaults to 0.
        """
        random_seed = deprecate_arg("random_state", "random_seed",
                                    random_state, random_seed)
        self._trans = [
            nlp_primitives.DiversityScore,
            nlp_primitives.MeanCharactersPerWord, nlp_primitives.PolarityScore
        ]
        self._features = None
        self._lsa = LSA(random_seed=random_seed)
        self._primitives_provenance = {}
        super().__init__(random_seed=random_seed, **kwargs)
Example #17
    def __init__(self,
                 C=1.0,
                 kernel="rbf",
                 gamma="scale",
                 random_state=None,
                 random_seed=0,
                 **kwargs):
        parameters = {"C": C, "kernel": kernel, "gamma": gamma}
        parameters.update(kwargs)

        # SVR doesn't take a random_state arg
        svm_regressor = SVR(**parameters)
        random_seed = deprecate_arg("random_state", "random_seed",
                                    random_state, random_seed)
        super().__init__(parameters=parameters,
                         component_obj=svm_regressor,
                         random_seed=random_seed)
Example #18
 def __init__(self,
              fit_intercept=True,
              normalize=False,
              n_jobs=-1,
              random_state=None,
              random_seed=0,
              **kwargs):
     parameters = {
         'fit_intercept': fit_intercept,
         'normalize': normalize,
         'n_jobs': n_jobs
     }
     parameters.update(kwargs)
     linear_regressor = SKLinearRegression(**parameters)
     random_seed = deprecate_arg("random_state", "random_seed",
                                 random_state, random_seed)
     super().__init__(parameters=parameters,
                      component_obj=linear_regressor,
                      random_seed=random_seed)
Example #19
 def __init__(self,
              C=1.0,
              kernel="rbf",
              gamma="scale",
              probability=True,
              random_state=None,
              random_seed=0,
              **kwargs):
     parameters = {"C": C,
                   "kernel": kernel,
                   "gamma": gamma,
                   "probability": probability}
     parameters.update(kwargs)
     random_seed = deprecate_arg("random_state", "random_seed", random_state, random_seed)
     svm_classifier = SVC(random_state=random_seed,
                          **parameters)
     super().__init__(parameters=parameters,
                      component_obj=svm_classifier,
                      random_seed=random_seed)
Example #20
    def __init__(self, pipeline_hyperparameter_ranges, random_state=None, random_seed=0, with_replacement=False, replacement_max_attempts=10):
        """ Sets up check for duplication if needed.

        Arguments:
            pipeline_hyperparameter_ranges (dict): a set of hyperparameter ranges corresponding to a pipeline's parameters
            random_state (int): Unused in this class. Defaults to 0.
            with_replacement (bool): If false, only unique hyperparameters will be shown
            replacement_max_attempts (int): The maximum number of tries to get a unique
                set of random parameters. Only used if tuner is initalized with
                with_replacement=True
        """
        random_seed = deprecate_arg("random_state", "random_seed", random_state, random_seed)
        super().__init__(pipeline_hyperparameter_ranges, random_seed=random_seed)
        self._space = Space(self._search_space_ranges)
        self._random_state = get_random_state(random_seed)
        self._with_replacement = with_replacement
        self._replacement_max_attempts = replacement_max_attempts
        self._used_parameters = set()
        self._used_parameters.add(())
        self.curr_params = None
Example #21
 def __init__(self,
              n_neighbors=5,
              weights="uniform",
              algorithm="auto",
              leaf_size=30,
              p=2,
              random_state=None,
              random_seed=0,
              **kwargs):
     parameters = {
         "n_neighbors": n_neighbors,
         "weights": weights,
         "algorithm": algorithm,
         "leaf_size": leaf_size,
         "p": p
     }
     parameters.update(kwargs)
     knn_classifier = SKKNeighborsClassifier(**parameters)
     random_seed = deprecate_arg("random_state", "random_seed",
                                 random_state, random_seed)
     super().__init__(parameters=parameters,
                      component_obj=knn_classifier,
                      random_seed=random_seed)
Example #22
    def __init__(self, gap=1, random_state=None, random_seed=0, **kwargs):
        """Baseline time series estimator that predicts using the naive forecasting approach.

        Arguments:
            gap (int): Gap between the prediction date and the target date; must be a non-negative integer. If gap is 0, the target date will be shifted ahead by 1 time period.
            random_state (None, int): Deprecated - use random_seed instead.
            random_seed (int): Seed for the random number generator. Defaults to 0.

        """

        self._prediction_value = None
        self._num_features = None
        self.gap = gap

        if gap < 0:
            raise ValueError(f'gap value must be a non-negative integer. {gap} was provided.')

        parameters = {"gap": gap}
        parameters.update(kwargs)
        random_seed = deprecate_arg("random_state", "random_seed", random_state, random_seed)
        super().__init__(parameters=parameters,
                         component_obj=None,
                         random_seed=random_seed)
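A rough illustration of the gap semantics described in the docstring, using a pandas shift; treating gap=0 as a shift of one period matches the documented behavior, but the shift-based formulation itself is an assumption for illustration only:

    import pandas as pd

    y = pd.Series([10, 20, 30, 40])
    gap = 2
    naive_forecast = y.shift(max(1, gap))  # gap=0 still shifts by one period
    print(naive_forecast.tolist())         # [nan, nan, 10.0, 20.0]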
Example #23
def split_data(X, y, problem_type, problem_configuration=None, test_size=.2, random_state=None, random_seed=0):
    """Splits data into train and test sets.

    Arguments:
        X (ww.DataTable, pd.DataFrame or np.ndarray): data of shape [n_samples, n_features]
        y (ww.DataColumn, pd.Series, or np.ndarray): target data of length [n_samples]
        problem_type (str or ProblemTypes): type of supervised learning problem. see evalml.problem_types.problemtype.all_problem_types for a full list.
        problem_configuration (dict): Additional parameters needed to configure the search. For example,
            in time series problems, values should be passed in for the gap and max_delay variables.
        test_size (float): What percentage of data points should be included in the test set. Defaults to 0.2 (20%).
        random_state (None, int): Deprecated - use random_seed instead.
        random_seed (int): Seed for the random number generator. Defaults to 0.

    Returns:
        ww.DataTable, ww.DataTable, ww.DataColumn, ww.DataColumn: Feature and target data each split into train and test sets
    """

    random_seed = deprecate_arg("random_state", "random_seed", random_state, random_seed)
    X = infer_feature_types(X)
    y = infer_feature_types(y)

    data_splitter = None
    if is_time_series(problem_type):
        data_splitter = TrainingValidationSplit(test_size=test_size, shuffle=False, stratify=None, random_state=random_seed)
    elif is_regression(problem_type):
        data_splitter = ShuffleSplit(n_splits=1, test_size=test_size, random_state=random_seed)
    elif is_classification(problem_type):
        data_splitter = StratifiedShuffleSplit(n_splits=1, test_size=test_size, random_state=random_seed)

    train, test = next(data_splitter.split(X.to_dataframe(), y.to_series()))

    X_train = X.iloc[train]
    X_test = X.iloc[test]
    y_train = y.iloc[train]
    y_test = y.iloc[test]

    return X_train, X_test, y_train, y_test
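A hedged usage sketch for split_data, assuming it is exposed under evalml.preprocessing:

    import pandas as pd
    from evalml.preprocessing import split_data  # assumed import path

    X = pd.DataFrame({"a": range(10)})
    y = pd.Series([0, 1] * 5)
    X_train, X_test, y_train, y_test = split_data(X, y, "binary", test_size=0.2)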
Example #24
    def __init__(self,
                 number_features=None,
                 n_estimators=10,
                 max_depth=None,
                 percent_features=0.5,
                 threshold=-np.inf,
                 n_jobs=-1,
                 random_state=None,
                 random_seed=0,
                 **kwargs):
        random_seed = deprecate_arg("random_state", "random_seed",
                                    random_state, random_seed)

        parameters = {
            "number_features": number_features,
            "n_estimators": n_estimators,
            "max_depth": max_depth,
            "percent_features": percent_features,
            "threshold": threshold,
            "n_jobs": n_jobs
        }
        parameters.update(kwargs)

        estimator = SKRandomForestRegressor(random_state=random_seed,
                                            n_estimators=n_estimators,
                                            max_depth=max_depth,
                                            n_jobs=n_jobs)
        max_features = max(1, int(
            percent_features * number_features)) if number_features else None
        feature_selection = SkSelect(estimator=estimator,
                                     max_features=max_features,
                                     threshold=threshold,
                                     **kwargs)
        super().__init__(parameters=parameters,
                         component_obj=feature_selection,
                         random_seed=random_seed)
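Worked example of the max_features computation above: with percent_features=0.5 and number_features=40, the selector is capped at max(1, int(0.5 * 40)) = 20 features, and the floor guarantees at least one feature survives; with number_features=None the cap is disabled:

    print(max(1, int(0.5 * 40)))  # 20
    print(max(1, int(0.5 * 1)))   # 1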
Example #25
 def __init__(self,
              criterion="mse",
              max_features="auto",
              max_depth=6,
              min_samples_split=2,
              min_weight_fraction_leaf=0.0,
              random_state=None,
              random_seed=0,
              **kwargs):
     parameters = {
         "criterion": criterion,
         "max_features": max_features,
         "max_depth": max_depth,
         "min_samples_split": min_samples_split,
         "min_weight_fraction_leaf": min_weight_fraction_leaf
     }
     parameters.update(kwargs)
     random_seed = deprecate_arg("random_state", "random_seed",
                                 random_state, random_seed)
     dt_regressor = SKDecisionTreeRegressor(random_state=random_seed,
                                            **parameters)
     super().__init__(parameters=parameters,
                      component_obj=dt_regressor,
                      random_seed=random_seed)
Example #26
 def __init__(self,
              penalty="l2",
              C=1.0,
              n_jobs=-1,
              multi_class="auto",
              solver="lbfgs",
              random_state=None,
              random_seed=0,
              **kwargs):
     parameters = {
         "penalty": penalty,
         "C": C,
         "n_jobs": n_jobs,
         "multi_class": multi_class,
         "solver": solver
     }
     parameters.update(kwargs)
     random_seed = deprecate_arg("random_state", "random_seed",
                                 random_state, random_seed)
     lr_classifier = LogisticRegression(random_state=random_seed,
                                        **parameters)
     super().__init__(parameters=parameters,
                      component_obj=lr_classifier,
                      random_seed=random_seed)
Example #27
    def __init__(self,
                 X_train=None,
                 y_train=None,
                 problem_type=None,
                 objective='auto',
                 max_iterations=None,
                 max_time=None,
                 patience=None,
                 tolerance=None,
                 data_splitter=None,
                 allowed_pipelines=None,
                 allowed_model_families=None,
                 start_iteration_callback=None,
                 add_result_callback=None,
                 error_callback=None,
                 additional_objectives=None,
                 random_state=None,
                 random_seed=0,
                 n_jobs=-1,
                 tuner_class=None,
                 optimize_thresholds=False,
                 ensembling=False,
                 max_batches=None,
                 problem_configuration=None,
                 train_best_pipeline=True,
                 pipeline_parameters=None,
                 _pipelines_per_batch=5):
        """Automated pipeline search

        Arguments:
            X_train (pd.DataFrame, ww.DataTable): The input training data of shape [n_samples, n_features]. Required.

            y_train (pd.Series, ww.DataColumn): The target training data of length [n_samples]. Required for supervised learning tasks.

            problem_type (str or ProblemTypes): type of supervised learning problem. See evalml.problem_types.ProblemType.all_problem_types for a full list.

            objective (str, ObjectiveBase): The objective to optimize for. Used to propose and rank pipelines, but not for optimizing each pipeline during fit-time.
                When set to 'auto', chooses:

                - LogLossBinary for binary classification problems,
                - LogLossMulticlass for multiclass classification problems, and
                - R2 for regression problems.

            max_iterations (int): Maximum number of iterations to search. If neither max_iterations, max_time, nor
                max_batches is set, the search defaults to max_batches=1.

            max_time (int, str): Maximum time to search for pipelines.
                This will not start a new pipeline search after the duration
                has elapsed. If it is an integer, then the time will be in seconds.
                For strings, time can be specified as seconds, minutes, or hours.

            patience (int): Number of iterations without improvement to stop search early. Must be positive.
                If None, early stopping is disabled. Defaults to None.

            tolerance (float): Minimum percentage difference to qualify as score improvement for early stopping.
                Only applicable if patience is not None. Defaults to None.

            allowed_pipelines (list(class)): A list of PipelineBase subclasses indicating the pipelines allowed in the search.
                The default of None indicates all pipelines for this problem type are allowed. Setting this field will cause
                allowed_model_families to be ignored.

            allowed_model_families (list(str, ModelFamily)): The model families to search. The default of None searches over all
                model families. Run evalml.pipelines.components.utils.allowed_model_families("binary") to see options. Change `binary`
                to `multiclass` or `regression` depending on the problem type. Note that if allowed_pipelines is provided,
                this parameter will be ignored.

            data_splitter (sklearn.model_selection.BaseCrossValidator): Data splitting method to use. Defaults to StratifiedKFold.

            tuner_class: The tuner class to use. Defaults to SKOptTuner.

            start_iteration_callback (callable): Function called before each pipeline training iteration.
                Callback function takes three positional parameters: The pipeline class, the pipeline parameters, and the AutoMLSearch object.

            add_result_callback (callable): Function called after each pipeline training iteration.
                Callback function takes three positional parameters: A dictionary containing the training results for the new pipeline, an untrained_pipeline containing the parameters used during training, and the AutoMLSearch object.

            error_callback (callable): Function called when `search()` errors and raises an Exception.
                Callback function takes three positional parameters: the Exception raised, the traceback, and the AutoMLSearch object.
                Must also accept kwargs, so AutoMLSearch is able to pass along other appropriate parameters by default.
                Defaults to None, which will call `log_error_callback`.

            additional_objectives (list): Custom set of objectives to score on.
                Will override default objectives for problem type if not empty.

            random_state (int): Deprecated - use random_seed instead.

            random_seed (int): Seed for the random number generator. Defaults to 0.

            n_jobs (int or None): Non-negative integer describing level of parallelism used for pipelines.
                None and 1 are equivalent. If set to -1, all CPUs are used. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.

            ensembling (boolean): If True, runs ensembling in a separate batch after every allowed pipeline class has been iterated over.
                If the number of unique pipelines to search over per batch is one, ensembling will not run. Defaults to False.

            max_batches (int): The maximum number of batches of pipelines to search. Parameters max_time, and
                max_iterations have precedence over stopping the search.

            problem_configuration (dict, None): Additional parameters needed to configure the search. For example,
                in time series problems, values should be passed in for the gap and max_delay variables.

            train_best_pipeline (boolean): Whether or not to train the best pipeline before returning it. Defaults to True.

            _pipelines_per_batch (int): The number of pipelines to train for every batch after the first one.
                The first batch will train a baseline pipeline + one of each pipeline family allowed in the search.
        """
        if X_train is None:
            raise ValueError(
                'Must specify training data as a 2d array using the X_train argument'
            )
        if y_train is None:
            raise ValueError(
                'Must specify training data target values as a 1d vector using the y_train argument'
            )
        try:
            self.problem_type = handle_problem_types(problem_type)
        except ValueError:
            raise ValueError(
                'choose one of (binary, multiclass, regression) as problem_type'
            )

        self.tuner_class = tuner_class or SKOptTuner
        self.start_iteration_callback = start_iteration_callback
        self.add_result_callback = add_result_callback
        self.error_callback = error_callback or log_error_callback
        self.data_splitter = data_splitter
        self.optimize_thresholds = optimize_thresholds
        self.ensembling = ensembling
        if objective == 'auto':
            objective = get_default_primary_search_objective(
                self.problem_type.value)
        objective = get_objective(objective, return_instance=False)
        self.objective = self._validate_objective(objective)
        if self.data_splitter is not None and not issubclass(
                self.data_splitter.__class__, BaseCrossValidator):
            raise ValueError("Not a valid data splitter")
        if not objective.is_defined_for_problem_type(self.problem_type):
            raise ValueError(
                "Given objective {} is not compatible with a {} problem.".
                format(self.objective.name, self.problem_type.value))
        if additional_objectives is None:
            additional_objectives = get_core_objectives(self.problem_type)
            # if our main objective is part of default set of objectives for problem_type, remove it
            existing_main_objective = next(
                (obj for obj in additional_objectives
                 if obj.name == self.objective.name), None)
            if existing_main_objective is not None:
                additional_objectives.remove(existing_main_objective)
        else:
            additional_objectives = [
                get_objective(o) for o in additional_objectives
            ]
        additional_objectives = [
            self._validate_objective(obj) for obj in additional_objectives
        ]
        self.additional_objectives = additional_objectives
        self.objective_name_to_class = {
            o.name: o
            for o in [self.objective] + self.additional_objectives
        }

        if not isinstance(max_time, (int, float, str, type(None))):
            raise TypeError(
                f"Parameter max_time must be a float, int, string or None. Received {type(max_time)} with value {str(max_time)}.."
            )
        if isinstance(max_time, (int, float)) and max_time < 0:
            raise ValueError(
                f"Parameter max_time must be None or non-negative. Received {max_time}."
            )
        if max_batches is not None and max_batches < 0:
            raise ValueError(
                f"Parameter max_batches must be None or non-negative. Received {max_batches}."
            )
        if max_iterations is not None and max_iterations < 0:
            raise ValueError(
                f"Parameter max_iterations must be None or non-negative. Received {max_iterations}."
            )
        self.max_time = convert_to_seconds(max_time) if isinstance(
            max_time, str) else max_time
        self.max_iterations = max_iterations
        self.max_batches = max_batches
        self._pipelines_per_batch = _pipelines_per_batch
        if not self.max_iterations and not self.max_time and not self.max_batches:
            self.max_batches = 1
            logger.info("Using default limit of max_batches=1.\n")

        if patience and (not isinstance(patience, int) or patience < 0):
            raise ValueError(
                "patience value must be a positive integer. Received {} instead"
                .format(patience))

        if tolerance and (tolerance > 1.0 or tolerance < 0.0):
            raise ValueError(
                "tolerance value must be a float between 0.0 and 1.0 inclusive. Received {} instead"
                .format(tolerance))

        self.patience = patience
        self.tolerance = tolerance or 0.0

        self._results = {
            'pipeline_results': {},
            'search_order': [],
            'errors': []
        }
        self.random_seed = deprecate_arg("random_state", "random_seed",
                                         random_state, random_seed)
        self.n_jobs = n_jobs

        self.plot = None
        try:
            self.plot = PipelineSearchPlots(self)
        except ImportError:
            logger.warning(
                "Unable to import plotly; skipping pipeline search plotting\n")

        self._data_check_results = None

        self.allowed_pipelines = allowed_pipelines
        self.allowed_model_families = allowed_model_families
        self._automl_algorithm = None
        self._start = 0.0
        self._baseline_cv_scores = {}
        self.show_batch_output = False

        self._validate_problem_type()
        self.problem_configuration = self._validate_problem_configuration(
            problem_configuration)
        self._train_best_pipeline = train_best_pipeline
        self._best_pipeline = None
        self._searched = False

        self.X_train = infer_feature_types(X_train)
        self.y_train = infer_feature_types(y_train)

        default_data_splitter = make_data_splitter(
            self.X_train,
            self.y_train,
            self.problem_type,
            self.problem_configuration,
            n_splits=3,
            shuffle=True,
            random_seed=self.random_seed)
        self.data_splitter = self.data_splitter or default_data_splitter
        self.pipeline_parameters = pipeline_parameters if pipeline_parameters is not None else {}
        self.search_iteration_plot = None
        self._interrupted = False

        self._engine = SequentialEngine(
            self.X_train,
            self.y_train,
            self,
            should_continue_callback=self._should_continue,
            pre_evaluation_callback=self._pre_evaluation_callback,
            post_evaluation_callback=self._post_evaluation_callback)

        if self.allowed_pipelines is None:
            logger.info("Generating pipelines to search over...")
            allowed_estimators = get_estimators(self.problem_type,
                                                self.allowed_model_families)
            logger.debug(
                f"allowed_estimators set to {[estimator.name for estimator in allowed_estimators]}"
            )
            self.allowed_pipelines = [
                make_pipeline(self.X_train,
                              self.y_train,
                              estimator,
                              self.problem_type,
                              custom_hyperparameters=self.pipeline_parameters)
                for estimator in allowed_estimators
            ]

        if self.allowed_pipelines == []:
            raise ValueError("No allowed pipelines to search")

        run_ensembling = self.ensembling
        if run_ensembling and len(self.allowed_pipelines) == 1:
            logger.warning(
                "Ensembling is set to True, but the number of unique pipelines is one, so ensembling will not run."
            )
            run_ensembling = False

        if run_ensembling and self.max_iterations is not None:
            # Baseline + first batch + each pipeline iteration + 1
            first_ensembling_iteration = (
                1 + len(self.allowed_pipelines) +
                len(self.allowed_pipelines) * self._pipelines_per_batch + 1)
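            # Worked example with illustrative numbers: with 8 allowed pipelines
            # and _pipelines_per_batch=5, the first ensembling iteration is
            # 1 + 8 + 8 * 5 + 1 = 50 (baseline + first batch + per-pipeline
            # iterations + the ensembling iteration itself).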
            if self.max_iterations < first_ensembling_iteration:
                run_ensembling = False
                logger.warning(
                    f"Ensembling is set to True, but max_iterations is too small, so ensembling will not run. Set max_iterations >= {first_ensembling_iteration} to run ensembling."
                )
            else:
                logger.info(
                    f"Ensembling will run at the {first_ensembling_iteration} iteration and every {len(self.allowed_pipelines) * self._pipelines_per_batch} iterations after that."
                )

        if self.max_batches and self.max_iterations is None:
            self.show_batch_output = True
            if run_ensembling:
                ensemble_nth_batch = len(self.allowed_pipelines) + 1
                num_ensemble_batches = (self.max_batches -
                                        1) // ensemble_nth_batch
                if num_ensemble_batches == 0:
                    logger.warning(
                        f"Ensembling is set to True, but max_batches is too small, so ensembling will not run. Set max_batches >= {ensemble_nth_batch + 1} to run ensembling."
                    )
                else:
                    logger.info(
                        f"Ensembling will run every {ensemble_nth_batch} batches."
                    )

                self.max_iterations = (
                    1 + len(self.allowed_pipelines) +
                    self._pipelines_per_batch *
                    (self.max_batches - 1 - num_ensemble_batches) +
                    num_ensemble_batches)
            else:
                self.max_iterations = 1 + len(
                    self.allowed_pipelines) + (self._pipelines_per_batch *
                                               (self.max_batches - 1))
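        # Worked example with illustrative numbers: with 8 allowed pipelines,
        # max_batches=4, _pipelines_per_batch=5 and no ensembling, this yields
        # max_iterations = 1 + 8 + 5 * (4 - 1) = 24.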
        self.allowed_model_families = list(
            set([p.model_family for p in (self.allowed_pipelines)]))

        logger.debug(
            f"allowed_pipelines set to {[pipeline.name for pipeline in self.allowed_pipelines]}"
        )
        logger.debug(
            f"allowed_model_families set to {self.allowed_model_families}")
        if len(self.problem_configuration):
            pipeline_params = {
                **{
                    'pipeline': self.problem_configuration
                },
                **self.pipeline_parameters
            }
        else:
            pipeline_params = self.pipeline_parameters

        self._automl_algorithm = IterativeAlgorithm(
            max_iterations=self.max_iterations,
            allowed_pipelines=self.allowed_pipelines,
            tuner_class=self.tuner_class,
            random_seed=self.random_seed,
            n_jobs=self.n_jobs,
            number_features=self.X_train.shape[1],
            pipelines_per_batch=self._pipelines_per_batch,
            ensembling=run_ensembling,
            pipeline_params=pipeline_params)
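A hedged usage sketch of the constructor above, assuming evalml's public entry point; the data is illustrative:

    import pandas as pd
    from evalml.automl import AutoMLSearch

    X = pd.DataFrame({"a": range(20), "b": range(19, -1, -1)})
    y = pd.Series([0, 1] * 10)
    automl = AutoMLSearch(X_train=X, y_train=y, problem_type="binary",
                          max_batches=1, random_seed=0)
    # automl.search() would then run the batched pipeline search configured above.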