Code example #1 (votes: 0)
    def __init__(
        self,
        idx_original: int,
        model: base.Estimator,
        metric: Metric,
        created_on: int,
        drift_detector: base.DriftDetector,
        warning_detector: base.DriftDetector,
        is_background_learner,
        rng: np.random.Generator,
        features=None,
    ):
        """Initialize one base learner of the ensemble.

        Parameters
        ----------
        idx_original
            Index of this member in the original ensemble.
        model
            Estimator to clone as this member's model.
        metric
            Metric prototype; deep-copied and reset for this member.
        created_on
            Instance count at which this learner was created.
        drift_detector
            Drift detector prototype, or None to disable drift detection.
        warning_detector
            Warning detector prototype, or None to disable background learning.
        is_background_learner
            Whether this learner is a background (candidate) learner.
        rng
            Already-initialized random number generator.
        features
            Feature subspace assigned to this learner, if any.
        """
        self.idx_original = idx_original
        self.created_on = created_on
        # Work on a fresh clone so the prototype model is never mutated.
        self.model = model.clone()
        self.metric = copy.deepcopy(metric)
        # Make sure that the metric is not initialized, e.g. when creating background learners.
        if isinstance(self.metric, MultiClassMetric):
            # Multi-class metrics share state through their confusion matrix;
            # resetting it is enough to clear the metric.
            self.metric.cm.reset()
        else:
            # Re-running __init__ restores the metric's default (empty) state.
            self.metric.__init__()

        # Store current model subspace representation of the original instances
        self.features = features

        # Drift and warning detection
        if drift_detector is not None:
            self.disable_drift_detector = False
            self.drift_detector = drift_detector.clone(
            )  # Actual detector used
        else:
            self.disable_drift_detector = True
            self.drift_detector = None

        if warning_detector is not None:
            self.disable_background_learner = False
            self.warning_detector = warning_detector.clone(
            )  # Actual detector used
        else:
            self.disable_background_learner = True
            self.warning_detector = None

        # Background learner
        self.is_background_learner = is_background_learner

        # Statistics
        self.n_drifts_detected = 0
        self.n_warnings_detected = 0

        # Random number generator (initialized)
        self.rng = rng

        # Background learner (candidate model trained in parallel after a warning)
        self._background_learner = (
            None)  # type: typing.Optional[typing.Union[BaseSRPClassifier, BaseSRPRegressor]]
Code example #2 (votes: 0)
File: __init__.py — Project: online-ml/river
def check_estimator(model: Estimator):
    """Check if a model adheres to `river`'s conventions.

    This will run a series of unit tests. The nature of the unit tests depends on the type of
    model.

    Parameters
    ----------
    model

    """
    # Each check runs against its own deep copy so checks cannot interfere
    # with each other; checks listed in the model's skip set are not run.
    for check in yield_checks(model):
        if check.__name__ not in model._unit_test_skips():
            check(copy.deepcopy(model))
Code example #3 (votes: 0)
def expand_param_grid(model: base.Estimator,
                      grid: dict) -> typing.List[base.Estimator]:
    """Expands a grid of parameters.

    Generates a list of model parametrizations from a dictionary that maps each parameter to a
    list of candidate values — in other words, it expands a grid of parameters.

    Typically, this is used to create copies of a given model with different parameter choices.
    The resulting models can then feed a model selection process, such as a
    `selection.SuccessiveHalvingClassifier` or a `selection.EWARegressor`.

    The grid syntax is quite flexible: parameters may be nested, which makes it possible to
    generate parameters for a pipeline.

    Parameters
    ----------
    model
    grid
        The grid of parameters to expand. The provided dictionary can be nested. The only
        requirement is that the values at the leaves need to be lists.

    Examples
    --------

    As an initial example, we can expand a grid of parameters for a single model.

    >>> from river import linear_model
    >>> from river import optim
    >>> from river import utils

    >>> model = linear_model.LinearRegression()

    >>> grid = {'optimizer': [optim.SGD(.1), optim.SGD(.01), optim.SGD(.001)]}
    >>> models = utils.expand_param_grid(model, grid)
    >>> len(models)
    3

    >>> models[0]
    LinearRegression (
      optimizer=SGD (
        lr=Constant (
          learning_rate=0.1
        )
      )
      loss=Squared ()
      l2=0.
      intercept_init=0.
      intercept_lr=Constant (
        learning_rate=0.01
      )
      clip_gradient=1e+12
      initializer=Zeros ()
    )

    You can expand parameters for multiple choices like so:

    >>> grid = {
    ...     'optimizer': [
    ...         (optim.SGD, {'lr': [.1, .01, .001]}),
    ...         (optim.Adam, {'lr': [.1, .01, .01]})
    ...     ]
    ... }
    >>> models = utils.expand_param_grid(model, grid)
    >>> len(models)
    6

    You may specify a grid of parameters for a pipeline via nesting:

    >>> from river import feature_extraction

    >>> model = (
    ...     feature_extraction.BagOfWords() |
    ...     linear_model.LinearRegression()
    ... )

    >>> grid = {
    ...     'BagOfWords': {
    ...         'strip_accents': [False, True]
    ...     },
    ...     'LinearRegression': {
    ...         'optimizer': [
    ...             (optim.SGD, {'lr': [.1, .01]}),
    ...             (optim.Adam, {'lr': [.1, .01]})
    ...         ]
    ...     }
    ... }

    >>> models = utils.expand_param_grid(model, grid)
    >>> len(models)
    8

    """

    # `_set_params` returns a new estimator per parametrization, so mapping it
    # over every expanded parameter set yields one model per grid point.
    return list(map(model._set_params, _expand_param_grid(grid)))
Code example #4 (votes: 0)
File: estimator.py — Project: kulbachcedric/EvOAutoML
 def _initialize_model(self, model: base.Estimator, params):
     """Return an independent copy of `model` configured with `params`.

     Parameters
     ----------
     model
         Estimator prototype to copy.
     params
         Parameter assignment passed to `_set_params`.
     """
     model = copy.deepcopy(model)
     # BUG FIX: river's `_set_params` returns a NEW estimator instead of
     # mutating in place (its return value is used as an expression in
     # `expand_param_grid`), so the result must be captured — previously it
     # was discarded and the unconfigured copy was returned.
     return model._set_params(params)