Example 1: a unit test that reports a single intermediate value and asserts should_prune() with an always-prune test pruner.
def test_should_prune() -> None:

    pruner = DeterministicPruner(True)
    study = create_study(pruner=pruner)
    trial = Trial(study, study._storage.create_new_trial(study._study_id))
    trial.report(1, 1)
    assert trial.should_prune()
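The DeterministicPruner above is a test helper that always returns the boolean it was constructed with. A minimal sketch of a custom pruner with the same shape, written against optuna.pruners.BasePruner (the class name FixedDecisionPruner is illustrative, not part of Optuna):

from optuna.pruners import BasePruner
from optuna.study import Study
from optuna.trial import FrozenTrial


class FixedDecisionPruner(BasePruner):
    """Always return the pruning decision given at construction time."""

    def __init__(self, is_pruning: bool) -> None:
        self._is_pruning = is_pruning

    def prune(self, study: Study, trial: FrozenTrial) -> bool:
        # Trial.should_prune() delegates to the study's pruner, so this
        # return value decides whether the reporting trial gets pruned.
        return self._is_pruning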
Example 2: the cross-validation-with-pruning loop from Optuna's scikit-learn integration, which reports the mean fold score at each iteration and raises TrialPruned when the pruner fires.
    def _cross_validate_with_pruning(
            self, trial: Trial,
            estimator: "BaseEstimator") -> Mapping[str, OneDimArrayLikeType]:

        if is_classifier(estimator):
            partial_fit_params = self.fit_params.copy()
            y = self.y.values if isinstance(self.y, pd.Series) else self.y
            classes = np.unique(y)

            partial_fit_params.setdefault("classes", classes)

        else:
            partial_fit_params = self.fit_params

        n_splits = self.cv.get_n_splits(self.X, self.y, groups=self.groups)
        estimators = [clone(estimator) for _ in range(n_splits)]
        scores = {
            "fit_time": np.zeros(n_splits),
            "score_time": np.zeros(n_splits),
            "test_score": np.empty(n_splits),
        }

        if self.return_train_score:
            scores["train_score"] = np.empty(n_splits)

        for step in range(self.max_iter):
            for i, (train, test) in enumerate(
                    self.cv.split(self.X, self.y, groups=self.groups)):
                out = self._partial_fit_and_score(estimators[i], train, test,
                                                  partial_fit_params)

                if self.return_train_score:
                    scores["train_score"][i] = out.pop(0)

                scores["test_score"][i] = out[0]
                scores["fit_time"][i] += out[1]
                scores["score_time"][i] += out[2]

            intermediate_value = np.nanmean(scores["test_score"])

            trial.report(intermediate_value, step=step)

            if trial.should_prune():
                self._store_scores(trial, scores)

                raise TrialPruned(
                    "trial was pruned at iteration {}.".format(step))

        return scores
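This method belongs to Optuna's scikit-learn integration, where pruning is driven by repeated partial_fit calls across folds. A rough usage sketch, assuming a recent Optuna where OptunaSearchCV and FloatDistribution are available (in newer releases the class may live in the separate optuna-integration package); the search space and estimator are illustrative:

import optuna
from optuna.integration import OptunaSearchCV
from sklearn.datasets import load_digits
from sklearn.linear_model import SGDClassifier

X, y = load_digits(return_X_y=True)

# Illustrative search space; enable_pruning requires an estimator that
# implements partial_fit, which SGDClassifier does.
param_distributions = {
    "alpha": optuna.distributions.FloatDistribution(1e-5, 1e-1, log=True),
}

search = OptunaSearchCV(
    SGDClassifier(),
    param_distributions,
    enable_pruning=True,
    max_iter=10,
    n_trials=20,
)
search.fit(X, y)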
Example 3: a test objective that completes on the first trial and raises TrialPruned, with or without previously reported values, on the following ones.
    def objective(trial: Trial) -> float:

        x = trial.suggest_int("x", 5, 5)
        if trial.number == 0:
            return x
        elif trial.number == 1:
            trial.report(1, 4)
            trial.report(2, 7)
            raise TrialPruned()
        elif trial.number == 2:
            trial.report(float("nan"), 3)
            raise TrialPruned()
        elif trial.number == 3:
            raise TrialPruned()
        else:
            raise RuntimeError()
Example 4: the same test objective as Example 3, extended with a categorical suggestion whose only choice is None.
    def objective(trial: Trial) -> float:

        x = trial.suggest_int("x", 5, 5)
        z = trial.suggest_categorical("z", [None])
        if trial.number == 0:
            return x * int(z is None)
        elif trial.number == 1:
            trial.report(1, 4)
            trial.report(2, 7)
            raise TrialPruned()
        elif trial.number == 2:
            trial.report(float("nan"), 3)
            raise TrialPruned()
        elif trial.number == 3:
            raise TrialPruned()
        else:
            raise RuntimeError()
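A minimal driver for an objective shaped like the two above, assuming exactly four trials so that the fall-through RuntimeError branch is never reached; the assertions are illustrative checks on the resulting trial states:

import optuna
from optuna.trial import TrialState

study = optuna.create_study()
study.optimize(objective, n_trials=4)

# One completed trial followed by three pruned ones.
assert study.trials[0].state == TrialState.COMPLETE
assert all(t.state == TrialState.PRUNED for t in study.trials[1:])
# Intermediate values reported before pruning stay on the frozen trial.
assert study.trials[1].intermediate_values == {4: 1, 7: 2}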
Example 5: a two-objective function; report() is expected to raise NotImplementedError in multi-objective optimization.
    def objective(trial: Trial) -> Tuple[float, float]:
        with pytest.raises(NotImplementedError):
            trial.report(1.0, 0)
        return 1.0, 1.0
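report() is only defined for single-objective optimization, which is why the two-value objective above expects NotImplementedError. A sketch of the corresponding driver, assuming a study created with two directions:

import optuna

study = optuna.create_study(directions=["minimize", "minimize"])
study.optimize(objective, n_trials=1)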
Example 6: a test of report() input validation, covering values that can be cast to float as well as rejected values and negative steps.
def test_report() -> None:

    study = create_study()
    trial = Trial(study, study._storage.create_new_trial(study._study_id))

    # Report values that can be cast to `float` (OK).
    trial.report(1.23, 1)
    trial.report(float("nan"), 2)
    trial.report("1.23", 3)  # type: ignore
    trial.report("inf", 4)  # type: ignore
    trial.report(1, 5)
    trial.report(np.array([1], dtype=np.float32)[0], 6)

    # Report values that cannot be cast to `float` or steps that are negative (Error).
    with pytest.raises(TypeError):
        trial.report(None, 7)  # type: ignore

    with pytest.raises(TypeError):
        trial.report("foo", 7)  # type: ignore

    with pytest.raises(TypeError):
        trial.report([1, 2, 3], 7)  # type: ignore

    with pytest.raises(TypeError):
        trial.report("foo", -1)  # type: ignore

    with pytest.raises(ValueError):
        trial.report(1.23, -1)
Example 7: a test objective that optionally reports two intermediate values before returning.
    def objective(trial: Trial, report_intermediate_values: bool) -> float:

        if report_intermediate_values:
            trial.report(1.0, step=0)
            trial.report(2.0, step=1)
        return 0.0
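In real objectives, report and should_prune are usually paired inside a training loop; a self-contained sketch with a placeholder intermediate metric (the quadratic score and the MedianPruner settings are illustrative, not taken from the examples above):

import optuna


def pruned_objective(trial: optuna.Trial) -> float:
    x = trial.suggest_float("x", -10.0, 10.0)
    for step in range(100):
        # Placeholder intermediate metric; a real objective would report
        # a validation score computed at this training step.
        intermediate = (x - 2.0) ** 2 + 1.0 / (step + 1)
        trial.report(intermediate, step)
        if trial.should_prune():
            raise optuna.TrialPruned()
    return (x - 2.0) ** 2


study = optuna.create_study(direction="minimize", pruner=optuna.pruners.MedianPruner())
study.optimize(pruned_objective, n_trials=20)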
Example 8: a legacy version of the report() test, written against an older Optuna API.
def test_trial_report():
    # type: () -> None

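    # Note: this excerpt targets an older Optuna API in which report() took no
    # step argument and Study exposed `study_id` directly.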
    study = create_study()
    trial = Trial(study, study._storage.create_new_trial(study.study_id))

    # Report values that can be cast to `float` (OK).
    trial.report(1.23)
    trial.report(float('nan'))
    trial.report('1.23')  # type: ignore
    trial.report('inf')  # type: ignore
    trial.report(1)
    trial.report(np.array([1], dtype=np.float32)[0])

    # Report values that cannot be cast to `float` (Error).
    with pytest.raises(TypeError):
        trial.report(None)  # type: ignore

    with pytest.raises(TypeError):
        trial.report('foo')  # type: ignore

    with pytest.raises(TypeError):
        trial.report([1, 2, 3])  # type: ignore
Example 9: an objective from a pyannote.pipeline optimizer that accumulates a loss or metric per input, reports it, and prunes through an older Optuna API.
        def objective(trial: Trial) -> float:
            """Compute objective value

            Parameter
            ---------
            trial : `Trial`
                Current trial

            Returns
            -------
            loss : `float`
                Loss
            """

            # use pyannote.metrics metric when available
            try:
                metric = self.pipeline.get_metric()
            except NotImplementedError as e:
                metric = None
                losses = []

            processing_time = []
            evaluation_time = []

            # instantiate pipeline with value suggested in current trial
            pipeline = self.pipeline.instantiate(
                self.pipeline.parameters(trial=trial))

            if show_progress != False:
                progress_bar = tqdm(total=len(inputs), **show_progress)
                progress_bar.update(0)

            # accumulate loss for each input
            for i, input in enumerate(inputs):

                # process input with pipeline
                # (and keep track of processing time)
                before_processing = time.time()
                output = pipeline(input)
                after_processing = time.time()
                processing_time.append(after_processing - before_processing)

                # evaluate output (and keep track of evaluation time)
                before_evaluation = time.time()

                # when metric is not available, use loss method instead
                if metric is None:
                    loss = pipeline.loss(input, output)
                    losses.append(loss)

                # when metric is available,`input` is expected to be provided
                # by a `pyannote.database` protocol
                else:
                    from pyannote.database import get_annotated

                    _ = metric(input["annotation"],
                               output,
                               uem=get_annotated(input))

                after_evaluation = time.time()
                evaluation_time.append(after_evaluation - before_evaluation)

                if show_progress != False:
                    progress_bar.update(1)

                if self.pruner is None:
                    continue

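                # Older Optuna API: should_prune() still accepted a step
                # argument and TrialPruned lived under optuna.structs.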
                trial.report(
                    np.mean(losses) if metric is None else abs(metric), i)
                if trial.should_prune(i):
                    raise optuna.structs.TrialPruned()

            if show_progress != False:
                progress_bar.close()

            trial.set_user_attr("processing_time", sum(processing_time))
            trial.set_user_attr("evaluation_time", sum(evaluation_time))

            return np.mean(losses) if metric is None else abs(metric)