Code example #1
File: _grid.py  Project: xaphoon/optuna
    def after_trial(
        self,
        study: Study,
        trial: FrozenTrial,
        state: TrialState,
        values: Optional[Sequence[float]],
    ) -> None:
        target_grids = self._get_unvisited_grid_ids(study)

        if len(target_grids) == 0:
            # Every grid point has been visited, so the optimization can stop.
            study.stop()
        elif len(target_grids) == 1:
            # Only one grid point remains; if it is the one evaluated by the
            # trial that just finished, the optimization can stop as well.
            grid_id = study._storage.get_trial_system_attrs(trial._trial_id)["grid_id"]
            if grid_id == target_grids[0]:
                study.stop()
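For context, a minimal usage sketch (the objective and grid values below are illustrative, not from the project): because `after_trial` calls `study.stop()` once every grid point has been visited, `optimize` can be called without `n_trials` and terminates on its own, assuming an Optuna version with this automatic-stop behavior.

import optuna

def objective(trial):
    x = trial.suggest_float("x", -5.0, 5.0)
    y = trial.suggest_int("y", 0, 2)
    return x ** 2 + y

# Illustrative grid: 3 x 3 = 9 combinations.
search_space = {"x": [-5.0, 0.0, 5.0], "y": [0, 1, 2]}
study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))

# No n_trials is given; after_trial above stops the study once all grid
# points have been evaluated.
study.optimize(objective)
print(len(study.trials))  # expected: 9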
Code example #2
    def sample_relative(
            self, study: Study, trial: FrozenTrial,
            search_space: Dict[str, BaseDistribution]) -> Dict[str, Any]:
        # Instead of returning param values, GridSampler puts the target grid id as a system attr,
        # and the values are returned from `sample_independent`. This is because the distribution
        # object is hard to obtain at the beginning of the trial, while access to the object is
        # needed to validate the sampled value.

        target_grids = self._get_unvisited_grid_ids(study)

        if len(target_grids) == 0:
            # This case may occur with distributed optimization or trial queue. If there is no
            # target grid, `GridSampler` evaluates a visited, duplicated point with the current
            # trial. After that, the optimization stops.

            _logger.warning(
                "`GridSampler` is re-evaluating a configuration because the grid has been "
                "exhausted. This may happen due to a timing issue during distributed optimization "
                "or when re-running optimizations on already finished studies."
            )

            # In this case, one grid is picked at random from all grids.
            target_grids = list(range(len(self._all_grids)))

            study.stop()

        elif len(target_grids) == 1:
            # When there is only one target grid, optimization stops after the current trial
            # finishes.

            study.stop()

        # In distributed optimization, multiple workers may simultaneously pick up the same grid.
        # To make the conflict less frequent, the grid is chosen randomly.
        grid_id = random.choice(target_grids)

        study._storage.set_trial_system_attr(trial._trial_id, "search_space",
                                             self._search_space)
        study._storage.set_trial_system_attr(trial._trial_id, "grid_id",
                                             grid_id)

        return {}
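A minimal sketch of what this bookkeeping looks like from the user side, assuming an Optuna version where `FrozenTrial.system_attrs` exposes the stored attributes (the search space below is illustrative): each finished trial carries the "grid_id" and "search_space" system attributes that `sample_relative` sets above.

import optuna

# Illustrative one-dimensional grid.
search_space = {"x": [-1.0, 0.0, 1.0]}
study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))
study.optimize(lambda t: t.suggest_float("x", -1.0, 1.0) ** 2)

for trial in study.trials:
    # system_attrs includes the "grid_id" and "search_space" entries
    # written by sample_relative.
    attrs = trial.system_attrs
    print(trial.number, attrs.get("grid_id"), trial.params)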