def go(self):
    """Kick off the hyperparameter optimization process once experiment guidelines have
    been set and search dimensions are in place.

    This performs the following steps: setting the hyperparameter space; locating similar
    experiments to serve as learning material for
    :class:`InformedOptimizationProtocol` s; and invoking :meth:`_optimization_loop`,
    which actually sets off the Experiment execution process

    Raises
    ------
    ValueError
        If :attr:`model_initializer` has not been set yet"""
    # Guard: guidelines/options must be configured before optimization can run
    if self.model_initializer is None:
        raise ValueError(
            'Experiment guidelines and options must be set before hyperparameter optimization can be started'
        )

    self.logger = OptimizationReporter(
        [dimension.name for dimension in self.dimensions], **self.reporter_parameters
    )

    self.tested_keys = []
    self._set_hyperparameter_space()
    self._find_similar_experiments()

    started_at = datetime.now()
    self._optimization_loop()
    finished_at = datetime.now()

    G.log_(F'Optimization loop completed in {finished_at - started_at}')
    G.log_(F'Best score was {self.best_score} from Experiment "{self.best_experiment}"')
# --- Example 2 ---
 def _validate_environment():
     """Check that there is a currently active and unoccupied Environment instance

     Raises
     ------
     EnvironmentInactiveError
         If no Environment is active
     EnvironmentInvalidError
         If the active Environment is already occupied by a task"""
     if G.Env is None:
         raise EnvironmentInactiveError()
     if G.Env.current_task is not None:
         raise EnvironmentInvalidError("Must finish current task before starting a new one")
     G.log_(f'Validated Environment with key: "{G.Env.cross_experiment_key}"')
 def _validate_environment():
     """Check that there is a currently active Environment instance that is not already
     occupied

     Raises
     ------
     EnvironmentInactiveError
         If no Environment is active
     EnvironmentInvalidError
         If the active Environment already has a task in progress"""
     if G.Env is None:
         raise EnvironmentInactiveError()
     if G.Env.current_task is not None:
         raise EnvironmentInvalidError(
             'A task is in progress. It must finish before a new one can be started'
         )
     G.log_(F'Validated Environment with key: "{G.Env.cross_experiment_key}"')
# --- Example 4 ---
    def _optimization_loop(self, iteration=0):
        """Perform Experiment execution loop while `iteration` < `iterations`. At each
        iteration, an Experiment will be executed, its results will be logged, and it
        will be compared to the current best experiment

        Parameters
        ----------
        iteration: Int, default=0
            The current iteration in the optimization loop"""
        self.logger.print_optimization_header()

        while iteration < self.iterations:
            try:
                self._execute_experiment()
            except RepeatedExperimentError:
                # G.debug_(F'Skipping repeated Experiment: {_ex!s}\n')
                if self._search_space_exhausted():
                    break
                self.skipped_iterations += 1
                continue
            except StopIteration:
                if self._search_space_exhausted():
                    break
                # G.debug_(f'Re-initializing hyperparameter grid after testing {len(self.tested_keys)} keys')
                self._set_hyperparameter_space()
                continue

            self.logger.print_result(
                self.current_hyperparameters_list,
                self.current_score,
                experiment_id=self.current_experiment.experiment_id,
            )

            # Record a new best on the first evaluation, or when `current_score` beats
            # `best_score` in the direction dictated by `do_maximize`
            if ((self.best_experiment is None)  # First evaluation
                    or (self.do_maximize and
                        (self.best_score < self.current_score))  # New best max
                    or (not self.do_maximize and
                        (self.best_score > self.current_score))  # New best min
                ):
                self.best_experiment = self.current_experiment.experiment_id
                self.best_score = self.current_score

            iteration += 1

    def _search_space_exhausted(self):
        """Determine whether every point in the hyperparameter search space has been
        covered by similar or tested experiments, logging a notice if so

        Returns
        -------
        Boolean
            True if the combined count of similar and tested experiments covers
            :attr:`search_space_size`; else False"""
        if len(self.similar_experiments) + len(self.tested_keys) >= self.search_space_size:
            # No `f`-prefix needed: the message contains no placeholders
            G.log_("Hyperparameter search space has been exhausted")
            return True
        return False
    def _optimization_loop(self, iteration=0):
        """Run the Experiment execution loop until `iterations` have been performed. Each
        pass executes an Experiment, logs its results, and compares it to the current
        best experiment

        Parameters
        ----------
        iteration: Int, default=0
            The current iteration in the optimization loop"""
        self.logger.print_optimization_header()

        while iteration < self.iterations:
            try:
                self._execute_experiment()
            except RepeatedExperimentError:
                # G.debug_(F'Skipping repeated Experiment: {_ex!s}\n')
                self.skipped_iterations += 1
                continue
            except StopIteration:
                if len(self.tested_keys) < self.search_space_size:
                    # Grid not yet exhausted - rebuild it and keep going
                    # G.debug_(F'Re-initializing hyperparameter grid after testing {len(self.tested_keys)} keys')
                    self._set_hyperparameter_space()
                    continue
                G.log_(
                    F'Hyperparameter search space has been exhausted after testing {len(self.tested_keys)} keys'
                )
                break

            # TODO: :attr:`current_hyperparameters_list` only exists in Informed Protocols
            self.logger.print_result(
                self.current_hyperparameters_list,
                self.current_score,
                experiment_id=self.current_experiment.experiment_id)

            first_evaluation = self.best_experiment is None
            if first_evaluation or (self.current_score > self.best_score):
                self.best_experiment = self.current_experiment.experiment_id
                self.best_score = self.current_score

            iteration += 1