Example No. 1
    def base_setup(self):
        """Set up a test case for optimizing a simple quadratic polynomial."""
        self.dim = 3
        domain_bounds = [ClosedInterval(-1.0, 1.0)] * self.dim
        self.domain = TensorProductDomain(domain_bounds)

        maxima_point = numpy.full(self.dim, 0.5)
        current_point = numpy.zeros(self.dim)
        self.polynomial = QuadraticFunction(maxima_point, current_point)
        self.null_optimizer = NullOptimizer(self.domain, self.polynomial)
Example No. 2
class NullOptimizerTest(OptimalLearningTestCase):
    """Test the NullOptimizer on a simple objective.

    NullOptimizer should do nothing.
    Multistarting it should be the same as a 'dumb' search over points.

    """
    @T.class_setup
    def base_setup(self):
        """Set up a test case for optimizing a simple quadratic polynomial."""
        self.dim = 3
        domain_bounds = [ClosedInterval(-1.0, 1.0)] * self.dim
        self.domain = TensorProductDomain(domain_bounds)

        maxima_point = numpy.full(self.dim, 0.5)
        current_point = numpy.zeros(self.dim)
        self.polynomial = QuadraticFunction(maxima_point, current_point)
        self.null_optimizer = NullOptimizer(self.domain, self.polynomial)

    def test_null_optimizer(self):
        """Test that null optimizer does not change current_point."""
        current_point_old = self.null_optimizer.objective_function.current_point
        self.null_optimizer.optimize()
        current_point_new = self.null_optimizer.objective_function.current_point
        self.assert_vector_within_relative(current_point_old, current_point_new, 0.0)

    def test_multistarted_null_optimizer(self):
        """Test that multistarting null optimizer just evalutes the function and indentifies the max."""
        num_points = 15
        points = self.domain.generate_uniform_random_points_in_domain(
            num_points)

        truth = numpy.empty(num_points)
        for i, point in enumerate(points):
            self.null_optimizer.objective_function.current_point = point
            truth[
                i] = self.null_optimizer.objective_function.compute_objective_function(
                )

        best_index = numpy.argmax(truth)
        truth_best_point = points[best_index, ...]

        test_best_point, test_values = multistart_optimize(
            self.null_optimizer, starting_points=points)

        self.assert_vector_within_relative(test_best_point, truth_best_point,
                                           0.0)
        self.assert_vector_within_relative(test_values, truth, 0.0)
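
As the class docstring notes, multistarting a NullOptimizer is equivalent to a 'dumb' search over the starting points, which is exactly what the second test verifies. The following is a distilled sketch of that brute-force equivalent (not library code), assuming an objective that exposes the same current_point / compute_objective_function() API used in the test above:

import numpy

def dumb_search(objective, starting_points):
    # Brute-force equivalent of multistart_optimize(NullOptimizer(domain, objective), starting_points):
    # evaluate the objective at every starting point and report the best one.
    values = numpy.empty(len(starting_points))
    for i, point in enumerate(starting_points):
        objective.current_point = point  # NullOptimizer.optimize() leaves this point untouched
        values[i] = objective.compute_objective_function()
    best_point = starting_points[numpy.argmax(values)]
    return best_point, values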
Example No. 3
def evaluate_log_likelihood_at_hyperparameter_list(
    log_likelihood_evaluator,
    hyperparameters_to_evaluate,
    max_num_threads=DEFAULT_MAX_NUM_THREADS,
    status=None,
):
    """Compute the specified log likelihood measure at each input set of hyperparameters.

    Generally, Newton or gradient descent is preferred, but when they fail to converge this may be the only "robust" option.
    This function is also useful for plotting or debugging purposes (just to get a bunch of log likelihood values).

    :param log_likelihood_evaluator: object specifying which log likelihood measure to evaluate
    :type log_likelihood_evaluator: interfaces.log_likelihood_interface.LogLikelihoodInterface subclass
    :param hyperparameters_to_evaluate: the hyperparameters at which to compute the specified log likelihood
    :type hyperparameters_to_evaluate: array of float64 with shape (num_to_eval, log_likelihood_evaluator.num_hyperparameters)
    :param max_num_threads: maximum number of threads to use, >= 1 (UNUSED)
    :type max_num_threads: int
    :param status: (output) status messages (e.g., reporting on optimizer success, etc.)
    :type status: dict
    :return: log likelihood value at each specified set of hyperparameters
    :rtype: array of float64 with shape (hyperparameters_to_evaluate.shape[0])

    """
    null_optimizer = NullOptimizer(None, log_likelihood_evaluator)
    _, values = multistart_optimize(
        null_optimizer, starting_points=hyperparameters_to_evaluate)

    # TODO(GH-59): Have null optimizer actually indicate whether updates were found, e.g., in an IOContainer-like structure.
    found_flag = True
    if status is not None:
        status["evaluate_log_likelihood_at_hyperparameter_list"] = found_flag

    return values
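
A minimal usage sketch for the function above, assuming gp_log_likelihood is an already-constructed LogLikelihoodInterface subclass instance (it is not built here) and the hyperparameter grid is purely illustrative:

import numpy

# Hypothetical hyperparameter grid: one row per set of hyperparameters,
# with as many columns as gp_log_likelihood.num_hyperparameters.
hyperparameter_grid = numpy.array([
    [0.5, 1.0, 1.0, 1.0],
    [1.0, 2.0, 2.0, 2.0],
    [2.0, 0.5, 0.5, 0.5],
])

status = {}
values = evaluate_log_likelihood_at_hyperparameter_list(
    gp_log_likelihood,  # assumed LogLikelihoodInterface implementation, not defined in this sketch
    hyperparameter_grid,
    status=status,
)
best_hyperparameters = hyperparameter_grid[numpy.argmax(values)]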