    def test_multistart_qei_expected_improvement_dfo(self):
        """Check that multistart optimization (BFGS) can find the optimum point to sample (using 2-EI)."""
        numpy.random.seed(7860)
        index = numpy.argmax(numpy.greater_equal(self.num_sampled_list, 20))
        domain, gaussian_process = self.gp_test_environments[index]

        tolerance = 6.0e-5
        num_multistarts = 3

        # Expand the domain so that we are definitely not doing constrained optimization
        expanded_domain = TensorProductDomain([ClosedInterval(-4.0, 3.0)] *
                                              self.dim)
        num_to_sample = 2
        repeated_domain = RepeatedDomain(num_to_sample, expanded_domain)

        num_mc_iterations = 100000
        # Just any random point that won't be optimal
        points_to_sample = repeated_domain.generate_random_point_in_domain()
        ei_eval = ExpectedImprovement(gaussian_process,
                                      points_to_sample,
                                      num_mc_iterations=num_mc_iterations)
        # Compute EI at the starting point for the sake of comparison
        ei_initial = ei_eval.compute_expected_improvement()

        ei_optimizer = LBFGSBOptimizer(repeated_domain, ei_eval,
                                       self.BFGS_parameters)
        best_point = multistart_expected_improvement_optimization(
            ei_optimizer, num_multistarts, num_to_sample)

        # Check that gradients are "small" or on border. MC is very inaccurate near 0, so use finite difference
        # gradient instead.
        ei_eval.current_point = best_point
        ei_final = ei_eval.compute_expected_improvement()

        finite_diff_grad = numpy.zeros(best_point.shape)
        h_value = 1.0e-5
        for i in range(best_point.shape[0]):
            for j in range(best_point.shape[1]):
                # Central difference in coordinate (i, j): evaluate EI at +/- h_value.
                best_point[i, j] += h_value
                ei_eval.current_point = best_point
                ei_upper = ei_eval.compute_expected_improvement()
                best_point[i, j] -= 2 * h_value
                ei_eval.current_point = best_point
                ei_lower = ei_eval.compute_expected_improvement()
                best_point[i, j] += h_value  # restore the original coordinate
                finite_diff_grad[i, j] = (ei_upper - ei_lower) / (2 * h_value)

        self.assert_vector_within_relative(finite_diff_grad,
                                           numpy.zeros(finite_diff_grad.shape),
                                           tolerance)

        # Check that output is in the domain
        assert repeated_domain.check_point_inside(best_point) is True

        # Since we didn't really converge to the optimal EI (too costly), do some other sanity checks
        # EI should have improved
        assert ei_final >= ei_initial
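
The finite-difference block in this test is a textbook central-difference estimate: perturb one coordinate by +/- h, evaluate, and divide the difference by 2h. A minimal standalone sketch of the same pattern, in plain numpy with a hypothetical func callable (illustrative only, not part of the MOE API):

import numpy

def central_difference_gradient(func, point, h=1.0e-5):
    """Estimate the gradient of func at point (a 2d float array) by central differences."""
    grad = numpy.zeros(point.shape)
    for idx in numpy.ndindex(point.shape):
        point[idx] += h
        f_upper = func(point)
        point[idx] -= 2 * h
        f_lower = func(point)
        point[idx] += h  # restore the original coordinate
        grad[idx] = (f_upper - f_lower) / (2 * h)
    return grad

For example, central_difference_gradient(lambda x: (x ** 2).sum(), numpy.ones((2, 3))) returns values near 2.0 everywhere, matching the exact gradient of sum(x ** 2).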

    def test_multistart_monte_carlo_expected_improvement_optimization(self):
        """Check that multistart optimization (gradient descent) can find the optimum point to sample (using 2-EI)."""
        numpy.random.seed(7858)  # TODO(271): Monte Carlo only works for this seed
        index = numpy.argmax(numpy.greater_equal(self.num_sampled_list, 20))
        domain, gaussian_process = self.gp_test_environments[index]

        max_num_steps = 75  # this is *too few* steps; we configure it this way so the test will run quickly
        max_num_restarts = 5
        num_steps_averaged = 50
        gamma = 0.2
        pre_mult = 1.5
        max_relative_change = 1.0
        tolerance = 3.0e-2  # really large tolerance b/c converging with monte-carlo (esp in Python) is expensive
        gd_parameters = GradientDescentParameters(
            max_num_steps,
            max_num_restarts,
            num_steps_averaged,
            gamma,
            pre_mult,
            max_relative_change,
            tolerance,
        )
        num_multistarts = 2

        # Expand the domain so that we are definitely not doing constrained optimization
        expanded_domain = TensorProductDomain([ClosedInterval(-4.0, 2.0)] * self.dim)
        num_to_sample = 2
        repeated_domain = RepeatedDomain(num_to_sample, expanded_domain)

        num_mc_iterations = 10000
        # Just any random point that won't be optimal
        points_to_sample = repeated_domain.generate_random_point_in_domain()
        ei_eval = ExpectedImprovement(gaussian_process, points_to_sample, num_mc_iterations=num_mc_iterations)
        # Compute EI and its gradient for the sake of comparison
        ei_initial = ei_eval.compute_expected_improvement(force_monte_carlo=True)  # TODO(271) Monte Carlo only works for this seed
        grad_ei_initial = ei_eval.compute_grad_expected_improvement()

        ei_optimizer = GradientDescentOptimizer(repeated_domain, ei_eval, gd_parameters)
        best_point = multistart_expected_improvement_optimization(ei_optimizer, num_multistarts, num_to_sample)

        # Check that gradients are "small"
        ei_eval.current_point = best_point
        ei_final = ei_eval.compute_expected_improvement(force_monte_carlo=True)  # TODO(271) Monte Carlo only works for this seed
        grad_ei_final = ei_eval.compute_grad_expected_improvement()
        self.assert_vector_within_relative(grad_ei_final, numpy.zeros(grad_ei_final.shape), tolerance)

        # Check that output is in the domain
        assert repeated_domain.check_point_inside(best_point) is True

        # Since we didn't really converge to the optimal EI (too costly), do some other sanity checks
        # EI should have improved
        assert ei_final >= ei_initial

        # grad EI should have improved: each component's magnitude should not have grown
        for grad_index in numpy.ndindex(grad_ei_final.shape):
            assert numpy.fabs(grad_ei_final[grad_index]) <= numpy.fabs(grad_ei_initial[grad_index])
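
The gamma and pre_mult parameters above control how quickly the gradient-descent step size decays. A toy sketch of that idea, assuming the common polynomial schedule step_n = pre_mult * (n + 1)**(-gamma); MOE's actual GradientDescentOptimizer additionally implements the restarts, iterate averaging, and max_relative_change clamping configured above:

import numpy

def gradient_ascent_sketch(grad_func, start, max_num_steps, gamma=0.2, pre_mult=1.5):
    """Toy gradient ascent (EI is maximized) with a polynomially decaying step size.

    Assumes step_n = pre_mult * (n + 1) ** (-gamma); a sketch, not MOE's optimizer.
    """
    point = numpy.asarray(start, dtype=float).copy()
    for n in range(max_num_steps):
        step = pre_mult * (n + 1) ** (-gamma)
        point += step * grad_func(point)
    return point

With gamma = 0.2 and pre_mult = 1.5 as above, the first steps are roughly 1.50, 1.31, 1.20, ..., a deliberately slow decay.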

    def test_multistart_analytic_expected_improvement_optimization(self):
        """Check that multistart optimization (gradient descent) can find the optimum point to sample (using 1D analytic EI)."""
        numpy.random.seed(3148)
        index = numpy.argmax(numpy.greater_equal(self.num_sampled_list, 20))
        domain, gaussian_process = self.gp_test_environments[index]

        max_num_steps = 200  # this is generally *too few* steps; we configure it this way so the test will run quickly
        max_num_restarts = 5
        num_steps_averaged = 0
        gamma = 0.2
        pre_mult = 1.5
        max_relative_change = 1.0
        tolerance = 1.0e-7
        gd_parameters = GradientDescentParameters(
            max_num_steps,
            max_num_restarts,
            num_steps_averaged,
            gamma,
            pre_mult,
            max_relative_change,
            tolerance,
        )
        num_multistarts = 3

        points_to_sample = domain.generate_random_point_in_domain()
        ei_eval = ExpectedImprovement(gaussian_process, points_to_sample)

        # Expand the domain so that we are definitely not doing constrained optimization
        expanded_domain = TensorProductDomain([ClosedInterval(-4.0, 2.0)] *
                                              self.dim)

        num_to_sample = 1
        repeated_domain = RepeatedDomain(ei_eval.num_to_sample,
                                         expanded_domain)
        ei_optimizer = GradientDescentOptimizer(repeated_domain, ei_eval,
                                                gd_parameters)
        best_point = multistart_expected_improvement_optimization(
            ei_optimizer, num_multistarts, num_to_sample)

        # Check that gradients are small
        ei_eval.current_point = best_point
        gradient = ei_eval.compute_grad_expected_improvement()
        self.assert_vector_within_relative(gradient,
                                           numpy.zeros(gradient.shape),
                                           tolerance)

        # Check that output is in the domain
        assert repeated_domain.check_point_inside(best_point) is True
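
For num_to_sample == 1, EI has a closed form, which is what "analytic" refers to in the test above. Under the usual minimization convention, with posterior mean mu, posterior standard deviation sigma, and best observed value best_so_far, the standard formula is EI = (best_so_far - mu) * Phi(z) + sigma * phi(z), where z = (best_so_far - mu) / sigma. A sketch of that formula (scipy.stats supplies the normal CDF/PDF; MOE's evaluator also handles gradients):

from scipy.stats import norm

def analytic_expected_improvement(mu, sigma, best_so_far):
    """Closed-form 1-point EI under a minimization convention (a sketch)."""
    if sigma <= 0.0:
        # Degenerate posterior: the improvement is deterministic.
        return max(best_so_far - mu, 0.0)
    z = (best_so_far - mu) / sigma
    return (best_so_far - mu) * norm.cdf(z) + sigma * norm.pdf(z)

Because the closed form is cheap and noise-free, this test can demand tolerance = 1.0e-7, where the Monte Carlo test above needed 3.0e-2.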
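
All of these tests funnel through multistart_expected_improvement_optimization, and the pattern it embodies is simple: run the local optimizer from num_multistarts random starting points and keep the best result found. A generic sketch with a hypothetical optimize_from callable (MOE's real interface differs):

def multistart_maximize(optimize_from, domain, num_multistarts):
    """Generic multistart pattern: best result over several random starts.

    optimize_from is a hypothetical callable mapping a start point to a
    (point, objective_value) pair.
    """
    best_point, best_value = None, float('-inf')
    for _ in range(num_multistarts):
        start = domain.generate_random_point_in_domain()
        point, value = optimize_from(start)
        if value > best_value:
            best_point, best_value = point, value
    return best_point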