Example #1
    def test_gradient_descent_optimizer_constrained(self):
        """Check that gradient descent can find the global optimum (in a domain) when the true optimum is outside."""
        # Domain chosen so that the unconstrained optimum, (0.5, 0.5, 0.5), lies outside it
        domain_bounds = [ClosedInterval(0.05, 0.32), ClosedInterval(0.05, 0.6), ClosedInterval(0.05, 0.32)]
        domain = TensorProductDomain(domain_bounds)
        gradient_descent_optimizer = GradientDescentOptimizer(domain, self.polynomial, self.gd_parameters)

        # Work out what the constrained maximum would be given the domain (i.e., project the optimum to the nearest point in the domain)
        constrained_optimum_point = numpy.copy(self.polynomial.optimum_point)  # copy so we do not mutate the objective's stored optimum
        for i, bounds in enumerate(domain_bounds):
            if constrained_optimum_point[i] > bounds.max:
                constrained_optimum_point[i] = bounds.max
            elif constrained_optimum_point[i] < bounds.min:
                constrained_optimum_point[i] = bounds.min

        tolerance = 2.0e-13
        initial_guess = numpy.full(self.polynomial.dim, 0.2)
        gradient_descent_optimizer.objective_function.current_point = initial_guess
        initial_value = gradient_descent_optimizer.objective_function.compute_objective_function()
        gradient_descent_optimizer.optimize()
        output = gradient_descent_optimizer.objective_function.current_point
        # Verify coordinates
        self.assert_vector_within_relative(output, constrained_optimum_point, tolerance)

        # Verify optimized value is better than initial guess
        final_value = self.polynomial.compute_objective_function()
        T.assert_gt(final_value, initial_value)  # T: testify assertion helpers, imported elsewhere in the test module

        # Verify derivative: a gradient component is zero only where the unconstrained optimum lies strictly inside the domain bounds
        gradient = self.polynomial.compute_grad_objective_function()
        for i, bounds in enumerate(domain_bounds):
            if bounds.is_inside(self.polynomial.optimum_point[i]):
                self.assert_scalar_within_relative(gradient[i], 0.0, tolerance)
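
The per-coordinate projection loop above is just clipping against the box bounds. A minimal standalone sketch of the same operation (numpy only, assuming bounds objects that expose min/max as the ClosedInterval instances in the test do):

    import numpy

    def project_onto_box(point, domain_bounds):
        """Clip each coordinate of point to its interval, giving the nearest point in the box domain."""
        mins = numpy.array([bounds.min for bounds in domain_bounds])
        maxs = numpy.array([bounds.max for bounds in domain_bounds])
        return numpy.clip(point, mins, maxs)
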
Example #2
    def test_gradient_descent_optimizer(self):
        """Check that gradient descent can find the optimum of the quadratic test objective."""
        # Check that the claimed optimum is actually an optimum (zero gradient)
        optimum_point = self.polynomial.optimum_point
        self.polynomial.current_point = optimum_point
        gradient = self.polynomial.compute_grad_objective_function()
        self.assert_vector_within_relative(gradient, numpy.zeros(self.polynomial.dim), 0.0)

        # Verify that gradient descent does not move from the optimum if we start it there.
        gradient_descent_optimizer = GradientDescentOptimizer(self.domain, self.polynomial, self.gd_parameters)
        gradient_descent_optimizer.optimize()
        output = gradient_descent_optimizer.objective_function.current_point
        self.assert_vector_within_relative(output, optimum_point, 0.0)

        # Start from a non-optimal point and check that optimization recovers the optimum
        tolerance = 2.0e-13
        initial_guess = numpy.full(self.polynomial.dim, 0.2)
        gradient_descent_optimizer.objective_function.current_point = initial_guess
        gradient_descent_optimizer.optimize()
        output = gradient_descent_optimizer.objective_function.current_point
        # Verify coordinates
        self.assert_vector_within_relative(output, optimum_point, tolerance)

        # Verify function value
        value = self.polynomial.compute_objective_function()
        self.assert_scalar_within_relative(value, self.polynomial.optimum_value, tolerance)

        # Verify derivative
        gradient = self.polynomial.compute_grad_objective_function()
        self.assert_vector_within_relative(gradient, numpy.zeros(self.polynomial.dim), tolerance)
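
These tests rely on fixtures (self.polynomial, self.domain, self.gd_parameters) defined elsewhere in the test class. A minimal sketch of what that setup might look like, with a hypothetical quadratic stand-in for the real test objective; parameter values are illustrative, and TensorProductDomain, ClosedInterval, and GradientDescentParameters come from the library under test (the field names below match the attribute accesses in Example #3):

    import numpy

    class QuadraticTestObjective(object):
        """Hypothetical stand-in: f(x) = -sum((x - 0.5)**2), maximized at (0.5, ..., 0.5)."""

        def __init__(self, dim=3):
            self.dim = dim
            self.optimum_point = numpy.full(dim, 0.5)
            self.optimum_value = 0.0
            self.current_point = numpy.full(dim, 0.0)

        def compute_objective_function(self):
            return -numpy.sum((self.current_point - self.optimum_point) ** 2)

        def compute_grad_objective_function(self):
            return -2.0 * (self.current_point - self.optimum_point)

    # Inside the TestCase class:
    def setUp(self):
        self.polynomial = QuadraticTestObjective(dim=3)
        self.domain = TensorProductDomain([ClosedInterval(-1.0, 1.0)] * self.polynomial.dim)
        self.gd_parameters = GradientDescentParameters(
            max_num_steps=400,
            max_num_restarts=10,
            num_steps_averaged=0,       # 0: report the final iterate, no averaging
            gamma=0.7,                  # step-size decay exponent (illustrative value)
            pre_mult=0.4,               # initial step-size scale (illustrative value)
            max_relative_change=0.8,
            tolerance=1.0e-12,
        )
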
Example #3
    def test_gradient_descent_optimizer_with_averaging(self):
        """Check that gradient descent can find the optimum of the quadratic test objective with averaging on.

        This test doesn't exercise the purpose of averaging (i.e., this objective isn't stochastic), but it does
        check that it at least runs.

        """
        num_steps_averaged = self.gd_parameters.max_num_steps * 3 // 4  # integer division: this parameter is a step count
        gd_parameters_averaging = GradientDescentParameters(
            self.gd_parameters.max_num_steps,
            self.gd_parameters.max_num_restarts,
            num_steps_averaged,
            self.gd_parameters.gamma,
            self.gd_parameters.pre_mult,
            self.gd_parameters.max_relative_change,
            self.gd_parameters.tolerance,
        )
        # Check that the claimed optimum is actually an optimum (zero gradient)
        optimum_point = self.polynomial.optimum_point
        self.polynomial.current_point = optimum_point
        gradient = self.polynomial.compute_grad_objective_function()
        self.assert_vector_within_relative(gradient,
                                           numpy.zeros(self.polynomial.dim),
                                           0.0)

        # Verify that gradient descent does not move from the optimum if we start it there.
        gradient_descent_optimizer = GradientDescentOptimizer(
            self.domain, self.polynomial, gd_parameters_averaging)
        gradient_descent_optimizer.optimize()
        output = gradient_descent_optimizer.objective_function.current_point
        self.assert_vector_within_relative(output, optimum_point, 0.0)

        # Start from a non-optimal point and check that optimization recovers the optimum
        tolerance = 2.0e-10
        initial_guess = numpy.full(self.polynomial.dim, 0.2)
        gradient_descent_optimizer.objective_function.current_point = initial_guess
        gradient_descent_optimizer.optimize()
        output = gradient_descent_optimizer.objective_function.current_point
        # Verify coordinates
        self.assert_vector_within_relative(output, optimum_point, tolerance)

        # Verify function value
        value = self.polynomial.compute_objective_function()
        self.assert_scalar_within_relative(value,
                                           self.polynomial.optimum_value,
                                           tolerance)

        # Verify derivative
        gradient = self.polynomial.compute_grad_objective_function()
        self.assert_vector_within_relative(gradient,
                                           numpy.zeros(self.polynomial.dim),
                                           tolerance)
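
num_steps_averaged asks the optimizer to report the average of its trailing iterates rather than the final point (Polyak-style iterate averaging, which mainly pays off on stochastic objectives). A standalone sketch of the idea, not necessarily the library's exact implementation:

    import numpy

    def average_trailing_iterates(trajectory, num_steps_averaged):
        """Average the last num_steps_averaged rows of a (num_steps, dim) iterate history."""
        num_steps_averaged = min(max(int(num_steps_averaged), 1), len(trajectory))
        return numpy.mean(trajectory[-num_steps_averaged:], axis=0)
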
Example #4
    def test_gradient_descent_optimizer_with_averaging(self):
        """Check that gradient descent can find the optimum of the quadratic test objective with averaging on.

        This test doesn't exercise the purpose of averaging (i.e., this objective isn't stochastic), but it does
        check that it at least runs.

        """
        num_steps_averaged = self.gd_parameters.max_num_steps * 3 // 4  # integer division: this parameter is a step count
        gd_parameters_averaging = GradientDescentParameters(
            self.gd_parameters.max_num_steps,
            self.gd_parameters.max_num_restarts,
            num_steps_averaged,
            self.gd_parameters.gamma,
            self.gd_parameters.pre_mult,
            self.gd_parameters.max_relative_change,
            self.gd_parameters.tolerance,
        )
        # Check that the claimed optimum is actually an optimum (zero gradient)
        optimum_point = self.polynomial.optimum_point
        self.polynomial.current_point = optimum_point
        gradient = self.polynomial.compute_grad_objective_function()
        self.assert_vector_within_relative(gradient, numpy.zeros(self.polynomial.dim), 0.0)

        # Verify that gradient descent does not move from the optimum if we start it there.
        gradient_descent_optimizer = GradientDescentOptimizer(self.domain, self.polynomial, gd_parameters_averaging)
        gradient_descent_optimizer.optimize()
        output = gradient_descent_optimizer.objective_function.current_point
        self.assert_vector_within_relative(output, optimum_point, 0.0)

        # Start from a non-optimal point and check that optimization recovers the optimum
        tolerance = 2.0e-10
        initial_guess = numpy.full(self.polynomial.dim, 0.2)
        gradient_descent_optimizer.objective_function.current_point = initial_guess
        gradient_descent_optimizer.optimize()
        output = gradient_descent_optimizer.objective_function.current_point
        # Verify coordinates
        self.assert_vector_within_relative(output, optimum_point, tolerance)

        # Verify function value
        value = self.polynomial.compute_objective_function()
        self.assert_scalar_within_relative(value, self.polynomial.optimum_value, tolerance)

        # Verify derivative
        gradient = self.polynomial.compute_grad_objective_function()
        self.assert_vector_within_relative(gradient, numpy.zeros(self.polynomial.dim), tolerance)
Example #5
    def test_gradient_descent_optimizer_constrained(self):
        """Check that gradient descent can find the global optimum (in a domain) when the true optimum is outside."""
        # Domain chosen so that the unconstrained optimum, (0.5, 0.5, 0.5), lies outside it
        domain_bounds = [
            ClosedInterval(0.05, 0.32),
            ClosedInterval(0.05, 0.6),
            ClosedInterval(0.05, 0.32)
        ]
        domain = TensorProductDomain(domain_bounds)
        gradient_descent_optimizer = GradientDescentOptimizer(
            domain, self.polynomial, self.gd_parameters)

        # Work out what the constrained maximum would be given the domain (i.e., project the optimum to the nearest point in the domain)
        constrained_optimum_point = numpy.copy(self.polynomial.optimum_point)  # copy so we do not mutate the objective's stored optimum
        for i, bounds in enumerate(domain_bounds):
            if constrained_optimum_point[i] > bounds.max:
                constrained_optimum_point[i] = bounds.max
            elif constrained_optimum_point[i] < bounds.min:
                constrained_optimum_point[i] = bounds.min

        tolerance = 2.0e-13
        initial_guess = numpy.full(self.polynomial.dim, 0.2)
        gradient_descent_optimizer.objective_function.current_point = initial_guess
        initial_value = gradient_descent_optimizer.objective_function.compute_objective_function()
        gradient_descent_optimizer.optimize()
        output = gradient_descent_optimizer.objective_function.current_point
        # Verify coordinates
        self.assert_vector_within_relative(output, constrained_optimum_point,
                                           tolerance)

        # Verify the optimized value is at least as good as the initial guess
        final_value = self.polynomial.compute_objective_function()
        assert final_value >= initial_value

        # Verify derivative: a gradient component is zero only where the unconstrained optimum lies strictly inside the domain bounds
        gradient = self.polynomial.compute_grad_objective_function()
        for i, bounds in enumerate(domain_bounds):
            if bounds.is_inside(self.polynomial.optimum_point[i]):
                self.assert_scalar_within_relative(gradient[i], 0.0, tolerance)
Example #6
    def test_gradient_descent_optimizer(self):
        """Check that gradient descent can find the optimum of the quadratic test objective."""
        # Check that the claimed optimum is actually an optimum (zero gradient)
        optimum_point = self.polynomial.optimum_point
        self.polynomial.current_point = optimum_point
        gradient = self.polynomial.compute_grad_objective_function()
        self.assert_vector_within_relative(gradient,
                                           numpy.zeros(self.polynomial.dim),
                                           0.0)

        # Verify that gradient descent does not move from the optimum if we start it there.
        gradient_descent_optimizer = GradientDescentOptimizer(
            self.domain, self.polynomial, self.gd_parameters)
        gradient_descent_optimizer.optimize()
        output = gradient_descent_optimizer.objective_function.current_point
        self.assert_vector_within_relative(output, optimum_point, 0.0)

        # Start from a non-optimal point and check that optimization recovers the optimum
        tolerance = 2.0e-13
        initial_guess = numpy.full(self.polynomial.dim, 0.2)
        gradient_descent_optimizer.objective_function.current_point = initial_guess
        gradient_descent_optimizer.optimize()
        output = gradient_descent_optimizer.objective_function.current_point
        # Verify coordinates
        self.assert_vector_within_relative(output, optimum_point, tolerance)

        # Verify function value
        value = self.polynomial.compute_objective_function()
        self.assert_scalar_within_relative(value,
                                           self.polynomial.optimum_value,
                                           tolerance)

        # Verify derivative
        gradient = self.polynomial.compute_grad_objective_function()
        self.assert_vector_within_relative(gradient,
                                           numpy.zeros(self.polynomial.dim),
                                           tolerance)