Example #1
    def test_multistarted_gradient_descent_optimizer_crippled_start(self):
        """Check that multistarted GD is finding the best result from GD."""
        # Cripple GD: allow only one step and one restart.
        max_num_steps = 1
        max_num_restarts = 1

        param_dict = self.gd_parameters._asdict()
        param_dict['max_num_steps'] = max_num_steps
        param_dict['max_num_restarts'] = max_num_restarts
        gd_parameters_crippled = GradientDescentParameters(**param_dict)

        gradient_descent_optimizer_crippled = GradientDescentOptimizer(
            self.domain, self.polynomial, gd_parameters_crippled)

        num_points = 15
        points = self.domain.generate_uniform_random_points_in_domain(
            num_points)

        multistart_optimizer = MultistartOptimizer(
            gradient_descent_optimizer_crippled, num_points)
        test_best_point, _ = multistart_optimizer.optimize(
            random_starts=points)
        # This point set won't include the optimum so multistart GD won't find it.
        for value in (test_best_point - self.polynomial.optimum_point):
            assert value != 0.0

        points_with_opt = numpy.append(points,
                                       self.polynomial.optimum_point.reshape(
                                           (1, self.polynomial.dim)),
                                       axis=0)
        test_best_point, _ = multistart_optimizer.optimize(
            random_starts=points_with_opt)
        # This point set will include the optimum so multistart GD will find it.
        for value in (test_best_point - self.polynomial.optimum_point):
            assert value == 0.0
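
The _asdict() call above is the standard namedtuple pattern for building a modified copy, which suggests GradientDescentParameters behaves like a namedtuple. A minimal self-contained sketch of the pattern, using a hypothetical Params tuple:

    from collections import namedtuple

    # Hypothetical stand-in for GradientDescentParameters.
    Params = namedtuple('Params', ['max_num_steps', 'max_num_restarts', 'gamma'])
    base = Params(max_num_steps=500, max_num_restarts=4, gamma=0.7)

    # _asdict() returns a mapping of field names to values; edit it and splat
    # it back into the constructor to copy the tuple with fields overridden.
    overrides = base._asdict()
    overrides['max_num_steps'] = 1
    overrides['max_num_restarts'] = 1
    crippled = Params(**overrides)
    assert crippled == Params(1, 1, 0.7)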
Example #2
    def test_multistarted_gradient_descent_optimizer_crippled_start(self):
        """Check that multistarted GD is finding the best result from GD."""
        # Cripple GD: allow only one step and one restart.
        gd_parameters_crippled = GradientDescentParameters(
            1,  # max_num_steps
            1,  # max_num_restarts
            self.gd_parameters.num_steps_averaged,
            self.gd_parameters.gamma,
            self.gd_parameters.pre_mult,
            self.gd_parameters.max_relative_change,
            self.gd_parameters.tolerance,
        )
        gradient_descent_optimizer_crippled = GradientDescentOptimizer(self.domain, self.polynomial, gd_parameters_crippled)

        num_points = 15
        points = self.domain.generate_uniform_random_points_in_domain(num_points)

        multistart_optimizer = MultistartOptimizer(gradient_descent_optimizer_crippled, num_points)
        test_best_point, _ = multistart_optimizer.optimize(random_starts=points)
        # This point set won't include the optimum so multistart GD won't find it.
        for value in (test_best_point - self.polynomial.optimum_point):
            T.assert_not_equal(value, 0.0)

        points_with_opt = numpy.append(points, self.polynomial.optimum_point.reshape((1, self.polynomial.dim)), axis=0)
        test_best_point, _ = multistart_optimizer.optimize(random_starts=points_with_opt)
        # This point set will include the optimum so multistart GD will find it.
        for value in (test_best_point - self.polynomial.optimum_point):
            T.assert_equal(value, 0.0)
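
Compared with Example #1, the positional construction here is more compact but silently breaks if the parameter order ever changes. If GradientDescentParameters is a namedtuple (as the _asdict() usage in Example #1 suggests), _replace() expresses the same override keyed by field name; a sketch with the same hypothetical Params tuple:

    from collections import namedtuple

    # Hypothetical stand-in for GradientDescentParameters.
    Params = namedtuple('Params', ['max_num_steps', 'max_num_restarts', 'gamma'])
    base = Params(max_num_steps=500, max_num_restarts=4, gamma=0.7)

    # _replace() builds a modified copy in one call, keyed by field name,
    # so it survives reordering or insertion of new fields.
    crippled = base._replace(max_num_steps=1, max_num_restarts=1)
    assert crippled == Params(1, 1, 0.7)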
Example #3
    def test_multistarted_bfgs_optimizer(self):
        """Check that multistarted GD can find the optimum in a 'very' large domain."""
        # Set a large domain: a single GD run is unlikely to reach the optimum
        domain_bounds = [ClosedInterval(-10.0, 10.0)] * self.dim
        domain = TensorProductDomain(domain_bounds)

        tolerance = 2.0e-10
        num_points = 10
        bfgs_optimizer = LBFGSBOptimizer(domain, self.polynomial,
                                         self.BFGS_parameters)
        multistart_optimizer = MultistartOptimizer(bfgs_optimizer, num_points)

        output, _ = multistart_optimizer.optimize()
        # Verify coordinates
        self.assert_vector_within_relative(output,
                                           self.polynomial.optimum_point,
                                           tolerance)

        # Verify function value
        value = self.polynomial.compute_objective_function()
        self.assert_scalar_within_relative(value,
                                           self.polynomial.optimum_value,
                                           tolerance)

        # Verify derivative
        gradient = self.polynomial.compute_grad_objective_function()
        self.assert_vector_within_relative(gradient,
                                           numpy.zeros(self.polynomial.dim),
                                           tolerance)
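
The assert_scalar_within_relative and assert_vector_within_relative helpers belong to the test harness and are not shown in these snippets. One plausible implementation of the check they perform (the zero-denominator guard below is an assumption, not the harness's actual code):

    import numpy

    def assert_scalar_within_relative(value, truth, tol):
        # Relative error |value - truth| / |truth|, guarding against a zero
        # denominator (needed when truth is 0.0, as in the gradient check).
        denom = numpy.fabs(truth)
        if denom < numpy.finfo(numpy.float64).tiny:
            denom = 1.0
        assert numpy.fabs(value - truth) / denom <= tol

    def assert_vector_within_relative(value, truth, tol):
        # Apply the scalar check componentwise.
        assert value.shape == truth.shape
        for v, t in zip(value.ravel(), truth.ravel()):
            assert_scalar_within_relative(v, t, tol)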
Example #4
    def multistarted_optimizer_test(self, optimizer):
        """Check that the multistarted optimizer can find the optimum in a 'very' large domain."""
        tolerance = 2.0e-10
        num_points = 10
        multistart_optimizer = MultistartOptimizer(optimizer, num_points)

        output, _ = multistart_optimizer.optimize()
        # Verify coordinates
        self.assert_vector_within_relative(output, self.polynomial.optimum_point, tolerance)

        # Verify function value
        value = self.polynomial.compute_objective_function()
        self.assert_scalar_within_relative(value, self.polynomial.optimum_value, tolerance)

        # Verify derivative
        gradient = self.polynomial.compute_grad_objective_function()
        self.assert_vector_within_relative(gradient, numpy.zeros(self.polynomial.dim), tolerance)
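
These snippets appear to come from the optimal_learning test suite of Yelp's MOE. Assuming so, the names they rely on would be imported roughly as follows; the module paths are an assumption based on MOE's layout, and testify (imported as T) is only needed for the T.assert_* calls in Example #2:

    import numpy
    import testify as T  # assumed source of T.assert_equal / T.assert_not_equal

    # Assumed MOE module paths -- verify against the installed version.
    from moe.optimal_learning.python.geometry_utils import ClosedInterval
    from moe.optimal_learning.python.python_version.domain import TensorProductDomain
    from moe.optimal_learning.python.python_version.optimization import (
        GradientDescentOptimizer,
        GradientDescentParameters,
        LBFGSBOptimizer,
        MultistartOptimizer,
    )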