Code example #1
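Usage examples of the SMT `MGP` (Marginal Gaussian Process) surrogate model, apparently collected from test suites; each snippet notes the imports it assumes.

This unit test checks MGP input validation: `predict_values` and `predict_variances` must raise `ValueError` for inputs whose dimension does not match the training data, and the first element returned by `predict_variances(x, True)` must equal the plain variance. Assumes `import numpy as np`, `from smt.sampling_methods import LHS`, and `from smt.surrogate_models import MGP`.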
    def test_predict_output(self):
        d, n = (3, 10)  # input dimension and number of training points
        sx = LHS(
            xlimits=np.repeat(np.atleast_2d([0.0, 1.0]), d, axis=0),
            criterion="m",
            random_state=42,
        )
        x = sx(n)
        sy = LHS(
            xlimits=np.repeat(np.atleast_2d([0.0, 1.0]), 1, axis=0),
            criterion="m",
            random_state=42,
        )
        y = sy(n)
        y = y.flatten()

        kriging = MGP(n_comp=2)
        kriging.set_training_values(x, y)
        kriging.train()

        # Inputs whose dimension does not match the training data must be rejected
        x_fail_1 = np.asarray([0, 0, 0, 0])
        x_fail_2 = np.asarray([0])

        self.assertRaises(ValueError, lambda: kriging.predict_values(x_fail_1))
        self.assertRaises(ValueError, lambda: kriging.predict_values(x_fail_2))

        self.assertRaises(ValueError, lambda: kriging.predict_variances(x_fail_1))
        self.assertRaises(ValueError, lambda: kriging.predict_variances(x_fail_2))

        x_1 = np.atleast_2d([0, 0, 0])

        var = kriging.predict_variances(x_1)
        var_1 = kriging.predict_variances(x_1, True)  # True returns both the MGP and the Kriging variance
        self.assertEqual(var, var_1[0])
Code example #2
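An artap-style integration test: a `ProblemBranin` objective is sampled with a 100-point Latin hypercube DoE, an SMT `MGP` regressor is trained as the surrogate, and NSGA-II then optimizes largely on the surrogate (the final assertion on `predict_counter` verifies the surrogate was actually used). The commented-out block shows an alternative local optimizer (NLopt with BOBYQA). Assumes the artap imports (`ProblemBranin`, `SurrogateModelSMT`, `LHSGenerator`, `SweepAlgorithm`, `NSGAII`, `Results`) plus `from smt.surrogate_models import MGP`.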
    def test_function(self):
        problem = ProblemBranin()
        # problem = ProblemSphere1D()

        # surrogate
        problem.surrogate = SurrogateModelSMT(problem)
        # set custom regressor
        # problem.surrogate.regressor = RBF(d0=5, print_prediction=False)
        problem.surrogate.regressor = MGP(theta0=[1e-2],
                                          n_comp=2,
                                          print_prediction=False)

        # DoE - Latin hypercube sampling
        gen = LHSGenerator(parameters=problem.parameters)
        gen.init(number=100)

        algorithm_sweep = SweepAlgorithm(problem, generator=gen)
        algorithm_sweep.run()

        # train model
        problem.surrogate.train()

        # set the surrogate train step (retraining interval)
        problem.surrogate.train_step = 50

        # run optimization
        # algorithm = NLopt(problem)
        # algorithm.options['verbose_level'] = 0
        # algorithm.options['algorithm'] = LN_BOBYQA
        # algorithm.options['xtol_abs'] = 1e-6
        # algorithm.options['xtol_rel'] = 1e-3
        # algorithm.options['ftol_rel'] = 1e-3
        # algorithm.options['ftol_abs'] = 1e-6
        # algorithm.options['n_iterations'] = 50
        # algorithm.run()

        algorithm = NSGAII(problem)
        algorithm.options['max_population_number'] = 40
        algorithm.options['max_population_size'] = 10
        algorithm.options['max_processes'] = 10
        algorithm.run()

        b = Results(problem)
        optimum = b.find_optimum('F_1')  # takes the last cost function, 'F_1'
        # print(optimum.vector, optimum.costs)
        # the optimum should lie near (1, 3) with a near-zero cost
        self.assertGreater(optimum.vector[0], 0.9)
        self.assertLess(optimum.vector[0], 1.1)
        self.assertGreater(optimum.vector[1], 2.8)
        self.assertLess(optimum.vector[1], 3.2)
        self.assertLess(optimum.costs[0], 0.05)
        self.assertGreater(problem.surrogate.predict_counter, 100)

        problem.logger.info(
            "surrogate: predict / eval counter: {0:5.0f} / {1:5.0f}, total: {2:5.0f}"
            .format(
                problem.surrogate.predict_counter,
                problem.surrogate.eval_counter,
                problem.surrogate.predict_counter +
                problem.surrogate.eval_counter))
Code example #3
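A derivative check from the SMT Kriging tests: for every kernel and trend polynomial, the analytic Hessian of the reduced likelihood (`_reduced_likelihood_hessian`) is assembled from its sparse (value, index) form and compared against forward finite differences of `_reduced_likelihood_gradient`. The `act_exp` (active-subspace) kernel is only handled by `MGP`, so that case instantiates `MGP` instead of `KRG`. Assumes `import numpy as np`, `from smt.surrogate_models import KRG, MGP`, and `self.X`, `self.y`, `self.theta`, `self.eps`, `self.random` fixtures set up elsewhere in the test case.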
    def test_likelihood_hessian(self):
        for corr_str in [
                "abs_exp",
                "squar_exp",
                "act_exp",
                "matern32",
                "matern52",
        ]:  # For every kernel
            for poly_str in ["constant", "linear",
                             "quadratic"]:  # For every method
                if corr_str == "act_exp":
                    kr = MGP(print_global=False)
                    theta = self.random.rand(4)
                else:
                    kr = KRG(print_global=False)
                    theta = self.theta
                kr.options["poly"] = poly_str
                kr.options["corr"] = corr_str
                kr.set_training_values(self.X, self.y)
                kr.train()
                grad_red, dpar = kr._reduced_likelihood_gradient(theta)

                hess, hess_ij, _ = kr._reduced_likelihood_hessian(theta)
                Hess = np.zeros((theta.shape[0], theta.shape[0]))
                Hess[hess_ij[:, 0], hess_ij[:, 1]] = hess[:, 0]
                Hess[hess_ij[:, 1], hess_ij[:, 0]] = hess[:, 0]

                grad_norm_all = []
                diff_norm_all = []
                ind_theta = []
                for j, omega_j in enumerate(theta):
                    eps_omega = theta.copy()
                    eps_omega[j] += self.eps

                    grad_red_eps, _ = kr._reduced_likelihood_gradient(
                        eps_omega)
                    for i, theta_i in enumerate(theta):

                        # forward finite difference approximation of the Hessian entry
                        hess_eps = (grad_red_eps[i] - grad_red[i]) / self.eps

                        grad_norm_all.append(
                            np.linalg.norm(Hess[i, j]) / np.linalg.norm(Hess))
                        diff_norm_all.append(
                            np.linalg.norm(hess_eps) / np.linalg.norm(Hess))
                        ind_theta.append(r"$x_%d,x_%d$" % (j, i))
                self.assert_error(
                    np.array(grad_norm_all),
                    np.array(diff_norm_all),
                    atol=1e-5,
                    rtol=1e-3,
                )  # from utils/smt_test_case.py
Code example #4
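The companion first-order check: the analytic gradient `_reduced_likelihood_gradient` is compared against forward finite differences of `_reduced_likelihood_function`, using the same kernel/trend loop and test fixtures as the Hessian test above.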
    def test_likelihood_derivatives(self):
        for corr_str in [
                "abs_exp",
                "squar_exp",
                "act_exp",
                "matern32",
                "matern52",
        ]:  # For every kernel
            for poly_str in ["constant", "linear",
                             "quadratic"]:  # For every method
                if corr_str == "act_exp":
                    kr = MGP(print_global=False)
                    theta = self.random.rand(4)
                else:
                    kr = KRG(print_global=False)
                    theta = self.theta
                kr.options["poly"] = poly_str
                kr.options["corr"] = corr_str
                kr.set_training_values(self.X, self.y)
                kr.train()

                grad_red, dpar = kr._reduced_likelihood_gradient(theta)
                red, par = kr._reduced_likelihood_function(theta)

                grad_norm_all = []
                diff_norm_all = []
                ind_theta = []
                for i, theta_i in enumerate(theta):
                    eps_theta = theta.copy()
                    eps_theta[i] = eps_theta[i] + self.eps

                    red_dk, par_dk = kr._reduced_likelihood_function(eps_theta)
                    # forward finite difference of the reduced likelihood
                    dred_dk = (red_dk - red) / self.eps

                    grad_norm_all.append(grad_red[i])
                    diff_norm_all.append(float(dred_dk))
                    ind_theta.append(r"$x_%d$" % i)

                grad_norm_all = np.atleast_2d(grad_norm_all)
                diff_norm_all = np.atleast_2d(diff_norm_all).T
                self.assert_error(grad_norm_all,
                                  diff_norm_all,
                                  atol=1e-5,
                                  rtol=1e-3)  # from utils/smt_test_case.py
Code example #5
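The self-contained MGP example from the SMT documentation: an 8-point LHS design of a 3-D function is compressed onto a 1-component active subspace, predictions are made in the reduced variable `u`, and the two variance estimates (with and without hyperparameter uncertainty) are plotted as 3-sigma bands around the prediction.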
    def test_mgp(self):
        import numpy as np
        import matplotlib.pyplot as plt
        from smt.surrogate_models import MGP
        from smt.sampling_methods import LHS

        # Construction of the DOE
        dim = 3

        def fun(x):
            import numpy as np

            res = (np.sum(x, axis=1)**2 - np.sum(x, axis=1) + 0.2 *
                   (np.sum(x, axis=1) * 1.2)**3)
            return res

        sampling = LHS(xlimits=np.asarray([(-1, 1)] * dim), criterion="m")
        xt = sampling(8)
        yt = np.atleast_2d(fun(xt)).T

        # Build the MGP model
        sm = MGP(
            theta0=[1e-2],
            print_prediction=False,
            n_comp=1,
        )
        sm.set_training_values(xt, yt[:, 0])
        sm.train()

        # Get the transfer matrix A
        emb = sm.embedding["C"]

        # Compute the smallest box containing all points of A
        upper = np.sum(np.abs(emb), axis=0)
        lower = -upper

        # Test the model
        # lower/upper are 1-element arrays since n_comp=1; index them for np.arange
        u_plot = np.atleast_2d(np.arange(lower[0], upper[0], 0.01)).T
        x_plot = sm.get_x_from_u(u_plot)  # Get corresponding points in Omega
        y_plot_true = fun(x_plot)
        y_plot_pred = sm.predict_values(u_plot)
        sigma_MGP, sigma_KRG = sm.predict_variances(u_plot, True)

        u_train = sm.get_u_from_x(xt)  # Get corresponding points in A

        # Plots
        fig, ax = plt.subplots()
        ax.plot(u_plot, y_plot_pred, label="Predicted")
        ax.plot(u_plot, y_plot_true, "k--", label="True")
        ax.plot(u_train, yt, "k+", mew=3, ms=10, label="Train")
        ax.fill_between(
            np.ravel(u_plot),
            np.ravel(y_plot_pred - 3 * sigma_MGP),
            np.ravel(y_plot_pred + 3 * sigma_MGP),
            color="r",
            alpha=0.5,
            label="Variance with hyperparameters uncertainty",
        )
        ax.fill_between(
            np.ravel(u_plot),
            np.ravel(y_plot_pred - 3 * sigma_KRG),
            np.ravel(y_plot_pred + 3 * sigma_KRG),
            color="b",
            alpha=0.5,
            label="Variance without hyperparameters uncertainty",
        )

        ax.set(xlabel="x", ylabel="y", title="MGP")
        fig.legend(loc="upper center", ncol=2)
        fig.tight_layout()
        fig.subplots_adjust(top=0.74)
        plt.show()
Code example #6
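A variant of code example #1 that trains on purely random data instead of an LHS design; the input-validation assertions are identical. Assumes `import numpy as np` and `from smt.surrogate_models import MGP`.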
    def test_predict_output(self):
        x = np.random.random((10, 3))
        y = np.random.random(10)

        kriging = MGP(n_comp=2)
        kriging.set_training_values(x, y)
        kriging.train()

        x_fail_1 = np.asarray([0, 0, 0, 0])
        x_fail_2 = np.asarray([0])

        self.assertRaises(ValueError, lambda: kriging.predict_values(x_fail_1))
        self.assertRaises(ValueError, lambda: kriging.predict_values(x_fail_2))

        self.assertRaises(ValueError,
                          lambda: kriging.predict_variances(x_fail_1))
        self.assertRaises(ValueError,
                          lambda: kriging.predict_variances(x_fail_2))

        x_1 = np.atleast_2d([0, 0, 0])

        var = kriging.predict_variances(x_1)
        var_1 = kriging.predict_variances(x_1, True)
        self.assertEqual(var, var_1[0])
Code example #7
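A script-level artap example, apparently continuing from a run like code example #2: the optimum of the finished problem is printed, the problem is then rebuilt from an SQLite data store, the MGP surrogate is retrained from the stored evaluations, one prediction is sanity-checked against the analytic objective, and NSGA-II is run on the rebuilt problem. Assumes the artap imports from example #2 plus `SqliteDataStore`, and that `data.sqlite` contains previously stored evaluations.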
b = Results(problem)
optimum = b.find_optimum('F_1')  # takes the last cost function, 'F_1'
print(optimum.vector, optimum.costs)

problem = None  # drop the in-memory problem; it is rebuilt from the data store below

# surrogate
trained_problem = ProblemBranin()
trained_problem.data_store = SqliteDataStore(trained_problem,
                                             database_name="data.sqlite")

# set custom regressor
trained_problem.surrogate = SurrogateModelSMT(trained_problem)
trained_problem.surrogate.regressor = MGP(theta0=[1e-2],
                                          n_comp=2,
                                          print_prediction=False)
trained_problem.surrogate.read_from_data_store()
trained_problem.surrogate.train()

# Tests: compare the surrogate prediction with the analytic objective
x = [0, 0]
print(trained_problem.surrogate.predict(x))
y = (x[0] + 2 * x[1] - 7)**2 + (2 * x[0] + x[1] - 5)**2  # analytic reference value
print(y)

algorithm = NSGAII(trained_problem)
algorithm.options['max_population_number'] = 50
algorithm.options['max_population_size'] = 10
algorithm.options['max_processes'] = 1
algorithm.run()