Code example #1
    def generate_PINN_samples(self, app_str=""):
        self.app_str = app_str
        samples_list = []

        filename = "NS_AL_{0}{1}.npz".format(self.Ns, app_str)
        if os.path.exists("{1}{0}".format(filename, self.path_env)):
            npzfile = np.load("{1}{0}".format(filename, self.path_env))
            if self.Nf > 0:
                self.Xf = npzfile['Xf']
                target_f = npzfile['target_f']
            else:
                self.Xf_tf = None
            if self.Nb > 0:
                self.Xb_d = npzfile['Xb_d']
                self.ub_d = npzfile['ub_d']

            if self.N0 > 0:
                self.X0 = npzfile['X0']
                self.u0 = npzfile['u0']
            else:
                self.X0_tf = None
                self.u0_tf = None
            if self.Nn > 0:
                self.Xn = npzfile['Xn']
                self.un = npzfile['un']
            else:
                self.Xn = None
                self.un = None
            if self.Nr > 0:
                self.Xr = npzfile['Xr']
                target_r = npzfile['target_r']
            else:
                self.Xr_tf = None

        else:
            np.random.seed(10)

            # sampling_f = LHS(xlimits = np.array([[-1, 1], [-1, 1], [-4, 0]]))
            sampling_f = LHS(xlimits=self.x_p_domain)
            self.Xf = sampling_f(self.Nf)
            self.Xf[:, 2] = np.power(10, self.Xf[:, 2])
            if app_str == "_altsol":
                w1 = 1 - self.Xf[:, [0]]**2
            else:
                w1 = 1 - self.Xf[:, [1]]**2
            w2 = 0 * self.Xf[:, [1]]
            self.Xf = np.concatenate((self.Xf, w1, w2), axis=1)
            if app_str == "_random":
                target_f = np.random.rand(self.Nf, 2)
            else:
                target_f = np.zeros((self.Nf, 2))
                target_f[:, 0] = 2 * self.Xf[:, 2]

            sampling_b = LHS(xlimits=np.array([[-1, 1], [-4, 0]]))
            Nb_side = self.Nb // 3
            x_p_b = sampling_b(Nb_side)

            pb = x_p_b[:, [1]]
            pb_10 = np.power(10, pb)
            xyb = x_p_b[:, [0]]

            if app_str == "_altsol":
                lb = np.concatenate((-np.ones(
                    (Nb_side, 1)), xyb, pb_10, np.zeros(
                        (Nb_side, 1)), np.zeros((Nb_side, 1))),
                                    axis=1)
                ulb = np.zeros((Nb_side, 2))
                rb = np.concatenate((np.ones(
                    (Nb_side, 1)), xyb, pb_10, np.zeros(
                        (Nb_side, 1)), np.zeros((Nb_side, 1))),
                                    axis=1)
                urb = np.zeros((Nb_side, 2))
                tb = np.concatenate((xyb, np.ones(
                    (Nb_side, 1)), pb_10, 1 - xyb**2, np.zeros((Nb_side, 1))),
                                    axis=1)
                utb = np.zeros((Nb_side, 2))
                db = np.concatenate((xyb, -np.ones(
                    (Nb_side, 1)), pb_10, 1 - xyb**2, np.zeros((Nb_side, 1))),
                                    axis=1)
                udb = np.zeros((Nb_side, 2))
            else:
                lb = np.concatenate((-np.ones(
                    (Nb_side, 1)), xyb, pb_10, 1 - xyb**2,
                                     np.zeros((Nb_side, 1))),
                                    axis=1)
                ulb = np.zeros((Nb_side, 2))
                # rb = np.concatenate((np.ones([Nb_side,1]),xyb,pb_10,1-xyb**2,np.zeros((Nb_side,1))),axis = 1)
                # urb = np.zeros((Nb_side,2))
                tb = np.concatenate((xyb, np.ones(
                    (Nb_side, 1)), pb_10, np.zeros(
                        (Nb_side, 1)), np.zeros((Nb_side, 1))),
                                    axis=1)
                utb = np.zeros((Nb_side, 2))
                db = np.concatenate((xyb, -np.ones(
                    (Nb_side, 1)), pb_10, np.zeros(
                        (Nb_side, 1)), np.zeros((Nb_side, 1))),
                                    axis=1)
                udb = np.zeros((Nb_side, 2))

            # self.Xb_d = np.concatenate((lb,rb,tb,db),axis = 0)
            # self.ub_d = np.concatenate((ulb,urb,utb,udb),axis = 0)

            self.Xb_d = np.concatenate((lb, tb, db), axis=0)
            self.ub_d = np.concatenate((ulb, utb, udb), axis=0)

            if self.Nn > 0:
                sampling_n = LHS(xlimits=np.array([[-1, 1], [-4, 0]]))
                x_p_n = sampling_n(self.Nn)

                pn = x_p_n[:, [1]]
                pn_10 = np.power(10, pn)
                xyn = x_p_n[:, [0]]
                if app_str == "_altsol":
                    rb = np.concatenate((np.ones(
                        [self.Nn, 1]), xyn, pn_10, np.zeros(
                            (self.Nn, 1)), np.zeros((self.Nn, 1))),
                                        axis=1)
                else:
                    rb = np.concatenate(
                        (np.ones([self.Nn, 1]), xyn, pn_10, 1 - xyn**2,
                         np.zeros((self.Nn, 1))),
                        axis=1)

                urb = np.zeros((self.Nn, 2))
                # if app_str == "_altneumann":
                # 	pass
                # else:
                # 	urb[:, [0]] = -2*pn_10*np.ones([self.Nn,1])

                self.Xn = rb
                self.un = urb
            else:
                self.Xn = None
                self.un = None

            if self.Nr > 0:
                sampling_r = LHS(xlimits=self.x_p_domain)
                self.Xr = sampling_r(self.Nr)
                self.Xr[:, 2] = np.power(10, self.Xr[:, 2])
                if app_str == "_altsol":
                    w1 = 1 - self.Xr[:, [0]]**2
                else:
                    w1 = 1 - self.Xr[:, [1]]**2
                w2 = 0 * self.Xr[:, [1]]
                self.Xr = np.concatenate((self.Xr, w1, w2), axis=1)
                target_r = np.zeros((self.Nr, 1))

            else:
                self.Xr = None
                target_r = None

            np.savez(self.path_env + "{0}".format(filename),
                     Xf=self.Xf,
                     target_f=target_f,
                     Xb_d=self.Xb_d,
                     ub_d=self.ub_d,
                     Xn=self.Xn,
                     un=self.un,
                     Xr=self.Xr,
                     target_r=target_r)
            # if self.N0>0:
            # 	sampling_0 = LHS(xlimits = self.x_p_domain)
            # 	x = sampling_0(self.N0)
            # 	x[:,2] = np.power(10, x[:,2])

            # 	str_arr = app_str.split("_")
            # 	if len(str_arr)==3:
            # 		setting_str = str_arr[1]
            # 		setting_para_str = str_arr[2]
            # 		setting_para_str = setting_para_str.replace("p",".")
            # 		setting = int(setting_str)
            # 		setting_para = float(setting_para_str)
            # 	else:
            # 		setting = 0
            # 	# setting 1
            # 	if setting == 1:
            # 		x[:,0] = x[:,1]*x[:,0]+1-x[:,1]
            # 	# setting 2
            # 	elif setting == 2:
            # 		x[:,0] = setting_para*x[:,1]*x[:,0]+1-setting_para*x[:,1]
            # 		x[:,0] = np.maximum(0.001*np.ones(np.shape(x[:,0])),x[:,0])
            # 	# setting 3
            # 	elif setting == 3:
            # 		percent = setting_para/100
            # 		in_corner_int = int(percent*self.N0)
            # 		out_corner_int = self.N0-in_corner_int
            # 		N0_sel = np.random.choice(self.N0,in_corner_int, replace=False)
            # 		N0_sel_diff = np.array([i for i in range(self.N0) if (i not in N0_sel)])
            # 		N0_sel = N0_sel.reshape([in_corner_int,1])
            # 		N0_sel_diff = N0_sel_diff.reshape([out_corner_int,1])
            # 		x[N0_sel,0] = x[N0_sel,1]*x[N0_sel,0]+1-x[N0_sel,1]
            # 		x[N0_sel_diff,0] = (1-x[N0_sel_diff,1])*x[N0_sel_diff,0]

            # 	# self.X0 = np.concatenate((x,1e-4*np.ones((self.N0,1))),axis = 1)
            # 	self.X0 = x
            # 	if app_str == "_reduced":
            # 		leftinds = self.X0[:,1]+np.sqrt(3)*self.X0[:,0]<-1
            # 		self.u0 = np.ones((self.N0,1))
            # 		self.u0[leftinds] = 0
            # 	else:
            # 		self.u0 = self.u_exact(self.X0)
            # 	np.savez(self.path_env+"{0}".format(filename), Xf = self.Xf, Xb_d = self.Xb_d, ub_d = self.ub_d, X0 = self.X0, u0 = self.u0, Xr = self.Xr)

        if self.Nf > 0:
            t_tf = tf.constant((), shape=(self.Nf, 0), dtype=tf.float32)
            x_tf = tf.constant(self.Xf[:, [0]], dtype=tf.float32)
            y_tf = tf.constant(self.Xf[:, [1]], dtype=tf.float32)
            xi_tf = tf.constant(self.Xf[:, [2]], dtype=tf.float32)
            w1_tf = tf.constant(self.Xf[:, [3]], dtype=tf.float32)
            w2_tf = tf.constant(self.Xf[:, [4]], dtype=tf.float32)
            target_tf = tf.constant(target_f, dtype=tf.float32)
            N = tf.constant(self.Nf, dtype=tf.float32)

            weight = tf.constant(self.type_weighting[0], dtype=tf.float32)
            self.Xf_dict = {
                'x_tf': x_tf,
                'y_tf': y_tf,
                't_tf': t_tf,
                'xi_tf': xi_tf,
                'w1_tf': w1_tf,
                'w2_tf': w2_tf,
                'target': target_tf,
                'N': N,
                'type': 'Res',
                'weight': weight
            }
            samples_list.append(self.Xf_dict)

        if self.Nb > 0:
            t_tf = tf.constant((), shape=(self.Nb, 0), dtype=tf.float32)
            x_tf = tf.constant(self.Xb_d[:, [0]], dtype=tf.float32)
            y_tf = tf.constant(self.Xb_d[:, [1]], dtype=tf.float32)
            xi_tf = tf.constant(self.Xb_d[:, [2]], dtype=tf.float32)
            w1_tf = tf.constant(self.Xb_d[:, [3]], dtype=tf.float32)
            w2_tf = tf.constant(self.Xb_d[:, [4]], dtype=tf.float32)
            target_tf = tf.constant(self.ub_d, dtype=tf.float32)
            N = tf.constant(self.Nb, dtype=tf.float32)
            weight = tf.constant(self.type_weighting[1], dtype=tf.float32)
            self.Xb_d_dict = {
                'x_tf': x_tf,
                'y_tf': y_tf,
                't_tf': t_tf,
                'xi_tf': xi_tf,
                'w1_tf': w1_tf,
                'w2_tf': w2_tf,
                'target': target_tf,
                'N': N,
                'type': 'B_D',
                'weight': weight
            }
            samples_list.append(self.Xb_d_dict)

        if self.Nn > 0:
            t_tf = tf.constant((), shape=(self.Nn, 0), dtype=tf.float32)
            x_tf = tf.constant(self.Xn[:, [0]], dtype=tf.float32)
            y_tf = tf.constant(self.Xn[:, [1]], dtype=tf.float32)
            xi_tf = tf.constant(self.Xn[:, [2]], dtype=tf.float32)
            w1_tf = tf.constant(self.Xn[:, [3]], dtype=tf.float32)
            w2_tf = tf.constant(self.Xn[:, [4]], dtype=tf.float32)
            target_tf = tf.constant(self.un, dtype=tf.float32)
            N = tf.constant(self.Nn, dtype=tf.float32)
            weight = tf.constant(self.type_weighting[2], dtype=tf.float32)
            self.Xb_n_dict = {
                'x_tf': x_tf,
                'y_tf': y_tf,
                't_tf': t_tf,
                'xi_tf': xi_tf,
                'w1_tf': w1_tf,
                'w2_tf': w2_tf,
                'target': target_tf,
                'N': N,
                'type': 'B_N',
                'weight': weight
            }
            samples_list.append(self.Xb_n_dict)

        if self.N0 > 0:
            #check number of samples in (1-xi,1) corner
            # xis = self.X0[:,[2]]
            # xs = self.X0[:,[0]]
            # inds_corner = [i for i in range(len(xs)) if xs[i]>=1-xis[i]]
            # print("Number of samples in the corners is {0} out of {1}.\n".format(len(inds_corner),self.N0))

            # plot samples
            # fig, ax = plt.subplots()
            # ax.plot(xs,xis, 'o')
            # plt.show()

            # y_tf = tf.constant(self.X0[:,[1]],dtype = tf.float32)
            # t_tf = tf.constant((),shape = (self.N0,0),dtype = tf.float32)
            # x_tf = tf.constant(self.X0[:,[0]],dtype = tf.float32)
            # xi_tf = tf.constant(self.X0[:,[2]],dtype = tf.float32)
            # target_tf = tf.constant(self.u0, dtype = tf.float32)
            # N = tf.constant(self.N0, dtype = tf.float32)
            # weight = tf.constant(self.type_weighting[3], dtype = tf.float32)
            # self.X0_dict = {'x_tf':x_tf, 'y_tf':y_tf, 't_tf':t_tf, 'xi_tf':xi_tf, 'target':target_tf, 'N':N, 'type':"Init", 'weight':weight}
            # samples_list.append(self.X0_dict)
            pass
        if self.Nr > 0:
            t_tf = tf.constant((), shape=(self.Nr, 0), dtype=tf.float32)
            x_tf = tf.constant(self.Xr[:, [0]], dtype=tf.float32)
            y_tf = tf.constant(self.Xr[:, [1]], dtype=tf.float32)
            xi_tf = tf.constant(self.Xr[:, [2]], dtype=tf.float32)
            w1_tf = tf.constant(self.Xr[:, [3]], dtype=tf.float32)
            w2_tf = tf.constant(self.Xr[:, [4]], dtype=tf.float32)
            target_tf = tf.constant(target_r, dtype=tf.float32)
            N = tf.constant(self.Nr, dtype=tf.float32)
            weight = tf.constant(self.type_weighting[4], dtype=tf.float32)
            self.Xr_dict = {
                'x_tf': x_tf,
                'y_tf': y_tf,
                't_tf': t_tf,
                'xi_tf': xi_tf,
                'w1_tf': w1_tf,
                'w2_tf': w2_tf,
                'target': target_tf,
                'N': N,
                'type': 'Div',
                'weight': weight
            }
            samples_list.append(self.Xr_dict)
            # pass

        return samples_list
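
The recurring idiom in these PINN samplers is to let LHS draw the PDE parameter uniformly in log10 space and then map the exponent column through 10**(.), which yields a log-uniform spread of the stiffness parameter. A minimal, self-contained sketch of just that step (the bounds are illustrative, not the ones stored in self.x_p_domain):

import numpy as np
from smt.sampling_methods import LHS

# draw (x, y, log10(xi)) and convert the last column to xi itself
sampling = LHS(xlimits=np.array([[-1, 1], [-1, 1], [-4, 0]]))
X = sampling(1000)
X[:, 2] = np.power(10, X[:, 2])  # xi is now log-uniform over [1e-4, 1]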
Code example #2
File: Opt_old.py  Project: FNTwin/GPGO
    def bayesian_run_max(self,
                         n_search,
                         boundaries,
                         iteration=10,
                         epsilon=0.1,
                         minimization=False,
                         optimization=False,
                         n_restart=5,
                         sampling=np.random.uniform,
                         plot=False):

        if self.get_GP() is None:
            raise ValueError(
                "No Gaussian Process defined. Define one before running a "
                "Bayesian Optimization")

        else:
            gp = self.get_GP()
            dim = self.get_dim_inputspace()
            tm = self.get_time_logger()
            self._it = iteration
            boundaries_array = np.asarray(boundaries)

            for i in range(1, iteration + 1):
                logger.debug("Iteration: ", i)
                # Generate dimensional Grid to search
                if sampling == "LHS":
                    lhs_generator = LHS(xlimits=boundaries_array)
                    search_grid = lhs_generator(n_search)

                else:
                    search_grid = generate_grid(dim, n_search, boundaries,
                                                sampling)

                # Generate surrogate model GP and predict the grid values
                gp.fit()
                if optimization:
                    gp.direct(n_restarts=n_restart)
                    logger.info("Optimization: ", i, " completed")
                mean, var = gp.predict(search_grid)
                logger.info("Surrogate Model generated: ", i)

                # Compute the EI and the new theoretical best
                predicted_best_X, improvements, best_value = self.optimization_max(
                    search_grid, mean, var, epsilon, plot)
                tm.time()

                # Check if it is a duplicate
                predicted_best_X = self.next_sample_validation(
                    predicted_best_X, boundaries_array)
                if hasattr(self, "_no_evaluation"):
                    return predicted_best_X
                else:
                    predicted_best_Y = self.compute_new_sample(
                        predicted_best_X)
                    self._helper.observe(predicted_best_Y, predicted_best_X)

                    # Augment the dataset of the BO and the GP objects
                    self.augment_XY(predicted_best_X, predicted_best_Y)
                    gp.augment_XY(predicted_best_X, predicted_best_Y)

            best_index = np.argmax(self.get_Y())
            tm.time_end()
            # log_bo(self.__str__())
            self._helper.plot()

            return self.get_X()[best_index], self.get_Y()[best_index]
Code example #3
    def run_vfm_example(self):
        import matplotlib.pyplot as plt
        import numpy as np
        from scipy import linalg
        from smt.utils import compute_rms_error

        from smt.problems import WaterFlowLFidelity, WaterFlow
        from smt.sampling_methods import LHS
        from smt.applications import VFM

        # Problem set up
        ndim = 8
        ntest = 500
        ncomp = 1
        ndoeLF = int(10 * ndim)
        ndoeHF = 3
        funLF = WaterFlowLFidelity(ndim=ndim)
        funHF = WaterFlow(ndim=ndim)
        deriv1 = True
        deriv2 = True
        LF_candidate = "QP"
        Bridge_candidate = "KRG"
        type_bridge = "Multiplicative"
        optionsLF = {}
        optionsB = {
            "theta0": [1e-2] * ndim,
            "print_prediction": False,
            "deriv": False
        }

        # Construct low/high fidelity data and validation points
        sampling = LHS(xlimits=funLF.xlimits, criterion="m")
        xLF = sampling(ndoeLF)
        yLF = funLF(xLF)
        if deriv1:
            dy_LF = np.zeros((ndoeLF, 1))
            for i in range(ndim):
                yd = funLF(xLF, kx=i)
                dy_LF = np.concatenate((dy_LF, yd), axis=1)

        sampling = LHS(xlimits=funHF.xlimits, criterion="m")
        xHF = sampling(ndoeHF)
        yHF = funHF(xHF)
        if deriv2:
            dy_HF = np.zeros((ndoeHF, 1))
            for i in range(ndim):
                yd = funHF(xHF, kx=i)
                dy_HF = np.concatenate((dy_HF, yd), axis=1)

        xtest = sampling(ntest)
        ytest = funHF(xtest)
        dytest = np.zeros((ntest, ndim))
        for i in range(ndim):
            dytest[:, i] = funHF(xtest, kx=i).T

        # Initialize the extension VFM
        M = VFM(
            type_bridge=type_bridge,
            name_model_LF=LF_candidate,
            name_model_bridge=Bridge_candidate,
            X_LF=xLF,
            y_LF=yLF,
            X_HF=xHF,
            y_HF=yHF,
            options_LF=optionsLF,
            options_bridge=optionsB,
            dy_LF=dy_LF,
            dy_HF=dy_HF,
        )

        # Prediction of the validation points
        y = M.predict_values(x=xtest)

        plt.figure()
        plt.plot(ytest, ytest, "-.")
        plt.plot(ytest, y, ".")
        plt.xlabel(r"$y$ True")
        plt.ylabel(r"$y$ prediction")
        plt.show()
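
compute_rms_error is imported above but never called; if the import is kept, a quantitative check could follow the plot, along these lines (assuming smt.utils' compute_rms_error(model, xe, ye) signature):

        # hedged follow-up: RMS error of the VFM prediction on the validation set
        print("RMS error:", compute_rms_error(M, xtest, ytest))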
Code example #4
import numpy as np

from smt.problems import Sphere
from smt.sampling_methods import LHS

try:
    import matplotlib.pyplot as plt
    plot_status = True
except ImportError:
    plot_status = False

########### Initialization of the problem, construction of the training and validation points

ndim = 10
ndoe = int(10 * ndim)
# Define the function
fun = Sphere(ndim=ndim)

# Construction of the DOE
sampling = LHS(xlimits=fun.xlimits, criterion='m')
xt = sampling(ndoe)
# Compute the output
yt = fun(xt)
# Compute the gradient
for i in range(ndim):
    yd = fun(xt, kx=i)
    yt = np.concatenate((yt, yd), axis=1)

# Construction of the validation points
ntest = 500
sampling = LHS(xlimits=fun.xlimits)
xtest = sampling(ntest)
ytest = fun(xtest)
ydtest = np.zeros((ntest, ndim))
for i in range(ndim):
    ydtest[:, i] = fun(xtest, kx=i).T
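
The gradient-enhanced DOE above (values stacked with the kx-wise derivatives) follows the pattern of SMT's GEKPLS examples; a plausible continuation trains such a model on it (a sketch assuming SMT's GEKPLS API with set_training_derivatives):

from smt.surrogate_models import GEKPLS

# train a gradient-enhanced Kriging-PLS model on the DOE built above
sm = GEKPLS(theta0=[1e-2], xlimits=fun.xlimits, extra_points=1,
            print_prediction=False)
sm.set_training_values(xt, yt[:, 0])
for i in range(ndim):
    sm.set_training_derivatives(xt, yt[:, 1 + i].reshape((ndoe, 1)), i)
sm.train()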
Code example #5
File: CD_1D_25.py  Project: jguan4/PPDE_Approximation
    def generate_PINN_samples(self, app_str=""):

        Ns = [self.Nf, self.Nb, self.Nn, self.N0]
        samples_list = []

        filename = "CD_1D_25ver_{0}{1}.npz".format(Ns, app_str)
        if os.path.exists("{1}{0}".format(filename, self.path_env)):
            npzfile = np.load("{1}{0}".format(filename, self.path_env))
            if self.Nf > 0:
                self.Xf = npzfile['Xf']
                target_f = np.zeros([self.Nf, 1])
            else:
                self.Xf_tf = None
            if self.Nb > 0:
                self.Xb_d = npzfile['Xb_d']
                self.ub_d = npzfile['ub_d']

            if self.N0 > 0:
                self.X0 = npzfile['X0']
                self.u0 = npzfile['u0']
            else:
                self.X0_tf = None
                self.u0_tf = None
        else:
            np.random.seed(10)

            if app_str == "_uniform":
                xnum = 100
                epsnum = int(self.Nf / xnum)
                eps_log = np.linspace(-4, 0, epsnum)
                eps = np.power(10, eps_log)
                eps_arr = np.tile(eps, xnum)
                eps_arr = eps_arr.reshape((self.Nf, 1))
                rend = np.ones((epsnum, 1))
                xmat = np.linspace(np.zeros(rend.shape), rend, xnum + 2)
                xmat = xmat[1:-1]
                x_arr = xmat.reshape((self.Nf, 1))
                self.Xf = np.concatenate((x_arr, eps_arr), axis=1)
            else:
                sampling_f = LHS(xlimits=self.x_p_domain)
                self.Xf = sampling_f(self.Nf)
                self.Xf[:, 1] = np.power(10, self.Xf[:, 1])

            target_f = np.zeros([self.Nf, 1])
            sampling_b = LHS(xlimits=np.array([[-4, 0]]))
            x_p_b = sampling_b(self.Nb // 2)
            pb = x_p_b
            pb_10 = np.power(10, pb)
            lb = np.concatenate((np.zeros((self.Nb // 2, 1)), pb_10), axis=1)
            ulb = np.zeros((self.Nb // 2, 1))
            rb = np.concatenate((np.ones([self.Nb // 2, 1]), pb_10), axis=1)
            urb = np.zeros((self.Nb // 2, 1))

            self.Xb_d = np.concatenate((lb, rb), axis=0)
            self.ub_d = np.concatenate((ulb, urb), axis=0)

            if self.N0 > 0:
                sampling_0 = LHS(xlimits=self.x_p_domain)
                x = sampling_0(self.N0)
                x[:, 1] = np.power(10, x[:, 1])

                str_arr = app_str.split("_")
                if len(str_arr) == 3:
                    setting_str = str_arr[1]
                    setting_para_str = str_arr[2]
                    setting_para_str = setting_para_str.replace("p", ".")
                    setting = int(setting_str)
                    setting_para = float(setting_para_str)
                else:
                    setting = 0
                # setting 1
                if setting == 1:
                    x[:, 0] = x[:, 1] * x[:, 0] + 1 - x[:, 1]
                # setting 2
                elif setting == 2:
                    x[:, 0] = (setting_para * x[:, 1] * x[:, 0] + 1 -
                               setting_para * x[:, 1])
                    x[:, 0] = np.maximum(0.001 * np.ones(np.shape(x[:, 0])),
                                         x[:, 0])
                # setting 3
                elif setting == 3:
                    percent = setting_para / 100
                    in_corner_int = int(percent * self.N0)
                    out_corner_int = self.N0 - in_corner_int
                    N0_sel = np.random.choice(self.N0,
                                              in_corner_int,
                                              replace=False)
                    N0_sel_diff = np.array(
                        [i for i in range(self.N0) if (i not in N0_sel)])
                    N0_sel = N0_sel.reshape([in_corner_int, 1])
                    N0_sel_diff = N0_sel_diff.reshape([out_corner_int, 1])
                    x[N0_sel, 0] = (x[N0_sel, 1] * x[N0_sel, 0] + 1 -
                                    x[N0_sel, 1])
                    x[N0_sel_diff, 0] = (1 - x[N0_sel_diff, 1]) * x[N0_sel_diff, 0]

                # self.X0 = np.concatenate((x,1e-4*np.ones((self.N0,1))),axis = 1)
                self.X0 = x
                self.u0 = self.u_exact(self.X0)
                np.savez(self.path_env + "{0}".format(filename),
                         Xf=self.Xf,
                         Xb_d=self.Xb_d,
                         ub_d=self.ub_d,
                         X0=self.X0,
                         u0=self.u0)
            else:
                np.savez(self.path_env + "{0}".format(filename),
                         Xf=self.Xf,
                         Xb_d=self.Xb_d,
                         ub_d=self.ub_d)

        if self.Nf > 0:
            y_tf = tf.constant((), shape=(self.Nf, 0), dtype=tf.float32)
            t_tf = tf.constant((), shape=(self.Nf, 0), dtype=tf.float32)
            x_tf = tf.constant(self.Xf[:, [0]], dtype=tf.float32)
            xi_tf = tf.constant(self.Xf[:, [1]], dtype=tf.float32)
            target_tf = tf.constant(target_f, dtype=tf.float32)
            N = tf.constant(self.Nf, dtype=tf.float32)
            weight = tf.constant(self.type_weighting[0], dtype=tf.float32)
            self.Xf_dict = {
                'x_tf': x_tf,
                'y_tf': y_tf,
                't_tf': t_tf,
                'xi_tf': xi_tf,
                'target': target_tf,
                'N': N,
                'type': 'Res',
                'weight': weight
            }
            samples_list.append(self.Xf_dict)

        if self.Nb > 0:
            y_tf = tf.constant((), shape=(self.Nb, 0), dtype=tf.float32)
            t_tf = tf.constant((), shape=(self.Nb, 0), dtype=tf.float32)
            x_tf = tf.constant(self.Xb_d[:, [0]], dtype=tf.float32)
            xi_tf = tf.constant(self.Xb_d[:, [1]], dtype=tf.float32)
            target_tf = tf.constant(self.ub_d, dtype=tf.float32)
            N = tf.constant(self.Nb, dtype=tf.float32)
            weight = tf.constant(self.type_weighting[1], dtype=tf.float32)
            self.Xb_d_dict = {
                'x_tf': x_tf,
                'y_tf': y_tf,
                't_tf': t_tf,
                'xi_tf': xi_tf,
                'target': target_tf,
                'N': N,
                'type': 'B_D',
                'weight': weight
            }
            samples_list.append(self.Xb_d_dict)

        if self.N0 > 0:
            #check number of samples in (1-xi,1) corner
            xis = self.X0[:, [1]]
            xs = self.X0[:, [0]]
            inds_corner = [i for i in range(len(xs)) if xs[i] >= 1 - xis[i]]
            print(
                "Number of samples in the corners is {0} out of {1}.\n".format(
                    len(inds_corner), self.N0))

            # plot samples
            # fig, ax = plt.subplots()
            # ax.plot(xs,xis, 'o')
            # plt.show()

            y_tf = tf.constant((), shape=(self.N0, 0), dtype=tf.float32)
            t_tf = tf.constant((), shape=(self.N0, 0), dtype=tf.float32)
            x_tf = tf.constant(self.X0[:, [0]], dtype=tf.float32)
            xi_tf = tf.constant(self.X0[:, [1]], dtype=tf.float32)
            target_tf = tf.constant(self.u0, dtype=tf.float32)
            N = tf.constant(self.N0, dtype=tf.float32)
            weight = tf.constant(self.type_weighting[3], dtype=tf.float32)
            self.X0_dict = {
                'x_tf': x_tf,
                'y_tf': y_tf,
                't_tf': t_tf,
                'xi_tf': xi_tf,
                'target': target_tf,
                'N': N,
                'type': "Init",
                'weight': weight
            }
            samples_list.append(self.X0_dict)
        return samples_list
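
All of these generate_PINN_samples variants share the same cache-or-generate skeleton around np.load/np.savez; stripped of the problem-specific sampling, the pattern reduces to something like this (illustrative helper, not from the repository):

import os
import numpy as np

def load_or_generate(path, generate):
    # reuse cached samples if the .npz archive exists; otherwise build and save
    if os.path.exists(path):
        return dict(np.load(path))
    data = generate()  # dict of arrays, e.g. {'Xf': ..., 'ub_d': ...}
    np.savez(path, **data)
    return data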
Code example #6
import numpy as np
import matplotlib.pyplot as plt
from itertools import combinations

from smt.sampling_methods import LHS


def ismember(a, B):
    # count how many times a occurs in B (truthy when a is present)
    ret = np.sum(a == B)
    return ret


#                    x1        x2        x3
xlimits = np.array([[20, 30], [1, 10], [100, 200]])
sampling = LHS(xlimits=xlimits, criterion='c')

num = 50
x = sampling(num)

combi = list(combinations(np.arange(0, num), 2))

print(x.shape)

dist_unique = []
dist_raw = []
for a in combi:
    p1 = x[a[0]]
    p2 = x[a[1]]
    d = 0
    for i in range(len(p1)):
        # reconstructed continuation (the source snippet is truncated here):
        # accumulate squared coordinate differences for the Euclidean distance
        d += (p1[i] - p2[i])**2
    d = np.sqrt(d)
    dist_raw.append(d)
    if not ismember(d, dist_unique):
        dist_unique.append(d)
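
A short closing summary, not part of the truncated source, of the distance spectrum just collected:

print(len(dist_raw), "pairs;", len(dist_unique), "unique;",
      "min distance:", min(dist_raw))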
Code example #7
import numpy as np

from smt.sampling_methods import LHS


def grid_search(algo,
                criterion,
                model,
                X,
                y,
                log_alpha_min,
                log_alpha_max,
                monitor,
                max_evals=50,
                tol=1e-5,
                nb_hyperparam=1,
                beta_star=None,
                random_state=42,
                samp="grid",
                log_alphas=None,
                t_max=1000,
                reverse=True):

    if log_alphas is None and samp == "grid":
        if reverse:
            log_alphas = np.linspace(log_alpha_max, log_alpha_min, max_evals)
        else:
            log_alphas = np.linspace(log_alpha_min, log_alpha_max, max_evals)
        if nb_hyperparam == 2:
            log_alphas = np.array(np.meshgrid(log_alphas,
                                              log_alphas)).T.reshape(-1, 2)

    elif samp == "random":
        rng = np.random.RandomState(random_state)
        log_alphas = rng.uniform(log_alpha_min, log_alpha_max, size=max_evals)
        if reverse:
            log_alphas = -np.sort(-log_alphas)
        else:
            log_alphas = np.sort(log_alphas)
        if nb_hyperparam == 2:
            log_alphas2 = rng.uniform(log_alpha_min,
                                      log_alpha_max,
                                      size=max_evals)
            if reverse:
                log_alphas2 = -np.sort(-log_alphas2)
            else:
                log_alphas2 = np.sort(log_alphas2)
            log_alphas = np.array(np.meshgrid(log_alphas,
                                              log_alphas2)).T.reshape(-1, 2)

    elif samp == "lhs":
        xlimits = np.array([[log_alpha_min, log_alpha_max]])
        sampling = LHS(xlimits=xlimits)
        num = max_evals
        log_alphas = sampling(num)
        log_alphas[log_alphas < log_alpha_min] = log_alpha_min
        log_alphas[log_alphas > log_alpha_max] = log_alpha_max
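        # note: the LHS draw above has shape (max_evals, 1); each entry is
        # unwrapped to a scalar via log_alpha[0] in the loop below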
    min_g_func = np.inf
    log_alpha_opt = log_alphas[0]

    if nb_hyperparam == 2:
        n_try = max_evals**2
    else:
        n_try = log_alphas.shape[0]
    for i in range(n_try):
        try:
            log_alpha = log_alphas[i, :]
        except Exception:
            log_alpha = log_alphas[i]
        if samp == "lhs":
            log_alpha = log_alpha[0]
        g_func, grad_lambda = criterion.get_val_grad(model,
                                                     X,
                                                     y,
                                                     log_alpha,
                                                     algo.get_beta_jac_v,
                                                     tol=tol,
                                                     compute_jac=False,
                                                     monitor=monitor)

        if g_func < min_g_func:
            min_g_func = g_func
            log_alpha_opt = log_alpha

        if monitor.times[-1] > t_max:
            break
    return log_alpha_opt, min_g_func
Code example #8
        for i in range(n_models):
            file.write('%i \n' % models_sort[i][2])

    print('DNN Regression complete!')

### LATIN HYPERCUBE SAMPLING ###
if (opt == 3):

    from smt.sampling_methods import LHS

    lambd = np.array([10**-(lambd_min), 10**-(lambd_max)])
    num_layers = np.array([n_layers_min, n_layers_max])
    num_hidden_units = np.array([n_hidden_min, n_hidden_max])

    xlimits = np.array([lambd, num_layers, num_hidden_units])
    sampling = LHS(xlimits=xlimits)

    XXX = sampling(n_models)

    for i in range(n_models):
        XXX[i, 1] = int(XXX[i, 1])
        XXX[i, 2] = int(XXX[i, 2])
        layers_dims = np.zeros([int(XXX[i, 1])])
        layers_dims[0] = n_var  # Input Layer size
        layers_dims[layers_dims.shape[0] - 1] = n_var  # Output layer size
        for j in range(1, layers_dims.shape[0] - 1):
            layers_dims[j] = int(XXX[i, 2])  # Hidden layers

        system_identification(X_train, Y_train, layers_dims, lambd,
                              learning_rate, num_iter, h, i,
                              nt_s)  # Regression step via DNN
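
The int() casts above reuse a continuous LHS draw for integer-valued hyperparameters; the same trick in isolation (bounds illustrative):

import numpy as np
from smt.sampling_methods import LHS

xlimits = np.array([[1e-6, 1e-2], [2.0, 8.0], [16.0, 128.0]])  # lambda, layers, units
XXX = LHS(xlimits=xlimits)(10)
XXX[:, 1:] = np.floor(XXX[:, 1:])  # truncate the integer-valued factors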
Code example #9
    def optimize(self, fun):
        """
        Optimizes fun

        Parameters
        ----------

        fun: function to optimize: ndarray[n, nx] or ndarray[n] -> ndarray[n, 1]

        Returns
        -------

        [nx, 1]: x optimum
        [1, 1]: y optimum
        int: index of optimum in data arrays 
        [ndoe + n_iter, nx]: coord-x data
        [ndoe + n_iter, 1]: coord-y data
        [ndoe, nx]: coord-x initial doe
        [ndoe, 1]: coord-y initial doe
        """
        # Set the bounds of the optimization problem
        xlimits = self.options["xlimits"]

        # Build initial DOE
        self._sampling = LHS(xlimits=xlimits, criterion="ese")
        self._evaluator = self.options["evaluator"]

        xdoe = self.options["xdoe"]
        if xdoe is None:
            self.log("Build initial DOE with LHS")
            n_doe = self.options["n_doe"]
            x_doe = self._sampling(n_doe)
        else:
            self.log("Initial DOE given")
            x_doe = np.atleast_2d(xdoe)

        ydoe = self.options["ydoe"]
        if ydoe is None:
            y_doe = self._evaluator.run(fun, x_doe)
        else:  # to save time if y_doe is already given to EGO
            y_doe = ydoe

        # to save the initial doe
        x_data = x_doe
        y_data = y_doe

        self.gpr = KRG(print_global=False)

        n_iter = self.options["n_iter"]
        n_parallel = self.options["n_parallel"]

        for k in range(n_iter):
            # Virtual enrichement loop
            for p in range(n_parallel):
                x_et_k, success = self._find_points(x_data, y_data)
                if not success:
                    self.log(
                        "Internal optimization failed at EGO iter = {}.{}".
                        format(k, p))
                    break
                elif success:
                    self.log(
                        "Internal optimization succeeded at EGO iter = {}.{}".
                        format(k, p))
                # Temporarily set y_data to the value predicted by the kriging metamodel
                y_et_k = self.set_virtual_point(np.atleast_2d(x_et_k), y_data)

                # Update y_data with predicted value
                y_data = np.atleast_2d(np.append(y_data, y_et_k)).T
                x_data = np.atleast_2d(np.append(x_data, x_et_k, axis=0))

            # Compute the real values of y_data
            x_to_compute = np.atleast_2d(x_data[-n_parallel:])
            y = self._evaluator.run(fun, x_to_compute)
            y_data[-n_parallel:] = y

        # Find the optimal point
        ind_best = np.argmin(y_data)
        x_opt = x_data[ind_best]
        y_opt = y_data[ind_best]

        return x_opt, y_opt, ind_best, x_data, y_data, x_doe, y_doe
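
A hedged driver sketch for the method above (the instance name and objective are assumptions; the objective must map ndarray[n, nx] to ndarray[n, 1]):

# illustrative only; `ego` stands for a configured instance of the class above
# fun = lambda x: np.sum((x - 0.5)**2, axis=1, keepdims=True)
# x_opt, y_opt, ind_best, x_data, y_data, x_doe, y_doe = ego.optimize(fun)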
Code example #10
    def generate_PINN_samples(self):
        Ns = [self.Nf, self.Nb, self.Nn, self.N0]
        samples_list = []

        filename = "{1}_{0}.npz".format(Ns, self.name)
        if os.path.exists("{1}{0}".format(filename, self.path_env)):
            npzfile = np.load("{1}{0}".format(filename, self.path_env))
            if self.Nf > 0:
                self.Xf = npzfile['Xf']
                target_f = np.zeros([self.Nf, 1])
            else:
                self.Xf_tf = None
            if self.Nb > 0:
                self.Xb_d = npzfile['Xb_d']
                self.ub_d = npzfile['ub_d']
            if self.Nn > 0:
                self.Xb_n = npzfile['Xb_n']
                self.ub_n = npzfile['ub_n']
            if self.N0 > 0:
                self.X0 = npzfile['X0']
                self.u0 = npzfile['u0']
            else:
                self.X0_tf = None
                self.u0_tf = None
        else:
            np.random.seed(10)

            sampling_f = LHS(xlimits=self.x_p_domain)
            self.Xf = sampling_f(self.Nf)
            self.Xf[:, 2] = np.power(10, self.Xf[:, 2])
            target_f = np.zeros([self.Nf, 1])  # zero residual target

            sampling_b = LHS(
                xlimits=np.array([[-1, 1], [-4, 0], [0, 1], [0, 1]]))
            x_p_b = sampling_b(self.Nb // 3)
            x_p_b[:, 1] = np.power(10, x_p_b[:, 1])
            xb = x_p_b[:, [0]]
            pb = x_p_b[:, 1::]
            xi1 = pb[:, [1]]
            xi2 = pb[:, [2]]
            xi_tot = xi1 + xi2
            lb = np.concatenate((-np.ones(xb.shape), xb, pb), axis=1)
            ulb = xi1 * (1 - ((1 + xb) / 2))**3 / xi_tot
            rb = np.concatenate((np.ones(xb.shape), xb, pb), axis=1)
            urb = (xi1 * (1 - ((1 + xb) / 2))**2 + xi2) / xi_tot
            tb = np.concatenate((xb, -np.ones(xb.shape), pb), axis=1)
            utb = xi1 / xi_tot
            self.Xb_d = np.concatenate((lb, rb, tb), axis=0)
            self.ub_d = np.concatenate((ulb, urb, utb), axis=0)

            x_p_n = sampling_b(self.Nn)
            x_p_n[:, 1] = np.power(10, x_p_n[:, 1])
            xn = x_p_n[:, [0]]
            pn = x_p_n[:, 1::]
            self.Xb_n = np.concatenate((xn, np.ones(xn.shape), pn), axis=1)
            self.ub_n = np.zeros(xn.shape)

            if self.N0 > 0:
                self.generate_para()
                for i in range(self.mu_mat_train.shape[1]):
                    p = self.mu_mat_train[:, i]
                    self.generate_one_sol(p)
                N0_tot = self.N0_samples.shape[0]
                N0_sel = np.random.choice(N0_tot, self.N0)
                self.X0 = self.N0_samples[N0_sel, :]
                self.X0_tf = tf.constant(self.X0, dtype=tf.float32)
                self.u0 = self.u0_samples[N0_sel, :]
                self.u0_tf = tf.constant(self.u0, dtype=tf.float32)
                np.savez(self.path_env + "{0}".format(filename),
                         Xf=self.Xf,
                         Xb_d=self.Xb_d,
                         ub_d=self.ub_d,
                         Xb_n=self.Xb_n,
                         ub_n=self.ub_n,
                         X0=self.X0,
                         u0=self.u0)
            else:
                np.savez(self.path_env + "{0}".format(filename),
                         Xf=self.Xf,
                         Xb_d=self.Xb_d,
                         ub_d=self.ub_d,
                         Xb_n=self.Xb_n,
                         ub_n=self.ub_n)

        x_tf = tf.constant(self.Xf[:, [0]], dtype=tf.float32)
        y_tf = tf.constant(self.Xf[:, [1]], dtype=tf.float32)
        t_tf = tf.constant((), shape=(self.Nf, 0), dtype=tf.float32)
        xi_tf = tf.constant(self.Xf[:, 2::], dtype=tf.float32)
        target_tf = tf.constant(target_f, dtype=tf.float32)
        N = tf.constant(self.Nf, dtype=tf.float32)
        weight = tf.constant(self.type_weighting[0], dtype=tf.float32)
        self.Xf_dict = {
            'x_tf': x_tf,
            'y_tf': y_tf,
            't_tf': t_tf,
            'xi_tf': xi_tf,
            'target': target_tf,
            'N': N,
            'type': 'Res',
            'weight': weight
        }
        samples_list.append(self.Xf_dict)

        x_tf = tf.constant(self.Xb_d[:, [0]], dtype=tf.float32)
        y_tf = tf.constant(self.Xb_d[:, [1]], dtype=tf.float32)
        t_tf = tf.constant((), shape=(self.Nb, 0), dtype=tf.float32)
        xi_tf = tf.constant(self.Xb_d[:, 2::], dtype=tf.float32)
        target_tf = tf.constant(self.ub_d, dtype=tf.float32)
        N = tf.constant(self.Nb, dtype=tf.float32)
        weight = tf.constant(self.type_weighting[1], dtype=tf.float32)
        self.Xb_d_dict = {
            'x_tf': x_tf,
            'y_tf': y_tf,
            't_tf': t_tf,
            'xi_tf': xi_tf,
            'target': target_tf,
            'N': N,
            'type': 'B_D',
            'weight': weight
        }
        samples_list.append(self.Xb_d_dict)

        x_tf = tf.constant(self.Xb_n[:, [0]], dtype=tf.float32)
        y_tf = tf.constant(self.Xb_n[:, [1]], dtype=tf.float32)
        t_tf = tf.constant((), shape=(self.Nn, 0), dtype=tf.float32)
        xi_tf = tf.constant(self.Xb_n[:, 2::], dtype=tf.float32)
        target_tf = tf.constant(self.ub_n, dtype=tf.float32)
        N = tf.constant(self.Nn, dtype=tf.float32)
        weight = tf.constant(self.type_weighting[2], dtype=tf.float32)
        self.Xb_n_dict = {
            'x_tf': x_tf,
            'y_tf': y_tf,
            't_tf': t_tf,
            'xi_tf': xi_tf,
            'target': target_tf,
            'N': N,
            'type': 'B_N',
            'weight': weight
        }
        samples_list.append(self.Xb_n_dict)

        if self.N0 > 0:
            x_tf = tf.constant(self.X0[:, [0]], dtype=tf.float32)
            y_tf = tf.constant(self.X0[:, [1]], dtype=tf.float32)
            t_tf = tf.constant((), shape=(self.N0, 0), dtype=tf.float32)
            xi_tf = tf.constant(self.X0[:, 2::], dtype=tf.float32)
            target_tf = tf.constant(self.u0, dtype=tf.float32)
            N = tf.constant(self.N0, dtype=tf.float32)
            weight = tf.constant(self.type_weighting[3], dtype=tf.float32)
            self.X0_dict = {
                'x_tf': x_tf,
                'y_tf': y_tf,
                't_tf': t_tf,
                'xi_tf': xi_tf,
                'target': target_tf,
                'N': N,
                'type': "Init",
                'weight': weight
            }
            samples_list.append(self.X0_dict)
        return samples_list
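
Each dictionary appended to samples_list tags one batch of tensors with a residual type and a scalar weight; a consumer would reduce a batch to a weighted loss term roughly like this (sketch; the prediction tensor is an assumption):

import tensorflow as tf

def weighted_mse(batch, prediction):
    # one weighted mean-squared term of the kind the dicts above describe
    return batch['weight'] * tf.reduce_mean(
        tf.square(prediction - batch['target']))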
Code example #11
File: CD_2D_10.py  Project: jguan4/PPDE_Approximation
    def generate_PINN_samples(self, app_str=""):

        Ns = [self.Nf, self.Nb, self.Nn, self.N0]
        samples_list = []

        filename = "CD_2D_ver10_{0}{1}.npz".format(Ns, app_str)
        if os.path.exists("{1}{0}".format(filename, self.path_env)):
            npzfile = np.load("{1}{0}".format(filename, self.path_env))
            if self.Nf > 0:
                self.Xf = npzfile['Xf']
                target_f = np.zeros([self.Nf, 1])
            else:
                self.Xf_tf = None
            if self.Nb > 0:
                self.Xb_d = npzfile['Xb_d']
                self.ub_d = npzfile['ub_d']
            if self.Nn > 0:
                self.Xb_n = npzfile['Xb_n']
                self.ub_n = npzfile['ub_n']
            if self.N0 > 0:
                self.X0 = npzfile['X0']
                self.u0 = npzfile['u0']
            else:
                self.X0_tf = None
                self.u0_tf = None
        else:
            np.random.seed(10)

            # sampling_f = LHS(xlimits = np.array([[-1, 1], [-1, 1], [-2, 0]]))
            sampling_f = LHS(xlimits=self.x_p_domain)
            self.Xf = sampling_f(self.Nf)
            self.Xf[:, 2] = np.power(10, self.Xf[:, 2])
            self.Xf[:, 0] = (1 - np.absolute(self.Xf[:, 0])) / self.Xf[:, 2]
            target_f = np.zeros([self.Nf, 1])

            sampling_b = LHS(xlimits=np.array([[-1, 1], [-4, 0]]))
            Nb_side = self.Nb // 3
            x_p_b = sampling_b(Nb_side)
            pb = x_p_b[:, [1]]
            pb_10 = np.power(10, pb)
            xyb = x_p_b[:, [0]]
            xietab = (1 - np.absolute(x_p_b[:, [0]])) / pb_10
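            # at x = +/-1 the stretched coordinate (1 - |x|)/xi equals 0 for
            # both walls, so lb and rb below coincide in their first column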
            lb = np.concatenate((0 / pb_10, xyb, pb_10), axis=1)
            ulb = (1 - ((1 + xyb) / 2))**2
            rb = np.concatenate((0 / pb_10, xyb, pb_10), axis=1)
            urb = (1 - ((1 + xyb) / 2))**2
            db = np.concatenate((xietab, -np.ones((Nb_side, 1)), pb_10),
                                axis=1)
            udb = np.ones((Nb_side, 1))

            self.Xb_d = np.concatenate((lb, rb, db), axis=0)
            self.ub_d = np.concatenate((ulb, urb, udb), axis=0)

            sampling_n = LHS(xlimits=np.array([[-4, 0]]))
            x_p_n = sampling_n(self.Nn)
            pn = x_p_n
            pn_10 = np.power(10, pn)
            xietan = (1 - np.absolute(x_p_n)) / pn_10
            tb = np.concatenate((xietan, np.ones((self.Nn, 1)), pn_10), axis=1)
            utb = np.zeros((self.Nn, 1))

            self.Xb_n = tb
            self.ub_n = utb

            if self.N0 > 0:
                sampling_0 = LHS(xlimits=self.x_p_domain)
                x = sampling_0(self.N0)
                # columns are (x, y, xi); the parameter xi lives in column 2
                # (the corner check below reads X0[:, 2]), so that is the
                # column mapped out of log scale and used in the transforms
                x[:, 2] = np.power(10, x[:, 2])

                str_arr = app_str.split("_")
                if len(str_arr) == 3:
                    setting_str = str_arr[1]
                    setting_para_str = str_arr[2].replace("p", ".")
                    setting = int(setting_str)
                    setting_para = float(setting_para_str)
                else:
                    setting = 0
                # setting 1: push samples toward the (1 - xi, 1) corner
                if setting == 1:
                    x[:, 0] = x[:, 2] * x[:, 0] + 1 - x[:, 2]
                # setting 2: scaled corner transform, clipped away from zero
                elif setting == 2:
                    x[:, 0] = (setting_para * x[:, 2] * x[:, 0] + 1 -
                               setting_para * x[:, 2])
                    x[:, 0] = np.maximum(0.001 * np.ones(np.shape(x[:, 0])),
                                         x[:, 0])
                # setting 3: place a fixed percentage of samples in the corner
                elif setting == 3:
                    percent = setting_para / 100
                    in_corner_int = int(percent * self.N0)
                    out_corner_int = self.N0 - in_corner_int
                    N0_sel = np.random.choice(self.N0,
                                              in_corner_int,
                                              replace=False)
                    N0_sel_diff = np.array(
                        [i for i in range(self.N0) if (i not in N0_sel)])
                    N0_sel = N0_sel.reshape([in_corner_int, 1])
                    N0_sel_diff = N0_sel_diff.reshape([out_corner_int, 1])
                    x[N0_sel, 0] = (x[N0_sel, 2] * x[N0_sel, 0] + 1 -
                                    x[N0_sel, 2])
                    x[N0_sel_diff, 0] = (1 - x[N0_sel_diff, 2]) * x[N0_sel_diff, 0]

                # self.X0 = np.concatenate((x,1e-4*np.ones((self.N0,1))),axis = 1)
                self.X0 = x
                self.u0 = self.u_exact(self.X0)
                np.savez(self.path_env + "{0}".format(filename),
                         Xf=self.Xf,
                         Xb_d=self.Xb_d,
                         ub_d=self.ub_d,
                         Xb_n=self.Xb_n,
                         ub_n=self.ub_n,
                         X0=self.X0,
                         u0=self.u0)
            else:
                np.savez(self.path_env + "{0}".format(filename),
                         Xf=self.Xf,
                         Xb_d=self.Xb_d,
                         ub_d=self.ub_d,
                         Xb_n=self.Xb_n,
                         ub_n=self.ub_n)

        if self.Nf > 0:
            t_tf = tf.constant((), shape=(self.Nf, 0), dtype=tf.float32)
            x_tf = tf.constant(self.Xf[:, [0]], dtype=tf.float32)
            y_tf = tf.constant(self.Xf[:, [1]], dtype=tf.float32)
            xi_tf = tf.constant(self.Xf[:, [2]], dtype=tf.float32)
            target_tf = tf.constant(target_f, dtype=tf.float32)
            N = tf.constant(self.Nf, dtype=tf.float32)
            weight = tf.constant(self.type_weighting[0], dtype=tf.float32)
            self.Xf_dict = {
                'x_tf': x_tf,
                'y_tf': y_tf,
                't_tf': t_tf,
                'xi_tf': xi_tf,
                'target': target_tf,
                'N': N,
                'type': 'Res',
                'weight': weight
            }
            samples_list.append(self.Xf_dict)

        if self.Nb > 0:
            t_tf = tf.constant((), shape=(self.Nb, 0), dtype=tf.float32)
            x_tf = tf.constant(self.Xb_d[:, [0]], dtype=tf.float32)
            y_tf = tf.constant(self.Xb_d[:, [1]], dtype=tf.float32)
            xi_tf = tf.constant(self.Xb_d[:, [2]], dtype=tf.float32)
            target_tf = tf.constant(self.ub_d, dtype=tf.float32)
            N = tf.constant(self.Nb, dtype=tf.float32)
            weight = tf.constant(self.type_weighting[1], dtype=tf.float32)
            self.Xb_d_dict = {
                'x_tf': x_tf,
                'y_tf': y_tf,
                't_tf': t_tf,
                'xi_tf': xi_tf,
                'target': target_tf,
                'N': N,
                'type': 'B_D',
                'weight': weight
            }
            samples_list.append(self.Xb_d_dict)

        if self.Nn > 0:
            t_tf = tf.constant((), shape=(self.Nn, 0), dtype=tf.float32)
            x_tf = tf.constant(self.Xb_n[:, [0]], dtype=tf.float32)
            y_tf = tf.constant(self.Xb_n[:, [1]], dtype=tf.float32)
            xi_tf = tf.constant(self.Xb_n[:, [2]], dtype=tf.float32)
            target_tf = tf.constant(self.ub_n, dtype=tf.float32)
            N = tf.constant(self.Nn, dtype=tf.float32)
            weight = tf.constant(self.type_weighting[2], dtype=tf.float32)
            self.Xb_n_dict = {
                'x_tf': x_tf,
                'y_tf': y_tf,
                't_tf': t_tf,
                'xi_tf': xi_tf,
                'target': target_tf,
                'N': N,
                'type': 'B_N',
                'weight': weight
            }
            samples_list.append(self.Xb_n_dict)

        if self.N0 > 0:
            #check number of samples in (1-xi,1) corner
            xis = self.X0[:, [2]]
            xs = self.X0[:, [0]]
            inds_corner = [i for i in range(len(xs)) if xs[i] >= 1 - xis[i]]
            print(
                "Number of samples in the corners is {0} out of {1}.\n".format(
                    len(inds_corner), self.N0))

            # plot samples
            # fig, ax = plt.subplots()
            # ax.plot(xs,xis, 'o')
            # plt.show()

            t_tf = tf.constant((), shape=(self.N0, 0), dtype=tf.float32)
            y_tf = tf.constant(self.X0[:, [1]], dtype=tf.float32)
            x_tf = tf.constant(self.X0[:, [0]], dtype=tf.float32)
            xi_tf = tf.constant(self.X0[:, [2]], dtype=tf.float32)
            target_tf = tf.constant(self.u0, dtype=tf.float32)
            N = tf.constant(self.N0, dtype=tf.float32)
            weight = tf.constant(self.type_weighting[3], dtype=tf.float32)
            self.X0_dict = {
                'x_tf': x_tf,
                'y_tf': y_tf,
                't_tf': t_tf,
                'xi_tf': xi_tf,
                'target': target_tf,
                'N': N,
                'type': "Init",
                'weight': weight
            }
            samples_list.append(self.X0_dict)
        return samples_list
Code example #12
import copy

import numpy as np
import numpy.random as rnd

from smt.sampling_methods import LHS


def evolution(f, bounds, p, it, cull_percen, mut_percen):

    '''
    INPUTS: 
    f           : function to be optimized
    bounds      : bounds of function to be optimized 
                  in form [[xl,xu],[xl,xu],[xl,xu]]
    p           : population size 
    it          : number of generations 
    cull_percen : percentage of particles to be culled after each generation
    mut_percen  : percentage chance of a mutation to occur 

    OUTPUTS: 
    returns the coordinates of the best individual
    '''

    d=len(bounds)
    '''ORIGINAL SAMPLE'''
    sampling=LHS(xlimits=bounds) #LHS Sample
    i_pos=sampling(p)
    '''EVALUATING FITNESSES'''
    i_val=np.zeros((len(i_pos),1))
    for i in range(len(i_pos)):
        i_val[i,:]=f(i_pos[i,:])
    i_pos=np.concatenate((i_pos,i_val),axis=1)
    i_best=i_pos[np.argmin(i_pos[:,-1])]
    iteration=0
    while iteration<it: #PARAMETER HERE (iterations)
        '''TOURNAMENT SELECTION'''
        i_new=np.zeros((int(p*(cull_percen)),d+1)) # PARAMETER HERE (percentage to be kept)
        new_count=0
        while new_count<len(i_new):
            rnd.shuffle(i_pos)
            t_size=rnd.randint(1,5) # PARAMETER HERE (tournament size)
            t=i_pos[:t_size,:]
            t_best=t[np.argmin(t[:,-1])]
            i_new[new_count,:]=t_best[:]
            new_count+=1
        i_pos=copy.deepcopy(i_new)
        '''COMPLETING WITH RANDOM CANDIDATES'''
        new_psize=p-len(i_pos)
        sampling=LHS(xlimits=bounds)
        i_new=sampling(new_psize)
        i_val_new=np.zeros((len(i_new),1))
        for i in range(len(i_new)):
            i_val_new[i,:]=f(i_new[i,:])
        i_new=np.concatenate((i_new,i_val_new),axis=1)
        i_pos=np.concatenate((i_new,i_pos),axis=0)
        best_index=np.argmin(i_pos[:,-1])
        i_best=i_pos[best_index]
        i_best_val=i_pos[best_index,-1]
        print(i_best_val,end='\r')
        '''CROSSOVER HERE'''
        rnd.shuffle(i_pos)
        cross_index = np.linspace(0, p - 2, p // 2)
        for i in cross_index:  # SINGLE-POINT CROSSOVER: swap tails of each pair
            i = int(i)
            k = rnd.randint(0, d)
            tail = i_pos[i, k:].copy()
            i_pos[i, k:] = i_pos[i + 1, k:]
            i_pos[i + 1, k:] = tail
        '''MUTATION CODE HERE'''
        for i in range(len(i_pos)):
            for j in range(d):
                prob=rnd.uniform()
                if prob<mut_percen:
                    i_pos[i,j]=rnd.uniform(bounds[j,0],bounds[j,1])
        i_pos=i_pos[:,:-1]
        i_val=np.zeros((len(i_pos),1))
        for i in range(len(i_pos)):
            i_val[i,:]=f(i_pos[i,:])
        i_pos=np.concatenate((i_pos,i_val),axis=1)
        iteration+=1
    return i_best
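
A usage example for the evolutionary loop above, minimizing a 2-D sphere (the parameter values are arbitrary, for illustration only):

if __name__ == "__main__":
    bounds = np.array([[-5.0, 5.0], [-5.0, 5.0]])
    best = evolution(lambda v: np.sum(v**2), bounds,
                     p=30, it=20, cull_percen=0.5, mut_percen=0.05)
    print("best individual (coords + fitness):", best)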
Code example #13
    def _setup_optimizer(self, fun):
        """
        Instantiate the internal surrogate used for optimization
        and set up the function evaluator according to the options

        Parameters
        ----------

        fun: function to optimize: ndarray[n, nx] or ndarray[n] -> ndarray[n, 1]

        Returns
        -------

        ndarray: initial coord-x doe
        ndarray: initial coord-y doe = fun(xdoe)

        """
        # Set the model
        self.gpr = self.options["surrogate"]
        self.xlimits = self.options["xlimits"]

        # Handle mixed integer optimization
        xtypes = self.options["xtypes"]
        if self.options["categorical_kernel"] is not None:
            work_in_folded_space = True
        else:
            work_in_folded_space = False
        if xtypes:
            self.categorical_kernel = self.options["categorical_kernel"]
            self.mixint = MixedIntegerContext(
                xtypes,
                self.xlimits,
                work_in_folded_space=work_in_folded_space,
                categorical_kernel=self.options["categorical_kernel"],
            )

            self.gpr = self.mixint.build_surrogate_model(self.gpr)
            self._sampling = self.mixint.build_sampling_method(
                LHS,
                criterion="ese",
                random_state=self.options["random_state"],
                output_in_folded_space=work_in_folded_space,
            )
        else:
            self.mixint = None
            self._sampling = LHS(
                xlimits=self.xlimits,
                criterion="ese",
                random_state=self.options["random_state"],
            )

        # Build DOE
        self._evaluator = self.options["evaluator"]
        xdoe = self.options["xdoe"]
        if xdoe is None:
            self.log("Build initial DOE with LHS")
            n_doe = self.options["n_doe"]
            x_doe = self._sampling(n_doe)
        else:
            self.log("Initial DOE given")
            x_doe = np.atleast_2d(xdoe)
            if self.mixint and self.options["categorical_kernel"] is None:
                x_doe = self.mixint.unfold_with_enum_mask(x_doe)

        ydoe = self.options["ydoe"]
        if ydoe is None:
            y_doe = self._evaluator.run(fun, x_doe)
        else:  # to save time if y_doe is already given to EGO
            y_doe = ydoe

        return x_doe, y_doe
Code example #14
# =============================================================================
# https://smt.readthedocs.io/en/latest/_src_docs/sampling_methods/lhs.html
# https://github.com/tisimst/pyDOE/blob/master/pyDOE/doe_lhs.py
# https://pypi.org/project/pyDOE/
# https://www.statisticshowto.datasciencecentral.com/latin-hypercube-sampling/
# =============================================================================
import numpy as np
import matplotlib.pyplot as plt
from smt.sampling_methods import LHS  # surrogate modeling toolbox (SMT)

# number of customers considered in the system
NUM_CUSTOMERS = 50

# A 3-factor design
maxmin_range = np.array([[12.0, 18.0], [4.5, 6.0], [25.0, 34.0]])
sampling = LHS(xlimits=maxmin_range)

# Number of samples/runs to generate
runs_count = NUM_CUSTOMERS
runs = sampling(runs_count)

print(runs.shape)

BV_harv = runs[:, 0]  # blood volume harvesting
CC_harv = runs[:, 1]  # coagulant concentration
Third = runs[:, 2]  # 3rd predictor

# =============================================================================
#plt.plot(BV_harv, CC_harv, "o" , markersize=5, color="red")
#plt.xlabel("BV_harv")
#plt.ylabel("CC_harv")
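A runnable version of the commented-out scatter plot, sketched with the same two factors (the sample count and marker styling follow the comments above):

import numpy as np
import matplotlib.pyplot as plt
from smt.sampling_methods import LHS

limits = np.array([[12.0, 18.0], [4.5, 6.0]])  # BV_harv and CC_harv ranges
runs = LHS(xlimits=limits)(50)
plt.plot(runs[:, 0], runs[:, 1], "o", markersize=5, color="red")
plt.xlabel("BV_harv")
plt.ylabel("CC_harv")
plt.show()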
Code example #16
    def optimize(self, fun):
        """
        Optimizes fun

        Parameters
        ----------

        fun: function to optimize: ndarray[n, nx] or ndarray[n] -> ndarray[n, 1]

        Returns
        -------

        [nx, 1]: x optimum
        [1, 1]: y optimum
        int: index of optimum in data arrays 
        [ndoe + n_iter, nx]: coord-x data
        [ndoe + n_iter, 1]: coord-y data
        [ndoe, nx]: coord-x initial doe
        [ndoe, 1]: coord-y initial doe
        """
        xlimits = self.options["xlimits"]
        sampling = LHS(xlimits=xlimits, criterion="ese")

        doe = self.options["xdoe"]
        if doe is None:
            self.log("Build initial DOE with LHS")
            n_doe = self.options["n_doe"]
            x_doe = sampling(n_doe)
        else:
            self.log("Initial DOE given")
            x_doe = np.atleast_2d(doe)

        y_doe = fun(x_doe)

        # to save the initial doe
        x_data = x_doe
        y_data = y_doe

        self.gpr = KRG(print_global=False)

        bounds = xlimits

        criterion = self.options["criterion"]
        n_iter = self.options["n_iter"]
        n_start = self.options["n_start"]
        n_max_optim = self.options["n_max_optim"]

        for k in range(n_iter):

            self.gpr.set_training_values(x_data, y_data)
            self.gpr.train()

            if criterion == "EI":
                self.obj_k = lambda x: -self.EI(np.atleast_2d(x), y_data)
            elif criterion == "SBO":
                self.obj_k = lambda x: self.SBO(np.atleast_2d(x))
            elif criterion == "UCB":
                self.obj_k = lambda x: self.UCB(np.atleast_2d(x))

            success = False
            n_optim = 1  # retry counter: ensure at least one SLSQP start succeeds
            while not success and n_optim <= n_max_optim:
                opt_all = []
                x_start = sampling(n_start)
                for ii in range(n_start):
                    opt_all.append(
                        minimize(
                            self.obj_k,
                            x_start[ii, :],
                            method="SLSQP",
                            bounds=bounds,
                            options={"maxiter": 200},
                        ))

                opt_all = np.asarray(opt_all)

                opt_success = opt_all[[opt_i["success"] for opt_i in opt_all]]
                obj_success = np.array([opt_i["fun"] for opt_i in opt_success])
                success = obj_success.size != 0
                if not success:
                    self.log("New start point for the internal optimization")
                    n_optim += 1

            if n_optim >= n_max_optim:
                self.log(
                    "Internal optimization failed at EGO iter = {}".format(k))
                break
            elif success:
                self.log(
                    "Internal optimization succeeded at EGO iter = {}".format(
                        k))

            ind_min = np.argmin(obj_success)
            opt = opt_success[ind_min]
            x_et_k = np.atleast_2d(opt["x"])
            y_et_k = fun(x_et_k)

            y_data = np.atleast_2d(np.append(y_data, y_et_k)).T
            x_data = np.atleast_2d(np.append(x_data, x_et_k, axis=0))

        ind_best = np.argmin(y_data)
        x_opt = x_data[ind_best]
        y_opt = y_data[ind_best]

        return x_opt, y_opt, ind_best, x_data, y_data, x_doe, y_doe
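The EI criterion referenced above is the standard expected improvement, EI(x) = (y_min - mu(x)) * Phi(z) + sigma(x) * phi(z) with z = (y_min - mu(x)) / sigma(x); a sketch of how such a method could be written against a trained smt Kriging model (this mirrors the snippet's names but is not the original implementation):

import numpy as np
from scipy.stats import norm

def expected_improvement(gpr, x, y_data):
    # gpr: trained smt KRG model; x: (n, nx) candidate points
    mu = gpr.predict_values(x)
    sigma = np.sqrt(gpr.predict_variances(x))
    y_min = np.min(y_data)
    with np.errstate(divide="ignore", invalid="ignore"):
        z = (y_min - mu) / sigma
        ei = (y_min - mu) * norm.cdf(z) + sigma * norm.pdf(z)
    ei[sigma == 0.0] = 0.0  # no expected improvement where variance is zero
    return ei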
Code example #17
loaded,filenames = load_files(path)

# %% Load only as-reduced button cell data ====================================
# this loads only the as-reduced anode file data [4]
# onefile = loaded[filenames[4][50:-4]]
filenum = 0
onefile = loaded[filenames[filenum][50:-4]]

# filenames[1] = 6.012 # could be Button cell, tested for 493 h?
# filenames[2] = 6.931
# filenames[3] = 8.257
print(filenames[filenum][50:-4])
# %% LHS Sampling =============================================================
tic = time.time()
matrixlimits = np.array([ [0, onefile.shape[0]-1], [0, onefile.shape[1]-1], [0, onefile.shape[2]-1]])
sampling = LHS(xlimits=matrixlimits)

samps = sampling(int(BIGN)) # control number of points generated
samps = np.round(samps,decimals=0).astype(int)
print('\n')
print('Cube sampled: ', str(time.time()-tic)[0:5], ' seconds elapsed.')
print('Sample size (points, dimensions):', samps.shape)
print('\n')



# %% Create search algorithm for TPBs
def quicksearch(boundarycubes, lhspoint, filematrix, searchdirections):  
  
    """
    searchdirections = [(-1, 1, 1),
Code example #18
    def _optimize_hyperparam(self, D):
        """
        This function optimizes the hyperparameters of the Gaussian Process model.

        Parameters
        ----------
        D: np.ndarray [n_obs * (n_obs - 1) / 2, dim]
            - The componentwise cross-spatial-correlation-distance between the
              vectors in X.

        Returns
        -------
        best_optimal_rlf_value: real
            - The value of the reduced likelihood function associated to the
              best autocorrelation parameters theta.
        best_optimal_par: dict()
            - A dictionary containing the requested Gaussian Process model
              parameters.
        best_optimal_theta: list(n_comp) or list(dim)
            - The best hyperparameters found by the optimization.
        """
        # reinitialize optimization best values
        self.best_iteration_fail = None
        self._thetaMemory = None
        # Initialize the hyperparameter-optimization
        if self.name in ["MGP"]:

            def minus_reduced_likelihood_function(theta):
                res = -self._reduced_likelihood_function(theta)[0]
                return res

            def grad_minus_reduced_likelihood_function(theta):
                grad = -self._reduced_likelihood_gradient(theta)[0]
                return grad

        else:

            def minus_reduced_likelihood_function(log10t):
                return -self._reduced_likelihood_function(
                    theta=10.0**log10t)[0]

            def grad_minus_reduced_likelihood_function(log10t):
                log10t_2d = np.atleast_2d(log10t).T
                res = (-np.log(10.0) * (10.0**log10t_2d) *
                       (self._reduced_likelihood_gradient(10.0**log10t_2d)[0]))
                return res

        limit, _rhobeg = 10 * len(self.options["theta0"]), 0.5
        exit_function = False
        if "KPLSK" in self.name:
            n_iter = 1
        else:
            n_iter = 0

        for ii in range(n_iter, -1, -1):
            (
                best_optimal_theta,
                best_optimal_rlf_value,
                best_optimal_par,
                constraints,
            ) = (
                [],
                [],
                [],
                [],
            )

            bounds_hyp = []

            self.theta0 = deepcopy(self.options["theta0"])
            for i in range(len(self.theta0)):
                # In practice, in 1D and for X in [0,1], theta^{-2} in [1e-2,infty),
                # i.e. theta in (0,1e1], is a good choice to avoid overfitting.
                # By standardising X in R, X_norm = (X-X_mean)/X_std, then
                # X_norm in [-1,1] if considering one std intervals. This leads
                # to theta in (0,2e1]
                theta_bounds = self.options["theta_bounds"]
                if self.theta0[i] < theta_bounds[0] or self.theta0[
                        i] > theta_bounds[1]:
                    self.theta0[i] = np.random.rand()
                    self.theta0[i] = (self.theta0[i] *
                                      (theta_bounds[1] - theta_bounds[0]) +
                                      theta_bounds[0])
                    print(
                        "Warning: theta0 is out the feasible bounds. A random initialisation is used instead."
                    )

                if self.name in ["MGP"]:  # to be discussed with R. Priem
                    constraints.append(
                        lambda theta, i=i: theta[i] + theta_bounds[1])
                    constraints.append(
                        lambda theta, i=i: theta_bounds[1] - theta[i])
                    bounds_hyp.append((-theta_bounds[1], theta_bounds[1]))
                else:
                    log10t_bounds = np.log10(theta_bounds)
                    constraints.append(
                        lambda log10t, i=i: log10t[i] - log10t_bounds[0])
                    constraints.append(
                        lambda log10t, i=i: log10t_bounds[1] - log10t[i])
                    bounds_hyp.append(log10t_bounds)

            if self.name in ["MGP"]:
                theta0_rand = m_norm.rvs(
                    self.options["prior"]["mean"] * len(self.theta0),
                    self.options["prior"]["var"],
                    1,
                )
                theta0 = self.theta0
            else:
                theta0_rand = np.random.rand(len(self.theta0))
                theta0_rand = (theta0_rand *
                               (log10t_bounds[1] - log10t_bounds[0]) +
                               log10t_bounds[0])
                theta0 = np.log10(self.theta0)
            self.D = self._componentwise_distance(D, opt=ii)

            # Initialization
            k, incr, stop, best_optimal_rlf_value, max_retry = 0, 0, 1, -1e20, 10
            while k < stop:
                # Use specified starting point as first guess
                self.noise0 = np.array(self.options["noise0"])
                noise_bounds = self.options["noise_bounds"]
                if self.options[
                        "eval_noise"] and not self.options["use_het_noise"]:
                    self.noise0[self.noise0 == 0.0] = noise_bounds[0]
                    for i in range(len(self.noise0)):
                        if (self.noise0[i] < noise_bounds[0]
                                or self.noise0[i] > noise_bounds[1]):
                            self.noise0[i] = noise_bounds[0]
                            print(
                                "Warning: noise0 is out the feasible bounds. The lowest possible value is used instead."
                            )

                    theta0 = np.concatenate(
                        [theta0,
                         np.log10(np.array([self.noise0]).flatten())])
                    theta0_rand = np.concatenate([
                        theta0_rand,
                        np.log10(np.array([self.noise0]).flatten()),
                    ])

                    noise_bounds = np.log10(noise_bounds)  # convert once, not per index
                    for i in range(len(self.noise0)):
                        # bind i at definition time (i=i) to avoid late binding
                        constraints.append(lambda log10t, i=i: log10t[i + len(
                            self.theta0)] - noise_bounds[0])
                        constraints.append(lambda log10t, i=i: noise_bounds[1] -
                                           log10t[i + len(self.theta0)])
                        bounds_hyp.append(noise_bounds)
                theta_limits = np.repeat(np.log10([theta_bounds]),
                                         repeats=len(theta0),
                                         axis=0)
                sampling = LHS(xlimits=theta_limits,
                               criterion="maximin",
                               random_state=41)
                theta_lhs_loops = sampling(self.options["n_start"])
                theta_all_loops = np.vstack(
                    (theta0, theta0_rand, theta_lhs_loops))
                optimal_theta_res = {"fun": float("inf")}
                try:
                    if self.options["hyper_opt"] == "Cobyla":
                        for theta0_loop in theta_all_loops:
                            optimal_theta_res_loop = optimize.minimize(
                                minus_reduced_likelihood_function,
                                theta0_loop,
                                constraints=[{
                                    "fun": con,
                                    "type": "ineq"
                                } for con in constraints],
                                method="COBYLA",
                                options={
                                    "rhobeg": _rhobeg,
                                    "tol": 1e-4,
                                    "maxiter": limit,
                                },
                            )
                            if optimal_theta_res_loop[
                                    "fun"] < optimal_theta_res["fun"]:
                                optimal_theta_res = optimal_theta_res_loop

                    elif self.options["hyper_opt"] == "TNC":
                        theta_all_loops = 10**theta_all_loops
                        for theta0_loop in theta_all_loops:
                            optimal_theta_res_loop = optimize.minimize(
                                minus_reduced_likelihood_function,
                                theta0_loop,
                                method="TNC",
                                jac=grad_minus_reduced_likelihood_function,
                                bounds=bounds_hyp,
                                options={"maxiter": 100},
                            )
                            if optimal_theta_res_loop[
                                    "fun"] < optimal_theta_res["fun"]:
                                optimal_theta_res = optimal_theta_res_loop

                    optimal_theta = optimal_theta_res["x"]

                    if self.name not in ["MGP"]:
                        optimal_theta = 10**optimal_theta
                    optimal_rlf_value, optimal_par = self._reduced_likelihood_function(
                        theta=optimal_theta)

                    # Compare the new optimizer to the best previous one
                    if k > 0:
                        if np.isinf(optimal_rlf_value):
                            stop += 1
                            if incr != 0:
                                return
                            if stop > max_retry:
                                raise ValueError(
                                    "%d attempts to train the model failed" %
                                    max_retry)
                        else:
                            if optimal_rlf_value >= self.best_iteration_fail:
                                if optimal_rlf_value > best_optimal_rlf_value:
                                    best_optimal_rlf_value = optimal_rlf_value
                                    best_optimal_par = optimal_par
                                    best_optimal_theta = optimal_theta
                                else:
                                    if (self.best_iteration_fail >
                                            best_optimal_rlf_value):
                                        best_optimal_theta = self._thetaMemory
                                        (
                                            best_optimal_rlf_value,
                                            best_optimal_par,
                                        ) = self._reduced_likelihood_function(
                                            theta=best_optimal_theta)
                    else:
                        if np.isinf(optimal_rlf_value):
                            stop += 1
                        else:
                            best_optimal_rlf_value = optimal_rlf_value
                            best_optimal_par = optimal_par
                            best_optimal_theta = optimal_theta
                    k += 1
                except ValueError as ve:
                    # raise ve
                    # If the budget of tolerated ill-conditioned matrices is not yet exhausted
                    if self.nb_ill_matrix > 0:
                        self.nb_ill_matrix -= 1
                        k += 1
                        stop += 1
                        # At least one objective function evaluation has been done
                        if self.best_iteration_fail is not None:
                            if self.best_iteration_fail > best_optimal_rlf_value:
                                best_optimal_theta = self._thetaMemory
                                (
                                    best_optimal_rlf_value,
                                    best_optimal_par,
                                ) = self._reduced_likelihood_function(
                                    theta=best_optimal_theta)
                    # Optimization failed
                    elif best_optimal_par == []:
                        print(
                            "Optimization failed. Try increasing the ``nugget``"
                        )
                        raise ve
                    # Break the while loop
                    else:
                        k = stop + 1
                        print(
                            "fmin_cobyla failed but the best value is retained"
                        )

            if "KPLSK" in self.name:
                if self.options["eval_noise"]:
                    # best_optimal_theta contains [theta, noise] if eval_noise = True
                    theta = best_optimal_theta[:-1]
                else:
                    # best_optimal_theta contains [theta] if eval_noise = False
                    theta = best_optimal_theta

                if exit_function:
                    return best_optimal_rlf_value, best_optimal_par, best_optimal_theta

                if self.options["corr"] == "squar_exp":
                    self.options["theta0"] = (theta * self.coeff_pls**2).sum(1)
                else:
                    self.options["theta0"] = (theta *
                                              np.abs(self.coeff_pls)).sum(1)

                self.options["n_comp"] = int(self.nx)
                limit = 10 * self.options["n_comp"]
                self.best_iteration_fail = None
                exit_function = True

        return best_optimal_rlf_value, best_optimal_par, best_optimal_theta
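The multi-start pattern above (stack deterministic, random, and LHS starting points in log10(theta) space, then keep the best optimizer result) can be shown in isolation; the quadratic objective below is a stand-in for the negative reduced likelihood:

import numpy as np
from scipy import optimize
from smt.sampling_methods import LHS

theta_bounds = np.array([1e-6, 2e1])
log10_bounds = np.log10(theta_bounds)

def objective(log10t):  # stand-in for minus_reduced_likelihood_function
    return float(np.sum((log10t + 1.0) ** 2))

sampling = LHS(xlimits=np.array([log10_bounds]),
               criterion="maximin", random_state=41)
starts = np.vstack((np.zeros((1, 1)), sampling(10)))  # fixed guess + LHS starts
best = min((optimize.minimize(objective, s, method="COBYLA",
                              options={"maxiter": 50}) for s in starts),
           key=lambda r: r["fun"])
theta_opt = 10.0 ** best["x"]  # map back from log10 space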
Code example #19
def parameter_search(a_lims, b_lims, c_lims, d_lims, e_lims, runN, time, dt):
    limits = []
    
    limits.extend(a_lims)
    limits.extend(b_lims)
    limits.extend(c_lims)
    limits.extend(d_lims)
    limits.extend(e_lims)
    
    limits = np.asarray(limits)
    
    ## force minimum 1% search range
    for i in range(len(limits)):
        low_bound = limits[i][0]
        high_bound = limits[i][1]
        
        diff = np.abs(high_bound - low_bound)
                
        if diff < 0.01 * low_bound or diff < 0.01*high_bound:
            new_low = 0.999 * low_bound
            new_high = 1.001 * high_bound
            
            limits[i][0] = new_low
            limits[i][1] = new_high
            
        else: 
            pass
### use Latin hypercube sampling
    
    sampling = LHS(xlimits=limits)
    coefficients = sampling(runN)
    
    results =  [[] for x in range(runN)]
    
    run_and_score = np.zeros((2,runN))
    for run in range(runN): 
        
        run_simulation = main_sim(coefficients[run][:7], coefficients[run][7:11], coefficients[run][11], coefficients[run][12:14], coefficients[run][14:17], time, dt)
        # run_simulation = main_sim(a_coeffs, b_coeffs, c_0, d_coeffs, e_coeffs, time, dt)
        
        penalty = penaltyfunction(np.asarray(run_simulation[:3]), np.asarray(run_simulation[8:12]))
            
        results[run].extend( run_simulation)
        results[run].append(penalty)
        if run % 25 == 0:
            print(f'penalty points: {penalty} for run number {run}')
        # print(f'penalty points: {penalty} for run number {run}')
        # plot_func(t_axis_f, results[run][0], results[run][1], results[run][2], run)
    
        run_and_score[0,run] = int(run)
        run_and_score[1,run] = penalty

        if penalty == 0:
            print(f'*!!!* Run number {run} has completed without incurring any penalties *!!!*')
                  
    df_results = pd.DataFrame(np.transpose(run_and_score), columns=['run', 'score'])
    df_results = df_results.sort_values('score')
    
    top_5_runs = df_results.iloc[:5,0]
    top_5_data = []
    topscore = df_results.iloc[0,1]
    
    # one limit pair per coefficient, so len(x_lims) equals the coefficient count
    best_a = np.zeros((5, len(a_lims)))
    best_b = np.zeros((5, len(b_lims)))
    best_c = np.zeros((5, 1))
    best_d = np.zeros((5, len(d_lims)))
    best_e = np.zeros((5, len(e_lims)))
    
    for rank, run_idx in enumerate(top_5_runs):
        top_5_data.append(results[int(run_idx)]) # not sure if we need this data separately

        best_a[rank,:] = results[int(run_idx)][3]
        best_b[rank,:] = results[int(run_idx)][4]
        best_c[rank,:] = results[int(run_idx)][5]
        best_d[rank,:] = results[int(run_idx)][6]
        best_e[rank,:] = results[int(run_idx)][7]

    new_a_range = make_limits_from_minmax(best_a)
    new_b_range = make_limits_from_minmax(best_b)
    new_c_range = make_limits_from_minmax(best_c)
    new_d_range = make_limits_from_minmax(best_d)
    new_e_range = make_limits_from_minmax(best_e)

    return new_a_range, new_b_range, new_c_range, new_d_range, new_e_range, top_5_data, topscore
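parameter_search narrows the search box each round through make_limits_from_minmax, whose definition is not shown here; a hypothetical reconstruction, assuming it returns one [min, max] pair per coefficient column:

import numpy as np

def make_limits_from_minmax(best_params):
    # Hypothetical helper: bound each coefficient by the min/max
    # observed across the top-5 runs (the rows of best_params).
    lows = best_params.min(axis=0)
    highs = best_params.max(axis=0)
    return np.column_stack((lows, highs)).tolist()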
Code example #20
    def test_mgp(self):
        import numpy as np
        import matplotlib.pyplot as plt
        from smt.surrogate_models import MGP
        from smt.sampling_methods import LHS

        # Construction of the DOE
        dim = 3

        def fun(x):
            import numpy as np

            res = (np.sum(x, axis=1)**2 - np.sum(x, axis=1) + 0.2 *
                   (np.sum(x, axis=1) * 1.2)**3)
            return res

        sampling = LHS(xlimits=np.asarray([(-1, 1)] * dim), criterion="m")
        xt = sampling(8)
        yt = np.atleast_2d(fun(xt)).T

        # Build the MGP model
        sm = MGP(
            theta0=[1e-2],
            print_prediction=False,
            n_comp=1,
        )
        sm.set_training_values(xt, yt[:, 0])
        sm.train()

        # Get the transfer matrix A
        emb = sm.embedding["C"]

        # Compute the smallest box containing all points of A
        upper = np.sum(np.abs(emb), axis=0)
        lower = -upper

        # Test the model
        u_plot = np.atleast_2d(np.arange(lower, upper, 0.01)).T
        x_plot = sm.get_x_from_u(u_plot)  # Get corresponding points in Omega
        y_plot_true = fun(x_plot)
        y_plot_pred = sm.predict_values(u_plot)
        sigma_MGP, sigma_KRG = sm.predict_variances(u_plot, True)

        u_train = sm.get_u_from_x(xt)  # Get corresponding points in A

        # Plots
        fig, ax = plt.subplots()
        ax.plot(u_plot, y_plot_pred, label="Predicted")
        ax.plot(u_plot, y_plot_true, "k--", label="True")
        ax.plot(u_train, yt, "k+", mew=3, ms=10, label="Train")
        ax.fill_between(
            u_plot[:, 0],
            y_plot_pred - 3 * sigma_MGP,
            y_plot_pred + 3 * sigma_MGP,
            color="r",
            alpha=0.5,
            label="Variance with hyperparameters uncertainty",
        )
        ax.fill_between(
            u_plot[:, 0],
            y_plot_pred - 3 * sigma_KRG,
            y_plot_pred + 3 * sigma_KRG,
            color="b",
            alpha=0.5,
            label="Variance without hyperparameters uncertainty",
        )

        ax.set(xlabel="x", ylabel="y", title="MGP")
        fig.legend(loc="upper center", ncol=2)
        fig.tight_layout()
        fig.subplots_adjust(top=0.74)
        plt.show()
Code example #21
N_samples = 1000

# LHS requires some limits in order to sample from the covariate space.
# Set up the dose times and frequency.  Will need these to pass to LHS
tpred, dose_times, dose_sizes, decision_point = setup_experiment(
    1, num_days=10, doses_per_day=2, hours_per_dose=12)

# These limits are the mins/max of the actual data.
age_lims = [26.0, 70.0]
weight_lims = [54.7, 136.6]
creatinine_lims = [50, 95]
sex_lims = [0, 1]
tpred_lims = [dose_times[decision_point - 1], dose_times[decision_point]]
dose_lims = [1, 20.0]
lims = np.array(
    [age_lims, weight_lims, creatinine_lims, sex_lims, tpred_lims, dose_lims])

# Do latin hypercube sampling
sampling = LHS(xlimits=lims)
domain = sampling(N_samples)

# Put the LH samples into a dataframe. Sex comes out of the sampling as a continuous
# variable, so round it to make it binary (recall, sex is an indicator: 1 = male).
colnames = ["age", "weight", "creatinine", "sex", "tpred", "D"]
domain_df = pd.DataFrame(domain,
                         columns=colnames).assign(sex=lambda x: x.sex.round())

# Now write to csv for later use
domain_df.to_csv('data/generated_data/hypercube_sampled_covariates.csv',
                 index=False)
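As a quick sanity check, LHS samples always land inside the declared xlimits column by column; a small self-contained sketch with the same covariate limits (the tpred limits here are illustrative placeholders for the dose-time window):

import numpy as np
from smt.sampling_methods import LHS

lims = np.array([[26.0, 70.0], [54.7, 136.6], [50.0, 95.0],
                 [0.0, 1.0], [216.0, 228.0], [1.0, 20.0]])
samples = LHS(xlimits=lims)(1000)
assert np.all(samples >= lims[:, 0]) and np.all(samples <= lims[:, 1])
print(samples.min(axis=0), samples.max(axis=0))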
Code example #22
    def test_vfm(self):
        # Problem set up
        ndim = 8
        ntest = 500
        ndoeLF = int(10 * ndim)
        ndoeHF = int(3)
        funLF = WaterFlowLFidelity(ndim=ndim)
        funHF = WaterFlow(ndim=ndim)
        deriv1 = True
        deriv2 = True
        LF_candidate = "QP"
        Bridge_candidate = "KRG"
        type_bridge = "Multiplicative"
        optionsLF = {}
        optionsB = {
            "theta0": [1e-2] * ndim,
            "print_prediction": False,
            "deriv": False
        }

        # Construct low/high fidelity data and validation points
        sampling = LHS(xlimits=funLF.xlimits, criterion="m")
        xLF = sampling(ndoeLF)
        yLF = funLF(xLF)
        if deriv1:
            dy_LF = np.zeros((ndoeLF, 1))
            for i in range(ndim):
                yd = funLF(xLF, kx=i)
                dy_LF = np.concatenate((dy_LF, yd), axis=1)

        sampling = LHS(xlimits=funHF.xlimits, criterion="m")
        xHF = sampling(ndoeHF)
        yHF = funHF(xHF)
        if deriv2:
            dy_HF = np.zeros((ndoeHF, 1))
            for i in range(ndim):
                yd = funHF(xHF, kx=i)
                dy_HF = np.concatenate((dy_HF, yd), axis=1)

        xtest = sampling(ntest)
        ytest = funHF(xtest)
        dytest = np.zeros((ntest, ndim))
        for i in range(ndim):
            dytest[:, i] = funHF(xtest, kx=i).T

        # Initialize the extension VFM
        vfm = VFM(
            type_bridge=type_bridge,
            name_model_LF=LF_candidate,
            name_model_bridge=Bridge_candidate,
            X_LF=xLF,
            y_LF=yLF,
            X_HF=xHF,
            y_HF=yHF,
            options_LF=optionsLF,
            options_bridge=optionsB,
            dy_LF=dy_LF,
            dy_HF=dy_HF,
        )

        # Prediction of the validation points
        rms_error = compute_rms_error(vfm, xtest, ytest)
        self.assert_error(rms_error, 0.0, 3e-1)
Code example #23
def evolution(f,p,it,cull_percen,mut_percen,unsolved):
    '''
    INPUTS: 
    f               : sudoku cost function 
    p               : population size
    it              : iterations 
    cull_percen     : percentage of population to be culled
    mut_percen      : percentage chance of a mutation 
    unsolved        : the unsolved sudoku vector 

    OUTPUTS:
    i_best          : the vector of optimised input values 
    SudokuSolve.gif : a gif showing function value over time as 
                        well as the current sudoku

    '''
    empty_entries=0
    for i in range(len(unsolved)):
        if unsolved[i]==0:
            empty_entries+=1
    dimension_bounds=[1,9]
    bounds=np.zeros((empty_entries,2))
    for i in range(len(bounds)):
        bounds[i,0]=dimension_bounds[0]
        bounds[i,1]=dimension_bounds[1]
    d=len(bounds)
    '''ORIGINAL SAMPLE'''
    sampling=LHS(xlimits=bounds) #LHS Sample
    i_pos=sampling(p)
    '''EVALUATING FITNESSES'''
    i_val=np.zeros((len(i_pos),1))
    for i in range(len(i_pos)):
        i_val[i,:]=f(i_pos[i,:])
    i_pos=np.concatenate((i_pos,i_val),axis=1)
    i_best=i_pos[np.argmin(i_pos[:,-1])]
    iteration=0
    fstore=[]
    while iteration<it: #PARAMETER HERE (iterations)
        '''TOURNAMENT SELECTION'''
        i_new=np.zeros((int(p*(cull_percen)),d+1)) # PARAMETER HERE (percentage to be kept)
        new_count=0
        while new_count<len(i_new):
            rnd.shuffle(i_pos)
            t_size=rnd.randint(1,10) # SORT OF PARAMETER HERE (tournament size)
            t=i_pos[:t_size,:]
            t_best=t[np.argmin(t[:,-1])]
            i_new[new_count,:]=t_best[:]
            new_count+=1
        i_pos=copy.deepcopy(i_new)
        '''COMPLETING WITH RANDOM CANDIDATES'''
        new_psize=p-len(i_pos)
        sampling=LHS(xlimits=bounds)
        i_new=sampling(new_psize)
        i_val_new=np.zeros((len(i_new),1))
        for i in range(len(i_new)):
            i_val_new[i,:]=f(i_new[i,:])
        i_new=np.concatenate((i_new,i_val_new),axis=1)
        i_pos=np.concatenate((i_new,i_pos),axis=0)
        best_index=np.argmin(i_pos[:,-1])
        i_best=i_pos[best_index]
        i_best_val=i_pos[best_index,-1]
        print(i_best_val,end='\r')
        fstore.append(i_best_val)
        '''CROSSOVER HERE'''
        rnd.shuffle(i_pos)
        cross_index=np.linspace(0,p-2,int(p/2)) # linspace needs an integer count
        for i in cross_index: # SINGLE-POINT CROSSOVER
            i=int(i)
            k=rnd.randint(0,d)
            tail=i_pos[i,k:].copy() # copy first so the swap is not clobbered
            i_pos[i,k:]=i_pos[i+1,k:]
            i_pos[i+1,k:]=tail
        '''MUTATION CODE HERE'''
        for i in range(len(i_pos)):
            for j in range(d):
                prob=rnd.uniform()
                if prob<mut_percen:
                    i_pos[i,j]=rnd.uniform(bounds[j,0],bounds[j,1])
        i_pos=i_pos[:,:-1]
        i_val=np.zeros((len(i_pos),1))
        for i in range(len(i_pos)):
            i_val[i,:]=f(i_pos[i,:])
        i_pos=np.concatenate((i_pos,i_val),axis=1)

        plot_util(unsolved,i_best,fstore,iteration)
        iteration+=1

    images=[]

    for filename in range(it): # one frame is saved per loop iteration (0 .. it-1)
        images.append(imageio.imread(str(filename)+'.png'))
        os.remove(str(filename)+'.png')

    imageio.mimsave('SudokuSolve.gif', images)
    return i_best
Code example #24
File: test_mfk_variance.py  Project: bouhlelma/smt
    def test_mfk_variance(self):

        # To create the doe
        # dim = 2
        nlevel = 2
        ub0 = 10.0
        ub1 = 15.0
        lb0 = -5.0
        lb1 = 0.0
        xlimits = np.array([[lb0, ub0], [lb1, ub1]])

        # Constants
        n_HF = 5  # number of high-fidelity points (the low-fidelity DOE has twice as many)
        xdoes = NestedLHS(nlevel=nlevel, xlimits=xlimits)
        x_t_lf, x_t_hf = xdoes(n_HF)

        # Evaluate the HF and LF functions
        y_t_lf = LF(x_t_lf)
        y_t_hf = HF(x_t_hf)

        sm = MFK(
            theta0=x_t_hf.shape[1] * [1e-2],
            print_global=False,
            rho_regr="constant",
        )

        # low-fidelity dataset names being integers from 0 to level-1
        sm.set_training_values(x_t_lf, y_t_lf, name=0)
        # high-fidelity dataset without name
        sm.set_training_values(x_t_hf, y_t_hf)
        # train the model
        sm.train()

        # Validation set
        # for validation with LHS
        ntest = 1
        sampling = LHS(xlimits=xlimits)
        x_test_LHS = sampling(ntest)
        # y_test_LHS = HF(x_test_LHS)

        # compare the mean value between different formula
        if print_output:
            print("Mu sm  : {}".format(sm.predict_values(x_test_LHS)[0, 0]))
            print("Mu LG_sm : {}".format(
                TestMFK_variance.Mu_LG_sm(x_test_LHS, sm)[0, 0]))
            print("Mu LG_LG : {}".format(
                TestMFK_variance.Mu_LG_LG(x_test_LHS, sm)[0, 0]))

        # self.assertAlmostEqual(
        #     TestMFK_variance.Mu_LG_sm(x_test_LHS, sm)[0, 0],
        #     TestMFK_variance.Mu_LG_LG(x_test_LHS, sm)[0, 0],
        #     delta=1,
        # )
        self.assertAlmostEqual(
            sm.predict_values(x_test_LHS)[0, 0],
            TestMFK_variance.Mu_LG_LG(x_test_LHS, sm)[0, 0],
            delta=1,
        )

        # compare the variance value between different formula
        (k_0_LG_sm,
         k_1_LG_sm) = TestMFK_variance.Cov_LG_sm(x_test_LHS, x_test_LHS, sm)
        (k_0_LG_LG,
         k_1_LG_LG) = TestMFK_variance.Cov_LG_LG(x_test_LHS, x_test_LHS, sm)
        k_0_sm = sm.predict_variances_all_levels(x_test_LHS)[0][0, 0]
        k_1_sm = sm.predict_variances_all_levels(x_test_LHS)[0][0, 1]

        if print_output:
            print("Level 0")
            print("Var sm  : {}".format(k_0_sm))
            print("Var LG_sm : {}".format(k_0_LG_sm[0, 0]))
            print("Var LG_LG : {}".format(k_0_LG_LG[0, 0]))

            print("Level 1")
            print("Var sm  : {}".format(k_1_sm))
            print("Var LG_sm : {}".format(k_1_LG_sm[0, 0]))
            print("Var LG_LG : {}".format(k_1_LG_LG[0, 0]))

        # for level 0
        self.assertAlmostEqual(k_0_sm, k_0_LG_sm[0, 0], delta=1)
        self.assertAlmostEqual(k_0_LG_sm[0, 0], k_0_LG_LG[0, 0], delta=1)
        # for level 1
        self.assertAlmostEqual(k_1_sm, k_1_LG_sm[0, 0], delta=1)
        self.assertAlmostEqual(k_1_LG_sm[0, 0], k_1_LG_LG[0, 0], delta=1)

        (
            beta_sm_1,
            sigma2_sm_1,
            beta_sm_2,
            sigma2_sm_2,
            rho_sm,
            sigma2_rho_sm,
            beta_LG_1,
            sigma2_LG_1,
            beta_LG_2,
            sigma2_LG_2,
            rho_LG,
            sigma2_rho_LG,
        ) = TestMFK_variance.verif_hyperparam(sm, x_test_LHS)
        if print_output:
            print("Hyperparameters")
            print("rho_sm : {}".format(rho_sm))
            print("rho_LG : {}".format(rho_LG))
            print("sigma2_rho_sm : {}".format(sigma2_rho_sm[0]))
            print("sigma2_rho_LG : {}".format(sigma2_rho_LG))
            print("beta_sm_1 : {}".format(beta_sm_1))
            print("beta_LG_1 : {}".format(beta_LG_1[0, 0]))
            print("beta_sm_2 : {}".format(beta_sm_2))
            print("beta_LG_2 : {}".format(beta_LG_2))
            print("sigma2_sm_1 : {}".format(sigma2_sm_1))
            print("sigma2_LG_1 : {}".format(sigma2_LG_1))
            print("sigma2_sm_2 : {}".format(sigma2_sm_2))
            print("sigma2_LG_2 : {}".format(sigma2_LG_2))
Code example #25
File: sampling.py  Project: eexcalibur/UQ_python
import numpy as np
import matplotlib.pyplot as plt
import os
import time
from loguru import logger
import multiprocessing as mp
from smt.sampling_methods import LHS  # LHS is used below but was missing from the imports

para_limits = np.array([[0.01, 1.4], [1.02E20, 1.67e24]])

num = 1000

base_path = "/home/tzhang/wrf_solar/wsolar412_bnl/"
archive_path = "/S2/gscr2/tzhang/big_data/UQ/sampling/"
para_names = ['vdis', 'beta_con']

sampling = LHS(xlimits=para_limits)
x = sampling(num)
print(x.shape)
print(x[0, :])

para_samples = []
for id, xx in enumerate(x):
    samp = {}
    for i, name in enumerate(para_names):
        samp[name] = xx[i]
    para_samples.append((id, samp))


def run_case(id, x):
    #create a case
    #print("cp -r "+base_path+"/runwrf "+base_path+"/case"+str(id))