def _omorf(self, s_old, del_k, del_min, eta1, eta2, gam1, gam2, omega_s,
           max_evals, random_initial, epsilon, d, subspace_method):
    """
    Computes the optimum using the ``omorf`` method
    """
    self.n = s_old.size
    self.s_old = self._apply_scaling(s_old)
    if del_k is None:
        if self.bounds is None:
            self.del_k = 0.1 * max(np.linalg.norm(self.s_old, ord=np.inf), 1.0)
        else:
            self.del_k = 0.1
    else:
        self.del_k = del_k
    self._update_bounds()
    self.f_old = self._blackbox_evaluation(self.s_old)

    self.d = d
    self.q = int(comb(self.d + 2, 2))
    self.p = self.n + 1
    self.random_initial = random_initial
    self.subspace_method = subspace_method
    self.epsilon = epsilon

    Base = Basis('total-order', orders=np.tile([2], self.d))
    self.basis = Base.get_basis()[:, range(self.d - 1, -1, -1)]

    itermax = 10000
    # Construct the sample set
    S_full, f_full = self._generate_initial_set()
    self._calculate_subspace(S_full, f_full)
    S_red, f_red = self._sample_set('new')
    for i in range(itermax):
        if len(self.f) >= max_evals or self.del_k < del_min:
            break
        my_poly = self._build_model(S_red, f_red)
        # Model value at the current iterate, evaluated within the active subspace
        m_old = my_poly.get_polyfit(np.dot(self.s_old, self.U)).item()
        s_new, m_new = self._compute_step(my_poly)
        # Safety step implemented in BOBYQA
        if np.linalg.norm(s_new - self.s_old, ord=np.inf) < omega_s * self.del_k:
            if max(np.linalg.norm(S_full - self.s_old, axis=1, ord=np.inf)) <= self.epsilon * self.del_k:
                self._calculate_subspace(S_full, f_full)
                S_red, f_red = self._sample_set('new')
                self.del_k *= gam1
            elif max(np.linalg.norm(S_red - self.s_old, axis=1, ord=np.inf)) <= self.epsilon * self.del_k:
                S_full, f_full = self._sample_set('improve', S_full, f_full, full_space=True)
                self._calculate_subspace(S_full, f_full)
                S_red, f_red = self._sample_set('new')
            else:
                S_red, f_red = self._sample_set('improve', S_red, f_red)
                S_full, f_full = self._sample_set('improve', S_full, f_full, full_space=True)
            continue
        if self.S.shape == np.unique(np.vstack((self.S, s_new)), axis=0).shape:
            # s_new coincides with a previously evaluated point; reuse its stored value
            ind_repeat = np.argmin(np.linalg.norm(self.S - s_new, ord=np.inf, axis=1))
            f_new = self.f[ind_repeat]
        else:
            f_new = self._blackbox_evaluation(s_new)
        S_red = np.vstack((S_red, s_new))
        f_red = np.vstack((f_red, f_new))
        S_full = np.vstack((S_full, s_new))
        f_full = np.vstack((f_full, f_new))
        # Calculate the trust-region ratio of actual to predicted reduction
        rho_k = (self.f_old - f_new) / (m_old - m_new)
        self._choose_best(self.S, self.f)
        self._update_bounds()
        if len(self.f) >= max_evals or self.del_k < del_min:
            break
        if rho_k >= eta2:
            S_red, f_red = self._sample_set('replace', S_red, f_red)
            S_full, f_full = self._sample_set('replace', S_full, f_full)
            self.del_k *= gam2
        elif rho_k >= eta1:
            S_red, f_red = self._sample_set('replace', S_red, f_red)
            S_full, f_full = self._sample_set('replace', S_full, f_full)
        else:
            if max(np.linalg.norm(S_full - self.s_old, axis=1, ord=np.inf)) <= self.epsilon * self.del_k:
                self._calculate_subspace(S_full, f_full)
                S_red, f_red = self._sample_set('new')
                self.del_k *= gam1
            elif max(np.linalg.norm(S_red - self.s_old, axis=1, ord=np.inf)) <= self.epsilon * self.del_k:
                S_full, f_full = self._sample_set('improve', S_full, f_full, full_space=True)
                self._calculate_subspace(S_full, f_full)
                S_red, f_red = self._sample_set('new')
            else:
                S_red, f_red = self._sample_set('improve', S_red, f_red)
                S_full, f_full = self._sample_set('improve', S_full, f_full, full_space=True)
    self.S = self._remove_scaling(self.S)
    self._choose_best(self.S, self.f)
    return self.s_old, self.f_old
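# A minimal standalone sketch (not part of the class) of the trust-region
# acceptance test used above: rho_k compares the actual reduction delivered
# by the black box with the reduction predicted by the surrogate model, and
# the radius del_k grows or shrinks accordingly. The threshold defaults
# below are illustrative assumptions, not the library's defaults.
def _example_trust_region_update(f_old, f_new, m_old, m_new, del_k,
                                 eta1=0.1, eta2=0.7, gam1=0.5, gam2=2.0):
    rho_k = (f_old - f_new) / (m_old - m_new)
    if rho_k >= eta2:
        # Very successful step: the model is locally trustworthy, expand the region
        del_k *= gam2
    elif rho_k < eta1:
        # Unsuccessful step: the model is locally poor, shrink the region
        del_k *= gam1
    return rho_k, del_k

# For example, a predicted reduction of 0.5 against an actual reduction of
# 0.45 gives rho_k = 0.9 >= eta2, so del_k doubles from 0.1 to 0.2.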
def _well_poised_LU(self, S, f, S_hat, f_hat):
    """
    Ensures the regression set is well-poised using the LU algorithm
    (proposed by Andrew Conn) for the ``trust-region`` method
    """
    # Poisedness constant of the algorithm
    psi = 1.0
    # Generate the natural monomial basis
    Base = Basis('total-order', orders=np.tile([1], self.n))
    basis = Base.get_basis()[:, range(self.n - 1, -1, -1)]

    def natural_basis_function(x, basis):
        phi = np.zeros(basis.shape[0])
        for j in range(basis.shape[0]):
            phi[j] = 1.0
            for k in range(basis.shape[1]):
                phi[j] *= (x[k] ** basis[j, k]) / factorial(basis[j, k])
        return phi

    phi_function = lambda x: natural_basis_function(x, basis)
    # Initialise the U matrix of the LU factorisation of the M matrix (see Conn et al.)
    U = np.zeros((self.p, self.p))
    # The first row of U is the e1 basis vector, which corresponds to the solution with all zeros
    U[0, 0] = 1.0
    # Perform the LU factorisation algorithm for the remaining points
    for k in range(1, self.p):
        v = np.zeros(self.p)
        for j in range(k):
            v[j] = -U[j, k] / U[j, j]
        v[k] = 1.0
        # If there are still points to choose from, check whether any of them
        # meet the criterion; if so, the maximising index selects the next
        # point for the regression/interpolation set
        if S_hat.size != 0:
            M = self._natural_basis_matrix(S_hat, v, phi_function)
            index2 = np.argmax(M)
            if M[index2] < psi:
                index2 = None
        else:
            index2 = None
        # If such an index exists, choose the point with that index and
        # remove it from the pool of candidates
        if index2 is not None:
            s = S_hat[index2, :].flatten()
            S = np.vstack((S, s))
            f = np.vstack((f, f_hat[index2].flatten()))
            S_hat = np.delete(S_hat, index2, 0)
            f_hat = np.delete(f_hat, index2, 0)
            phi = phi_function(s.flatten())
        # Otherwise, solve an optimisation problem to find the point in the
        # trust region which best satisfies the criterion
        else:
            s = optimize.minimize(
                lambda x: -abs(np.dot(v, phi_function(x.flatten()))),
                np.zeros(self.n), method='COBYLA',
                constraints=[{'type': 'ineq', 'fun': lambda x: 1.0 - x},
                             {'type': 'ineq', 'fun': lambda x: 1.0 + x}],
                options={'disp': False})['x'].flatten()
            S = np.vstack((S, s))
            f = np.vstack((f, np.array([np.inf])))
            phi = phi_function(s.flatten())
        # Update the U factor of the LU factorisation
        U[k, k] = np.dot(v, phi)
        for i in range(k + 1, self.p):
            U[k, i] += phi[i]
            for j in range(k):
                U[k, i] -= (phi[j] * U[j, i]) / U[j, j]
    return S, f, S_hat, f_hat
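# A minimal standalone sketch of the scaled monomial ("natural") basis that
# natural_basis_function evaluates, phi_j(x) = prod_k x_k^a_jk / a_jk!,
# shown here for the total-order-1 multi-indices in two variables. The
# hard-coded index set below is an illustrative assumption.
def _example_natural_basis():
    import numpy as np
    from math import factorial
    basis = np.array([[0, 0], [1, 0], [0, 1]])  # multi-indices a_j
    x = np.array([0.5, -0.25])
    phi = np.ones(basis.shape[0])
    for j, alpha in enumerate(basis):
        for k, a in enumerate(alpha):
            phi[j] *= x[k] ** a / factorial(a)
    return phi  # -> array([1.0, 0.5, -0.25]): the constant term, then x_0, then x_1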
def _trust_region(self, s_old, del_k, del_min, eta1, eta2, gam1, gam2, omega_s,
                  max_evals, random_initial, scale_bounds, epsilon):
    """
    Computes the optimum using the ``trust-region`` method
    """
    itermax = 10000
    self.n = s_old.size
    self.q = int(comb(self.n + 2, 2))
    self.p = int(comb(self.n + 2, 2))
    self.random_initial = random_initial
    self.scale_bounds = scale_bounds
    self.epsilon = epsilon

    Base = Basis('total-order', orders=np.tile([2], self.n))
    self.basis = Base.get_basis()[:, range(self.n - 1, -1, -1)]

    self.s_old = self._apply_scaling(s_old)
    self.f_old = self._blackbox_evaluation(self.s_old)
    if del_k is None:
        if self.bounds is None:
            self.del_k = 0.1 * max(np.linalg.norm(self.s_old, ord=np.inf), 1.0)
        else:
            self.del_k = 0.1
    else:
        self.del_k = del_k
    self._update_bounds()

    # Construct the sample set
    S, f = self._generate_initial_set()
    for i in range(itermax):
        self._update_bounds()
        if len(self.f) >= max_evals or self.del_k < del_min:
            break
        my_poly = self._build_model(S, f)
        m_old = my_poly.get_polyfit(self.s_old).item()
        s_new, m_new = self._compute_step(my_poly)
        # Safety step implemented in BOBYQA
        if np.linalg.norm(s_new - self.s_old, ord=np.inf) < omega_s * self.del_k:
            S, f = self._sample_set('improve', S, f)
            if max(np.linalg.norm(S - self.s_old, axis=1, ord=np.inf)) <= self.epsilon * self.del_k:
                self.del_k *= gam1
            continue
        elif self.S.shape == np.unique(np.vstack((self.S, s_new)), axis=0).shape:
            # s_new coincides with a previously evaluated point; reuse its stored value
            ind_repeat = np.argmin(np.linalg.norm(self.S - s_new, ord=np.inf, axis=1))
            f_new = self.f[ind_repeat]
        else:
            f_new = self._blackbox_evaluation(s_new)
        S = np.vstack((S, s_new))
        f = np.vstack((f, f_new))
        # Calculate the trust-region ratio of actual to predicted reduction
        rho_k = (self.f_old - f_new) / (m_old - m_new)
        self._choose_best(self.S, self.f)
        self._update_bounds()
        if len(self.f) >= max_evals or self.del_k < del_min:
            break
        if rho_k >= eta2:
            S, f = self._sample_set('replace', S, f)
            self.del_k *= gam2
        elif rho_k >= eta1:
            S, f = self._sample_set('replace', S, f)
        else:
            if max(np.linalg.norm(S - self.s_old, axis=1, ord=np.inf)) <= self.epsilon * self.del_k:
                S, f = self._sample_set('improve', S, f)
                self.del_k *= gam1
            else:
                S, f = self._sample_set('improve', S, f)
    self.S = self._remove_scaling(self.S)
    self._choose_best(self.S, self.f)
    return self.s_old, self.f_old
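# A minimal standalone sketch of the duplicate-point guard used in both loops
# above: if s_new already appears in the archive S, stacking it adds no new
# unique row, so the stored value is reused rather than spending another
# black-box evaluation. The names below are illustrative, not the class's API.
def _example_lookup_or_evaluate(S, f, s_new, blackbox):
    import numpy as np
    if S.shape == np.unique(np.vstack((S, s_new)), axis=0).shape:
        # Nearest archived point under the infinity norm is the repeat itself
        ind_repeat = np.argmin(np.linalg.norm(S - s_new, ord=np.inf, axis=1))
        return f[ind_repeat]
    return blackbox(s_new)

# e.g. with S = [[0, 0], [1, 1]] and f = [[3.0], [5.0]], querying s_new = [1, 1]
# returns the cached [5.0] without calling blackbox.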