    def test_rosenbrock(self):
        bounds = np.zeros((2, 2))
        bounds[:, 0] = -3.0
        bounds[:, 1] = 3.0
        np.random.seed(1234124)
        my_opt = sbopt.RbfOpt(my_fun, bounds, polish=True)
        x, y, _, _ = my_opt.minimize(verbose=0)
        # the global minimum of the Rosenbrock function is f(1, 1) = 0
        self.assertTrue(np.isclose(y, 0.0, rtol=1e-4, atol=1e-4))
        self.assertTrue(np.isclose(x[0], 1.0, rtol=1e-2, atol=1e-2))
        self.assertTrue(np.isclose(x[1], 1.0, rtol=1e-2, atol=1e-2))
    def test_rosenbrock_distance_ei(self):
        # same Rosenbrock problem, exercising the distance-based acquisition
        # and exploration options with a multiquadric RBF and no polishing
        bounds = np.zeros((2, 2))
        bounds[:, 0] = -3.0
        bounds[:, 1] = 3.0
        np.random.seed(1234124)
        my_opt = sbopt.RbfOpt(my_fun, bounds, polish=False,
                              n_local_optimze=2,
                              acquisition='distance',
                              exploration='distance',
                              rbf_function='multiquadric')
        x, y, _, _ = my_opt.minimize(verbose=0)
        self.assertTrue(np.isclose(y, 0.0, rtol=1e-4, atol=1e-4))
        self.assertTrue(np.isclose(x[0], 1.0, rtol=1e-2, atol=1e-2))
        self.assertTrue(np.isclose(x[1], 1.0, rtol=1e-2, atol=1e-2))
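# Note: `my_fun` used by the tests above is defined elsewhere in the test
# module. Based on the assertions (minimum value 0.0 at x = (1, 1)) and the
# test names, it is presumably the 2-D Rosenbrock function; a minimal sketch:
def my_fun(x):
    # 2-D Rosenbrock function, global minimum f(1, 1) = 0
    return (1.0 - x[0])**2 + 100.0*(x[1] - x[0]**2)**2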
bounds = np.zeros((2, 2))
bounds[:, 0] = -3.0
bounds[:, 1] = 3.0

# set random seed for reproducibility
np.random.seed(1234124)

# initialize the RbfOpt object
my_opt = sbopt.RbfOpt(my_fun,  # your objective function to minimize
                      bounds,  # bounds for your design variables
                      initial_design='latin',  # initial design type:
                      # 'latin' (default) or 'random'
                      initial_design_ndata=20,  # number of initial points
                      n_local_optimze=20,  # number of local BFGS optimizers
                      # scipy radial basis function parameters, see
                      # https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.Rbf.html
                      rbf_function='linear',  # default for SbOpt is 'linear',
                      # while the default for scipy.interpolate.Rbf is
                      # 'multiquadric'
                      epsilon=None,  # (default)
                      smooth=0.0,  # (default)
                      norm='euclidean'  # (default)
                      )

# run the optimizer
result = my_opt.minimize(max_iter=100,  # maximum number of iterations
                         # (default)
                         n_same_best=20,  # number of iterations to run
                         # without improving the best function value (default)
                         eps=1e-6  # minimum distance a new design point
                         # may be from an existing design point (default)
                         )
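# The examples below print result[0] through result[3], so minimize() appears
# to return a 4-tuple: (best design variables, best function value,
# convergence-by-max_iter flag, convergence-by-n_same_best flag).
# A minimal sketch of unpacking it, assuming that return signature:
x_best, y_best, hit_max_iter, hit_n_same_best = result
print('Best design variables:', x_best)
print('Best function value:', y_best)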
# standard Branin-Hoo constants
a = 1.0
b = 5.1 / (4.0*np.pi**2)
c = 5.0 / np.pi
r = 6.0
s = 10.0
t = 1.0 / (8.0*np.pi)


def my_fun(x):
    # define the Branin-Hoo function to minimize
    A = a*(x[1] - (b*x[0]**2) + c*x[0] - r)**2
    B = s*(1.0 - t)*np.cos(x[0])
    return A + B + s


bounds = np.zeros((2, 2))
bounds[0, 0] = -5.0
bounds[0, 1] = 10.0
bounds[1, 1] = 15.0

# set random seed for reproducibility
np.random.seed(1234124)

# initialize the RbfOpt object
my_opt = sbopt.RbfOpt(my_fun,  # your objective function to minimize
                      bounds,  # bounds for your design variables
                      )

# run the optimizer
result = my_opt.minimize()
print('Best design variables:', result[0])
print('Best function value:', result[1])
print('Convergence by max iteration:', result[2])
print('Convergence by n_same_best:', result[3])
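# Sanity check (a sketch, not part of the original example): the Branin-Hoo
# global minimum is approximately 0.397887, attained e.g. at x = (pi, 2.275),
# so the best function value printed above should be close to this reference.
print('Reference optimum value:', my_fun([np.pi, 2.275]))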
# standard Branin-Hoo constants, as in the previous example
a = 1.0
b = 5.1 / (4.0*np.pi**2)
c = 5.0 / np.pi
r = 6.0
s = 10.0
t = 1.0 / (8.0*np.pi)


def my_fun(x):
    # define the Branin-Hoo function to minimize
    A = a*(x[1] - (b*x[0]**2) + c*x[0] - r)**2
    B = s*(1.0 - t)*np.cos(x[0])
    return A + B + s


bounds = np.zeros((2, 2))
bounds[0, 0] = -5.0
bounds[0, 1] = 10.0
bounds[1, 1] = 15.0

# set random seed for reproducibility
np.random.seed(1234124)

# initialize the RbfOpt object
my_opt = sbopt.RbfOpt(my_fun,  # your objective function to minimize
                      bounds,  # bounds for your design variables
                      n_local_optimze=5,
                      initial_design_ndata=5)

# run the optimizer
result = my_opt.minimize(strategy='all_local_reflect',
                         eps=1e-3,
                         max_iter=100)
print('Best design variables:', result[0])
print('Best function value:', result[1])
print('Convergence by max iteration:', result[2])
print('Convergence by n_same_best:', result[3])