def Mutation2(Members, NM, Scale):
    GenesForMutation = np.random.randint(0, len(Members), NM)
    MutatedMembers = np.copy(Members)
    for i in GenesForMutation:
        gene = np.random.randint(0, len(Members[0]), 1)
        # Draw one candidate above and one below the current gene value,
        # scaled by +/- Scale; the branches keep the interval ordering
        # correct for negative gene values.
        if Members[i, gene] >= 0:
            MutationP = np.random.uniform(Members[i, gene], Members[i, gene] * (1 + Scale), 1)
            MutationM = np.random.uniform(Members[i, gene] * (1 - Scale), Members[i, gene], 1)
        else:
            MutationP = np.random.uniform(Members[i, gene], Members[i, gene] * (1 - Scale), 1)
            MutationM = np.random.uniform(Members[i, gene] * (1 + Scale), Members[i, gene], 1)
        # Copy the row so the trial values do not alias MutatedMembers[i]
        # while the two candidates are being compared.
        MutatedP = MutatedMembers[i].copy()
        MutatedP[gene] = MutationP
        frosenP = rosen(MutatedP)
        MutatedM = MutatedMembers[i].copy()
        MutatedM[gene] = MutationM
        frosenM = rosen(MutatedM)
        # Keep whichever perturbation gives the lower Rosenbrock value.
        if frosenP >= frosenM:
            MutatedMembers[i, gene] = MutationM
        else:
            MutatedMembers[i, gene] = MutationP
    return MutatedMembers
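# A minimal usage sketch for Mutation2 (my addition; assumes `numpy` is
# imported as `np` and `rosen` comes from `scipy.optimize`, neither of which
# is shown in the snippet above). Population sizes and Scale are arbitrary.
if __name__ == "__main__":
    import numpy as np
    from scipy.optimize import rosen

    pop = np.random.uniform(-2.0, 2.0, size=(10, 2))  # 10 members, 2 genes
    mutated = Mutation2(pop, NM=5, Scale=0.2)         # mutate 5 members by up to +/-20%
    assert mutated.shape == pop.shape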
def plot(self, function, type='surface'):
    """
    Plots a given function. `type` selects the class of plot: either a
    surface plot or a contour plot. It defaults to a surface plot.
    """
    if type == 'surface':
        x = linspace(-2, 2, 250)  # plot interval in the x direction
        y = linspace(-1, 3, 250)  # plot interval in the y direction
        X, Y = meshgrid(x, y)
        Z = rosen([X, Y])         # function values over the grid
        ax = plt.axes(projection='3d')
        ax.plot_surface(X, Y, Z, norm=LogNorm(), rstride=5, cstride=5,
                        cmap='RdGy_r', alpha=.9, edgecolor='none')
        ax.set_title('Rosenbrock function - Surface plot')
        ax.set_xlabel('x_1')
        ax.set_ylabel('x_2')
        ax.set_zlabel('f(x)')
    if type == 'contour':
        ax = plt.axes()
        x = linspace(-5, 5, 500)  # plot interval in the x direction
        y = linspace(-5, 5, 500)  # plot interval in the y direction
        X, Y = meshgrid(x, y)
        Z = rosen([X, Y])
        plt.contour(X, Y, Z, 150, cmap='RdGy')
        ax.set_title('Rosenbrock function - Contour plot')
        ax.set_xlabel('x_1')
        ax.set_ylabel('x_2')
    plt.show()
def val(self, x, test=False):
    assert len(x) == self.N
    val = 0
    for idx in range(len(x) - 1):
        val += (1 - x[idx])**2 + 100 * (x[idx + 1] - x[idx]**2)**2
    if test:
        # Cross-check against scipy's reference implementation.
        np.testing.assert_almost_equal(val, rosen(x))
    return val
def rosen_obj(params, shift):
    val = rosen(params["x_half_a"] - shift) + rosen(params["x_half_b"] - shift)
    dval = OrderedDict([
        ("x_half_a", rosen_der(params["x_half_a"] - shift)),
        ("x_half_b", rosen_der(params["x_half_b"] - shift)),
    ])
    return val, dval
def generate_rosen_data(size, d, meshgrid=False):
    if not meshgrid:
        X = np.random.rand(size, d)
        y = rosen(X.T)[:, None]
        return X, y
    else:
        Xi, Xj = np.meshgrid(np.linspace(0, 1), np.linspace(0, 1))
        X = np.vstack([Xi.ravel(), Xj.ravel()]).T
        y = rosen(X.T)
        Y = y.reshape(Xi.shape)
        return X, y[:, None], Xi, Xj, Y
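# Shape sketch for generate_rosen_data (my addition; assumes numpy as np and
# scipy.optimize.rosen are imported as above). Note that `size` and `d` are
# ignored on the meshgrid branch, which uses linspace's default of 50 points.
if __name__ == "__main__":
    X, y = generate_rosen_data(size=100, d=2)
    assert X.shape == (100, 2) and y.shape == (100, 1)
    X, y, Xi, Xj, Y = generate_rosen_data(0, 0, meshgrid=True)
    assert Xi.shape == Xj.shape == Y.shape == (50, 50)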
def plot_trace(ps, ttl):
    x = np.linspace(-5, 5, 100)
    y = np.linspace(-5, 5, 100)
    X, Y = np.meshgrid(x, y)
    Z = rosen(np.vstack([X.ravel(), Y.ravel()])).reshape((100, 100))
    ps = np.array(ps)
    plt.figure(figsize=(12, 4))
    plt.subplot(121)
    plt.contour(X, Y, Z, np.arange(10)**5)
    plt.plot(ps[:, 0], ps[:, 1], '-o')
    plt.plot(1, 1, 'r*', markersize=12)  # global minimum at (1, 1)
    plt.subplot(122)
    plt.semilogy(range(len(ps)), rosen(ps.T))
    plt.title(ttl)
def plot_trace(ps, ttl):
    x = np.linspace(-5, 5, 100)
    y = np.linspace(-5, 5, 100)
    X, Y = np.meshgrid(x, y)
    Z = rosen(np.vstack([X.ravel(), Y.ravel()])).reshape((100, 100))
    ps = np.array(ps)
    plt.figure(figsize=(12, 4))
    plt.subplot(121)
    plt.contour(X, Y, Z, np.arange(10)**5)
    plt.plot(ps[:, 0], ps[:, 1], '-o')
    plt.plot(1, 1, 'r*', markersize=12)  # global minimum at (1, 1)
    plt.subplot(122)
    plt.semilogy(list(range(len(ps))), rosen(ps.T))
    plt.title(ttl)
def rosenbrock(x):
    """The Rosenbrock function is a non-convex function used as a
    performance test problem for optimization algorithms. The function is
    defined by: f(x, y) = (a-x)^2 + b(y-x^2)^2. The global minimum lies
    inside a long, narrow, parabolic-shaped flat valley. Finding the valley
    is trivial; converging to the global minimum is difficult.

    More info: https://en.wikipedia.org/wiki/Rosenbrock_function
    """
    return rosen(x)
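# A quick sanity check (my addition, assuming numpy and scipy.optimize.rosen
# are available): scipy's `rosen` uses a = 1, b = 100, so the global minimum
# f(1, ..., 1) = 0, and in 2-D it matches the formula in the docstring above.
if __name__ == "__main__":
    import numpy as np
    from scipy.optimize import rosen

    assert rosen(np.ones(5)) == 0.0  # global minimum
    x, y, a, b = 0.3, -0.7, 1.0, 100.0
    assert np.isclose(rosen([x, y]), (a - x)**2 + b * (y - x**2)**2)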
def test_tqdm_resume_interrupted():
    des = DESolver(lambda x: [rosen(x), time.sleep(.001)][0],
                   bounds=[(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)],
                   popsize=2, maxfun=23, p_bars=True)
    des.solve()
    assert des.pbar_feval.n == 24
    assert des.pbar_gen_mutations.n == 4
    assert np.isclose(des.pbar_gens.n, 1.0 + 4 / 10)
    state = des.state.copy()
    state['maxfun'] = 40
    des = DESolver(rosen, **state)
    assert des._nfev == 24
    assert des.pbar_feval.n == 24
    assert des.pbar_gen_mutations.n == 4
    assert np.isclose(des.pbar_gens.n, 1.0 + 4 / 10)
    x, y = next(des)
    assert des._nfev == 34
    assert des.pbar_feval.n == 34
    assert des.pbar_gen_mutations.n == 4
    assert np.isclose(des.pbar_gens.n, 2.0 + 4 / 10)
    with pytest.raises(StopIteration):
        x, y = next(des)
    assert des._nfev == 41
    assert des.pbar_feval.n == 41
    assert des.pbar_gen_mutations.n == 1
    assert np.isclose(des.pbar_gens.n, 3.0 + 1 / 10)
def testMaximization(self):
    self.optimizer.isMaximize = True
    rosen_inv = lambda x: -rosen(x)
    rosen_der_inv = lambda x: -rosen_der(x)
    max_params, max_value, _ = self.optimizer.optimize(rosen_inv, rosen_der_inv,
                                                       x0=self.x0)
    self.assertAlmostEqual(self.reference_value, max_value, 10)
    self.assertAlmostEqual(self.reference_params[0], max_params[0], 4)
    self.assertAlmostEqual(self.reference_params[1], max_params[1], 4)
def calc_obj_func(sol, obj_func='sphere'):
    if obj_func == 'sphere':
        return sphere(sol)
    elif obj_func == 'rosenbrock':
        return rosen(sol)
    return 0
def nltest(x, grad):
    nonlocal counter
    nonlocal countergrad
    if len(grad) > 0:
        countergrad += 1
        grad[:] = rosen_der(x)
        return grad
    counter += 1
    return rosen(x)
def compute(self, inputs, outputs):
    time.sleep(self.options['sleep_time'])
    nan_points = self.options['nan_points']
    if nan_points is not None:
        for nan_point in nan_points:
            if np.linalg.norm(inputs['x'] - np.asarray(nan_point)) <= self.options['nan_range']:
                outputs['f'] = 1e27
                return
    outputs['f'] = rosen(inputs['x'])
def test_tqdm():
    des = DESolver(lambda x: [rosen(x), time.sleep(.001)][0],
                   bounds=[(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)],
                   popsize=2, maxiter=2, maxfun=30, p_bars=True)
    des.solve()
    assert des.pbar_feval.n == 30
    assert des.pbar_gen_mutations.n == 0
    assert np.isclose(des.pbar_gens.n, 2.0)

    des = DESolver(lambda x: [rosen(x), time.sleep(.001)][0],
                   bounds=[(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)],
                   popsize=2, maxiter=1, maxfun=30, p_bars=True)
    des.solve()
    assert des.pbar_feval.n == 20
    assert des.pbar_gen_mutations.n == 0
    assert np.isclose(des.pbar_gens.n, 1.0)

    des = DESolver(lambda x: [rosen(x), time.sleep(.001)][0],
                   bounds=[(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)],
                   popsize=2, maxiter=2, p_bars=True)
    des.solve()
    assert des.pbar_feval.n == 30
    assert des.pbar_gen_mutations.n == 0
    assert np.isclose(des.pbar_gens.n, 2.0)

    des = DESolver(lambda x: [rosen(x), time.sleep(.001)][0],
                   bounds=[(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)],
                   popsize=2, maxfun=23, p_bars=True)
    des.solve()
    assert des.pbar_feval.n == 24
    assert des.pbar_gen_mutations.n == 4
    assert np.isclose(des.pbar_gens.n, 1.0 + 4 / 10)

    des = DESolver(lambda x: [rosen(x), time.sleep(.001)][0],
                   bounds=[(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)],
                   popsize=2, maxiter=1, p_bars=True)
    des.solve()
    assert des.pbar_feval.n == 20
    assert des.pbar_gen_mutations.n == 0
    assert np.isclose(des.pbar_gens.n, 1.0)
def _build_set(self, use_cache, rewrite):
    if use_cache:
        try:
            return self._load()
        except AssertionError:
            print("Can't use cache, generating new dataset")
    x = 2 * np.random.random((self.data_size, self.n_dim)) - 1
    y = rosen(x.T)[:, None]
    if rewrite:
        self._save('x', x)
        self._save('y', y)
    return x, y
def test_minimize_ipopt_jac_constraints_if_scipy():
    """`minimize_ipopt` works with a Jacobian and constraints."""
    from scipy.optimize import rosen, rosen_der
    x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
    constr = {"fun": lambda x: rosen(x) - 1.0, "type": "ineq"}
    res = cyipopt.minimize_ipopt(rosen, x0, jac=rosen_der, constraints=constr)
    assert isinstance(res, dict)
    assert np.isclose(res.get("fun"), 1.0)
    assert res.get("status") == 0
    assert res.get("success") is True
    expected_res = np.array([1.00407015, 0.99655763, 1.05556205,
                             1.18568342, 1.38386505])
    np.testing.assert_allclose(res.get("x"), expected_res)
def evaluate(self, x):
    assert len(x) == self.d
    x = x.astype(np.int64)
    # Compute the non-binary representation: each group of 4 bits encodes
    # one integer variable.
    unbinarized_x = []
    for i in range(self.unbinarized_d):
        x_i = 0
        for j in x[4 * i:4 * i + 4]:
            x_i = (x_i << 1) | j  # bit-shift to convert binary to integer
        normalized_x = x_i + self.unbinarized_lb
        unbinarized_x.append(normalized_x)
    return rosen(unbinarized_x) / self.scaling
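# Worked decoding example (my addition): the inner loop above turns 4 bits
# into an integer MSB-first, e.g. [1, 0, 1, 1] -> 0b1011 == 11, which is
# then shifted by the lower bound `unbinarized_lb`.
if __name__ == "__main__":
    x_i = 0
    for j in [1, 0, 1, 1]:
        x_i = (x_i << 1) | j
    assert x_i == 11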
def test_minimize_ipopt_nojac_constraints_if_scipy():
    """`minimize_ipopt` works without a Jacobian and with constraints."""
    from scipy.optimize import rosen
    x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
    constr = {"fun": lambda x: rosen(x) - 1.0, "type": "ineq"}
    res = cyipopt.minimize_ipopt(rosen, x0, constraints=constr)
    assert isinstance(res, dict)
    assert np.isclose(res.get("fun"), 1.0)
    assert res.get("status") == 0
    assert res.get("success") is True
    expected_res = np.array([1.001867, 0.99434067, 1.05070075,
                             1.17906312, 1.38103001])
    np.testing.assert_allclose(res.get("x"), expected_res)
def test_optimization_trust():
    print("**********************************************")
    print("TEST Newton trust region")
    x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
    res = optimize.minimize(optimize.rosen, x0, method='trust-ncg',
                            jac=optimize.rosen_der, hess=optimize.rosen_hess,
                            options={'gtol': 1e-8, 'disp': True})
    print(res.x)
    print(optimize.rosen(x0).shape)
    print(optimize.rosen_der(x0).shape)
    print(optimize.rosen_hess(x0).shape)
    return res.fun
def rosenbrockLnlike(theta):
    """
    Rosenbrock function as a log-likelihood, following Wang & Li (2017).

    Parameters
    ----------
    theta : array

    Returns
    -------
    l : float
        log-likelihood
    """
    return -rosen(theta) / 100.0
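# Sanity check (my addition; assumes numpy and scipy.optimize.rosen are in
# scope): the log-likelihood peaks at the Rosenbrock minimum theta = (1, 1),
# where it equals 0, and is negative everywhere else.
if __name__ == "__main__":
    import numpy as np
    assert rosenbrockLnlike(np.ones(2)) == 0.0
    assert rosenbrockLnlike(np.zeros(2)) < 0.0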
def test(self):
    # let's at least make sure it doesn't error
    el_test = EvaluationLogger("Test logger")
    for i in range(50):
        arg = [0.0, 0.0]
        el_test.push([arg], rosen(arg))
    min_coord = [-3, -3]
    max_coord = [3, 3]
    convergence_demo2d([el_test], rosen, min_coord, max_coord,
                       resample_evals_to=25, show_plot=False)
    self.assertTrue(True)
    goal_val_plot([el_test], mintoi=True, xtimestep='time', fmin=0,
                  plot_logarithm=False, show_plot=False)
    self.assertTrue(True)
def rosenbrock(x):
    """The Rosenbrock function is a non-convex function used as a
    performance test problem for optimization algorithms. The function is
    defined by: f(x, y) = (a-x)^2 + b(y-x^2)^2. The global minimum lies
    inside a long, narrow, parabolic-shaped flat valley. Finding the valley
    is trivial; converging to the global minimum is difficult.

    More info: https://en.wikipedia.org/wiki/Rosenbrock_function

    Examples:
        >>> bounds = [(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)]
        >>> result = optimize_function(rosenbrock, bounds)
        >>> result.x  # Solution
        array([ 1.,  1.,  1.,  1.,  1.])
        >>> result.fun  # Final value of the objective function
        0.0
        >>> result.success
        True
    """
    return rosen(x)
def dim53Rosenbrock(ht_list, x):
    XX = []
    assert len(ht_list) == 50
    # Convert to these categories, as CoCaBO assumes categories in
    # (0, 1, 2, 3, etc.).
    h2 = [1, 2]
    for i in ht_list:
        if i:
            XX.append(h2[i])
        else:
            XX.append(h2[0])
    for i in x:
        XX.append(i)
    # To make sure there is no cheating, round the discrete variables
    # before calling the function.
    XX[0:len(ht_list)] = np.round(XX[0:len(ht_list)])
    return rosen(XX) / 20000 + 1e-6 * np.random.rand()
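# Call sketch (my addition; assumes numpy as np and scipy.optimize.rosen are
# imported, and that the 53 dimensions split as 50 binary categorical
# variables plus 3 continuous ones, which is an inference from the name).
if __name__ == "__main__":
    import numpy as np
    ht = list(np.random.randint(0, 2, size=50))  # categorical part
    xc = np.random.uniform(-2.0, 2.0, size=3)    # continuous part
    print(dim53Rosenbrock(ht, xc))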
def test_tqdm_resume():
    des = DESolver(lambda x: [rosen(x), time.sleep(.001)][0],
                   bounds=[(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)],
                   popsize=2, maxiter=1, p_bars=True)
    des.solve()
    assert des.pbar_feval.n == 20
    assert des.pbar_gen_mutations.n == 0
    assert np.isclose(des.pbar_gens.n, 1.0)
    des = DESolver(rosen, **des.state)
    assert des._nfev == 20
    assert des.pbar_feval.n == 20
    assert des.pbar_gen_mutations.n == 0
    assert des.pbar_gens.n == 1.0 + 0 / 10
    x, y = next(des)
    assert des._nfev == 30
    assert des.pbar_feval.n == 30
    assert des.pbar_gen_mutations.n == 0
    assert np.isclose(des.pbar_gens.n, 2.0 + 0 / 10)
def f(x):
    time.sleep(0.1)
    return [opt.rosen(x), opt.rosen_der(x)]
def __call__(self, x):
    self.x = x
    self.fun = optimize.rosen(x)
    self.nit += 1
def func_grad_hess(x, *args):
    f = optimize.rosen(x)
    g = optimize.rosen_der(x)
    h = optimize.rosen_hess(x)
    return (f, g, h)
def f(x):
    time.sleep(0.01)
    return [opt.rosen(x), opt.rosen_der(x)]
def fun(self, x, *args):
    self.nfev += 1
    return rosen(x)
def fun(self, x):
    time.sleep(40e-6)
    return rosen(x)
def evaluate(self, x):
    assert len(x) == self.d
    if self.dolog:
        return np.log(rosen(x) + 1) + self.noise_rng.normal(scale=self.noise_factor)
    return rosen(x) / self.scaling + self.noise_rng.normal(scale=self.noise_factor)
def myrosen(pars):
    # dict views are not array-like in Python 3; materialize the values first
    return rosen(list(pars.values()))
def SysEq(x, gc):
    f = rosen(x)
    return f, []
optlog.callback(x0)
try:
    xfin = opt.minimize(f, jac=True, x0=x0, method='CG',
                        callback=optlog.callback, options={'gtol': 0.0})
    xfin = opt.minimize(f, jac=True, x0=xfin.x, method='CG',
                        callback=optlog.callback)
    xfin = opt.minimize(f, jac=True, x0=xfin.x, method='CG',
                        callback=optlog.callback)
    finx = xfin.x
    optlog.finish(finx)
    print(xfin)
except KeyboardInterrupt:
    finx = optlog.hist.iloc[-1, :].x
    print(finx)

hist = pd.read_pickle('./opthist.pkl')
plt.figure()
plt.plot(hist.i, hist.f, '-x')
plt.plot(hist.i, hist.gnorm, '-x')
plt.xscale('log')
plt.yscale('log')

plt.figure()
X, Y = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
xy = np.hstack((X.flatten()[:, None], Y.flatten()[:, None]))
feval = np.array([opt.rosen(v) for v in xy]).reshape(len(X), len(Y))
plt.contour(X, Y, np.log(feval))
hist_points = np.vstack(hist.x)
plt.plot(hist_points[:, 0], hist_points[:, 1], 'x-')
plt.show()
def test_function_selection5():
    forrester = lambda x: (5.0*x - 2.0)**2.0 * np.sin(12.*x - 4.)  # 1-D test function

    f = lambda x: rosen(x)                                   # high-fidelity function
    f2 = lambda x: 0.5*rosen(x) + 1.0*x[0] + .1*x[1] - 1.0   # low-fidelity function

    lb = [-2, -2]
    ub = [2, 2]
    dx = 0.1
    x = y = np.arange(lb[0], ub[0], dx)
    X, Y = np.meshgrid(x, y)
    zs1 = np.array([f([xi, yi]) for xi, yi in zip(np.ravel(X), np.ravel(Y))])
    Z1 = zs1.reshape(X.shape)
    zs2 = np.array([f2([xi, yi]) for xi, yi in zip(np.ravel(X), np.ravel(Y))])
    Z2 = zs2.reshape(X.shape)

    fig1 = plt.figure(1)
    ax1 = fig1.add_subplot(projection='3d')
    ax1.plot_wireframe(X, Y, Z1, color='r')
    ax1.set_xlabel('x1')
    ax1.set_ylabel('x2')
    ax1.set_zlabel('f (x1,x2)')
    ax1.set_zbound(-500, 4000)

    fig2 = plt.figure(2)
    ax2 = fig2.add_subplot(projection='3d')
    ax2.plot_wireframe(X, Y, Z2, color='b')
    ax2.set_xlabel('x1')
    ax2.set_ylabel('x2')
    ax2.set_zlabel('f (x1,x2)')
    ax2.set_zbound(-500, 4000)

    plt.show()
def rosen_obj_func(w):
    # w is a sparse column vector; densify and flatten before evaluating
    return rosen(w.T.toarray()[0])
def fitness(self, individual):
    return rosen(individual)
def compute_r(self):
    result = np.array(rosen(self.x.r))
    return result
def my_rosen(x):
    return rosen(x.T)
def myfunc1(x):
    return optimize.rosen(x)
def rosenbrock(vector):
    if all(-30 <= e <= 30 for e in vector):
        return optimize.rosen(vector)
    else:
        return float("inf")
def f(x):
    # alternative test function: np.power(x[0], 2.) + np.power(x[1], 2.)
    return optimize.rosen(x)  # the Rosenbrock function
fhigh1d = lambda x: (6.0*x - 2.0)**2.0 * np.sin(12.*x - 4.)

def fhigh(x):
    A = 0.07
    B = 0.9
    C = 0.2
    return fhigh1d(np.linalg.norm(A*x)) + B*np.sum(x) + C

def flow(x):
    A = 0.035
    B = 0.5
    C = 0.1
    return fhigh1d(np.linalg.norm(A*x)) + B*np.sum(x) + C

# Rosenbrock-based fidelity pair; these rebind fhigh/flow defined above.
fhigh = lambda x: rosen(x)
flow = lambda x: 0.5*rosen(x) + 1.0*x[0] + .1*x[1] - 1.0

g1high = lambda x: x[0]**2 * x[1] / 20. - .1
g1low = lambda x: x[0]**2 * x[1] / 20. + (x[0] + x[1]) / 5. - 2
g2 = lambda x: (x[0] + x[1] - 5)**2 / 30 + (x[0] - x[1] - 12)**2 / 120 - 1


def problem_2d():
    cnstr = ({'type': 'ineq', 'fun': g1high}, {'type': 'ineq', 'fun': g2})
optlog.callback(x0)
try:
    xfin = opt.minimize(f, jac=True, x0=x0, method='CG',
                        callback=optlog.callback)
    xfin = opt.minimize(f, jac=True, x0=xfin.x, method='CG',
                        callback=optlog.callback)
    xfin = opt.minimize(f, jac=True, x0=xfin.x, method='CG',
                        callback=optlog.callback)
    finx = xfin.x
    optlog.finish(finx)
    print(xfin)
except KeyboardInterrupt:
    finx = optlog.hist.iloc[-1, :].x
    print(finx)

hist = pd.read_pickle('./opthist.pkl')
plt.figure()
plt.plot(hist.i, hist.f, '-x')
plt.plot(hist.i, hist.gnorm, '-x')
plt.xscale('log')
plt.yscale('log')

plt.figure()
X, Y = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
xy = np.hstack((X.flatten()[:, None], Y.flatten()[:, None]))
feval = np.array([opt.rosen(v) for v in xy]).reshape(len(X), len(Y))
plt.contour(X, Y, np.log(feval))
hist_points = np.vstack(hist.x)
plt.plot(hist_points[:, 0], hist_points[:, 1], 'x-')
plt.show()
def f(self, x):
    return rosen(x)
def test_rosen(var, *args):
    import scipy.optimize as opt
    return opt.rosen(var)
def f(x, g):
    g[:] = rosen_der(x)
    print("one call")
    return rosen(x)