def compute_IP(self, vertex, jet):
    '''find the impact parameter of the trajectory with respect to a given
    point (vertex). The impact parameter has the same sign as the scalar
    product of the vector pointing from the given vertex to the point of
    closest approach with the given jet direction.

    new attributes :
    * closest_t = time of closest approach to the primary vertex.
    * IP = signed impact parameter
    * IPcoord = TVector3 of the point of closest approach to the primary vertex
    '''
    self.vertex_IP = vertex

    def distquad(time):
        x, y, z = self.coord_at_time(time)
        dist2 = (x - vertex.x())**2 + (y - vertex.y())**2 \
            + (z - vertex.z())**2
        return dist2

    minim_answer = opti.bracket(distquad, xa=-0.5e-14, xb=0.5e-14)
    self.closest_t = minim_answer[1]
    vector_IP = self.point_at_time(minim_answer[1]) - vertex
    Pj = jet.p4().Vect().Unit()
    signIP = vector_IP.Dot(Pj)
    self.IP = minim_answer[4]**0.5 * sign(signIP)
    x, y, z = self.coord_at_time(minim_answer[1])
    self.IPcoord = TVector3(x, y, z)
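# A hedged, self-contained sketch of what opti.bracket returns, since the
# method above indexes into its result: scipy.optimize.bracket yields the
# 7-tuple (xa, xb, xc, fa, fb, fc, funcalls) with f(xb) < f(xa) and
# f(xb) < f(xc). So minim_answer[1] is the abscissa of the lowest point
# found and minim_answer[4] is the function value there. Note that bracket
# only brackets a minimum; using xb directly as the closest-approach time,
# as compute_IP does, is only as accurate as the bracketing step.
from scipy import optimize as opti

out = opti.bracket(lambda t: (t - 2.0)**2 + 1.0, xa=-0.5, xb=0.5)
xa, xb, xc, fa, fb, fc, funcalls = out
assert fb < fa and fb < fc   # xb brackets the minimum near t = 2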
def max_epsilon_ratio(q):
    def foo(eps):
        err = expected_error(q, eps, pf_pmf)
        eps2 = expected_epsilon(q, err, [eps, 2*eps])
        return -eps2 / eps

    br = bracket(foo, 1e-3, 1.0)[0:3]
    ans = minimize_scalar(foo, bracket=br, method='brent')
    eps0 = ans.x
    err = expected_error(q, eps0, pf_pmf)
    eps1 = expected_epsilon(q, err, [eps0, 2*eps0])
    return eps0, err, eps1
def __call__(self, *args, **kwargs):
    f_line = lambda x: kwargs['f'](kwargs['x_k'] + x * kwargs['p_k'])
    oracle_calls = 0
    if self.bracketing:
        l, r = 0, 100
        xa, xb, xc, fa, fb, fc, calls = bracket(f_line, xa=l, xb=r)
        oracle_calls += calls
        brack = (xa, xb, xc)
    else:
        brack = (0, 100)
    alpha, _, _, calls = brent_sc(f_line, brack=brack, tol=kwargs['tol'],
                                  full_output=True)
    return alpha, calls + oracle_calls
def __call__(self, *args, **kwargs):
    f_line = lambda x: kwargs['f'](kwargs['x_k'] + x * kwargs['p_k'])
    oracle_calls = 0
    if self.bracketing:
        l, r = 0, 100
        xa, xb, xc, fa, fb, fc, calls = bracket(f_line, xa=l, xb=r)
        oracle_calls += calls
        l, r = xa, xc
    else:
        l, r = 0, 100
    alpha, calls = self.golden_section(f_line, l, r, self.tol, self.max_iter)
    return alpha, calls + oracle_calls
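# The snippet above assumes a self.golden_section helper returning
# (alpha, call_count). A minimal free-function sketch with that contract
# follows; the name and signature are inferred from the call site, not the
# original implementation.
def golden_section(f, l, r, tol=1e-6, max_iter=100):
    """Golden-section search for a unimodal f on [l, r]."""
    inv_phi = (5 ** 0.5 - 1) / 2           # 1/phi, about 0.618
    x1 = r - inv_phi * (r - l)
    x2 = l + inv_phi * (r - l)
    f1, f2 = f(x1), f(x2)
    calls = 2
    for _ in range(max_iter):
        if abs(r - l) < tol:
            break
        if f1 < f2:                        # minimum lies in [l, x2]
            r, x2, f2 = x2, x1, f1
            x1 = r - inv_phi * (r - l)
            f1 = f(x1)
        else:                              # minimum lies in [x1, r]
            l, x1, f1 = x1, x2, f2
            x2 = l + inv_phi * (r - l)
            f2 = f(x2)
        calls += 1
    return (l + r) / 2, calls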
def compute_IP(self, vertex):
    self.vertex = vertex

    def distquad(time):
        x, y, z = self.coord_at_time(time)
        dist2 = (x - vertex.x())**2 + (y - vertex.y())**2 \
            + (z - vertex.z())**2
        return dist2

    minim_answer = opti.bracket(distquad, xa=-0.5e-14, xb=0.5e-14)
    self.closest_t = minim_answer[1]
    vector_IP = self.point_at_time(minim_answer[1]) - vertex
    self.signIP = vector_IP.Dot(self.p4.Vect().Unit())
    self.IP = minim_answer[4]**0.5 * sign(self.signIP)
import math
import numpy as np
# bracket() and search() here are the textbook-style two-point bracketing
# and golden-section routines, not scipy's (a scipy-based stand-in sketch
# follows below).

def powell(F, x, h=0.1, tol=1.0e-6):
    def f(s):
        return F(x + s*v)              # F in direction of v
    n = len(x)                         # Number of design variables
    df = np.zeros(n)                   # Decreases of F stored here
    u = np.identity(n)                 # Vectors v stored here by rows
    for j in range(30):                # Allow for 30 cycles:
        xOld = x.copy()                # Save starting point
        fOld = F(xOld)
        # First n line searches record decreases of F
        for i in range(n):
            v = u[i]
            a, b = bracket(f, 0.0, h)
            s, fMin = search(f, a, b)
            df[i] = fOld - fMin
            fOld = fMin
            x = x + s*v
        # Last line search in the cycle
        v = x - xOld
        a, b = bracket(f, 0.0, h)
        s, fLast = search(f, a, b)
        x = x + s*v
        # Check for convergence
        if math.sqrt(np.dot(x - xOld, x - xOld)/n) < tol:
            return x, j + 1
        # Identify biggest decrease and update search directions
        iMax = np.argmax(df)
        for i in range(iMax, n-1):
            u[i] = u[i+1]
        u[n-1] = v
    print("Powell did not converge")
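# powell() above follows the textbook convention in which bracket(f, x1, h)
# returns a two-point interval (a, b) and search(f, a, b) performs a
# golden-section minimization returning (xmin, fmin). A minimal sketch of
# stand-ins built on scipy, matching those assumed signatures so powell()
# can run, might look like this:
from scipy import optimize as opt

def bracket(f, x1, h):
    # scipy's bracket returns (xa, xb, xc, fa, fb, fc, funcalls);
    # collapse it to the (a, b) interval the textbook routine expects.
    xa, xb, xc = opt.bracket(f, xa=x1, xb=x1 + h)[:3]
    return min(xa, xc), max(xa, xc)

def search(f, a, b, tol=1.0e-9):
    # Golden-section minimization started from the interval [a, b].
    xmin = opt.golden(f, brack=(a, b), tol=tol)
    return xmin, f(xmin)

# Example (Rosenbrock function from the starting point (-1, 1)):
# xMin, nCyc = powell(lambda p: 100*(p[1] - p[0]**2)**2 + (1 - p[0])**2,
#                     np.array([-1.0, 1.0]))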
def line_search(self):
    r"""Perform a line search along the descent direction to get a new
    value of the parameter"""
    u, p, q = self.state, self.parameter, self.search_direction
    u_t, p_t = u.copy(deepcopy=True), p.copy(deepcopy=True)

    def f(t):
        p_t.assign(p + firedrake.Constant(t) * q)
        u_t.assign(self._forward_solve(p_t))
        return self._assemble(replace(self._J, {u: u_t, p: p_t}))

    try:
        line_search_options = self._line_search_options
    except AttributeError:
        line_search_options = {}

    brack = bracket(f, xa=0.0, xb=_bracket(f, max_iterations=30))[:3]
    result = minimize_scalar(f, bracket=brack, options=line_search_options)
    if not result.success:
        raise ValueError("Line search failed: {}".format(result.message))
    return result.x
def getmin(fun, xa, xb):
    res1 = bracket(fun, xa=xa, xb=xb)
    # Assumes the bracketing triple came back in descending order, so that
    # res1[2] < res1[1]; see the note after the next snippet.
    res = minimize_scalar(fun, bounds=(res1[2], res1[1]), method='bounded')
    return res.x
from scipy.optimize import minimize, bracket, minimize_scalar

def f(x):
    return (x - 1) * (x + 5) * (x - 3) * (x + 10)

# res = minimize_scalar(f, bounds=(-3, 60000000000), method='bounded')  # local minimum
res1 = bracket(f, xa=5, xb=4)
print(res1)
res = minimize_scalar(f, bounds=(res1[2], res1[1]), method='bounded')
print(res.x)
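# A caveat on the (res1[2], res1[1]) ordering used in the two snippets
# above: scipy's bracket only guarantees f(xb) < f(xa) and f(xb) < f(xc),
# not that the abscissas come back sorted. Started at xa=5, xb=4 the search
# runs downhill toward smaller x, so xc < xb and bounds=(res1[2], res1[1])
# happens to be (lower, upper). A direction-independent sketch sorts first
# and uses the outer points of the triple as bounds (getmin_sorted is a
# hypothetical name):
from scipy.optimize import bracket, minimize_scalar

def getmin_sorted(fun, xa, xb):
    xs = sorted(bracket(fun, xa=xa, xb=xb)[0:3])
    return minimize_scalar(fun, bounds=(xs[0], xs[2]), method='bounded').x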
def grad(foo, var_list, init_values, tol=1e-5, max_iter=10000):
    """
    This method computes the minimum value of a multivariable algebraic
    function using the Gradient Descent algorithm, provided that an
    initial point is given.

    Parameters:
        foo: callable multivariable algebraic function built with sympy;
        var_list: list containing independent variables of the function;
        init_values: list containing the initial values of the variables
            provided in "var_list";
        tol: tolerance of the method;
        max_iter: maximum number of iterations allowed;
    """
    # Initial definitions and variables declaration
    alpha = symbols("alpha")
    n = 0
    current_values = asarray(init_values)
    previous_values = None
    previous_replacements = None
    gradient_vector = list()
    gradient_values = list()

    # Generate gradient vector analytically
    for var in var_list:
        gradient_vector.append(diff(foo, var))

    # Perform Gradient Descent Algorithm
    while n < max_iter:
        replacements = [(var, var_value)
                        for var, var_value in zip(var_list, current_values)]

        # Check if this is the first iteration; if not, check the stop criteria
        if n != 0:
            if linalg.norm(current_values - previous_values) < tol and \
                    abs(foo.subs(replacements) - foo.subs(previous_replacements)) < tol and \
                    linalg.norm(gradient_values) < tol:
                return current_values, foo.subs(replacements), n

        gradient_values = list()
        for index, _ in enumerate(gradient_vector):
            gradient_values.append(
                float(gradient_vector[index].subs(replacements)))
        gradient_values = asarray(gradient_values)

        alpha_foo_arg = current_values - alpha * gradient_values
        alpha_replacements = [
            (var, var_value)
            for var, var_value in zip(var_list, alpha_foo_arg)
        ]
        alpha_foo = foo.subs(alpha_replacements)
        alpha_foo = lambdify(alpha, alpha_foo)

        # Perform bracketing for determining the boundaries for line search
        xa, _, xc, _, _, _, _ = optimize.bracket(alpha_foo)

        # Perform line search for minimizing "alpha"
        # using the Golden Section Method
        min_alpha, _, _ = golden_ratio(alpha_foo, "min", xa, xc)

        # Save variable value of current iteration
        previous_values = current_values

        # Calculate variable values for next iteration
        current_values = previous_values - min_alpha * gradient_values
        previous_replacements = replacements

        n += 1
    else:
        raise RuntimeError("The number of iterations reached "
                           "the defined maximum number of iterations.")
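# A hedged usage sketch for grad() above. golden_ratio is the author's own
# line-search helper (its signature is inferred from the call site), so this
# only runs where that helper is importable alongside grad; the quadratic
# test function below is hypothetical.
from sympy import symbols

x, y = symbols("x y")
foo = (x - 1)**2 + 2*(y + 3)**2
values, f_min, n_iter = grad(foo, [x, y], [0.0, 0.0])
print(values, f_min, n_iter)   # expected to approach (1, -3) with f_min ~ 0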
def find_min_h_brent(self, Bs, dtau_init, tol=5E-2, skipIfLower=False,
                     verbose=False, use_tangvec_overlap=False, max_iter=20):
    As0 = cp.deepcopy(self.A)
    Cs0 = cp.deepcopy(self.C)
    Ks0 = cp.deepcopy(self.K)
    h_expect_0 = self.H_expect

    ls0 = cp.deepcopy(self.l)
    rs0 = cp.deepcopy(self.r)

    taus = [0]
    if use_tangvec_overlap:
        ress = [self.eta_sq.real.sum()]
    else:
        ress = [h_expect_0.real]
    hs = [h_expect_0.real]

    def f(tau, *args):
        if tau < 0:
            if use_tangvec_overlap:
                res = tau**2 + self.eta_sq.sum().real
            else:
                res = tau**2 + h_expect_0.real
            log.debug((tau, res, "punishing negative tau!"))
            taus.append(tau)
            ress.append(res)
            hs.append(h_expect_0.real)
            return res
        try:
            i = taus.index(tau)
            log.debug((tau, ress[i], "from stored"))
            return ress[i]
        except ValueError:
            for n in xrange(1, self.N + 1):
                if Bs[n] is not None:
                    self.A[n] = As0[n] - tau * Bs[n]

            if use_tangvec_overlap:
                self.update(restore_CF=False)
                Bsg = self.calc_B(set_eta=False)
                res = 0
                for n in xrange(1, self.N + 1):
                    if Bs[n] is not None:
                        res += abs(m.adot(self.l[n - 1],
                                          tm.eps_r_noop(self.r[n], Bsg[n], Bs[n])))
                h_exp = self.H_expect.real
            else:
                self.calc_l()
                self.calc_r()
                self.simple_renorm()
                self.calc_C()

                h_exp = 0
                if self.ham_sites == 2:
                    for n in xrange(1, self.N):
                        h_exp += self.expect_2s(self.ham[n], n).real
                else:
                    for n in xrange(1, self.N - 1):
                        h_exp += self.expect_3s(self.ham[n], n).real
                res = h_exp

            log.debug((tau, res, h_exp, h_exp - h_expect_0.real))

            taus.append(tau)
            ress.append(res)
            hs.append(h_exp)

            return res

    if skipIfLower:
        if f(dtau_init) < self.H_expect.real:
            return dtau_init

    brack_init = (dtau_init * 0.9, dtau_init * 1.5)

    attempt = 1
    while attempt < 3:
        try:
            log.debug("CG: Bracketing...")
            xa, xb, xc, fa, fb, fc, funcalls = opti.bracket(f, xa=brack_init[0],
                                                            xb=brack_init[1],
                                                            maxiter=5)
            brack = (xa, xb, xc)
            log.debug("CG: Using bracket = " + str(brack))
            break
        except RuntimeError:
            log.debug("CG: Bracketing failed, attempt %u." % attempt)
            brack_init = (brack_init[0] * 0.1, brack_init[1] * 0.1)
            attempt += 1

    if attempt == 3:
        log.debug("CG: Bracketing failed. Aborting!")
        tau_opt = 0
        h_min = h_expect_0.real
    else:
        try:
            tau_opt, res_min, itr, calls = opti.brent(f, brack=brack, tol=tol,
                                                      maxiter=max_iter,
                                                      full_output=True)
            i = taus.index(tau_opt)
            h_min = hs[i]
        except ValueError:
            log.debug("CG: Bad bracket. Aborting!")
            tau_opt = 0
            h_min = h_expect_0.real

    # Must restore everything needed for take_step
    self.A = As0
    self.l = ls0
    self.r = rs0
    self.C = Cs0
    self.K = Ks0
    self.H_expect = h_expect_0

    return tau_opt, h_min
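# The retry loop above leans on two scipy behaviors worth pinning down:
# opti.bracket raises a RuntimeError when it cannot find a bracket within
# maxiter steps, and opti.brent with full_output=True returns the 4-tuple
# (xmin, fval, iter, funcalls). A minimal sketch of that pairing on a toy
# quadratic:
from scipy import optimize as opti

quad = lambda t: (t - 1.0)**2
xa, xb, xc, fa, fb, fc, funcalls = opti.bracket(quad, xa=0.5, xb=0.9,
                                                maxiter=5)
tau_opt, res_min, itr, calls = opti.brent(quad, brack=(xa, xb, xc),
                                          tol=5e-2, full_output=True)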
sensnorm = 0
removed = remove(seed, num_remove, mi[:num_exclude])
for ind in removed:
    if (outflag == 1):
        print(ind, reactions[ind].equation, ms[ind])
    sensnorm += ms[ind]

if (gas.reaction_type(measure_ind) == 4):
    k0 = gas.reactions()[measure_ind].low_rate.pre_exponential_factor
else:
    k0 = gas.reactions()[measure_ind].rate.pre_exponential_factor

try:
    xa, xb, xc, fa, fb, fc, nf = op.bracket(
        residual, xa=np.log10(0.5), xb=np.log10(2.0),
        args=(k0, observations, measure_ind, tmaxes, temperatures,
              pressures, initials, maxes, yields),
        grow_limit=1.5)
    brack = (xa, xb, xc)
    if (outflag == 1):
        print("bracket found in %d calls: (%f %f %f)" % (nf, xa, xb, xc))
    result = op.minimize_scalar(residual,
                                args=(k0, observations, measure_ind, tmaxes,
                                      temperatures, pressures, initials,
                                      maxes, yields),
                                method='brent', bracket=brack,
                                options={'xtol': ktol})
except Exception as error:
    print('failed')
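# Both op.bracket and op.minimize_scalar above forward the same extra
# parameters through args=. A minimal sketch of that pattern with a toy
# residual (toy_residual and its parameters are hypothetical):
import numpy as np
from scipy import optimize as op

def toy_residual(logk, k0, target):
    # Extra positional arguments arrive after the scalar being optimized.
    return (k0 * 10.0**logk - target)**2

xa, xb, xc = op.bracket(toy_residual, xa=np.log10(0.5), xb=np.log10(2.0),
                        args=(3.0, 6.0), grow_limit=1.5)[:3]
result = op.minimize_scalar(toy_residual, args=(3.0, 6.0),
                            method='brent', bracket=(xa, xb, xc))
print(result.x)   # ~log10(2), since 3 * 10**x = 6 at x = log10(2)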
def minimize_linesearch(objF, x0, step):
    # Line search causes many additional objF calls, but runs without new
    # Hessians and prevents ping-ponging when hessian@grad steps overshoot.
    from scipy import optimize as opt

    # Find an (a, b, c) bracketing triple for alpha, starting from [0, 1].
    a = 0
    b = 1
    a, b, c, fa, fb, fc, _ = opt.bracket(func=lambda alpha: objF(x0 + alpha*step),
                                         xa=a, xb=b)

    # opt.bracket may return the triple in descending order; opt.golden
    # expects ascending abscissas, so sort the points and keep the function
    # values paired with them.
    abc = np.array([a, b, c])
    fabc = np.array([fa, fb, fc])
    idx = np.argsort(abc)
    a, b, c = abc[idx]
    fa, fb, fc = fabc[idx]

    alpha = opt.golden(func=lambda alpha: objF(x0 + alpha*step),
                       brack=(a, b, c))
    return alpha