def test_secant_by_name(self):
    r"""Invoke secant through root_scalar()"""
    for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
        r = root_scalar(f, method='secant', x0=3, x1=2, xtol=1e-6)
        assert_allclose(f(r.root), 0, atol=1e-6)
        r = root_scalar(f, method='secant', x0=3, x1=5, xtol=1e-6)
        assert_allclose(f(r.root), 0, atol=1e-6)
def test_root_scalar_fail(self):
    with pytest.raises(ValueError):
        root_scalar(f1, method='secant', x0=3, xtol=1e-6)  # no x1
    with pytest.raises(ValueError):
        root_scalar(f1, method='newton', x0=3, xtol=1e-6)  # no fprime
    with pytest.raises(ValueError):
        root_scalar(f1, method='halley', fprime=f1_1, x0=3, xtol=1e-6)  # no fprime2
    with pytest.raises(ValueError):
        root_scalar(f1, method='halley', fprime2=f1_2, x0=3, xtol=1e-6)  # no fprime
def test_newton_combined(self):
    f1 = lambda x: x**2 - 2*x - 1
    f1_1 = lambda x: 2*x - 2
    f1_2 = lambda x: 2.0 + 0*x

    def f1_and_p_and_pp(x):
        return x**2 - 2*x - 1, 2*x - 2, 2.0

    sol0 = root_scalar(f1, method='newton', x0=3, fprime=f1_1)
    sol = root_scalar(f1_and_p_and_pp, method='newton', x0=3, fprime=True)
    assert_allclose(sol0.root, sol.root, atol=1e-8)
    assert_equal(2*sol.function_calls, sol0.function_calls)

    sol0 = root_scalar(f1, method='halley', x0=3, fprime=f1_1, fprime2=f1_2)
    sol = root_scalar(f1_and_p_and_pp, method='halley', x0=3, fprime2=True)
    assert_allclose(sol0.root, sol.root, atol=1e-8)
    assert_equal(3*sol.function_calls, sol0.function_calls)
def _find_root_of_omega_minus(self, N):
    omega_minus = lambda r: self.omega_minus(r, N)
    min = self.gamma / self.c_dilute * 0.1
    max = 1 / self.k_dilute
    if omega_minus(max) > 0:
        print(omega_minus(max))
    sol = root_scalar(omega_minus, bracket=[min, max], xtol=0.01,
                      method='brentq')
    return sol.root
def get_limit_of_detection_nreads(self, tcr_freq, conf_level=0.95):
    opt_nreads = partial(self.pcmodel.predict_detection_probability,
                         tcr_frequencies=tcr_freq)
    opt_res = optimize.root_scalar(
        lambda nreads: opt_nreads(num_reads=nreads) - conf_level,
        method="secant", x0=1.0e-16, x1=1)
    return int(np.around(opt_res.root))
def time_when_equal_to(self, x: float) -> float:
    def f(t):
        return self(t) - x

    if not self.final_value < x < self.initial_value:
        raise ValueError(
            f"Can't solve for Pnot = {x}. It is outside the calculated range: "
            f"{self.initial_value}, {self.final_value}"
        )
    result = root_scalar(f, bracket=[self.min_time, self.max_time])
    return result.root
def modelToPercentileDecay(model, percentile=0.5, coarse=False):
    """When will memory decay to a given percentile? 🏀

    Given a memory `model` of the kind consumed by `predictRecall`, etc., and
    optionally a `percentile` (defaults to 0.5, the half-life), find the time
    it takes for memory to decay to `percentile`. If `coarse`, the returned
    time (in the same units as `model`) is approximate.
    """
    # Use a root-finding routine in log-delta space to find the delta that
    # will cause the GB1 distribution to have a mean of the requested quantile.
    # Because we are using well-behaved normalized deltas instead of times, and
    # owing to the monotonicity of the expectation with respect to delta, we
    # can quickly scan for a rough estimate of the scale of delta, then do a
    # finishing optimization to get the right value.
    assert (percentile > 0 and percentile < 1)
    from scipy.special import betaln
    from scipy.optimize import root_scalar
    alpha, beta, t0 = model
    logBab = betaln(alpha, beta)
    logPercentile = np.log(percentile)

    def f(lndelta):
        logMean = betaln(alpha + np.exp(lndelta), beta) - logBab
        return logMean - logPercentile

    # Scan for a bracket.
    bracket_width = 1.0 if coarse else 6.0
    blow = -bracket_width / 2.0
    bhigh = bracket_width / 2.0
    flow = f(blow)
    fhigh = f(bhigh)
    while flow > 0 and fhigh > 0:
        # Move the bracket up.
        blow = bhigh
        flow = fhigh
        bhigh += bracket_width
        fhigh = f(bhigh)
    while flow < 0 and fhigh < 0:
        # Move the bracket down.
        bhigh = blow
        fhigh = flow
        blow -= bracket_width
        flow = f(blow)

    assert flow > 0 and fhigh < 0

    if coarse:
        return (np.exp(blow) + np.exp(bhigh)) / 2 * t0

    sol = root_scalar(f, bracket=[blow, bhigh])
    t1 = np.exp(sol.root) * t0
    return t1
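# The function above illustrates a general pattern: slide a fixed-width bracket
# until the function changes sign, then hand the bracket to root_scalar. Below
# is a minimal, self-contained sketch of that pattern, assuming (as above) a
# monotonically decreasing function; the helper name bracket_and_solve and the
# target function 3 - exp(x) are purely illustrative, not part of the model.
import numpy as np
from scipy.optimize import root_scalar


def bracket_and_solve(f, width=1.0):
    lo, hi = -width / 2.0, width / 2.0
    flo, fhi = f(lo), f(hi)
    while flo > 0 and fhi > 0:      # both positive: shift the bracket up
        lo, flo = hi, fhi
        hi += width
        fhi = f(hi)
    while flo < 0 and fhi < 0:      # both negative: shift the bracket down
        hi, fhi = lo, flo
        lo -= width
        flo = f(lo)
    return root_scalar(f, bracket=[lo, hi]).root


print(bracket_and_solve(lambda x: 3.0 - np.exp(x)))  # ~1.0986 (= ln 3)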
def ks_ss(lb=0.98, ub=0.999, r=0.01, eis=1, delta=0.025, alpha=0.11, b=0.15,
          pUE=0.5, pEU=0.038, nA=100, amax=20):
    """Solve steady state of full GE model. Calibrate beta to hit target for
    interest rate."""

    # set up grid
    a_grid = mathutils.agrid(amax=amax, n=nA)
    L = pUE / (pUE + pEU)  # labor endowment normalized to 1
    e_grid = np.array([b, 1 - (1 - L) / L * b])
    Pi = np.array([[1 - pUE, pUE], [pEU, 1 - pEU]])

    # solve for aggregates analytically
    rk = r + delta
    Z = (rk / alpha)**alpha / L**(1 - alpha)  # normalize so that Y=1
    K = (alpha * Z / rk)**(1 / (1 - alpha)) * L
    Y = Z * K**alpha * L**(1 - alpha)
    w = (1 - alpha) * Z * (alpha * Z / rk)**(alpha / (1 - alpha))

    # solve for beta consistent with this
    beta_min = lb / (1 + r)
    beta_max = ub / (1 + r)
    sol = opt.root_scalar(
        lambda bet: household_ss(Pi, a_grid, e_grid, r, w, bet, eis)['A'] - K,
        bracket=[beta_min, beta_max], method='brentq')
    if sol.converged:
        beta = sol.root
    else:
        raise ValueError('Steady-state solver did not converge.')

    # extra evaluation to report variables
    ss = household_ss(Pi, a_grid, e_grid, r, w, beta, eis)
    mpc = mathutils.mpcs(ss['c'], ss['a_grid'], ss['r'])
    ss.update({
        'mpc': mpc, 'MPC': np.vdot(ss['D'], mpc),
        'w': w, 'Z': Z, 'K': K, 'L': L, 'Y': Y,
        'alpha': alpha, 'delta': delta,
        'goods_mkt': Y - ss['C'] - delta * K
    })
    return ss
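# The calibration step above is a one-dimensional root find: choose beta so
# that aggregate household assets match the capital stock K. A stripped-down,
# self-contained sketch of that idea, with a made-up monotone asset_demand
# standing in for household_ss (both the function and the numbers below are
# hypothetical, not the model used above):
import numpy as np
from scipy import optimize


def asset_demand(beta):
    # Hypothetical stand-in for household_ss(...)['A']: more patience (higher
    # beta) means more desired asset holdings.
    return np.exp(50.0 * (beta - 0.98))


K_target = 1.0
lb, ub, r = 0.98, 0.999, 0.01
sol = optimize.root_scalar(lambda bet: asset_demand(bet) - K_target,
                           bracket=[lb / (1 + r), ub / (1 + r)],
                           method='brentq')
print(sol.converged, sol.root)   # root at beta = 0.98 for this toy function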
def k2(self, k0, k1):
    # Find the k2 that is consistent with the Euler equation
    sol = optimize.root_scalar(lambda x: self.eulerError(k0, k1, x),
                               x0=k0, x1=self.kss)

    # Raise an exception if no compatible capital is found
    if sol.flag != "converged":
        raise Exception('Could not find capital value satisfying Euler equation')

    return sol.root
def determine_EFs_circ1dNP(p, Vds, Ids_cutoff):
    """Function to find E_F at source in circular 1d NW."""
    e0 = optimize.root_scalar(func_det_EFs_circ1dNP,
                              args=(p, Vds, Ids_cutoff),
                              x0=0.0, x1=0.15)
    if e0.converged:
        return e0.root
    else:
        print("EFs convergence error !")
        print(e0)
        return 0
def qscalar(val):
    if val <= p_bnd[0]:
        return self.bracket[0]
    if val >= p_bnd[1]:
        return self.bracket[1]
    sol = root_scalar(
        lambda x: val - self.p(x),
        bracket=self.bracket,
        method="bisect",
        xtol=self.atol,
    )
    return sol.root
def density_match_exact(kperp, tev):
    from scipy import optimize

    def test(wp):
        return tpd_match_func(wp, kperp, tev)

    test2 = optimize.root_scalar(test, x0=0.50, x1=0.49, method='secant')
    # DEBUG
    # print(test2)
    # DEBUG
    return test2.root * test2.root
def E0_2d_root(Vds, Vgs, EFs, p, left=-0.5, right=0.5):
    """Get top of the barrier in a 2D ballistic FET (Python implementation).

    Parabolic band.
    """
    e0 = optimize.root_scalar(func_for_findroot_E0_2d,
                              args=(Vds, Vgs, EFs, p),
                              x0=left, x1=right)
    if e0.converged:
        return e0.root
    else:
        print("EFs convergence error !")
        print(e0)
        return 0
def ann_plateau_radius(r_tr, lum, gamma, rho_max=rho_max):
    """Solves for the annihilation plateau radius.

    Parameters
    ----------
    r_tr : float
        Truncation radius, kpc.
    lum : float
        Clump luminosity, Hz.
    gamma : float
        Slope.
    rho_max : float
        Maximum DM density, which defines the annihilation plateau, GeV/cm^3.

    Returns
    -------
    Annihilation plateau radius, kpc.
    """
    def f(r_p):  # rescaled luminosity
        if gamma != 1.5:
            return (3 * r_tr**3 * (r_p / r_tr)**(2. * gamma) -
                    2 * gamma * r_p**3 +
                    3 * (2. * gamma - 3) * mx**2 * lum /
                    (2 * np.pi * sv * rho_max**2) / kpc_to_cm**3)
        else:
            return (r_p**3 * (1. + 3. * np.log(r_tr / r_p)) -
                    3. * mx**2 * lum /
                    (2 * np.pi * sv * rho_max**2) / kpc_to_cm**3)

    def fprime(r_p):
        if gamma != 1.5:
            return 6 * gamma * r_p * (r_tr * (r_p / r_tr)**(2. * gamma - 1.) - r_p)
        else:
            return 9. * r_p**2 * np.log(r_tr / r_p)

    sol = root_scalar(f, x0=1e-7 * r_tr, fprime=fprime, method="newton",
                      xtol=1e-100, rtol=1e-5)
    root = np.real(sol.root)

    if root < 0:
        return np.nan  # raise ValueError("r_p is negative.")
    elif root > r_tr:
        return np.nan  # raise ValueError("r_p is larger than the truncation radius.")
    elif np.imag(sol.root) / root > 1e-5:
        return np.nan  # raise ValueError("r_p is imaginary")
    else:
        return root
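# The call above uses Newton's method with an analytic derivative passed via
# fprime. A minimal, generic illustration of that root_scalar calling pattern
# (the cube-root example is unrelated to the DM profile above):
from scipy.optimize import root_scalar

sol = root_scalar(lambda x: x**3 - 2.0,          # f(x)  = x^3 - 2
                  fprime=lambda x: 3.0 * x**2,   # f'(x) = 3 x^2
                  x0=1.0, method="newton", rtol=1e-12)
print(sol.converged, sol.root)                   # ~1.2599 (cube root of 2)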
def find_scale(first_layer_width, number_of_layers, max_depth):
    if first_layer_width * number_of_layers >= max_depth:
        raise ValueError("Max depth must be greater than the number of layers "
                         "times the first layer width.")
    else:
        solver = root_scalar(
            lambda x: max_depth - first_layer_width * np.sum(
                np.exp(x * np.arange(number_of_layers))),
            bracket=(0.0, 1.0))
        if solver.converged:
            return solver.root
        else:
            raise RuntimeError
def ppf(self, p):
    if self.n_modes == 1:
        return self._ppf(p)
    if isinstance(p, np.ndarray) or (isinstance(p, (list, tuple)) and
                                     all(isinstance(x, (int, float)) for x in p)):
        return np.array([self.ppf(pi) for pi in p])
    ppf = self._ppf(p)
    low = np.min(ppf)
    high = np.max(ppf)
    return root_scalar(lambda x: self.cdf(x) - p, method='brentq',
                       bracket=[low, high], maxiter=10).root
def bubble_point(x, K, p, T_guess):
    """
    :param x: mole fractions in liquid
    :param K: functions calculating K for each component
    :param p: total pressure
    :param T_guess: guess temperature
    :return: Temperature at which the liquid mixture begins to boil.
    """
    from scipy.optimize import root_scalar
    sol = root_scalar(residual, args=(x, K, p), x0=T_guess - 10, x1=T_guess + 10)
    return sol.root
def update_mu(Delta, Sigma, occupancy_goal, T, old_mu):
    sol = optimize.root_scalar(
        lambda x: calc_occupancy(Delta, Sigma, x, T) - occupancy_goal,
        x0=old_mu - 0.05, x1=old_mu + 0.05)
    if not sol.converged:
        if verbose():
            print("root_scalar() failed to converge. "
                  "Keeping the old value of mu.")
        return old_mu
    return sol.root
def execute(paths):
    """Finds threshold level and writes it to threshold file if no threshold
    level was provided by the user"""
    if is_threshold_provided(paths):
        return
    map_data = deepcopy(mrcfile.open(paths['cleaned_map'], mode='r').data).ravel()
    num_non_zero_values = count_values(map_data, 0)

    # Find threshold value such that the surface area to volume ratio is 0.9
    threshold1 = root_scalar(lambda t: 0.9163020188305991 - sav(t, paths),
                             bracket=[0, 10]).root

    # Find threshold value such that the ratio of number of values larger
    # than the threshold to number of non-zero values is 0.4
    threshold2 = root_scalar(
        lambda t: 0.4640027954434909 - (count_values(map_data, t) / num_non_zero_values),
        bracket=[0, 10]).root

    threshold = (threshold1 + threshold2) / 2
    with open(paths['threshold'], 'w') as f:
        f.write(str(threshold))
def _G_inv(self, arg, n_eff):
    # Numerical calculation of the inverse of `_G`.
    roots = []
    for val, neff in zip(arg, n_eff):
        func = lambda x: self._G(x, neff) - val  # noqa: _G_inv Traceback
        try:
            rt = brentq(func, a=0.05, b=200)
        except ValueError:
            # No root in [0.05, 200] (rare, but it may happen).
            rt = root_scalar(func, x0=1, x1=2).root.item()
        roots.append(rt)
    return np.asarray(roots)
def inversion_cdf(cdf, size):
    sample = []
    roots = np.random.uniform(0, 1, size)
    for u in roots:
        def shifted(x):
            return cdf(x) - u

        sample.append(
            root_scalar(shifted, method='bisect', x0=0.5, bracket=[0, 1]).root)
    return np.array(sample)
def shoot(phi, xspan, lval, lder, rval, rder, init):
    """
    shoot(phi, xspan, lval, lder, rval, rder, init)

    Use the shooting method to solve a two-point boundary value problem. The
    ODE is u'' = `phi`(x, u, u') for x in `xspan`. Specify a function value or
    derivative at the left endpoint using `lval` and `lder`, respectively, and
    similarly for the right endpoint using `rval` and `rder`. (Use an empty
    array to denote an unknown quantity.) The value `init` is an initial guess
    for whichever value is missing at the left endpoint.

    Return vectors for the nodes, the values of u, and the values of u'.
    """
    # Tolerances for IVP solver and rootfinder.
    ivp_opt = 1e-6
    optim_opt = 1e-5

    # Evaluate the difference between computed and target values at x=b.
    def objective(s):
        nonlocal x, v  # change these values in outer scope
        # Combine s with the known left endpoint value.
        if len(lder) == 0:
            v_init = [lval[0], s]
        else:
            v_init = [s, lder[0]]

        # ODE posed as a first-order equation in 2 variables.
        def shootivp(x, v):
            return array([v[1], phi(x, v[0], v[1])])

        x = linspace(xspan[0], xspan[1], 400)  # make decent plots on return
        sol = solve_ivp(shootivp, xspan, v_init, t_eval=x)
        x = sol.t
        v = sol.y

        if len(rder) == 0:
            return v[0, -1] - rval[0]
        else:
            return v[1, -1] - rder[0]

    # Find the unknown quantity at x=a by rootfinding.
    x = []
    v = []  # the values will be overwritten
    s = root_scalar(objective, x0=init, x1=init + 0.05, xtol=optim_opt).root

    # Don't need to solve the IVP again. It was done within the
    # objective function already.
    u = v[0]      # solution
    dudx = v[1]   # derivative
    return x, u, dudx
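# A possible usage sketch for shoot() above, assuming the module already
# imports numpy's array/linspace and scipy's solve_ivp/root_scalar as used in
# the function: solve u'' = -u on [0, pi/2] with u(0) = 0 and u(pi/2) = 1,
# whose exact solution is u(x) = sin(x). The recovered left-end slope u'(0)
# should come out close to 1. The numbers here are illustrative only.
import numpy as np

x_nodes, u, dudx = shoot(lambda x, u, dudx: -u,   # phi(x, u, u') = -u
                         [0.0, np.pi / 2],        # xspan
                         [0.0], [],               # left: value known, slope unknown
                         [1.0], [],               # right: value known
                         init=0.5)                # initial guess for u'(0)
print(dudx[0])                                    # ~1.0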
def find_critical_angle(func, x0, mini, maxi, args, typ='alpha'):
    """
    Wrapper for the critical-angle-finding algorithm.

    :param func: function; function for which to calculate the critical angle.
    :param x0: float; comparison value
    :param mini: float; lower bound of the angle
    :param maxi: float; upper bound of the angle
    :param args: iterable; list of additional arguments for the function func
    :param typ: ['alpha', 'beta']; handles whether the critical angle should be
        phi-like or theta-like.
    :return: float; the critical angle / root of the func
    """
    guess = mini + (maxi - mini) / 2
    if typ == 'alpha':
        angle = opt.root_scalar(root, args=(func, args, x0, True),
                                bracket=(mini, maxi), x0=guess)
    elif typ == 'beta':
        angle = opt.root_scalar(root, args=(func, args, x0, False),
                                bracket=(mini, maxi), x0=guess)
    else:
        raise NotImplementedError('False typ specification.')
    return angle.root
def simulation_CCC_charts(r):
    p = 0.008
    p_tilde = 0.008
    changepoint = -1

    median = -1.0 / 2.0
    while stats.nbinom.cdf(median, r, p) - 1 / 2 < 0:
        median += 1
    mu = r / p
    sd = np.sqrt((1 - p) * r) / p
    # print(median)
    # print(sd)

    def sampling_distr(x, p_):
        return np.random.negative_binomial(r, p_, x)

    LCL = max(0, mu - 3 * sd)
    sim = SimulationHandler(lambda x: sampling_distr(x, p), [
        lambda x: RunsRules.n_points_above_CL(x, 9, median),
        lambda x: RunsRules.n_points_below_CL(x, 9, median),
        lambda x: RunsRules.n_points_increasing(x, 6),
        lambda x: RunsRules.n_points_decreasing(x, 6),
        lambda x: RunsRules.lower_CL_rule(x, LCL)
    ])

    def simulate(_LCL):
        out = []
        for _ in range(1000):
            out.append(
                sim.simulate_inspection_length_variable_rules([
                    lambda x: RunsRules.n_points_above_CL(x, 9, median),
                    lambda x: RunsRules.n_points_below_CL(x, 9, median),
                    lambda x: RunsRules.n_points_increasing(x, 6),
                    lambda x: RunsRules.n_points_decreasing(x, 6),
                    lambda x: RunsRules.lower_CL_rule(x, _LCL)
                ], changepoint, lambda x: sampling_distr(x, p_tilde)))
        return np.mean(np.array(out))

    print(simulate(LCL))

    output = []
    for _ in range(100):
        output.append(
            optimize.root_scalar(lambda k: simulate(mu - k * sd) - 80000,
                                 bracket=[0, 3], x0=1.5, xtol=0.01).root)

    print(f"{simulate(mu)} : {simulate(mu - 3 * sd)}")
    print(np.mean(output))
    print(np.std(output))
    print(simulate(np.mean(mu - np.mean(output) * sd)))
def intersect_curve_plane_equation(curve, plane, init_samples=10, tolerance=1e-3, maxiter=50):
    """
    Find intersections of a curve and a plane by directly solving an equation.

    inputs:
        * curve : SvCurve
        * plane : sverchok.utils.geom.PlaneEquation
        * init_samples: number of samples to subdivide the curve into; this
          defines the maximum possible number of solutions the method will
          return (the solution is searched at each segment).
        * tolerance: target tolerance
        * maxiter: maximum number of iterations

    outputs: list of 2-tuples:
        * curve T value
        * point at the curve

    dependencies: scipy
    """
    u_min, u_max = curve.get_u_bounds()
    u_range = np.linspace(u_min, u_max, num=init_samples)
    init_points = curve.evaluate_array(u_range)
    init_signs = plane.side_of_points(init_points)
    good_ranges = []
    for u1, u2, sign1, sign2 in zip(u_range, u_range[1:], init_signs, init_signs[1:]):
        if sign1 * sign2 < 0:
            good_ranges.append((u1, u2))
    if not good_ranges:
        return []

    plane_normal = np.array(plane.normal)
    plane_d = plane.d

    def goal(t):
        point = curve.evaluate(t)
        value = (plane_normal * point).sum() + plane_d
        return value

    solutions = []
    for u1, u2 in good_ranges:
        sol = root_scalar(goal, method='ridder',
                          bracket=(u1, u2),
                          xtol=tolerance,
                          maxiter=maxiter)
        u = sol.root
        point = curve.evaluate(u)
        solutions.append((u, point))
    return solutions
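# The structure above -- coarse sampling to locate sign changes, then one
# bracketed Ridder solve per sub-interval -- works for any scalar function
# with several roots. A self-contained sketch of that pattern on a plain
# function (sin(3t) = 0.25 is an arbitrary stand-in, not related to curves):
import numpy as np
from scipy.optimize import root_scalar


def goal(t):
    return np.sin(3.0 * t) - 0.25      # has 6 roots on [0, 2*pi]


t_range = np.linspace(0.0, 2.0 * np.pi, num=25)
signs = np.sign([goal(t) for t in t_range])
roots = []
for t1, t2, s1, s2 in zip(t_range, t_range[1:], signs, signs[1:]):
    if s1 * s2 < 0:                    # sign change => a root is bracketed here
        sol = root_scalar(goal, method='ridder', bracket=(t1, t2), xtol=1e-6)
        roots.append(sol.root)
print(roots)                           # the 6 solutions of sin(3t) = 0.25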
def optimal_liquidation_level_stop_loss(self):
    """
    Calculates the optimal liquidation portfolio level considering the stop-loss level. (p.31)

    :return: (float) Optimal liquidation portfolio level considering the stop-loss.
    """
    # Checking if the stop-loss level was allocated
    if self.L is None:
        raise Exception(
            "To use this function, the stop-loss level must be allocated.")

    # If the liquidation level wasn't calculated before, set it
    if self.liquidation_level[1] is None:

        # Calculating three sub-parts of the equation
        a_var = lambda price: (((self.L - self.c[0]) * self._G(price, self.r[0])
                                - (price - self.c[0]) * self._G(self.L, self.r[0]))
                               * self._F_derivative(price, self.r[0]))

        b_var = lambda price: (((price - self.c[0]) * self._F(self.L, self.r[0])
                                - (self.L - self.c[0]) * self._F(price, self.r[0]))
                               * self._G_derivative(price, self.r[0]))

        c_var = lambda price: (self._G(price, self.r[0]) * self._F(self.L, self.r[0])
                               - self._G(self.L, self.r[0]) * self._F(price, self.r[0]))

        # Final equation
        equation = lambda price: a_var(price) + b_var(price) - c_var(price)

        bracket = [self.theta - 6 * np.sqrt(self.sigma_square),
                   self.theta + 6 * np.sqrt(self.sigma_square)]

        sol = root_scalar(equation, bracket=bracket)

        # The root is the optimal liquidation level considering the stop-loss level
        output = sol.root

        self.liquidation_level[1] = output

    else:
        output = self.liquidation_level[1]

    return output
def calculate_saturation_pressure(self, temperature, sample, X_fluid=1.0, **kwargs):
    """
    Calculates the pressure at which a CO2-bearing fluid is saturated. Calls the
    scipy root_scalar routine, which makes repeated calls to the
    calculate_dissolved_volatiles method.

    Parameters
    ----------
    sample: Sample class
        Magma major element composition.

    temperature: float
        Temperature in degrees C.

    X_fluid: float
        OPTIONAL. Default is 1.0. Mole fraction of CO2 in the H2O-CO2 fluid.

    Returns
    -------
    float
        Calculated saturation pressure in bars.
    """
    temperatureK = temperature + 273.15
    if temperatureK <= 0.0:
        raise core.InputError("Temperature must be greater than 0K.")
    if X_fluid < 0 or X_fluid > 1:
        raise core.InputError("X_fluid must have a value between 0 and 1.")
    if isinstance(sample, sample_class.Sample) is False:
        raise core.InputError("Sample must be an instance of the Sample class.")
    if sample.check_oxide('CO2') is False:
        raise core.InputError("sample must contain CO2.")
    if sample.get_composition('CO2') < 0.0:
        raise core.InputError("Dissolved CO2 concentration must be greater than 0 wt%.")

    try:
        satP = root_scalar(self.root_saturation_pressure,
                           args=(temperature, sample, X_fluid, kwargs),
                           x0=10.0, x1=2000.0).root
    except Exception:
        w.warn("Saturation pressure not found.", RuntimeWarning, stacklevel=2)
        satP = np.nan

    return np.real(satP)
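# The solver call above appears to rely on the secant method: only two starting
# guesses (x0, x1) are supplied, with no bracket and no derivative, and
# root_scalar selects 'secant' automatically in that case. A generic,
# self-contained illustration of that calling convention (np.cos is just a
# convenient test function):
import numpy as np
from scipy.optimize import root_scalar

sol = root_scalar(np.cos, x0=1.0, x1=2.0)   # no bracket, no fprime -> secant
print(sol.converged, sol.root)              # ~1.5707963 (pi/2)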
def haines(a0, ux0, uy0, uz0, t0, tf, z0):
    # Parameters
    # Ex = E0 sin(wt - kz)
    # a0 = laser amplitude
    # g0 = initial gamma of the particle
    # u[xyz]0 = normalized initial momenta (i.e., proper velocities, gamma*v)
    # t0 = initial time when the EM-wave hits the particle (can be thought of as phase of laser)
    # z0 = initial position of the particle
    g0 = np.sqrt(1. + np.square(ux0) + np.square(uy0) + np.square(uz0))
    bx0 = ux0 / g0
    by0 = uy0 / g0
    bz0 = uz0 / g0
    phi0 = t0 - z0

    # Solve for the final value of s for the desired final value of time
    def t_haines(s):
        return (1. / (2 * g0 * (1 - bz0)) *
                (0.5 * np.square(a0) * s +
                 np.square(a0) / (4 * g0 * (1 - bz0)) *
                 (np.sin(2 * g0 * (1 - bz0) * s + 2 * phi0) - np.sin(2 * phi0)) +
                 2 * a0 * (g0 * bx0 - a0 * np.cos(phi0)) / (g0 * (1 - bz0)) *
                 (np.sin(g0 * (1 - bz0) * s + phi0) - np.sin(phi0)) +
                 np.square(g0 * bx0 - a0 * np.cos(phi0)) * s + s +
                 np.square(g0 * by0) * s) -
                0.5 * g0 * (1 - bz0) * s + g0 * (1 - bz0) * s - tf)

    sf = optimize.root_scalar(t_haines, x0=0, x1=tf).root
    s = np.linspace(0, sf, 1000)

    x = (a0 / (g0 * (1 - bz0)) *
         (np.sin(g0 * (1 - bz0) * s + phi0) - np.sin(phi0)) -
         a0 * s * np.cos(phi0) + g0 * bx0 * s)
    z = (1. / (2 * g0 * (1 - bz0)) *
         (0.5 * np.square(a0) * s +
          np.square(a0) / (4 * g0 * (1 - bz0)) *
          (np.sin(2 * g0 * (1 - bz0) * s + 2 * phi0) - np.sin(2 * phi0)) +
          2 * a0 * (g0 * bx0 - a0 * np.cos(phi0)) / (g0 * (1 - bz0)) *
          (np.sin(g0 * (1 - bz0) * s + phi0) - np.sin(phi0)) +
          np.square(g0 * bx0 - a0 * np.cos(phi0)) * s + s +
          np.square(g0 * by0) * s) -
         0.5 * g0 * (1 - bz0) * s)
    t = z + g0 * (1 - bz0) * s
    px = a0 * (np.cos(g0 * (1 - bz0) * s + phi0) - np.cos(phi0)) + g0 * bx0
    pz = (1. / (2 * g0 * (1 - bz0)) *
          (np.square(-a0 * (np.cos(g0 * (1 - bz0) * s + phi0) - np.cos(phi0)) -
                     g0 * bx0) + 1 + np.square(g0 * by0)) -
          0.5 * g0 * (1 - bz0))
    g = np.sqrt(1 + np.square(px) + np.square(pz))
    return [t, x, z, px, pz, g]
def find_beta_deltaE(meanE_over_deltaE):
    # x = np.linspace(-100, 100, 10000)
    # plt.plot(x, np.vectorize(fn_for_beta)(x, meanE_over_deltaE))
    # plt.show()
    if meanE_over_deltaE == 0:
        x0 = 1e-6
        x1 = -1e-6
    else:
        x0 = 2 * meanE_over_deltaE
        x1 = meanE_over_deltaE
    # args is passed as a one-element tuple, as documented for root_scalar
    sol = optimize.root_scalar(fn_for_beta, args=(meanE_over_deltaE,),
                               x0=x0, x1=x1)
    # print(sol)
    return sol.root
def UTransform(self, a, b, eta, u, G_inv, lam):
    g = np.zeros(len(u))
    nu = lambda x: x - lam * a * (a / (1 - eta) * x + b)**(eta - 1)
    for i in range(len(u)):
        g[i] = optimize.root_scalar(
            lambda x: nu(x) - G_inv[i],
            method='bisect',
            bracket=[-b * (1 - eta) / a + 1e-5, 100]).root
    return g
def get_ray_1guess(minfn, odefn, rmax, z0, zm, dr, boundguess):
    lb, rb = get_bounds_1guess(boundguess, odefn, rmax, z0, zm, dr)
    if lb is None and rb is None:
        return None, None

    minsol = root_scalar(minfn, args=(odefn, hit_bot, rmax, z0, zm, dr),
                         bracket=[lb, rb])
    # options={'xtol': 1e-12, 'rtol': 1e-12, 'maxiter': int(1e4)}
    print(minsol.converged, minsol.flag)
    odesol = shoot_ray(odefn, hit_top, 0, rmax, minsol.root, z0, dr)
    return odesol, rb
def calculate_decay_rate_trunc(average_x_truncated, configuration_space, fv, tv):
    root_result = root_scalar(V, bracket=[fv, tv])
    b = root_result.root
    truncated_ensemble_idx = range(len(average_x_truncated))
    count = 0
    for idx in truncated_ensemble_idx:
        for lattice_point in configuration_space[idx]:
            if lattice_point < b:
                count += 1
                break
    decay_rate = count / len(truncated_ensemble_idx)
    print(decay_rate)
def run_check_by_name(self, name, smoothness=0, **kwargs):
    a = .5
    b = sqrt(3)
    xtol = 4 * np.finfo(float).eps
    rtol = 4 * np.finfo(float).eps
    for function, fname in zip(tstutils_functions, tstutils_fstrings):
        if smoothness > 0 and fname in ['f4', 'f5', 'f6']:
            continue
        r = root_scalar(function, method=name, bracket=[a, b], x0=a,
                        xtol=xtol, rtol=rtol, **kwargs)
        zero = r.root
        assert_(r.converged)
        assert_allclose(zero, 1.0, atol=xtol, rtol=rtol,
                        err_msg='method %s, function %s' % (name, fname))
def test_halley_by_name(self):
    r"""Invoke halley through root_scalar()"""
    for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
        r = root_scalar(f, method='halley', x0=3,
                        fprime=f_1, fprime2=f_2, xtol=1e-6)
        assert_allclose(f(r.root), 0, atol=1e-6)