Example #1
 def eccentric_anomaly_at_mean_anomaly(self,M):
     otype = self.orbit_type
     if otype is elliptic:
         e = self.eccentricity
         def f(E,M,e=e):
             return E - e * sin(E) - M
         def dfdE(E,M,e=e):
             return 1 - e * cos(E)
         if hasattr(M,'__iter__'):
             E = np.array([opt.newton(f,π,args=(m,),fprime=dfdE) \
                           for m in M])
         else:
             E = opt.newton(f,π,args=(M,),fprime=dfdE)
         return E
     elif otype.isopen:
         e = self.eccentricity
         def f(F,M,e=e):
             return e * sinh(F) - F - M
         def dfdF(F,M,e=e):
             return e * cosh(F) - 1
         if hasattr(M,'__iter__'):
             F = np.array([opt.newton(f,π,args=(m,),fprime=dfdF) \
                           for m in M])
         else:
             F = opt.newton(f,π,args=(M,),fprime=dfdF)
         return F
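A minimal, self-contained sketch of the elliptic branch above (class context omitted; the imports shown are assumptions):

import numpy as np
from scipy import optimize as opt

# Solve Kepler's equation E - e*sin(E) = M for the eccentric anomaly E.
def kepler(E, M, e):
    return E - e * np.sin(E) - M

def dkepler_dE(E, M, e):
    return 1 - e * np.cos(E)

e, M = 0.3, 1.2                                  # eccentricity, mean anomaly [rad]
E = opt.newton(kepler, np.pi, args=(M, e), fprime=dkepler_dE)
print(E - e * np.sin(E))                         # recovers M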
Example #2
 def setup(self, vec, meth):
     if vec == 'loop':
         if meth == 'newton':
             self.fvec = lambda f, x0, args, fprime, fprime2: [
                 newton(f, x, args=(a0, a1) + args[2:], fprime=fprime)
                 for (x, a0, a1) in zip(x0, args[0], args[1])
             ]
         elif meth == 'halley':
             self.fvec = lambda f, x0, args, fprime, fprime2: [
                 newton(
                     f, x, args=(a0, a1) + args[2:], fprime=fprime,
                     fprime2=fprime2
                 ) for (x, a0, a1) in zip(x0, args[0], args[1])
             ]
         else:
             self.fvec = lambda f, x0, args, fprime, fprime2: [
                 newton(f, x, args=(a0, a1) + args[2:]) for (x, a0, a1)
                 in zip(x0, args[0], args[1])
             ]
     else:
         if meth == 'newton':
             self.fvec = lambda f, x0, args, fprime, fprime2: newton(
                 f, x0, args=args, fprime=fprime
             )
         elif meth == 'halley':
             self.fvec = newton
         else:
             self.fvec = lambda f, x0, args, fprime, fprime2: newton(
                 f, x0, args=args
             )
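The non-'loop' branches above appear to exercise newton's vectorized mode (an array x0 with broadcastable args); a hedged sketch of that behavior, assuming SciPy >= 1.2:

import numpy as np
from scipy.optimize import newton

f = lambda x, a: x**2 - a                        # roots at sqrt(a)
fprime = lambda x, a: 2 * x
a = np.array([2.0, 3.0, 5.0])
roots = newton(f, np.ones_like(a), fprime=fprime, args=(a,))
print(roots)                                     # ~ [1.414, 1.732, 2.236]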
Example #3
def eqSolve():
    ta = 10.
    y = [1.]
    x = [.5]
    # yi, xi, solveRange and h come from the surrounding module
    for i in range(int(solveRange[1] / h)):
        # args must be a tuple, and the keyword is maxiter (not maxiters)
        y.append(newton(yi, x[i], args=(y[i],), maxiter=10000))
        x.append(newton(xi, x[i], args=(y[i],), maxiter=10000))
 def fan_state(self,theta,p,d,M,alpha,leftright_in):
     leftright = {'left':-1,'right':1}
     M_fan = opt.newton(self.fan_mach_func,M,args=(theta,M,alpha,leftright_in))
     p_fan = p*opt.newton(self.fan_eta_func,p,args=(M_fan,M))
     d_fan = d*(p_fan/p)**(1./self.gamma)
     alpha_fan = alpha + leftright[leftright_in]*(
         self.Prandtl_Meyer(M_fan)-self.Prandtl_Meyer(M))
     return p_fan,alpha_fan,d_fan,M_fan
def active_and_irradiated_powerlaw_opacity(a,Mstar,Mdot,Rstar,Tstar,
                          diskMassFrac=0.01,kappa0_r=2.5e-4,kappa0_p=2.5e-4):
    '''Calculate T vs. R for a disk including contributions from irradiation
    (from the star and the accretion shock onto the star), and from viscous
    dissipation. The equation used is a modification of Eq. 18 in Kratter et al. 2008
    that also incorporates the contribution of the accretion shock irradiation.
    The equation is:

    \sigma T_{disk}^4 = ((8/6)*Sigma*kappa0_r*T^2 + 1/(2*Sigma*kappa0_p*T^2))*F_{nu} + F_{irr},

    where F_{nu} is the flux from an active disk defined elsewhere in these codes (see
    Lacc.L_active), and F_{irr}=F_{star}+F_{acc}

    Inputs:
    a            -  The radius, in [AU], at which to find the temperature.
    Mstar        -  The mass of the central object in [Msolar]
    Mdot         -  The accretion rate, in [Msolar/year], onto the central object
    Rstar        -  The radius, in [Rsolar], of the central star
    Tstar        -  The effective temperature of the central object
    diskMassFrac -  The total mass of the circumstellar disk as a [fraction of Mstar]
    kappa0_r     -  The normalization of the Rosseland opacity powerlaw
    kappa0_p     -  The normalization of the Planck opacity powerlaw
    '''

    a_au=a
    a_cgs=a*au2cm
    Rstar_sol=Rstar
    Rstar_cgs=Rstar*Rsolar2cm
    Mstar_sol=Mstar
    Mstar_cgs=Mstar*Msolar2g
    Mdot_sol=Mdot
    Mdot_cgs=Mdot*Msolar2g/year2second

    alphas=alpha(a_au,Rstar_sol,Tstar,Mstar_sol)#radians...
    Sigmar=Sigmar_func(Mstar_sol*diskMassFrac,a_au,beta=-1,rin=0.005,rout=100.)#g/cm^2

    Lstar=4*np.pi*((Rstar_cgs)**2)*stephan_boltzmann*Tstar**4#ergs/s
    Lacc=Lacc_func(Mstar_sol,Rstar_sol,Mdot_sol)#ergs/s

    F_active=(3/(8*np.pi))*(Mdot_cgs)*(Omega_func(Mstar_sol,a_au)**2)*(1-np.sqrt(Rstar_cgs/a_cgs))#ergs/s/cm^2

    A=(8./6.)*Sigmar*kappa0_r*F_active
    B=(0.5/(Sigmar*kappa0_p))*F_active
    C=(alphas/2.)*(Lstar+Lacc)/(4*np.pi*a_cgs**2)
    first_guesses=hydrostatic_disk(Mstar_sol,Rstar_sol,Tstar,a_au)

    Tr=np.array([],dtype=float)
    for params in zip(A,B,C,first_guesses):
        def func(T):
            #return (stephan_boltzmann*T**4)-(params[0]*T**2)-(params[1]*T**-2)-params[2]
            return (stephan_boltzmann*T**4)-(params[0]*T**2)-params[2]
        try:
            temp=newton(func,params[-1])
        except RuntimeError:
            # retry from a much larger initial guess if Newton fails to converge
            temp=newton(func,100.*params[-1])
            
        Tr=np.r_[Tr,temp]
    return Tr
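A hedged toy version of the per-radius root solve inside the loop above, with made-up coefficients (the T**-2 term is dropped, as in the source):

from scipy.optimize import newton

sigma_sb = 5.6704e-5                             # Stefan-Boltzmann constant [cgs]
A, C = 1.0e-3, 1.0e3                             # hypothetical coefficients
T = newton(lambda T: sigma_sb * T**4 - A * T**2 - C, 100.0)
print(T)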
Example #6
def mixture_from_disparity(rect_from, rect_to, disparity, match_prob, match_decay, match_max, disp_prob, disp_decay, disp_max):
	w, h = rect_from.shape[1], rect_from.shape[0]
	match_norm = (1 - exp(-match_decay)) / (1 - exp(-match_decay * match_max))
	disp_norm = (1 - exp(-disp_decay)) / (1 - exp(-disp_decay * disp_max))
	
	# The conditional probability of matching errors
	new_match_max, wd_sum, wd_weighted_sum, wd_count, xr = 0.0, 0.0, 0.0, 0.0, 0
	for y in range(h):
		for x in range(w):
			xr = x - disparity[y, x]
			if xr >= 0 and xr < w and rect_from[y, x] < 256 and rect_to[y, xr] < 256:
				match_delta = abs(rect_from[y, x] - rect_to[y, xr])
				if match_delta > new_match_max:
					new_match_max = match_delta
				tmp = match_prob * match_norm * exp(-match_decay * match_delta)
				wd = tmp / (tmp + (1 - match_prob) / match_max)
				wd_sum += wd
				wd_weighted_sum += wd * match_delta
				wd_count += 1

	match_max = new_match_max + 1
	match_prob = wd_sum / wd_count
	match_y = wd_weighted_sum / wd_sum
	match_decay = opt.newton(lambda decay: 1 / (exp(decay) - 1) - 1 / (exp(decay * match_max) - 1) - match_y, log(1 / match_y + 1))
	
	# The conditional probability of disparity differences
	new_disp_max, wp_sum, wp_weighted_sum, wp_count = 0.0, 0.0, 0.0, 0.0
	for y in range(h):
		for x in range(w):
			xr = x - disparity[y, x]
			if xr >= 0 and xr < w and rect_from[y, x] < 256 and rect_to[y, xr] < 256:
				if x != w - 1:
					disp_delta = abs(disparity[y, x] - disparity[y, x + 1])
					if disp_delta > new_disp_max:
						new_disp_max = disp_delta
					tmp = disp_prob * disp_norm * exp(-disp_decay * disp_delta)
					wp = tmp / (tmp + (1 - disp_prob) / disp_max)
					wp_sum += wp
					wp_weighted_sum += wp * disp_delta
					wp_count += 1
				if y != h - 1:
					disp_delta = abs(disparity[y, x] - disparity[y + 1, x])
					if disp_delta > new_disp_max:
						new_disp_max = disp_delta
					tmp = disp_prob * disp_norm * exp(-disp_decay * disp_delta)
					wp = tmp / (tmp + (1 - disp_prob) / disp_max)
					wp_sum += wp
					wp_weighted_sum += wp * disp_delta
					wp_count += 1
	
	disp_max = new_disp_max + 1
	disp_prob = wp_sum / wp_count
	disp_y = wp_weighted_sum / wp_sum
	disp_decay = opt.newton(lambda decay: 1 / (exp(decay) - 1) - 1 / (exp(decay * disp_max) - 1) - disp_y, log(1 / disp_y + 1))
	
	return array([match_prob, match_decay, match_max, disp_prob, disp_decay, disp_max])
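Both decay updates above solve the same one-dimensional equation, matching the mean of a truncated exponential to the weighted sample mean; a hedged standalone sketch with hypothetical values:

import math
from scipy import optimize as opt

y, x_max = 2.0, 10.0                             # weighted mean, truncation point
decay = opt.newton(
    lambda d: 1 / (math.exp(d) - 1) - 1 / (math.exp(d * x_max) - 1) - y,
    math.log(1 / y + 1))                         # same initial guess as above
print(decay)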
	
def minimum_Ns(x,arg='mutation'):
    if arg=='variance':
        var = x
        guess = sqrt(2*var/lambertw(2*var)).real
        return newton(lambda x: condition(x,var), guess)
    elif arg=='mutation':
        NUd = x
        guess = NUd/lambertw(NUd).real
        return newton(lambda x: condition(x,NUd*x), guess)
Example #8
def getmass_zfourge(N, z):
    if isinstance(N, np.ndarray) or isinstance(N, list):
        mass = np.zeros([len(N)])
        for i, elem in enumerate(N):
            mass[i] = newton(getnum_zfourge, 10., args=(z,elem))
    else:
        mass = newton(getnum_zfourge, 10., args=(z,N))

    return mass
Example #9
    def mass_from_density(self, cum_num_dens, redshift, type='IllustrisCMF'):
        """ Calculate the stellar mass from a cum num dens by inverting the CMF """
        args = (redshift, cum_num_dens)
        if type=='IllustrisCMF':
            mass = newton(self.cmf_fit, 10.0, args=args)
        elif type=='MillenniumCDMF':
            mass = newton(self.mil_cdmf_fit, 10.0, args=args)
        else:
            # avoid returning an undefined name for unknown types
            raise ValueError("unknown CMF type: %s" % type)

        return mass
Example #10
def solve_statics_Mach(Mach, Pt, gamt, ht, s, Tt, W):
    '''Calculate the statics based on Mach'''
    out = [None] # Makes out[0] a reference
    def f(Ps):
        out[0] = solve_statics_Ps(Ps=Ps, s=s, Tt=Tt, ht=ht, W=W)
        return out[0].Mach - Mach
    Ps_guess = Pt * (1.0 + (gamt - 1.0) / 2.0 * Mach ** 2) ** (gamt / (1.0 - gamt))
    newton(f, Ps_guess)
    return out[0]
    def calculate(self, voltage_estimation):

        if self.__number_of_iterations is None:
            solution = optimize.newton(self.__function, voltage_estimation)
        # solution = optimize.fsolve(self.__function, voltage_estimation)
        else:
            solution = optimize.newton(self.__function, voltage_estimation, maxiter=self.__number_of_iterations)
        # solution = optimize.fsolve(self.__function, voltage_estimation, maxfev = self.__number_of_iterations)

        return solution
Example #12
def calc_field(state_e, state_g, freq, unc=None, init=10):
    init = init / 1e4
    state_e = levels[state_e]
    state_g = levels[state_g]
    trans = lambda B: state_e(B) - state_g(B)
    B0 = newton(lambda B: trans(B) - freq, init) * 1e4
    if unc is None:
        return B0
    Delta_B = abs(newton(lambda B: trans(B) - freq - unc, B0) -
                  newton(lambda B: trans(B) - freq + unc, B0)) * 1e4 / 2
    return B0, Delta_B
Example #13
def get_Watson_muKappa_ML(X):
    # Obtain the ML estimates of both mu and kappa, checking the sign of kappa
    n,d = X.shape
    a = 0.5
    c = float(d)/2
    
    S = np.dot(X.T,X)   # Correlation
    S = S/n             # Not really necessary

    # Get eigenvalues to obtain the mu
    D,V = np.linalg.eig(S) # Obtain eigenvalues D and vectors V
    
    print(D)
    
    d_pos = np.argmax(D)
    d_min = np.argmin(D)
    print(d_pos, d_min)
    ## We first assume it is positive; if not, we change the mu
    if (D[0] == D[1]):
        print("Warning: Eigenvalue1 = EigenValue2 in MLmean estimation")
    if (D[-1] == D[-2]):
        print("Warning: Eigenvaluep = EigenValuep-1 in MLmean estimation")

    ## We solve both the positive and the negative case and output the one
    ## with the highest likelihood

    mu_pos = V[:,d_pos]  # This is the vector with the highest lambda
    mu_neg = V[:,d_min]  # This is the vector with the lowest lambda
    
    r_pos = np.dot(mu_pos.T,S).dot(mu_pos)
    r_neg = np.dot(mu_neg.T,S).dot(mu_neg)
#    print r

    # General approximation
    BGG_pos = (c*r_pos -a)/(r_pos*(1-r_pos)) + r_pos/(2*c*(1-r_pos))
#    kappa_pos = BGG_pos
    kappa_pos = newton(get_kappaNewton, BGG_pos, args=([d,r_pos],))

    BGG_neg = (c*r_neg -a)/(r_neg*(1-r_neg)) + r_neg/(2*c*(1-r_neg))
#    kappa_neg = BGG_neg
    kappa_neg = newton(get_kappaNewton, BGG_neg, args=([d,r_neg],))
    
    likelihood_pos = np.sum(Wad.Watson_pdf_log(X.T,mu_pos,kappa_pos))
    likelihood_neg = np.sum(Wad.Watson_pdf_log(X.T,mu_neg,kappa_neg))
    
    print(likelihood_pos, likelihood_neg)
    print(kappa_pos, kappa_neg)
    if (likelihood_pos >=likelihood_neg):
        kappa = kappa_pos
        mu = mu_pos
    else:
        kappa = kappa_neg
        mu = mu_neg
    return mu, kappa
    def Get_k(self, init_guess = 1, tol=1.48e-8, maxiter_newton=50):
        """ Use secant method (function optimize.newton()) to solve system for k"""        
        try:
            k = optimize.newton(lambda k: self.integral_lambda(k) - 1, init_guess, tol=tol, maxiter=maxiter_newton) #Integral(lambda(x,k)) == 1
        except RuntimeError as e:
            # Failed to converge after the first <maxiter_newton> iterations.
            print(e)
            new_init = self.growth_rate            
            print("Solving again with initial guess of", new_init)
            k = optimize.newton(lambda k: self.integral_lambda(k) - 1, new_init, tol=tol, maxiter=maxiter_newton) #Integral(lambda(x,k)) == 1    

        return k
Example #15
def basin(coefficient_list):
    """
    This function uses root solving methods to produce a plot of the basin's
    of attraction for a given quadratic polynomial.

    Inputs:
        coefficient_list: A list with three elements. The first is the
                         coefficient is the number in front of the x**2 term,
                         the second is the coefficent for the x term, and the
                         third is the constant term.

    Outputs:
        roots: These are the roots of the polynomial.
        plot: A plot is automatically generated that shows the quadratic
              polynomial along with the basin of attraction for each of the two
              roots.
    """

    func = np.poly1d(coefficient_list)
    roots = func.r
    if np.iscomplex(roots).any():
        print("Sorry there are complex roots. Try different coefficients")
        return []

    tries = np.linspace(min(roots) - 2, max(roots) + 2, 100)
    ans_root1 = []
    ans_root2 = []

    for i in range(0, tries.size):
        if (optim.newton(func, tries[i]) -.001 < roots[0]
            < optim.newton(func, tries[i]) +.001):
            ans_root1.append(0)
        else:
            ans_root2.append(0)

    ans_root1 = np.array(ans_root1)
    ans_root2 = np.array(ans_root2)

    plt.figure()
    plt.plot(tries,func(tries),'g')
    plt.plot(tries[0:ans_root1.size], ans_root1, 'bo',
             label = str('x -> ' + str(roots[0])))

    plt.plot(tries[ans_root1.size:], ans_root2, 'ro',
             label = str('x -> ' + str(roots[1])))

    plt.legend(loc = 0)

    plt.show()

    return roots
Example #16
 def get_negative_mom_crv(self, angle):
     """Return yield and ultimate curvature."""
     self.set_vals(angle)
     eps_y = self.plateref.f_y / self.plateref.E_s
     self.eps_s = eps_y
     brentq(f=self.get_N, a=0.0, b=self.plateref.eps_cu1, args=("eps_c",))
     yld = (self.moment, self.curvature)
     self.eps_c = self.plateref.eps_cu1
     newton(func=self.get_N, x0=eps_y, args=("eps_s",))
     if self.eps_s > self.plateref.eps_su:
         self.eps_s = self.plateref.eps_su
         brentq(f=self.get_N, a=0.0, b=self.plateref.eps_cu1, args=("eps_c",))
     ult = (self.moment, self.curvature)
     return yld + ult
  def test_secant_finds_all_roots_using_float32(self):
    f = lambda x: (3 * x**2 - 1) / 2.

    x0, x1 = -5, 2
    guess = tf.constant([x0, x1], dtype=tf.float32)

    tolerance = 1e-8
    roots, value_at_roots, _ = self.evaluate(
        tfp.math.secant_root(f, guess, position_tolerance=tolerance))

    expected_roots = [optimize.newton(f, x0), optimize.newton(f, x1)]
    zeros = [0., 0.]

    self.assertAllClose(roots, expected_roots, atol=tolerance)
    self.assertAllClose(value_at_roots, zeros)
  def test_secant_finds_all_roots_from_one_initial_position(self):
    f = lambda x: (63 * x**5 - 70 * x**3 + 15 * x) / 8.

    x0, x1 = -1, 10
    guess = tf.constant([x0, x1], dtype=tf.float64)

    tolerance = 1e-8
    roots, value_at_roots, _ = self.evaluate(
        tfp.math.secant_root(f, guess, position_tolerance=tolerance))

    expected_roots = [optimize.newton(f, x0), optimize.newton(f, x1)]
    zeros = [0., 0.]

    self.assertAllClose(roots, expected_roots, atol=tolerance)
    self.assertAllClose(value_at_roots, zeros)
Example #19
    def Ftube(self, Vi, Ri):
        """calculate output voltage of a tube circuit as function of input voltage
        Vi  input voltage
        Ri  value of resistor Ri
        """

        def fi(Vgk, Vi, Ri):
            return Vi - Vgk - Ri * self.Igk_Vgk(Vgk)  # sum of voltages -> zero

        Vgk = newton(fi, self.Igk_Vgk(0), args=(Vi, Ri))  # Vgk(Vi)

        def fp(Vpk, Vgk, Ipk):
            return Vpk + self.Rp * Ipk(Vgk, Vpk) - self.Vp

        return newton(fp, self.Vp / 2, args=(Vgk, self.Ipk))  # Vpk(Vgk)
def impliedTree( V, callput, S, K, r, T, q = 0., t = 0, params = {'stepCount' : 200}):
    #print params 
    #print V, callput, S, K, r, T, q
    def Tree( sigma ):
        a = lrtree( callput, S, K, r*365, T/365., sigma, q*365, t, params)
        return float(a[0]) - V
    if callput == 1 and S - K < V: 
        #print "line 188"
        vol = newton(Tree, 0.5, fprime=None, args=(), tol=1.e-08, maxiter=100)
    elif callput == -1 and K - S < V: 
        vol = newton(Tree, 0.5, fprime=None, args=(), tol=1.e-08, maxiter=100)
    else: 
        return np.nan
    #vol = brentq(Tree, 0.003, 4, args = (), xtol = 1.e-08)
    return vol / math.sqrt(365)
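A hedged sketch of the same root-finding pattern with a closed-form Black-Scholes pricer in place of the lrtree lattice:

import numpy as np
from scipy.optimize import newton
from scipy.stats import norm

def bs_call(S, K, r, T, sigma):
    d1 = (np.log(S / K) + (r + 0.5 * sigma**2) * T) / (sigma * np.sqrt(T))
    d2 = d1 - sigma * np.sqrt(T)
    return S * norm.cdf(d1) - K * np.exp(-r * T) * norm.cdf(d2)

price, S, K, r, T = 10.0, 100.0, 100.0, 0.01, 1.0
iv = newton(lambda sig: bs_call(S, K, r, T, sig) - price, 0.5)
print(iv)                                        # ~0.24 for these inputs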
Example #21
def calc(M1,M2):
    G = 6.67E-11 #duh
    a = 5E8 #meters, separation
    MS = 1.989E30 #kg, Sun
    ME = 5.972E24
    M1 *= MS #kg, primary
    M2 *= MS #kg, secondary
    r1 = M2*a/(M1+M2) #meters, M1 distance from CM
    r2 = a - r1 #meters, M2 distance from CM
    w = np.sqrt(G*(M1+M2) / a**3) #rad/sec, orbital angular velocity

    dx = 0.01 #resolution
    x = np.arange(-2,2+dx,dx)*a
    y = -np.arange(-2,2+dx,dx)*a

    #define proper matrices
    x2 = np.asarray([x**2 for i in x])
    y2 = np.transpose(x2)

    #convert to polar coordinates
    r = np.sqrt(x2 + y2)
    t = [[np.arctan2(i,j) for j in x] for i in y]

    #distance from M1 and M2 to point (r,t)
    s1 = np.sqrt(r1**2 + r**2 + 2*r1*r*np.cos(t))
    s2 = np.sqrt(r2**2 + r**2 - 2*r2*r*np.cos(t))

    #effective gravitational potential
    phi = -G*(M1/s1 + M2/s2) - 0.5*w*w*r*r

    #convert everything to be in nice units
    x /= a
    y /= a
    r1 /= a
    r2 /= a
    phi /= G*(M1+M2)/a

    #calculate lagrange points and contour levels
    L1 = (r2+newton(func,1,fprime=func_deriv,args=(-1,1,M1,M2)),0)
    L2 = (r2+newton(func,1,fprime=func_deriv,args=(1,1,M1,M2)),0)
    L3 = (r2+newton(func,-1,fprime=func_deriv,args=(-1,-1,M1,M2),maxiter=10000),0)
    L4 = ((M1-M2)/(M1+M2)/2,np.sqrt(3)/2)
    L5 = ((M1-M2)/(M1+M2)/2,-np.sqrt(3)/2)
    phi1 = (-G*(M1/((r1+L1[0])*a) + M2/((r2-L1[0])*a)) - 0.5*w*w*L1[0]*L1[0]*a*a) / (G*(M1+M2)/a)
    phi2 = (-G*(M1/((r1+L2[0])*a) + M2/((L2[0]-r2)*a)) - 0.5*w*w*L2[0]*L2[0]*a*a) / (G*(M1+M2)/a)
    phi3 = (-G*(M1/((-L3[0]-r1)*a) + M2/((r2-L3[0])*a)) - 0.5*w*w*L3[0]*L3[0]*a*a) / (G*(M1+M2)/a)

    return (x,y,phi,r1,r2,L1,L2,L3,L4,L5,phi1,phi2,phi3)
 def Adiabats(self, pt, pmin, pmax, species = [], num = 200, endpoint = True):
     plev = logspace(log10(pmin), log10(pmax), num = num, endpoint = endpoint)
     result = amap(lambda p: 
             newton(lambda v: self.PotentialT(v, p, species) - pt, 
                 pt * (p / self.Pref)**(self.Rdry / self.cpdry)),
             plev)
     return plev, result
def P(phi, phib, df):
	""" Numerically solve for partition coefficient as a
	    function of \phi_s """
	if f(0,phi,phib,df)*f(1,phi,phib,df) < 0:
		return opt.bisect(f, 0, 1, args=(phi,phib,df), maxiter=500) # Bisection method
	else:
		return opt.newton(f, 1.0, args=(phi,phib,df), maxiter=5000) # Newton-Raphson
def __find_optimal_position(D):
    f = __make_pos_eqn(D)
    R = 0.5 * D

    x0 = newton(f, R / sqrt(2.0))

    return x0
Example #25
 def map_dac_to_deflection(self, dac):
     defl = 0
     if self.use_deflection:
         c = self._deflection_correction_factors[:]
         c[-1] -= dac
         defl = optimize.newton(poly1d(c), 1)
     return defl
Example #26
def preos_reverse(molecule, T, f, plotcubic=False, printresults=True):
    """
    Reverse Peng-Robinson equation of state (PREOS) to obtain pressure for a particular fugacity
    :param molecule: Molecule object of interest
    :param T: float temperature in Kelvin
    :param f: float fugacity in bar
    :param plotcubic: bool plot cubic polynomial in compressibility factor
    :param printresults: bool print off properties

    Returns a Dict() of molecule properties at this T and f.
    """
    # build function whose root gives the pressure: difference between desired fugacity and that obtained from preos
    def g(P):
        """
        :param P: pressure
        """
        return (f - preos(molecule, T, P, plotcubic=False, printresults=False)["fugacity(bar)"])

    # Solve preos for the pressure
    P = newton(g, f)  # pressure

    # Obtain remaining parameters
    pars = preos(molecule, T, P, plotcubic=plotcubic, printresults=printresults)
    rho = pars["density(mol/m3)"]
    fugacity_coeff = pars["fugacity_coefficient"]
    z = pars["compressibility_factor"]

    return {"density(mol/m3)": rho, "fugacity_coefficient": fugacity_coeff,
            "compressibility_factor": z, "pressure(bar)": P,
            "molar_volume(L/mol)": 1.0 / rho * 1000.0}
Example #27
    def find_next_crack(self):
        '''Finds the crack openings given W = sum(w_i) and crack list.
        an iterative procedure of solving non-linear equations'''
        def residuum(W):
            '''Callback method for the identification of the
            next emerging crack, calculated as the difference between
            the current matrix stress and strength. See the scipy newton call below.
            '''
            self.W = W
            if self.W <= 0.0 or self.W > 0.3 * len(self.CB_objects_lst):
                min_strength = np.min(self.matrix_strength)
                residuum = min_strength - self.CB_model.E_c * self.W / self.length
            else:
                residuum = np.min(self.matrix_strength - self.eps_m * self.CB_model.E_m)
            return residuum

        W_pre_crack = newton(residuum, self.CB_objects_lst[-1].pre_cracking_W)
        position_new_crack = self.x_arr[np.argmin(self.matrix_strength - self.eps_m * self.CB_model.E_m)]         
        sigmac_pre_crack = self.sorted_CB_lst[0].get_sigma_c(self.w_lst[0])
        if len(self.CB_objects_lst) > 2:
            plt.plot(self.x_arr, self.eps_f, color='red', lw=2)
            plt.plot(self.x_arr, self.eps_m, color='blue', lw=2)
            plt.plot(self.x_arr, self.matrix_strength / self.CB_model.E_m, color='black', lw=2)
            plt.title('lm')
            plt.show()
#         for i, w in enumerate(self.w_lst):
#             print self.sorted_CB_lst[i].get_sigma_c(w)
        #epsc = (np.sum(np.array(w_lst)) + np.trapz(self.sigma_m(w_lst=w_lst),self.x_arr) / self.CB_model.E_m) / self.length
        return sigmac_pre_crack, position_new_crack, W_pre_crack, self.w_lst
Example #28
def exact_solution(x, time, mesh_size):
    u_exact = np.zeros(mesh_size)

    for i in (range(mesh_size)):
        u_exact[i] = sco.newton(exact_func, 0.5, fprime=exact_func_prime, args=(x[i], time), maxiter=5000)

    return u_exact
def calc_floorheating(Qh, tm, Qh0, tsh0, trh0, Af):
    nh = 0.2
    if Qh > 0:
        tsh0 = tsh0 + 273
        trh0 = trh0 + 273
        tm = tm + 273
        mCw0 = Qh0 / (tsh0 - trh0)
        # minimum
        k1 = 1 / mCw0

        # simple calculation based on SIA 2044, which in turn is based on EMPA's book on TABS
        R_tabs = 0.08       # m2-K/W from SIA 2044
        A_tabs = 0.8 * Af   # m2
        H_tabs = A_tabs / R_tabs

        def fh(x):
            Eq = mCw0 * k2 - (x+k2-tm) * H_tabs
            return Eq

        k2 = Qh * k1
        result = sopt.newton(fh, trh0, maxiter=1000, tol=0.1) - 273
        trh = result.real
        tsh = trh + k2
        mCw = Qh / (tsh - trh)
    else:
        mCw = 0
        tsh = 0
        trh = 0
    return tsh, trh, mCw # C,C, W/C
Example #30
def vSpinodal(sigma,alpha,m):
	"""The actual Spinodal generating function which calls vspin
	1. Feed some initial values into the function
	2. Call the function
	3. Phivals picks the range of phi
	4. Given your selected phi, sigma; guess a psi, and try to minimize f2, once f2 is zero, return that psi
	5. Feed back into flory function
	"""
	#Range of Phi
	phivals = np.arange(1e-2,0.10,0.001)

	i=0
	xvals = np.zeros((len(phivals)))
	for phi in phivals:

		#Guess Parameters
		x0 = np.zeros((1))

		#Guess psi
		x0.fill(0.01)

		#Call the solver, return a value of psi
		xvals[i] = newton(vspin, x0, args = (phi,sigma, alpha, m))
		i += 1
	return phivals,xvals
Example #31
rmin = 0.90 * r0  # minimum r value
r[0] = rmin
phi[0] = 0

# total energy for the system
E = m2 * g * rmin + Mz**2 / (2 * m1 * rmin**2)
print('E =', E)


# determine rmax by solving cubic equation
def f(rmax, m1, m2, g, Mz):
    y = E - (m2 * g * rmax + Mz**2 / (2 * m1 * rmax**2))
    return y


rmax = optimize.newton(f, 1.1 * r0, args=(m1, m2, g, Mz))
print('rmin =', rmin)
print('r0 =', r0)
print('rmax =', rmax)

# plot effective potential
rs = np.linspace(0.2, 1.2 * rmax, 1000)
Ueff = m2 * g * rs + Mz**2 / (2 * m1 * rs**2)
plt.figure()
plt.plot(rs, Ueff)
plt.axhline(E, color='grey', ls='dashed')
plt.axvline(rmin, color='grey')
plt.axvline(rmax, color='grey')
plt.xlabel('r')
plt.ylabel('Ueff')
plt.savefig('Ueff_small_oscillations.pdf')
Example #32
def ar1(x):
    r"""Allen and Smith AR(1) model estimation.
    Syntax: g, a, mu2 = ar1(x)

    Input:  x - time series (univariate).

    Output: g - estimate of the lag-one autocorrelation.
        a - estimate of the noise variance.
        mu2 - estimated square on the mean.

    AR1 uses the algorithm described by Allen and Smith 1995, except that
    Matlab's 'fzero' is used rather than Newton-Raphson.

    Fzero in general can be rather picky - although I haven't had any problem
    with its implementation here, I recommend occasionally checking the output
    against the simple estimators in AR1NV.

    Alternative AR(1) estimators: ar1cov, ar1nv, arburg, aryule

    Written by Eric Breitenberger.      Version 1/21/96
    Please send comments and suggestions to [email protected]
    Updated, optimized & stabilized by Aslak Grinsted 2003-2005.
    """

    N = len(x)
    m = x.mean()
    x = x - m

    # Lag zero and one covariance estimates:
    c0 = np.dot(x, x / N)
    c1 = np.dot(x[0:N - 1], x[1:N]) / (N - 1)

    g0 = c1 / c0  # Initial estimate for gamma.

    # Optimize gammest.
    def gammest(gin):
        r"""Used by AR1 to compute a function for minimization by fzero.

        Written by Eric Breitenberger.  Version 1/21/96
        Please send comments and suggestions to [email protected]
        """

        gk = np.arange(1, N)
        gk = gin**gk
        mu2 = (1.0 / N) + (2.0 / N**2.0) * np.sum(np.arange(N - 1, 0, -1) * gk)
        gout = (1.0 - g0) * mu2 + g0 - gin
        if gout > 1:
            gout = np.nan

        return gout

    # Find g by getting zero of `gammest`.
    # There are tons of optimization algorithms in SciPy.  I'm not sure which
    # compares better with fzero.
    g = optimize.newton(gammest, g0, tol=0.0001)

    gk = np.arange(1, N)
    gk = g**gk
    mu2 = (1.0 /
           N) + (1.0 / N**2.0) * 2.0 * np.sum(np.arange(N - 1, 0, -1) * gk)
    c0est = c0 / (1.0 - mu2)
    a = np.sqrt((1.0 - g**2) * c0est)

    return g, a, mu2
Example #33
def group_lasso(X,
                y,
                alpha,
                groups,
                max_iter=MAX_ITER,
                rtol=1e-6,
                verbose=False):
    """
    Linear least-squares with l2/l1 regularization solver.

    Solves problem of the form:

               .5 * |Xb - y| + n_samples * alpha * Sum(w_j * |b_j|)

    where |.| is the l2-norm and b_j is the coefficients of b in the
    j-th group. This is commonly known as the `group lasso`.

    Parameters
    ----------
    X : array of shape (n_samples, n_features)
        Design Matrix.

    y : array of shape (n_samples,)

    alpha : float or array
        Amount of penalization to use.

    groups : array of shape (n_features,)
        Group label. For each column, it indicates
        which group it belongs to.

    rtol : float
        Relative tolerance. ensures ||(x - x_) / x_|| < rtol,
        where x_ is the approximate solution and x is the
        true solution.

    Returns
    -------
    x : array
        vector of coefficients

    References
    ----------
    "Efficient Block-coordinate Descent Algorithms for the Group Lasso",
    Qin, Scheninberg, Goldfarb
    """

    # .. local variables ..
    X, y, groups, alpha = map(np.asanyarray, (X, y, groups, alpha))
    if len(groups) != X.shape[1]:
        raise ValueError("Incorrect shape for groups")
    w_new = np.zeros(X.shape[1], dtype=X.dtype)
    alpha = alpha * X.shape[0]

    # .. use integer indices for groups ..
    group_labels = [np.where(groups == i)[0] for i in np.unique(groups)]
    H_groups = [np.dot(X[:, g].T, X[:, g]) for g in group_labels]
    eig = list(map(linalg.eigh, H_groups))  # list() so eig[i] also works on Python 3
    Xy = np.dot(X.T, y)
    initial_guess = np.zeros(len(group_labels))

    def f(x, qp2, eigvals, alpha):
        return 1 - np.sum(qp2 / ((x * eigvals + alpha)**2))

    def df(x, qp2, eigvals, penalty):
        # .. first derivative ..
        return np.sum((2 * qp2 * eigvals) / ((penalty + x * eigvals)**3))

    if X.shape[0] > X.shape[1]:
        H = np.dot(X.T, X)
    else:
        H = None

    for n_iter in range(max_iter):
        w_old = w_new.copy()
        for i, g in enumerate(group_labels):
            # .. shrinkage operator ..
            eigvals, eigvects = eig[i]
            w_i = w_new.copy()
            w_i[g] = 0.
            if H is not None:
                X_residual = np.dot(H[g], w_i) - Xy[g]
            else:
                X_residual = np.dot(X.T, np.dot(X[:, g], w_i)) - Xy[g]
            qp = np.dot(eigvects.T, X_residual)
            if len(g) < 2:
                # for single groups we know a closed form solution
                w_new[g] = -np.sign(X_residual) * max(
                    abs(X_residual) - alpha, 0)
            else:
                if alpha < linalg.norm(X_residual, 2):
                    initial_guess[i] = optimize.newton(f,
                                                       initial_guess[i],
                                                       df,
                                                       tol=.5,
                                                       args=(qp**2, eigvals,
                                                             alpha))
                    w_new[g] = -initial_guess[i] * np.dot(
                        eigvects / (eigvals * initial_guess[i] + alpha), qp)
                else:
                    w_new[g] = 0.

        # .. dual gap ..
        max_inc = linalg.norm(w_old - w_new, np.inf)
        if True:  #max_inc < rtol * np.amax(w_new):
            residual = np.dot(X, w_new) - y
            group_norm = alpha * np.sum(
                [linalg.norm(w_new[g], 2) for g in group_labels])
            if H is not None:
                norm_Anu = [linalg.norm(np.dot(H[g], w_new) - Xy[g]) \
                           for g in group_labels]
            else:
                norm_Anu = [linalg.norm(np.dot(H[g], residual)) \
                           for g in group_labels]
            norm_Anu = np.asarray(norm_Anu)  # array so comparison/division below work
            if np.any(norm_Anu > alpha):
                nnu = residual * np.min(alpha / norm_Anu)
            else:
                nnu = residual
            primal_obj = .5 * np.dot(residual, residual) + group_norm
            dual_obj = -.5 * np.dot(nnu, nnu) - np.dot(nnu, y)
            dual_gap = primal_obj - dual_obj
            if verbose:
                print('Relative error: %s' % (dual_gap / dual_obj))
            if np.abs(dual_gap / dual_obj) < rtol:
                break

    return w_new
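A hypothetical usage sketch of the solver above (it assumes MAX_ITER and the numpy/scipy imports of the source module are in scope):

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((50, 4))
y = X @ np.array([1.0, -2.0, 0.0, 0.0]) + 0.01 * rng.standard_normal(50)
w = group_lasso(X, y, alpha=0.1, groups=np.array([0, 0, 1, 1]))
print(w)                                         # first group active, second shrunk toward zero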
Example #34
def _estimate_dofs(resp,
                   gamma_priors,
                   current_dofs,
                   n_features,
                   max_iter=100,
                   tol=1e-3):
    """Estimates the degrees-of-freedom.

    Parameters
    ----------
    resp : array-like, shape (n_samples, n_components)
        The responsibilities for each data sample in X.

    gamma_priors : array-like, shape (n_samples, n_components)
        The gamma priors for each data sample in X.

    current_dofs : array-like, shape (n_components,)
        Current degrees-of-freedom estimation.

    n_features : int
        Number of features.

    max_iter : int, defaults to 100.
        The number of iterations to perform for degrees-of-freedom estimation.

    tol : float, defaults to 1e-3.
        The degrees-of-freedom estimation convergence threshold.

    Returns
    -------
    dofs : array-like, shape (n_components,)
        The degrees-of-freedom estimation.
    """
    n_components = current_dofs.shape[0]
    dofs = np.zeros((n_components, ))
    for k in range(n_components):
        constant = (
            1.0 + (1.0 / resp[:, k].sum(axis=0)) *
            (resp[:, k] *
             (np.log(gamma_priors[:, k]) - gamma_priors[:, k])).sum(axis=0) +
            digamma(1.0 * (current_dofs[k] + n_features) / 2.0) -
            np.log(1.0 * (current_dofs[k] + n_features) / 2.0))

        def function(df):
            return -digamma(1.0 * df / 2.0) + np.log(1.0 * df / 2.0) + constant

        def first_derivative(df):
            return -(1.0 / 2.0) * polygamma(1, 1.0 * df / 2.0) + 1.0 / df

        def second_derivative(df):
            return -(1.0 / 4.0) * polygamma(2,
                                            1.0 * df / 2.0) - 1.0 / (df * df)

        dofs[k] = newton(
            function,
            current_dofs[k],
            first_derivative,
            args=(),
            maxiter=max_iter,
            tol=tol,
            fprime2=second_derivative,
            full_output=False,
            disp=False,
        )

    return dofs
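The call above supplies both fprime and fprime2, which switches newton to Halley's method; a minimal sketch of the same signature:

from scipy.optimize import newton

root = newton(lambda x: x**3 - 2, 1.0,
              fprime=lambda x: 3 * x**2,
              fprime2=lambda x: 6 * x)
print(root)                                      # cube root of 2, ~1.2599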
Example #35

upper_bound_eos = []
upper_bound_pc = []
lower_bound_eos = []
lower_bound_pc = []

baryon_density1 = 1.85 * 0.16
baryon_density2 = 3.2 * 0.16
pressure1 = 30
pressure2 = causality_p2(pressure1)
Preset_Pressure_final = 1e-8
Preset_rtol = 1e-6
pressure3 = opt.newton(caulality_central_pressure_at_peak,
                       trial_p3(pressure1, pressure2),
                       tol=0.1,
                       args=(pressure1, pressure2, Preset_Pressure_final,
                             Preset_rtol))
upper_bound_eos.append(
    EOS_BPSwithPoly([
        baryon_density0, pressure1, baryon_density1, pressure2,
        baryon_density2, pressure3, baryon_density3
    ]))
upper_bound_pc.append(
    Maxmass(Preset_Pressure_final, 1e-4, upper_bound_eos[-1])[1])
print(upper_bound_eos[-1].args)
pressure1 = 8.4
pressure2, pressure3, pressure_center = p2p3_ofmaxmass(2.0, Maxmass,
                                                       Preset_Pressure_final,
                                                       Preset_rtol, pressure1)
lower_bound_eos.append(
    EOS_BPSwithPoly([
        baryon_density0, pressure1, baryon_density1, pressure2,
        baryon_density2, pressure3, baryon_density3
    ]))
Example #36
import numpy as np
import scipy.optimize as sp


def solve(a):
    return np.sin(a) - (5 * a / 6)


scl = sp.newton(solve, 1.5)

print(f'Solution: {scl} rad')

a = 5 / np.sin(scl)
print(f'radius (a) = {a} cm')

OC = np.cos(scl) * a
print(f'OC = {OC} cm')

print(f'CD = {a - OC} cm')
Example #37
    def getQP(self,
              e0,
              bandmin=None,
              bandmax=None,
              debug=False,
              secant=True,
              braket=None):
        """
        Get quasiparticle states
    
        Arguments:
        e0 -> bare eigenvalues in eV
        """
        from scipy.optimize import bisect, newton
        from scipy.interpolate import interp1d
        from scipy.misc import derivative

        #check if the eigenvalues have the correct dimensions
        if len(e0) != self.nqps:
            raise ValueError('Wrong dimensions in bare eigenvalues')

        #in case something is strange we plot the stuff
        def error(nqp):
            ax = plt.gca()

            #plot 0
            ax.axhline(0, c='k', lw=1)

            #set limits
            semin = min(self.se[nqp].real)
            semax = max(self.se[nqp].real)
            plt.ylim(semin, semax)

            #plot self energy
            self.plot(ax, nqp=nqp)

            #plot omega-e0
            emin = min(self.energies[nqp].real)
            emax = max(self.energies[nqp].real)
            x = np.linspace(emin, emax, 100)
            plt.plot(x, x - e0[nqp])

            #plot imaginary part of Greens function
            x = self.energies[nqp].real
            y = self.green[nqp].imag
            plt.plot(x, y / max(y) * semax)

            #plot eqp
            #plt.axvline(self.eqp[nqp],lw=1)
            #plt.axvline(e0[nqp],lw=1)

            plt.legend(frameon=False)
            plt.show()

        if bandmin is None: bandmin = self.bandmin
        if bandmax is None: bandmax = self.bandmax

        self.eqp = np.zeros([self.nqps], dtype=complex)
        self.z = np.zeros([self.nqps], dtype=complex)
        for nqp in range(self.nqps):

            band = self.band1[nqp]
            kpt = self.kindex[nqp]
            if debug: print("%3d %3d %3d %8.4lf" % (nqp, kpt, band, e0[nqp]))

            if not (bandmin <= band <= bandmax):
                continue

            #get x and y
            x = self.energies[nqp].real
            y = self.se[nqp]

            #interpolate real part of function
            f = interp1d(x, y.real - x + e0[nqp], kind='slinear')

            #find zero
            if secant:
                try:
                    eqp = newton(f, e0[nqp], maxiter=200)
                except ValueError as msg:
                    print(msg)
                    if debug: error(nqp)
            else:
                if braket:
                    emin = e0[nqp] - braket
                    emax = e0[nqp] + braket
                else:
                    emin = min(x)
                    emax = max(x)

                eqp = bisect(f, emin, emax)

            #interpolate whole function
            f = interp1d(x, y)

            #calculate Z factors
            dse = derivative(f, eqp, dx=1e-8)
            z = 1. / (1 - dse)

            #find Im(Se(EQP)) which corresponds to the lifetime
            lif = f(eqp).imag
            eqp += 1j * lif

            #store values
            self.eqp[nqp] = eqp
            self.z[nqp] = z

            #check for potential errors
            if z > 1 and debug:
                print(z)
                error(nqp)

        return self.eqp, self.z
Example #38
 def doselfconsistent( self ):
 
     iteration = 0
     u_diff = 1.0
     convergence_threshold = 1e-5
     print "RHF energy =", self.ints.fullEhf
     
     while ( u_diff > convergence_threshold ):
     
         iteration += 1
         print "DMET iteration", iteration
         umat_old = np.array( self.umat, copy=True )
         rdm_old = self.transform_ed_1rdm() # At the very first iteration, this matrix will be zero
         
         # Find the chemical potential for the correlated impurity problem
         start_ed = time.time()
         if (( self.method == 'CC' ) and ( self.CC_E_TYPE == 'CASCI' )):
             self.mu_imp = 0.0
             self.doexact( self.mu_imp )
         else:
             self.mu_imp = optimize.newton( self.numeleccostfunction, self.mu_imp )
             print "   Chemical potential =", self.mu_imp
         stop_ed = time.time()
         self.time_ed += ( stop_ed - start_ed )
         print "   Energy =", self.energy
         # self.verify_gradient( self.square2flat( self.umat ) ) # Only works for self.doSCF == False!!
         if ( self.SCmethod != 'NONE' and not(self.altcostfunc) ):
             self.hessian_eigenvalues( self.square2flat( self.umat ) )
         
         # Solve for the u-matrix
         start_cf = time.time()
         if ( self.altcostfunc and self.SCmethod == 'BFGS' ):
             result = optimize.minimize( self.alt_costfunction, self.square2flat( self.umat ), jac=self.alt_costfunction_derivative, options={'disp': False} )
             self.umat = self.flat2square( result.x )
         elif ( self.SCmethod == 'LSTSQ' ):
             result = optimize.leastsq( self.rdm_differences, self.square2flat( self.umat ), Dfun=self.rdm_differences_derivative, factor=0.1 )
             self.umat = self.flat2square( result[ 0 ] )
         elif ( self.SCmethod == 'BFGS' ):
             result = optimize.minimize( self.costfunction, self.square2flat( self.umat ), jac=self.costfunction_derivative, options={'disp': False} )
             self.umat = self.flat2square( result.x )
         self.umat = self.umat - np.eye( self.umat.shape[ 0 ] ) * np.average( np.diag( self.umat ) ) # Remove arbitrary chemical potential shifts
         if ( self.altcostfunc ):
             print("   Cost function after convergence =", self.alt_costfunction( self.square2flat( self.umat ) ))
         else:
             print("   Cost function after convergence =", self.costfunction( self.square2flat( self.umat ) ))
         stop_cf = time.time()
         self.time_cf += ( stop_cf - start_cf )
         
         # Possibly print the u-matrix / 1-RDM
         if self.print_u:
             self.print_umat()
         if self.print_rdm:
             self.print_1rdm()
         
         # Get the error measure
         u_diff   = np.linalg.norm( umat_old - self.umat )
         rdm_diff = np.linalg.norm( rdm_old - self.transform_ed_1rdm() )
         self.umat = self.relaxation * umat_old + ( 1.0 - self.relaxation ) * self.umat
         print "   2-norm of difference old and new u-mat =", u_diff
         print "   2-norm of difference old and new 1-RDM =", rdm_diff
         print "******************************************************"
         
         if ( self.SCmethod == 'NONE' ):
             u_diff = 0.1 * convergence_threshold # Do only 1 iteration
     
     print "Time cf func =", self.time_func
     print "Time cf grad =", self.time_grad
     print "Time dmet ed =", self.time_ed
     print "Time dmet cf =", self.time_cf
     
     return self.energy
#!/usr/bin/env python
# this program helps to determine the zero of a function in a certain interval
import scipy.optimize as sc  # this module contains the brentq function, which determines the root of a function

def fonction(x):
	return x**3+x**2-x+3
x0=-5
a=-5
b=10
print(sc.brentq(fonction,a,b))
print(sc.bisect(fonction,a,b))
print(sc.newton(fonction,x0))
Example #40
 def solver(f, x):
     if method == "scipy":
         root = newton(f, x)
     else:
         raise NotImplementedError("Unknown method.")
     return root
Example #41
def get_bond_slip(s_arr, tau_pi_bar=10, Ad=0.5, s0=5e-3, G=36000.0):
    '''for plotting the bond slip relationship-Non analytical
    '''
    # arrays to store the values
    # nominal stress
    tau_arr = np.zeros_like(s_arr)
    # sliding stress
    tau_pi_arr = np.zeros_like(s_arr)
    # damage factor
    w_arr = np.zeros_like(s_arr)
    # sliding slip
    xs_pi_arr = np.zeros_like(s_arr)

    # material parameters
    # shear modulus [MPa]
    G = G
    # damage - brittleness [MPa^-1]
    Ad = Ad
    # Kinematic hardening modulus [MPa]
    gamma = 0
    # constant in the sliding threshold function
    tau_pi_bar = tau_pi_bar

    Z = lambda z: 1. / Ad * (-z) / (1 + z)

    # damage - Threshold
    s0 = s0
    Y0 = 0.5 * G * s0**2
    # damage function

    # state variables
    tau_pi_i = 0
    alpha_i = 0.
    xs_pi_i = 0
    z_i = 0.
    w_i = 0.  # damage
    X_i = gamma * alpha_i

    def Fw(s_i, s_pi_i, w_i, dw):
        Yw_i = 0.5 * G * s_i**2
        Ypi_i = 0.5 * G * (s_i - s_pi_i)**2
        Y_i = Yw_i + Ypi_i
        fw = Yw_i - (Y0 + Z(z_i))
        return fw

    for i in range(1, len(s_arr)):
        print('increment', i)
        s_i = s_arr[i]
        ds_i = s_i - s_arr[i - 1]
        Yw_i = 0.5 * G * s_i**2
        # damage threshold function
        Ypi_i = 0.5 * G * (s_i - xs_pi_i)**2
        Y_i = Yw_i + Ypi_i
        fw = Yw_i - (Y0 + Z(z_i))
        #fw = Y_i - (Y0 + Z(z_i))
        # in case damage is activated
        fw_newton = Fw(s_i, xs_pi_i, w_i, 0)
        print('fw', fw, fw_newton)

        if fw > 1e-8:
            dw = 0
            dw_newton = newton(lambda dw: Fw(s_i, xs_pi_i, w_i, dw), 0)
            f_dw = dw - G * (s_i**2) * Ad * (1 + z_i - dw)**2
            # implicit equation of damage evolution
            it = 0
            while abs(f_dw) > 1e-10:
                # Newton-Raphson scheme
                it += 1
                f_dw = dw - G * \
                    (s_i) * (s_i - s_arr[i - 1]) * Ad * (1 + z_i - dw)**2
                d_f_dw = 1 + 2 * G * (s_i**2) * Ad * (1 + z_i - dw)
                dw_new = dw - (f_dw / d_f_dw)
                dw = dw_new
            print('obtained', dw, dw_newton)

            w_i = w_i + dw
            z_i = -w_i

        print('w = ', w_i)
        tau_pi_i = w_i * G * (s_i - xs_pi_i)
        f_pi_i = np.fabs(tau_pi_i - X_i) - tau_pi_bar

        if f_pi_i > 1e-6:
            # Return mapping
            d_lamda = f_pi_i / (w_i * G + gamma)
            tau_pi_i = tau_pi_i - w_i * G * d_lamda * np.sign(tau_pi_i - X_i)
            xs_pi_i = s_i - (tau_pi_i / (w_i * G))
            X_i = X_i + gamma * d_lamda * np.sign(tau_pi_i - X_i)

        # update all the state variables

        tau = (1 - w_i) * G * s_i + w_i * G * (s_i - xs_pi_i)
        tau_arr[i] = tau
        tau_pi_arr[i] = tau_pi_i
        w_arr[i] = w_i
        xs_pi_arr[i] = xs_pi_i

        print('stress =', tau)
        print('sliding - stress =', tau_pi_i)
        print('strain =', s_i)
        # print 'sliding strain ' , xs_pi_i
        # print 'total strain ' , s_i
        print('------------------------ ')

    return s_arr, tau_arr, tau_pi_arr, w_arr, xs_pi_arr
 def iter_func(guess):
     # guess_updated = (2 * guess ** 3 + 1) / (3 * guess ** 2 - 3)
     guess_updated = newton(objective_func, guess, maxiter=1, tol=1e10)
     # guess_updated = np.sin(guess * 5) / (5 * np.cos(guess * 5))
     return guess_updated
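A sketch of the trick iter_func exploits above: with maxiter=1 and an enormous tol, newton performs exactly one update and returns it immediately:

from scipy.optimize import newton

f = lambda x: x**2 - 2
step = newton(f, 1.0, fprime=lambda x: 2 * x, maxiter=1, tol=1e10)
print(step)                                      # 1.5 == 1.0 - f(1.0) / f'(1.0)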
def simulate_inhomogeneous_poisson(t0, t1, lambda_rate_fn,
                                   integrated_lambda_rate_fn, M):
    """ Simulation of homogeneours Poisson process in [t0, t1]
    
    Parameters
    ----------
    t0 : float
        Initial time for the simulation
    t1 : float
        Final time for the simulation
    lambda_rate_fn: callable
        Function of t that returns the instantaneous rate
    integrated_lambda_rate_fn: callable
       Function of `s` and `t` that returns the integrated rate in [s,t]
    M: int
        Number of count sequences in simulation
            
    Returns
    -------
    times: list of M lists 
        Simulation consisting of M sequences (lists) of arrival times.
 
    Example 1
    ---------

    >>> from scipy.integrate import quad
    >>> from scipy.stats import expon
    >>> import matplotlib.pyplot as plt
    >>> import stochastic_plots as stoch
    >>> import arrival_process_simulation as arrival

    >>> t0 = 0.0
    >>> t1 = 2000.0
    >>> M = 3 
    >>> lambda_rate = 0.5
    >>> beta_scale = 1.0/lambda_rate
    >>> def lambda_rate_fn(t): return lambda_rate
    >>> def integrated_lambda_rate_fn(s,t): return (lambda_rate * (t-s))

    >>> arrival_times = arrival.simulate_inhomogeneous_poisson(t0, t1, 
                                               lambda_rate_fn, 
                                               integrated_lambda_rate_fn, 
                                               M)
    >>> fig, axs = plt.subplots(M, sharex=True, num=1, figsize=(10,8))
    >>> for m in range(M):
            axs[m].bar(arrival_times[m], np.ones_like(arrival_times[m]))
    >>> 
    >>> interarrival_times = np.diff(arrival_times[0])
    >>> def pdf(x): return expon.pdf(x, loc = 0.0, scale = beta_scale)

    >>> stoch.plot_pdf(interarrival_times, pdf, fig_num=2)
    >>> _ = plt.xlabel('t')
    >>> _ = plt.ylabel('pdf(t)')

    Example 2
    ---------

    >>> from scipy.integrate import quad
    >>> from scipy.stats import expon
    >>> import matplotlib.pyplot as plt
    >>> import stochastic_plots as stoch
    >>> import arrival_process_simulation as arrival

    >>> t0 = 0.0
    >>> t1 = 2000.0
    >>> M = 3 
    >>> def lambda_rate_fn(t): return 1.0001 + np.sin(0.01*t)
    >>> def integrated_lambda_rate_fn(s,t): return quad(lambda_rate_fn, s, t)[0]

    >>> arrival_times = arrival.simulate_inhomogeneous_poisson(t0, t1, 
                                               lambda_rate_fn, 
                                               integrated_lambda_rate_fn, 
                                               M)
    >>> fig, axs = plt.subplots(M+2, sharex=True, num=1, figsize=(10,8))
    >>> for m in range(M):
            axs[m].bar(arrival_times[m], np.ones_like(arrival_times[m]))

    >>> n_plot = 1000
    >>> t_plot = np.linspace(t0,t1,n_plot)
    >>> _ = axs[M].hist(arrival_times[0], bins=50, density=True)
    >>> _ = axs[M+1].plot(t_plot, lambda_rate_fn(t_plot))
    
    """

    arrival_times = [[] for _ in range(M)]

    for m in np.arange(M):
        arrival_time = t0

        while True:
            target = np.random.exponential(1)

            def f_target(t):
                return integrated_lambda_rate_fn(arrival_time, t) - target

            arrival_time = newton(f_target,
                                  x0=arrival_time,
                                  fprime=lambda_rate_fn)

            if (arrival_time > t1):
                break

            arrival_times[m].append(arrival_time)

    return arrival_times
Example #44
def kernel(gw,
           mo_energy,
           mo_coeff,
           orbs=None,
           kptlist=None,
           nw=None,
           verbose=logger.NOTE):
    '''
    GW-corrected quasiparticle orbital energies
    Returns:
        A list :  converged, mo_energy, mo_coeff
    '''
    mf = gw._scf
    assert (gw.frozen == 0 or gw.frozen is None)

    nmoa, nmob = gw.nmo
    nocca, noccb = gw.nocc
    nvira = nmoa - nocca
    nvirb = nmob - noccb

    if orbs is None:
        orbs = range(nmoa)
    if kptlist is None:
        kptlist = range(gw.nkpts)

    nkpts = gw.nkpts
    nklist = len(kptlist)
    norbs = len(orbs)

    # v_xc
    dm = np.array(mf.make_rdm1())
    v_mf = np.array(mf.get_veff())
    vj = np.array(mf.get_j(dm_kpts=dm))
    v_mf[0] = v_mf[0] - (vj[0] + vj[1])
    v_mf[1] = v_mf[1] - (vj[0] + vj[1])
    for s in range(2):
        for k in range(nkpts):
            v_mf[s, k] = reduce(
                numpy.dot,
                (mo_coeff[s, k].T.conj(), v_mf[s, k], mo_coeff[s, k]))

    # v_hf from DFT/HF density
    if gw.fc:
        exxdiv = 'ewald'
    else:
        exxdiv = None
    uhf = scf.KUHF(gw.mol, gw.kpts, exxdiv=exxdiv)
    uhf.with_df = gw.with_df
    uhf.with_df._cderi = gw.with_df._cderi
    vk = uhf.get_veff(gw.mol, dm_kpts=dm)
    vj = uhf.get_j(gw.mol, dm_kpts=dm)
    vk[0] = vk[0] - (vj[0] + vj[1])
    vk[1] = vk[1] - (vj[0] + vj[1])
    for s in range(2):
        for k in range(nkpts):
            vk[s,
               k] = reduce(numpy.dot,
                           (mo_coeff[s, k].T.conj(), vk[s, k], mo_coeff[s, k]))

    # Grids for integration on imaginary axis
    freqs, wts = _get_scaled_legendre_roots(nw)

    # Compute self-energy on imaginary axis i*[0,iw_cutoff]
    sigmaI, omega = get_sigma_diag(gw, orbs, kptlist, freqs, wts, iw_cutoff=5.)

    # Analytic continuation
    coeff_a = []
    coeff_b = []
    if gw.ac == 'twopole':
        for k in range(nklist):
            coeff_a.append(AC_twopole_diag(sigmaI[0, k], omega[0], orbs,
                                           nocca))
            coeff_b.append(AC_twopole_diag(sigmaI[1, k], omega[1], orbs,
                                           noccb))
    elif gw.ac == 'pade':
        for k in range(nklist):
            coeff_a_tmp, omega_fit_a = AC_pade_thiele_diag(
                sigmaI[0, k], omega[0])
            coeff_b_tmp, omega_fit_b = AC_pade_thiele_diag(
                sigmaI[1, k], omega[1])
            coeff_a.append(coeff_a_tmp)
            coeff_b.append(coeff_b_tmp)
        omega_fit = np.asarray((omega_fit_a, omega_fit_b))
    coeff = np.asarray((coeff_a, coeff_b))

    conv = True
    # This code does not support metals
    h**o = -99.
    lumo = 99.
    mo_energy = np.asarray(mf.mo_energy)
    for k in range(nkpts):
        if h**o < max(mo_energy[0, k][nocca - 1], mo_energy[1, k][noccb - 1]):
            h**o = max(mo_energy[0, k][nocca - 1], mo_energy[1, k][noccb - 1])
        if lumo > min(mo_energy[0, k][nocca], mo_energy[1, k][noccb]):
            lumo = min(mo_energy[0, k][nocca], mo_energy[1, k][noccb])
    ef = (h**o + lumo) / 2.

    mo_energy = np.zeros_like(np.array(mf.mo_energy))
    for s in range(2):
        for k in range(nklist):
            kn = kptlist[k]
            for p in orbs:
                if gw.linearized:
                    # linearized G0W0
                    de = 1e-6
                    ep = mf.mo_energy[s][kn][p]
                    #TODO: analytic sigma derivative
                    if gw.ac == 'twopole':
                        sigmaR = two_pole(ep - ef, coeff[s, k, :,
                                                         p - orbs[0]]).real
                        dsigma = two_pole(
                            ep - ef + de,
                            coeff[s, k, :, p - orbs[0]]).real - sigmaR.real
                    elif gw.ac == 'pade':
                        sigmaR = pade_thiele(ep - ef, omega_fit[s,
                                                                p - orbs[0]],
                                             coeff[s, k, :, p - orbs[0]]).real
                        dsigma = pade_thiele(
                            ep - ef + de, omega_fit[s, p - orbs[0]],
                            coeff[s, k, :, p - orbs[0]]).real - sigmaR.real
                    zn = 1.0 / (1.0 - dsigma / de)
                    e = ep + zn * (sigmaR.real + vk[s, kn, p, p].real -
                                   v_mf[s, kn, p, p].real)
                    mo_energy[s, kn, p] = e
                else:
                    # self-consistently solve QP equation
                    def quasiparticle(omega):
                        if gw.ac == 'twopole':
                            sigmaR = two_pole(omega - ef,
                                              coeff[s, k, :, p - orbs[0]]).real
                        elif gw.ac == 'pade':
                            sigmaR = pade_thiele(omega - ef,
                                                 omega_fit[s, p - orbs[0]],
                                                 coeff[s, k, :,
                                                       p - orbs[0]]).real
                        return omega - mf.mo_energy[s][kn][p] - (
                            sigmaR.real + vk[s, kn, p, p].real -
                            v_mf[s, kn, p, p].real)

                    try:
                        e = newton(quasiparticle,
                                   mf.mo_energy[s][kn][p],
                                   tol=1e-6,
                                   maxiter=100)
                        mo_energy[s, kn, p] = e
                    except RuntimeError:
                        conv = False
    mo_coeff = mf.mo_coeff

    if gw.verbose >= logger.DEBUG:
        numpy.set_printoptions(threshold=nmoa)
        for k in range(nkpts):
            logger.debug(gw, '  GW mo_energy spin-up @ k%d =\n%s', k,
                         mo_energy[0, k])
        for k in range(nkpts):
            logger.debug(gw, '  GW mo_energy spin-down @ k%d =\n%s', k,
                         mo_energy[1, k])
        numpy.set_printoptions(threshold=1000)

    return conv, mo_energy, mo_coeff
Example #45
def stretch_map(
    density_function: Callable[[float], float],
    positions: ndarray,
    coordinate_min: float,
    coordinate_max: float,
    geometry: str = 'None',
    coordinate: str = 'None',
) -> ndarray:
    """Stretch mapping.

    Deform a uniform particle distribution in one dimension with an
    arbitrary scalar function.

    Parameters
    ----------
    density_function
        The scalar function. This should be a function of one variable.
        It is best to vectorize it via numba.vectorize, or it will be
        slow. See notes below.
    positions
        The uniform particle positions in Cartesian form as a (N, 3)
        ndarray.
    coordinate_min
        The minimum coordinate value for the stretch mapping.
    coordinate_max
        The maximum coordinate value for the stretch mapping.
    geometry
        The geometry: either 'cartesian', 'cylindrical', or 'spherical'.
    coordinate
        The coordinate for the function. Options are: 'x', 'y', 'z',
        'r', 'phi', 'theta'.

    Returns
    -------
    ndarray
        The particle positions after the stretch mapping.

    Notes
    -----
    To make a fast numba function, write a function as if for a scalar
    value then decorate it with `@numba.vectorize([float64(float64)])`.
    For example

    >>> @numba.vectorize([float64(float64)])
    ... def my_func(x):
    ...     return 1 + np.sin(x) ** 2
    """
    if geometry == 'None':
        geometry = 'cartesian'
    geometry = geometry.lower()
    if geometry not in _GEOMETRIES:
        raise ValueError(
            '"geometry" must be in ("cartesian", "cylindrical", "spherical")')
    if coordinate == 'None':
        if geometry == 'cartesian':
            coordinate = 'x'
        elif geometry in ('cylindrical', 'spherical'):
            coordinate = 'r'
    coordinate = coordinate.lower()
    if coordinate not in ('x', 'y', 'z', 'r', 'phi', 'theta'):
        raise ValueError(
            '"coordinate" must be in ("x", "y", "z", "r", "phi", "theta")')
    if geometry == 'spherical' and coordinate == 'r':
        if coordinate_min < 0.0:
            raise ValueError('"coordinate_min" < 0.0 for radius: not physical')
        i_area_element = 3
    elif geometry == 'cylindrical' and coordinate == 'r':
        if coordinate_min < 0.0:
            raise ValueError('"coordinate_min" < 0.0 for radius: not physical')
        i_area_element = 2
    else:
        i_area_element = 1

    if i_area_element == 1:

        @numba.vectorize([float64(float64)])
        def rho_dS(x):
            rho_dS = density_function(x)
            return rho_dS

    elif i_area_element == 2:

        @numba.vectorize([float64(float64)])
        def rho_dS(x):
            rho_dS = 2 * np.pi * x * density_function(x)
            return rho_dS

    elif i_area_element == 3:

        @numba.vectorize([float64(float64)])
        def rho_dS(x):
            rho_dS = 4 * np.pi * x**2 * density_function(x)
            return rho_dS

    @numba.vectorize([float64(float64, float64)])
    def mass(x, x_min):
        _x = np.linspace(x_min, x)
        m = np.trapz(rho_dS(_x), _x)
        return m

    def func(x, x_min, x_max, x_original):
        f = mass(x, x_min) / mass(
            x_max, x_min) - (x_original - x_min) / (x_max - x_min)
        return f

    def dfunc(x, x_min, x_max, x_original):
        df = rho_dS(x) / mass(x_max, x_min)
        return df

    if geometry in ('cylindrical', 'spherical'):
        _positions: ndarray = coordinate_transform(position=positions,
                                                   geometry_from='cartesian',
                                                   geometry_to=geometry)
    else:
        _positions = np.copy(positions)

    x_original = _positions[:, 0]
    x_min = coordinate_min
    x_max = coordinate_max
    x_guess = x_original
    x_stretched = optimize.newton(func,
                                  x_guess,
                                  fprime=dfunc,
                                  args=(x_min, x_max, x_original))

    _positions[:, 0] = x_stretched
    if geometry in ('cylindrical', 'spherical'):
        positions = coordinate_transform(position=_positions,
                                         geometry_from=geometry,
                                         geometry_to='cartesian')
    else:
        positions = _positions
    return positions
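A minimal usage sketch for `stretch_map`, assuming the function above and its module-level helpers (`_GEOMETRIES`, `coordinate_transform`) are in scope; the density profile and particle count are illustrative only.

import numba
import numpy as np
from numba import float64

# Hypothetical density profile: a smooth bump on [0, 1].
@numba.vectorize([float64(float64)])
def density(x):
    return 1.0 + np.sin(np.pi * x) ** 2

# Uniform particles along x; y and z are left untouched.
positions = np.zeros((1000, 3))
positions[:, 0] = np.linspace(0.0, 1.0, 1000)

stretched = stretch_map(density, positions, 0.0, 1.0)  # defaults: cartesian, 'x'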
Example #46
0
def ImpliedVolatility(CP, marketPrice, K, T, S_0, r, initialVol=0.4):
    func = lambda sigma: BS_Call_Option_Price(CP, S_0, K, sigma, T, r) - marketPrice
    impliedVol = optimize.newton(func, initialVol, tol=1e-7)

    return impliedVol
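The snippet assumes a `BS_Call_Option_Price` defined elsewhere. A self-contained round-trip check, with a plain Black-Scholes pricer standing in (an assumption, not the original function):

import numpy as np
from scipy import optimize
from scipy.stats import norm

def BS_Call_Option_Price(CP, S_0, K, sigma, T, r):
    # Standard Black-Scholes; CP = 'c' for call, 'p' for put (assumed convention).
    d1 = (np.log(S_0 / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
    d2 = d1 - sigma * np.sqrt(T)
    if CP == 'c':
        return S_0 * norm.cdf(d1) - K * np.exp(-r * T) * norm.cdf(d2)
    return K * np.exp(-r * T) * norm.cdf(-d2) - S_0 * norm.cdf(-d1)

price = BS_Call_Option_Price('c', 100.0, 105.0, 0.25, 1.0, 0.02)
print(ImpliedVolatility('c', price, 105.0, 1.0, 100.0, 0.02))  # recovers ~0.25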
Example #47
0
def causality_p2(p1):
    density1 = Density_i(p1, baryon_density1, pressure0, baryon_density0,
                         density0)[1]
    return opt.newton(causality_i,
                      200.,
                      args=(baryon_density2, p1, baryon_density1, density1))
Example #48
0
def XIIR(values, dates):
    return newton(lambda r: xnpv(r, values, dates), 0)
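`xnpv` is not shown; a minimal version consistent with the usual XIRR convention (actual/365 discounting from the first date, an assumed convention) makes the snippet runnable:

from datetime import date
from scipy.optimize import newton

def xnpv(r, values, dates):
    # NPV with actual/365 day counting from the first date (assumed convention).
    t0 = dates[0]
    return sum(v / (1.0 + r) ** ((d - t0).days / 365.0)
               for v, d in zip(values, dates))

values = [-1000.0, 300.0, 420.0, 500.0]
dates = [date(2020, 1, 1), date(2020, 7, 1), date(2021, 1, 1), date(2021, 7, 1)]
print(XIIR(values, dates))  # annualized internal rate of return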
Example #49
0
def get_CP_TD_Law(eps, sigma_0, K, gamma, E, eps_0, Ad, m, a):

    sigma_arr = zeros_like(eps)

    eps_N_p_arr = zeros_like(eps)

    w_N_arr = zeros_like(eps)

    sigma_i = 0.0
    alpha_i = 0.0
    r_i = 0.0
    eps_N_p_i = 0.0

    w_i = 0.0
    z_i = 0.0

    for i in range(1, len(eps)):

        eps_i = eps[i]
        H = get_heviside(sigma_i)
        sigma_i = (1 - H * w_i) * E * (eps_i - eps_N_p_i)

        h = max(0., (sigma_0 + K * r_i ** (m + 1.0)))
        f_trial = abs(sigma_i - gamma * alpha_i) - h

        # plasticity yield function
        if f_trial > 1e-6:

            def f_dw_n(delta_lamda_p):
                return delta_lamda_p - f_trial / (
                    E + abs((m + 1.0) * r_i * K ** m) + gamma)
#             f_dw_n2 = lambda dw_n: 1 + (f_trial * abs((m + 1.0) * K ** m)) /\
#                 (E + abs((m + 1.0) * delta_lamda_p * K ** m) + gamma)**2.0
            delta_lamda_p = newton(
                f_dw_n, 0., tol=1e-6, maxiter=10)

#             delta_lamda_p = f_trial / \
#                 (E + abs((m + 1.0) * r_i * K ** m) + gamma)
            eps_N_p_i = eps_N_p_i + delta_lamda_p * \
                sign(sigma_i - gamma * alpha_i)
            r_i = r_i + delta_lamda_p
            alpha_i = alpha_i + \
                (delta_lamda_p * sign(sigma_i - gamma * alpha_i)) / (gamma * a)

#         Y_0 = 0.5 * E * eps_0**2.0
#         Y_N = 0.5 * H * E * (eps_i - eps_N_p_i)**2.0
#         Z_N = (1.0 / Ad) * (- z_i / (1 + z_i))
#         f_w_trial = Y_N - (Y_0 + Z_N)
#
#         # damage threshold
#         if f_w_trial > 1e-6:
#
#             #             delta_lamda_w = E * eps_i * Ad * \
#             #                 (1 + z_i)**2.0 * (eps[i] - eps[i - 1])
#
#             f_dw_n = lambda dw_n:  dw_n - E * \
#                 (eps_i) * (eps_i - eps[i - 1]) * Ad * (1 + z_i - dw_n) ** 2
#             f_dw_n2 = lambda dw_n: 1 + 2 * E * \
#                 (eps_i) * (eps_i - eps[i - 1]) * Ad * (1 + z_i - dw_n)
#             dw_n = newton(f_dw_n, 0., fprime=f_dw_n2, tol=1e-6, maxiter=50)
#
#             w_i = w_i + dw_n
#             z_i = z_i - dw_n

        def Z_N(z_N):
            return 1. / Ad * (-z_N) / (1 + z_N)
        Y_N = 0.5 * H * E * eps_i ** 2
        Y_0 = 0.5 * E * eps_0 ** 2
        f = Y_N - (Y_0 + Z_N(z_i))

        if f > 1e-6:
            def f_w(Y): return 1 - 1. / (1 + Ad * (Y - Y_0))

            w_i = f_w(Y_N)
            z_i = - w_i

        sigma_i = (1 - H * w_i) * E * (eps_i - eps_N_p_i)

        sigma_arr[i] = sigma_i
        eps_N_p_arr[i] = eps_N_p_i
        w_N_arr[i] = w_i

    return eps, sigma_arr, eps_N_p_arr, w_N_arr
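Stripped of damage and kinematic hardening, the plastic corrector above is a standard return-mapping step: form a trial stress, check the yield function, and solve for the plastic multiplier with `newton`. A sketch with made-up material constants (not the model above):

import numpy as np
from scipy.optimize import newton

E, sigma_y, H = 200e3, 250.0, 10e3  # illustrative modulus, yield stress, hardening
eps_p = r = 0.0                     # plastic strain, accumulated plastic strain

for eps in np.linspace(0.0, 0.005, 6):
    sigma_trial = E * (eps - eps_p)
    f_trial = abs(sigma_trial) - (sigma_y + H * r)
    if f_trial > 0.0:
        # Plastic multiplier from the consistency condition; linear hardening
        # makes this closed form, but newton mirrors the nonlinear case above.
        dlam = newton(lambda x, ft=f_trial: ft - x * (E + H), 0.0)
        eps_p += dlam * np.sign(sigma_trial)
        r += dlam
    print(eps, E * (eps - eps_p))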
Example #50
0
    def F(self, F_t, dt_t):
        """
        Solve the Green-Ampt cumulative infiltration equation __EqnF.
        """
        F_t_next = lambda F: self.__EqnF(F_t, dt_t, F)
        return newton(F_t_next, 3)
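The private `__EqnF` is not shown; the implicit relation it presumably encodes is the standard Green-Ampt update F' = F + Ks*dt + psi_dtheta*ln((F' + psi_dtheta)/(F + psi_dtheta)). A self-contained sketch of that solve, with illustrative soil parameters:

import numpy as np
from scipy.optimize import newton

Ks = 1.0          # illustrative saturated conductivity [cm/h]
psi_dtheta = 5.0  # illustrative suction head x moisture deficit [cm]

def next_F(F_t, dt_t):
    eqn = lambda F: (F - F_t - Ks * dt_t
                     - psi_dtheta * np.log((F + psi_dtheta) / (F_t + psi_dtheta)))
    return newton(eqn, F_t + Ks * dt_t)  # start from the explicit estimate

print(next_F(0.5, 0.25))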
Example #51
0
import numpy as np
from scipy import optimize

def f(x):
    return np.sin(np.cos(np.exp(x)))

def df(x):
    return -np.exp(x) * np.cos(np.cos(np.exp(x))) * np.sin(np.exp(x))

root = optimize.newton(f, -0.1, fprime=df)

print("The obtained root is", root)
print("The value of the function at the calculated root", f(root))
print("The answer changes with the initial guess because the Newton-Raphson "
      "method converges to a root that depends on the starting point")
Example #52
0
    def run_one_step(self, dt, current_time=0.0, runoff_rate=None, **kwds):
        """Calculate water flow for a time period `dt`.
        """
        # Handle runoff rate
        if runoff_rate is None:
            runoff_rate = self.runoff_rate

        # If it's our first iteration, or if the topography may be changing,
        # do flow routing and calculate square root of slopes at links
        if self.changing_topo or self.first_iteration:

            # Calculate the ground-surface slope
            self.slope[self.grid.active_links] = \
                self._grid.calc_grad_at_link(self.elev)[self._grid.active_links]

            # Take square root of slope magnitude for use in velocity eqn
            self.sqrt_slope = np.sqrt(np.abs(self.slope))

            # Re-route flow, which gives us the downstream-to-upstream
            # ordering
            self.flow_accum.run_one_step()
            self.nodes_ordered = self.grid.at_node['flow__upstream_node_order']
            self.flow_lnks = self.grid.at_node['flow__links_to_receiver_nodes']

            # (Re)calculate, for each node, sum of sqrt(gradient) x width
            self.grad_width_sum[:] = 0.0
            for i in range(self.flow_lnks.shape[1]):
                self.grad_width_sum[:] += (
                    self.sqrt_slope[self.flow_lnks[:, i]] *
                    self._grid.width_of_face[self.grid.face_at_link[
                        self.flow_lnks[:, i]]])

            # Calculate values of alpha, which is defined as
            #
            #   $\alpha = \frac{\Sigma W S^{1/2} \Delta t}{A C_r}$
            cores = self.grid.core_nodes
            self.alpha[cores] = (
                self.vel_coef * self.grad_width_sum[cores] * dt /
                (self.grid.area_of_cell[self.grid.cell_at_node[cores]]))

        # Zero out inflow discharge
        self.disch_in[:] = 0.0

        # Upstream-to-downstream loop
        for i in range(len(self.nodes_ordered) - 1, -1, -1):
            n = self.nodes_ordered[i]
            if self.grid.status_at_node[n] == 0:

                # Solve for new water depth
                aa = self.alpha[n]
                cc = self.depth[n]
                ee = ((dt * runoff_rate) +
                      (dt * self.disch_in[n] /
                       self.grid.area_of_cell[self.grid.cell_at_node[n]]))
                self.depth[n] = newton(water_fn,
                                       self.depth[n],
                                       args=(aa, self.weight, cc,
                                             self.depth_exp, ee))

                # Calc outflow
                Heff = (self.weight * self.depth[n] + (1.0 - self.weight) * cc)
                outflow = (self.vel_coef * (Heff**self.depth_exp) *
                           self.grad_width_sum[n]
                           )  # this is manning/chezy/darcy

                # Send flow downstream. Here we take total inflow discharge
                # and partition it among the node's neighbors. For this, we use
                # the flow director's "proportions" array, which contains, for
                # each node, the proportion of flow that heads out toward each
                # of its N neighbors. The proportion is zero if the neighbor is
                # uphill; otherwise, it is S^1/2 / sum(S^1/2). If for example
                # we have a raster grid, there will be four neighbors and four
                # proportions, some of which may be zero and some between 0 and
                # 1.
                self.disch_in[self.grid.adjacent_nodes_at_node[n]] += (
                    outflow * self.flow_accum.flow_director.proportions[n])
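The partitioning rule in the comment above is easy to check in isolation (a toy four-neighbor node, not Landlab's actual flow director):

import numpy as np

# Slopes to a node's four neighbors; negative means the neighbor is uphill.
slopes = np.array([0.04, 0.01, -0.02, 0.09])
sqrt_s = np.sqrt(np.clip(slopes, 0.0, None))  # uphill neighbors contribute zero
proportions = sqrt_s / sqrt_s.sum()
print(proportions)  # [0.333..., 0.166..., 0., 0.5]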
Example #53
0
    def _buildCurveUsing1DSolver(self):
        ''' Construct the discount curve using a bootstrap approach. This is
        the non-linear slower method that allows the user to choose a number
        of interpolation approaches between the swap rates and other rates. It
        involves the use of a solver. '''

        self._interpolator = FinInterpolator(self._interpType)
        self._times = np.array([])
        self._dfs = np.array([])

        # time zero is now.
        tmat = 0.0
        dfMat = 1.0
        self._times = np.append(self._times, 0.0)
        self._dfs = np.append(self._dfs, dfMat)
        self._interpolator.fit(self._times, self._dfs)

        for depo in self._usedDeposits:
            dfSettle = self.df(depo._startDate)
            dfMat = depo._maturityDf() * dfSettle
            tmat = (depo._maturityDate - self._valuationDate) / gDaysInYear
            self._times = np.append(self._times, tmat)
            self._dfs = np.append(self._dfs, dfMat)
            self._interpolator.fit(self._times, self._dfs)

        oldtmat = tmat

        for fra in self._usedFRAs:

            tset = (fra._startDate - self._valuationDate) / gDaysInYear
            tmat = (fra._maturityDate - self._valuationDate) / gDaysInYear

            # if both dates are after the previous FRA/FUT then need to
            # solve for 2 discount factors simultaneously using root search

            if tset < oldtmat and tmat > oldtmat:
                dfMat = fra.maturityDf(self)
                self._times = np.append(self._times, tmat)
                self._dfs = np.append(self._dfs, dfMat)
            else:
                self._times = np.append(self._times, tmat)
                self._dfs = np.append(self._dfs, dfMat)
                argtuple = (self, self._valuationDate, fra)
                dfMat = optimize.newton(_g,
                                        x0=dfMat,
                                        fprime=None,
                                        args=argtuple,
                                        tol=swaptol,
                                        maxiter=50,
                                        fprime2=None)

        for swap in self._usedSwaps:
            # I use the lastPaymentDate in case a date has been adjusted fwd
            # over a holiday as the maturity date is usually not adjusted CHECK
            maturityDate = swap._fixedLeg._paymentDates[-1]
            tmat = (maturityDate - self._valuationDate) / gDaysInYear

            self._times = np.append(self._times, tmat)
            self._dfs = np.append(self._dfs, dfMat)

            argtuple = (self, self._valuationDate, swap)

            dfMat = optimize.newton(_f,
                                    x0=dfMat,
                                    fprime=None,
                                    args=argtuple,
                                    tol=swaptol,
                                    maxiter=50,
                                    fprime2=None,
                                    full_output=False)

        if self._checkRefit is True:
            self._checkRefits(1e-10, swaptol, 1e-5)
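In the simplest setting, bootstrapping with a solver reduces to one scalar root-find per pillar: the candidate discount factor must make the par instrument price to zero. A toy annual-coupon version (illustrative rates; no day counts, settlement lags, or interpolation):

from scipy.optimize import newton

swap_rates = {1: 0.020, 2: 0.023, 3: 0.025}  # toy annual par swap rates
dfs = {0: 1.0}

for mat, s in swap_rates.items():
    def par_error(df_last, mat=mat, s=s):
        # Earlier discount factors are already bootstrapped; only the last is unknown.
        pv01 = sum(dfs[t] for t in range(1, mat)) + df_last
        return s * pv01 + df_last - 1.0  # par swap must price to zero
    dfs[mat] = newton(par_error, dfs[max(dfs)], tol=1e-12)

print(dfs)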
Example #54
0
    def _buildCurveLinearSwapRateInterpolation(self):
        ''' Construct the discount curve using a bootstrap approach. This is
        the linear swap rate method that is fast and exact as it does not
        require the use of a solver. It is also market standard. '''

        self._interpolator = FinInterpolator(self._interpType)

        self._times = np.array([])
        self._dfs = np.array([])

        # time zero is now.
        tmat = 0.0
        dfMat = 1.0
        self._times = np.append(self._times, 0.0)
        self._dfs = np.append(self._dfs, dfMat)

        for depo in self._usedDeposits:
            dfSettle = self.df(depo._startDate)
            dfMat = depo._maturityDf() * dfSettle
            tmat = (depo._maturityDate - self._valuationDate) / gDaysInYear
            self._times = np.append(self._times, tmat)
            self._dfs = np.append(self._dfs, dfMat)
            self._interpolator.fit(self._times, self._dfs)

        oldtmat = tmat

        for fra in self._usedFRAs:

            tset = (fra._startDate - self._valuationDate) / gDaysInYear
            tmat = (fra._maturityDate - self._valuationDate) / gDaysInYear

            # if both dates are after the previous FRA/FUT then need to
            # solve for 2 discount factors simultaneously using root search

            if tset < oldtmat and tmat > oldtmat:
                dfMat = fra.maturityDf(self)
                self._times = np.append(self._times, tmat)
                self._dfs = np.append(self._dfs, dfMat)
                self._interpolator.fit(self._times, self._dfs)
            else:
                self._times = np.append(self._times, tmat)
                self._dfs = np.append(self._dfs, dfMat)
                self._interpolator.fit(self._times, self._dfs)

                argtuple = (self, self._valuationDate, fra)
                dfMat = optimize.newton(_g,
                                        x0=dfMat,
                                        fprime=None,
                                        args=argtuple,
                                        tol=swaptol,
                                        maxiter=50,
                                        fprime2=None)

        if len(self._usedSwaps) == 0:
            if self._checkRefit is True:
                self._checkRefits(1e-10, swaptol, 1e-5)
            return

        #######################################################################
        # ADD SWAPS TO CURVE
        #######################################################################

        # Find where the FRAs and Depos go up to as this bit of curve is done
        foundStart = False
        lastDate = self._valuationDate
        if len(self._usedDeposits) != 0:
            lastDate = self._usedDeposits[-1]._maturityDate

        if len(self._usedFRAs) != 0:
            lastDate = self._usedFRAs[-1]._maturityDate

        # We use the longest swap assuming it has a superset of ALL of the
        # swap flow dates used in the curve construction
        longestSwap = self._usedSwaps[-1]
        couponDates = longestSwap._adjustedFixedDates
        numFlows = len(couponDates)

        # Find where first coupon without discount factor starts
        startIndex = 0
        for i in range(0, numFlows):
            if couponDates[i] > lastDate:
                startIndex = i
                foundStart = True
                break

        if foundStart is False:
            raise FinError("Found start is false. Swap payments inside FRAs")

        swapRates = []
        swapTimes = []

        # I use the last coupon date for the swap rate interpolation as this
        # may be different from the maturity date due to a holiday adjustment
        # and the swap rates need to align with the coupon payment dates
        for swap in self._usedSwaps:
            swapRate = swap._fixedCoupon
            maturityDate = swap._adjustedFixedDates[-1]
            tswap = (maturityDate - self._valuationDate) / gDaysInYear
            swapTimes.append(tswap)
            swapRates.append(swapRate)

        interpolatedSwapRates = [0.0]
        interpolatedSwapTimes = [0.0]

        for dt in couponDates[1:]:
            swapTime = (dt - self._valuationDate) / gDaysInYear
            swapRate = np.interp(swapTime, swapTimes, swapRates)
            interpolatedSwapRates.append(swapRate)
            interpolatedSwapTimes.append(swapTime)

        # Do I need this line ?
        interpolatedSwapRates[0] = interpolatedSwapRates[1]

        accrualFactors = longestSwap._fixedYearFracs

        acc = 0.0
        df = 1.0
        pv01 = 0.0
        dfSettle = self.df(longestSwap._startDate)

        for i in range(1, startIndex):
            dt = couponDates[i]
            df = self.df(dt)
            acc = accrualFactors[i - 1]
            pv01 += acc * df

        for i in range(startIndex, numFlows):

            dt = couponDates[i]
            tmat = (dt - self._valuationDate) / gDaysInYear
            swapRate = interpolatedSwapRates[i]
            acc = accrualFactors[i - 1]
            pv01End = (acc * swapRate + 1.0)

            dfMat = (dfSettle - swapRate * pv01) / pv01End

            self._times = np.append(self._times, tmat)
            self._dfs = np.append(self._dfs, dfMat)
            self._interpolator.fit(self._times, self._dfs)

            pv01 += acc * dfMat

        if self._checkRefit is True:
            self._checkRefits(1e-10, swaptol, 1e-5)
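The reason this method needs no solver: once a swap rate is known (by interpolation) at every coupon date, each new discount factor follows in closed form from the par condition, exactly as in the `dfMat = (dfSettle - swapRate * pv01) / pv01End` line above. A toy version with annual accruals and dfSettle = 1 (illustrative rates):

coupon_swap_rates = [0.020, 0.023, 0.025, 0.026]  # interpolated to each coupon date
pv01 = 0.0
dfs = []
for s in coupon_swap_rates:
    df = (1.0 - s * pv01) / (1.0 + s)  # par condition solved directly
    pv01 += df
    dfs.append(df)
print(dfs)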
Example #55
0
def convolutional_barycenter_cpu(Hv, reg, alpha, stabThresh=1e-30, niter=1500, tol=1e-9, sharpening=False, verbose=False):
    """Main function solving the Wasserstein barycenter problem on the CPU.

    Arguments:
        Hv {ndarray} -- set of input distributions
        reg {float} -- regularization term "gamma", positive, generally equal to size of space/40
        alpha {list} -- set of weights

    Keyword Arguments:
        stabThresh {float} -- stabilization threshold to prevent division by 0 (default: {1e-30})
        niter {int} -- maximum number of loop iterations (default: {1500})
        tol {float} -- convergence tolerance at which iterations stop (default: {1e-9})
        sharpening {bool} -- whether or not entropic sharpening is used (default: {False})
        verbose {bool} -- verbose option

    Returns:
        ndarray -- solution of the weighted Wasserstein barycenter problem
    """

    def K(x):
        
        return gaussian_filter(x,sigma=reg)
    
    def to_find_root(barycenter, H0, beta):
        return entropy(barycenter**beta) - H0
    
    alpha = np.array(alpha)/np.array(alpha).sum()
    Hv = np.array(Hv)
    # weighted total mass, used to rescale the barycenter at the end
    mean_weights = sum(Hv[i].sum() * alpha[i] for i in range(len(Hv)))
    for i in range(len(Hv)):

        Hv[i] = (Hv[i]-Hv[i].min())/Hv[i].sum()
        
    entropy_max = max_entropy(Hv)
    
    v = np.ones(Hv.shape)
    Kw = np.ones(Hv.shape)
    barycenter = np.zeros(Hv[0].shape)
    
    change = 1
    for j in range(niter):
        t0 = time.time()
        barycenterOld = barycenter
        
        barycenter = np.zeros_like(Hv[0, :, :])
        for i in range(Hv.shape[0]):

            
            Kw[i,:,:] = K(Hv[i, :, :] / np.maximum(stabThresh,K(v[i, :, :])) )
            
            
            barycenter += alpha[i] * np.log(np.maximum(stabThresh, v[i, :, :]*Kw[i, :, :]))
            

        barycenter = np.exp(barycenter)  
        change = np.sum(np.abs(barycenter-barycenterOld))
        
        if sharpening:
            if entropy(barycenter) > entropy_max:
                beta = newton(
                    lambda beta: to_find_root(barycenter, entropy_max, beta),
                    1, tol=1e-6)
                if beta < 0:
                    beta = 1
            else:
                beta = 1
                
            barycenter = barycenter**beta
        
        for i in range(Hv.shape[0]):
            v[i, :, :] =  barycenter / np.maximum(stabThresh, Kw[i, :, :])
            
        if verbose:
            print("iter:", j, "change:", change, "time:", time.time() - t0)
        if change < tol:
            break

    return barycenter*mean_weights
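A usage sketch, assuming the function above and its helpers (`entropy`, `max_entropy`, `gaussian_filter`) are importable; the inputs are two illustrative Gaussian blobs, and `reg` follows the docstring's size/40 rule of thumb:

import numpy as np

n = 64
x, y = np.meshgrid(np.arange(n), np.arange(n))
H1 = np.exp(-((x - 20.0) ** 2 + (y - 20.0) ** 2) / 50.0)
H2 = np.exp(-((x - 44.0) ** 2 + (y - 44.0) ** 2) / 50.0)

bary = convolutional_barycenter_cpu([H1, H2], reg=n / 40.0, alpha=[0.5, 0.5])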
Example #56
0
def kernel(gw,
           mo_energy,
           mo_coeff,
           td_e,
           td_xy,
           eris=None,
           orbs=None,
           verbose=logger.NOTE):
    '''GW-corrected quasiparticle orbital energies

    Returns:
        A list :  converged, mo_energy, mo_coeff
    '''
    # mf must be DFT; for HF use xc = 'hf'
    mf = gw._scf
    assert (isinstance(mf, (dft.rks.RKS, dft.uks.UKS, dft.roks.ROKS,
                            dft.rks_symm.RKS, dft.uks_symm.UKS,
                            dft.rks_symm.ROKS)))
    assert (gw.frozen == 0 or gw.frozen is None)

    if eris is None:
        eris = gw.ao2mo(mo_coeff)
    if orbs is None:
        orbs = range(gw.nmo)

    v_mf = mf.get_veff() - mf.get_j()
    v_mf = reduce(numpy.dot, (mo_coeff.T, v_mf, mo_coeff))

    nocc = gw.nocc
    nmo = gw.nmo
    nvir = nmo - nocc

    vk_oo = -np.einsum('piiq->pq', eris.oooo)
    vk_ov = -np.einsum('iqpi->pq', eris.ovoo)
    vk_vv = -np.einsum('ipqi->pq', eris.ovvo).conj()
    vk = np.block([[vk_oo, vk_ov], [vk_ov.T, vk_vv]])

    nexc = len(td_e)
    # factor of 2 for normalization, see tdscf/rhf.py
    td_xy = 2 * np.asarray(td_xy)  # (nexc, 2, nocc, nvir)
    td_z = np.sum(td_xy, axis=1).reshape(nexc, nocc, nvir)
    tdm_oo = einsum('via,iapq->vpq', td_z, eris.ovoo)
    tdm_ov = einsum('via,iapq->vpq', td_z, eris.ovov)
    tdm_vv = einsum('via,iapq->vpq', td_z, eris.ovvv)
    tdm = []
    for oo, ov, vv in zip(tdm_oo, tdm_ov, tdm_vv):
        tdm.append(np.block([[oo, ov], [ov.T, vv]]))
    tdm = np.asarray(tdm)

    conv = True
    mo_energy = np.zeros_like(gw._scf.mo_energy)
    for p in orbs:
        tdm_p = tdm[:, :, p]
        if gw.linearized:
            ep = gw._scf.mo_energy[p]
            sigma = get_sigma_element(gw, ep, tdm_p, tdm_p, td_e).real
            dsigma_dw = get_sigma_deriv_element(gw, ep, tdm_p, tdm_p,
                                                td_e).real
            zn = 1.0 / (1 - dsigma_dw)
            mo_energy[p] = ep + zn * (sigma.real + vk[p, p] - v_mf[p, p])
        else:

            def quasiparticle(omega):
                sigma = get_sigma_element(gw, omega, tdm_p, tdm_p, td_e)
                return omega - gw._scf.mo_energy[p] - (sigma.real + vk[p, p] -
                                                       v_mf[p, p])

            try:
                mo_energy[p] = newton(quasiparticle,
                                      gw._scf.mo_energy[p],
                                      tol=1e-6,
                                      maxiter=100)
            except RuntimeError:
                conv = False
                mo_energy[p] = gw._scf.mo_energy[p]
                logger.warn(
                    gw, 'Root finding for GW eigenvalue %s did not converge. '
                    'Setting it equal to the reference MO energy.' % (p))
    mo_coeff = gw._scf.mo_coeff

    if gw.verbose >= logger.DEBUG:
        numpy.set_printoptions(threshold=nmo)
        logger.debug(gw, '  GW mo_energy =\n%s', mo_energy)
        numpy.set_printoptions(threshold=1000)

    return conv, mo_energy, mo_coeff
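The quasiparticle equation solved above is a scalar fixed-point problem, omega = e_mf + Re Sigma(omega) + (vk - v_mf). A toy one-pole self-energy shows both branches, the newton solve and the linearized Z-factor shortcut (all numbers illustrative, not a real GW calculation):

from scipy.optimize import newton

eps_mf, vk_minus_vmf = -0.40, 0.05  # toy mean-field energy and exchange correction
a, pole = 0.02, 0.30                # toy pole strength and position

def sigma(omega):
    return a / (omega - pole)

qp = newton(lambda w: w - eps_mf - (sigma(w) + vk_minus_vmf), eps_mf)

# Linearized alternative, as in the `linearized` branch above.
de = 1e-6
dsigma_dw = (sigma(eps_mf + de) - sigma(eps_mf)) / de
Z = 1.0 / (1.0 - dsigma_dw)
qp_lin = eps_mf + Z * (sigma(eps_mf) + vk_minus_vmf)
print(qp, qp_lin)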
Example #57
0
def v_terminal(D, rhop, rho, mu, Method=None):
    r'''Calculates terminal velocity of a falling sphere using any drag
    coefficient method supported by `drag_sphere`. The laminar solution for
    Re < 0.01 is first tried; if the resulting terminal velocity does not
    put it in the laminar regime, a numerical solution is used.

    .. math::
        v_t = \sqrt{\frac{4 g d_p (\rho_p-\rho_f)}{3 C_D \rho_f }}

    Parameters
    ----------
    D : float
        Diameter of the sphere, [m]
    rhop : float
        Particle density, [kg/m^3]
    rho : float
        Density of the surrounding fluid, [kg/m^3]
    mu : float
        Viscosity of the surrounding fluid [Pa*s]
    Method : string, optional
        A string of the function name to use, as in the dictionary
        drag_sphere_correlations

    Returns
    -------
    v_t : float
        Terminal velocity of falling sphere [m/s]

    Notes
    -----
    As there are no correlations implemented for Re > 1E6, an error will be
    raised if the numerical solver seeks a solution above that limit.

    The laminar solution is given in [1]_ and is:

    .. math::
        v_t = \frac{g d_p^2 (\rho_p - \rho_f)}{18 \mu_f}

    Examples
    --------
    >>> v_terminal(D=70E-6, rhop=2600., rho=1000., mu=1E-3)
    0.004142497244531304

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    .. [2] Rushton, Albert, Anthony S. Ward, and Richard G. Holdich.
       Solid-Liquid Filtration and Separation Technology. 1st edition. Weinheim ;
       New York: Wiley-VCH, 1996.
    '''
    '''The following would be the ideal implementation. The actual function is
    optimized for speed, not readability
    def err(V):
        Re = rho*V*D/mu
        Cd = Barati_high(Re)
        V2 = (4/3.*g*D*(rhop-rho)/rho/Cd)**0.5
        return (V-V2)
    return fsolve(err, 1.)'''
    v_lam = g * D**2 * (rhop - rho) / (18 * mu)
    Re_lam = Reynolds(V=v_lam, D=D, rho=rho, mu=mu)
    if Re_lam < 0.01:
        return v_lam

    Re_almost = rho * D / mu
    main = 4 / 3. * g * D * (rhop - rho) / rho
    V_max = 1E6 / rho / D * mu  # where the correlation breaks down, Re=1E6

    def err(V):
        Cd = drag_sphere(Re_almost * V, Method=Method)
        return V - (main / Cd)**0.5

    # Begin the solver with 1/100 th the velocity possible at the maximum
    # Reynolds number the correlation is good for
    return float(newton(err, V_max / 100, tol=1E-12))
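The check-then-solve pattern is easy to reproduce with any explicit drag law. A sketch using the Schiller-Naumann correlation, Cd = 24/Re * (1 + 0.15*Re**0.687), valid for Re below about 1000 (the correlation choice is an assumption here, independent of `drag_sphere`):

import numpy as np
from scipy.optimize import newton

g = 9.80665

def v_terminal_sn(D, rhop, rho, mu):
    v_lam = g * D ** 2 * (rhop - rho) / (18.0 * mu)  # Stokes' law first
    if rho * v_lam * D / mu < 0.01:
        return v_lam

    def err(V):
        Re = rho * V * D / mu
        Cd = 24.0 / Re * (1.0 + 0.15 * Re ** 0.687)  # Schiller-Naumann
        return V - np.sqrt(4.0 / 3.0 * g * D * (rhop - rho) / (rho * Cd))

    return newton(err, v_lam, tol=1e-12)  # Stokes velocity as starting guess

print(v_terminal_sn(D=70e-6, rhop=2600.0, rho=1000.0, mu=1e-3))  # near the docstring value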
Example #58
0
# create list of functions and initial values
funcs = [
    aSillyFunction, math.sin, lambda x: math.log2(x) - 128,
    createPolynomial((1, 0, 0, 0, 0, 0, 8)),
    createPolynomial((3, 0, -2, 2))
]

x0s = [1, 2, 1, 1, 0]

xs1 = [v1, v2, 'NA', 'NA', 'NA']
xs2 = []
xs3 = []
# How does it compare to what newton in the scipy optimization module returns?
# How about fsolve in the same module?
for f, x0 in zip(funcs, x0s):
    xs2.append(optimize.newton(f, x0))
    xs3.append(optimize.fsolve(f, x0)[0])

rownames = ['x^x-10', 'sin(x)', 'log2(x)-128', '8*x^6 + 1', '2*x^3 -2*x^2 + 3']
colnames = ['fsolve', 'optimize.newton', 'optimize.fsolve']

data = np.array([xs1, xs2, xs3])
data = np.transpose(data)

pandas.DataFrame(data, rownames, colnames)

# What is the advantage of using the fprime parameter?
# (hint: try it with the function lambda x: x**4).

# misc.derivative(lambda x: x**4,1,dx=1e-6)
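One concrete answer to the fprime question: on the multiple root of x**4, passing fprime gives the true Newton step, while omitting it makes `optimize.newton` fall back to the secant method. Both converge only linearly on a multiple root, but at different rates, which the iteration counts expose:

from scipy import optimize

f = lambda x: x ** 4
fp = lambda x: 4 * x ** 3

for fprime in (None, fp):
    root, info = optimize.newton(f, 1.0, fprime=fprime, maxiter=500,
                                 full_output=True, disp=False)
    print("fprime" if fprime else "secant", info.iterations, root)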
Example #59
0
import math as m
from scipy import optimize

def f(x):
    return m.sin(m.cos(m.exp(x)))

def fprime(x):
    return -m.exp(x) * m.sin(m.exp(x)) * m.cos(m.cos(m.exp(x)))

root = optimize.newton(f, -0.1, fprime=fprime)
print("Value of root", root)
print("Function value at root", f(root))
"""
The newton raphsion gave a different root than bisection method when -1 is initial guess.
for -1
Value of root 9.179198883610521
Function value at root -5.921874465645097e-12

The newton raphsion gave same root as bisection method when -0.1 is initial guess.
for -0.1
Value of root 0.4515827052894549
Function value at root 6.123233995736766e-17

The method drives away or toward a root depending upon the initial guess and value of derivative at the initial guess.
"""
Example #60
0
if __name__ == '__main__':
    sample_list = [30, 60]
    fname_sample = "MPF.dat"
    F = open(fname_sample, "w")
    for N_sample in sample_list:
        fname = "sample" + str(N_sample) + "-MPF.dat"
        f = open(fname, "w")
        J_model_list = np.zeros(n_estimation)
        for nf in range(n_estimation):
            J_data = 1.0  # =theta_sample
            #SAMPLING-Tmat
            c_mean_data = 0.0
            for n in range(N_sample):
                x = get_sample(J_data)
                if n == 0:
                    X_sample = np.copy(x)
                else:
                    X_sample = np.vstack((X_sample, np.copy(x)))
            J_newton = newton(myob, 0.5, args=(X_sample, ))
            J_model_list[nf] = J_newton
            f.write(
                str(J_newton) + "  " + str(np.abs(J_newton - J_data)) + "\n")
        f.write("#" + str(N_sample) + "  " + str(np.mean(J_model_list)) +
                "  " + str(np.std(J_model_list)) + "\n")
        f.close()
        F.write(
            str(N_sample) + "  " + str(np.mean(J_model_list)) + "  " +
            str(np.std(J_model_list)) + "\n")
    F.close()