Example 1
def get_mete_rad(S, N, beta=None, beta_dict={}):
    """Use beta to generate SAD predicted by the METE

    Keyword arguments:
    S -- the number of species
    N -- the total number of individuals
    beta -- allows input of beta by user if it has already been calculated
    beta_dict -- optional dictionary passed through to get_beta

    """

    assert S > 1, "S must be greater than 1"
    assert N > 0, "N must be greater than 0"
    assert S/N < 1, "N must be greater than S"

    if beta is None:
        beta = get_beta(S, N, beta_dict=beta_dict)
    p = e ** -beta
    abundance = list(empty([S]))
    rank = list(range(1, int(S) + 1))
    rank.reverse()

    if p >= 1:
        for i in range(0, int(S)):
            y = lambda x: trunc_logser_cdf(x, p, N) - (rank[i]-0.5) / S
            if y(1) > 0:
                abundance[i] = 1
            else:
                abundance[i] = int(round(bisect(y,1,N)))
    else:
        for i in range(0, int(S)):
            y = lambda x: logser.cdf(x,p) / logser.cdf(N,p) - (rank[i]-0.5) / S
            abundance[i] = int(round(bisect(y, 0, N)))
    return (abundance, p)
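For reference, the same rank-CDF inversion can be sketched self-contained with the plain (untruncated) log-series from scipy.stats; get_beta and trunc_logser_cdf above are external helpers, and the parameter values here are illustrative only.

import numpy as np
from scipy.stats import logser
from scipy.optimize import bisect

S, p = 10, 0.95                          # illustrative values
ranks = np.arange(S, 0, -1)              # rank S .. 1; higher rank -> higher quantile
abundance = [int(round(bisect(lambda x, r=r: logser.cdf(x, p) - (r - 0.5) / S,
                              0.5, 1e4)))
             for r in ranks]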
Example 2
def compute_M(x, A):
    '''Computes Mach number in the nozzle.

    Parameters
    ----------
    x : 1D array of float
        x grid
    A : 1D array of float
        nozzle area at each grid point

    Returns
    -------
    M : 1D array of float
        Mach number at each grid point
    '''
    
    M = numpy.zeros_like(x)
    
    for i, x_val in enumerate(x):
        if x_val < 1.5:
            M[i] = optimize.bisect(f, 1e-6, 1, args=(A[i],))
        elif x_val == 1.5:
            M[i] = 1
        else:
            M[i] = optimize.bisect(f, 1, 50, args=(A[i],))
    
    return M
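The residual f above is defined elsewhere. A self-contained sketch of the same pattern, assuming the standard isentropic area-Mach relation with gamma = 1.4 and a throat (A/A* = 1) at x = 1.5, could look like this; the function name f and the value A_ratio are illustrative stand-ins.

import numpy as np
from scipy import optimize

gamma = 1.4

def f(M, A):
    """Residual of the isentropic area-Mach relation: A(M)/A* - A."""
    return (1.0 / M) * ((2.0 / (gamma + 1.0))
                        * (1.0 + 0.5 * (gamma - 1.0) * M**2))**(
                            (gamma + 1.0) / (2.0 * (gamma - 1.0))) - A

A_ratio = 2.0
M_subsonic = optimize.bisect(f, 1e-6, 1.0, args=(A_ratio,))   # root below M = 1
M_supersonic = optimize.bisect(f, 1.0, 50.0, args=(A_ratio,)) # root above M = 1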
Example 3
def choque(vn, yn, tn):
    # 'choque' = collision: a particle bouncing on an oscillating surface.
    A = 1          # surface oscillation amplitude
    g = -1         # gravity
    t_0 = 0
    n = 0.15       # restitution parameter
    w = 1.66       # surface angular frequency
    e = 0.0001     # small time offset used when bracketing
    ys = lambda x: A*np.sin(w*(x))                           # surface position
    yp = lambda x: (((1/2.)*g*(x-tn)**2)+vn*(x-tn)+yn)       # particle position
    y = lambda x: yp(x)-ys(x)                                # separation
    vs = lambda x: A*w*np.cos(w*(x))                         # surface velocity
    vp = lambda x: (g*(x-tn)+vn)                             # particle velocity
    das = lambda x: -A*w**3*np.cos(w*(x))                    # surface jerk
    a = -vn/g+tn                                 # time at the particle's apex
    b = (-vn-(vn**2-2*g*(yn+A))**(0.5))/g+tn     # time the particle reaches y = -A
    if vs(tn) <= vn:
        s = y(tn)*y(a)
        if s < 0:
            t = opt.bisect(y, tn+e, a)
        else:
            t = opt.bisect(y, a, b)
        v = (1+n)*vs(t)-n*vp(t)
    else:
        c = tn
        d = tn+np.pi/w
        t = opt.bisect(das, c, d)
        v = vs(t)

    p = ys(t)
    return v, p, t
Example 4
def get_sunriseset(t,lat,lon,height,refalt):
    loc=EarthLocation(lat=lat*u.deg,lon=lon*u.deg,height=height*u.m)
    t0=Time(t,format='unix')
    print(t0.isot)
    # Compute solar position on a grid of 160 points over one day (~9-minute spacing)
    t=Time(t0.mjd+np.arange(160)/160.0,format='mjd',scale='utc')
    psun=get_sun(t)
    hor=psun.transform_to(AltAz(obstime=t,location=loc))
    
    # Interpolating function
    alt_diff=hor.alt.deg-refalt
    f=interpolate.interp1d(t.mjd,alt_diff)
    
    # Find sunrise/sunset
    sign=alt_diff[1:]*alt_diff[:-1]
    idx=np.where(sign<0.0)[0]
    print(idx)
    for i in idx:
        # Set
        if alt_diff[i]>alt_diff[i+1]:
            tset=Time(optimize.bisect(f,t[i].mjd,t[i+1].mjd),format='mjd',scale='utc')
            # Rise
        else:
            trise=Time(optimize.bisect(f,t[i].mjd,t[i+1].mjd),format='mjd',scale='utc')

    return trise.unix,tset.unix
Example 5
def hpd_beta(y, n, h=.1, a=1, b=1, plot=False, **plot_kwds):
    apost = y + a
    bpost = n - y + b
    if apost > 1 and bpost > 1:
        mode = (apost - 1)/(apost + bpost - 2)
    else:
        raise Exception("mode at 0 or 1: HPD not implemented yet")

    post = stats.beta(apost, bpost)

    dmode = post.pdf(mode)

    lt = opt.bisect(lambda x: post.pdf(x) / dmode - h, 0, mode)
    ut = opt.bisect(lambda x: post.pdf(x) / dmode - h, mode, 1)

    coverage = post.cdf(ut) - post.cdf(lt)
    if plot:
        plt.figure()
        plotf(post.pdf)
        plt.axhline(h*dmode)
        plt.plot([ut, ut], [0, post.pdf(ut)])
        plt.plot([lt, lt], [0, post.pdf(lt)])
        plt.title(r'$p(%s < \theta < %s | y)$' %
                  tuple(np.around([lt, ut], 2)))

    return lt, ut, coverage, h
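A quick usage sketch with illustrative numbers (12 successes in 40 trials, flat Beta(1, 1) prior), assuming the aliases the snippet relies on, scipy.stats as stats and scipy.optimize as opt:

import numpy as np
import scipy.stats as stats
import scipy.optimize as opt

lt, ut, coverage, h = hpd_beta(y=12, n=40, h=0.1)
print(lt, ut, coverage)   # endpoints of the density cut and their posterior mass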
Example 6
def calculate_all_tcrits(theta):
    tau, area = theta_unsqueeze(theta)
    tcrits = np.empty((3, len(tau)-1))
    misclassified = np.empty((3, len(tau)-1, 4))
    criteria = ((expPDF_tcrit_DC, 'DC'),
                (expPDF_tcrit_CN, 'Clapham & Neher'),
                (expPDF_tcrit_Jackson, 'Jackson'))
    for i in range(len(tau)-1):
        for j, (criterion, name) in enumerate(criteria):
            try:
                tcrit = bisect(criterion, tau[i], tau[i+1],
                               args=(tau, area, i+1))
                enf, ens, pf, ps = expPDF_misclassified(tcrit, tau, area, i+1)
            except Exception:
                print('Bisection with {} criterion failed.\n'.format(name))
                # np.nan rather than None: these land in float arrays
                tcrit = np.nan
                enf, ens, pf, ps = np.nan, np.nan, np.nan, np.nan
            tcrits[j, i] = tcrit
            misclassified[j, i] = np.array([enf, ens, pf, ps])
    return tcrits, misclassified
Example 7
def getSigmaFromChiSquared(chi_sq, dof):
    """
    Compute the significance (in sigma) of a chi-squared value given its
    degrees of freedom (from Numerical Recipes, section 6.2)
    
    Parameters
    ----------
    chi_sq : float 
        the chi squared value
    dof : int 
        the degrees of freedom
    
    Returns
    -------
    sigma : float
        the significance in sigma
    p-value : float
        p_value is the probability of obtaining a test statistic at least as 
        extreme as the one that was actually observed
    """
    def func1(x):
        return x - 1.0 + gammaincc(dof/2.0, chi_sq/2.0)
    
    # first calculate p, the confidence limit
    p = sopt.bisect(func1, 0, 1.0)

    def func2(x):
        val = sint.quad(lambda y: 1.0/np.sqrt(2.0*np.pi)*np.exp(-y**2/2.0), -x, x)
        return val[0] - p

    # now calculate sigma
    sigma = sopt.bisect(func2, 0, 100)

    # p is probability that random variates could have this chi-squared value
    return sigma, 1-p
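Equivalently (a sketch, not the original code): p is just the chi-squared CDF, and the inner integral is erf(x/sqrt(2)), so the same sigma can be obtained without numerical quadrature. The values of chi_sq and dof below are illustrative.

import numpy as np
from scipy import special, optimize

chi_sq, dof = 25.0, 10                         # illustrative values
p = special.gammainc(dof / 2.0, chi_sq / 2.0)  # P(X < chi_sq) for chi2(dof)
sigma = optimize.bisect(lambda x: special.erf(x / np.sqrt(2.0)) - p, 0, 100)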
Example 8
def fwhm(rmesh, intensity, f_int):
    """Compute fwhm precisely from starting grid.
    For extreme values of B0/eta2 (extremely smeared, no peak exists),
    code prints error messages and returns FWHM = 1 (max possible value)"""
    # Tolerance for finding intensity max position
    eps2 = np.finfo(float).eps * 2  # Tolerance, 1 ~= 1+eps2/2.

    # Compute half max from grid initial guess by bracketing intensity max
    idxmax = np.argmax(intensity)
    if idxmax == 0:
        #print 'Warning: intensity max not found (rminarc too small)'
        # No need to warn, nowadays
        return 1  # Can't search r < rmin, no disttab/emisttab values computed
    elif idxmax == len(intensity)-1:
        #print 'Warning: intensity max not resolved in grid on right; searching'
        rpk_a = rmesh[-2]  # CANNOT be rmesh[-1], to find max
        rpk_b = search_crossing(rmesh[-1], 1., lambda r:f_int(r)-intensity[-1],
                                eps2)  # Find r s.t. f_int(r) < intensity[-1]
        if rpk_b is None:  # This should never happen, honestly
            print('ERROR: intensity max not found (stalled on right) (?!)')
            return 1
    else:
        rpk_a = rmesh[idxmax - 1]
        rpk_b = rmesh[idxmax + 1]

    # option 'xatol' requires SciPy 0.14.0
    # 'xatol' -- absolute error in res.x acceptable for convergence
    # (as res.x is order 1, eps2 should be appropriate)
    res = spopt.minimize_scalar(lambda x: -1*f_int(x), method='bounded',
                                bounds=(rpk_a, rpk_b), options={'xatol':eps2})
    rpk = res.x
    pk = f_int(rpk)
    halfpk = 0.5 * pk
    def f_thrsh(r):  # For rootfinding
        return f_int(r) - halfpk

    # Right (upstream) FWHM crossing -- do not search on grid
    # (grid is too coarse upstream of rim, will not find FWHM;
    #  spurious crossings occur if profile not monotonic, e.g. w/ damping)
    rmax = spopt.bisect(f_thrsh, rpk, 1.)

    # Left (downstream) FWHM crossing -- find bracketing r (position) values
    # requires that rminarc is large enough so that grid contains crossing
    cross = np.diff(np.sign(intensity - halfpk))
    inds_rmin = np.where(cross > 0)[0]  # Left (neg to pos)

    if inds_rmin.size == 0:  # No left crossing found
        #print ('Warning: FWHM edge (rmin) not found '
        #       '(rminarc too small or peak FWHM cannot be resolved in profile)')
        return 1.  # Can't search r < rmin, no disttab/emisttab values computed
    else:
        rmin_a = rmesh[inds_rmin[-1]]  # Crossing closest to peak (largest r)
        rmin_b = rmesh[inds_rmin[-1] + 1]

    rmin = spopt.bisect(f_thrsh, rmin_a, rmin_b)

    return rmax - rmin
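As a self-contained check of the half-max bisection idea (the Gaussian stand-in profile below is not from the original code), a Gaussian peak has an exact FWHM of 2*sqrt(2*ln 2)*sigma:

import numpy as np
import scipy.optimize as spopt

sigma = 0.1
f_int = lambda r: np.exp(-0.5 * ((r - 0.5) / sigma)**2)  # peak at r = 0.5
halfpk = 0.5 * f_int(0.5)
rmax = spopt.bisect(lambda r: f_int(r) - halfpk, 0.5, 1.0)  # right crossing
rmin = spopt.bisect(lambda r: f_int(r) - halfpk, 0.0, 0.5)  # left crossing
print(rmax - rmin, 2.0 * np.sqrt(2.0 * np.log(2.0)) * sigma)  # should agree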
Example 9
def stability_limits(S2, S1=S1, W=W):
    func = lambda x: psi(x, S2, S1, W) + 1./x
    rhocrit = critical_density(S2, S1, W)
    if (psi(rhocrit, S2, S1, W) < -1./rhocrit):
        rhomin = optimize.bisect(func, R2, rhocrit)
        rhomax = optimize.bisect(func, rhocrit, 10.)
    else:
        rhomin = rhocrit
        rhomax = rhocrit
    return rhomin, rhomax
Example 10
def hpd_unimodal(dist, mode_guess=0., lo=0., hi=1e6, alpha=0.10):
    # TODO: fix this to work with unimodal but not symmetric dist'n

    mode = opt.fmin(lambda x: -dist.pdf(x), mode_guess)[0]
    dmode = dist.pdf(mode)   # normalise by the density at the mode (cf. hpd_beta)

    lt = opt.bisect(lambda x: dist.pdf(x) / dmode - alpha, lo, mode)
    ut = opt.bisect(lambda x: dist.pdf(x) / dmode - alpha, mode, hi)

    coverage = dist.cdf(ut) - dist.cdf(lt)

    return lt, ut, coverage
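A usage sketch with a symmetric distribution (so the caveat in the TODO does not bite), assuming scipy.optimize is imported as opt as the snippet expects; the bounds here are illustrative:

import scipy.stats
import scipy.optimize as opt

lt, ut, coverage = hpd_unimodal(scipy.stats.norm(), lo=-10., hi=10., alpha=0.10)
print(lt, ut, coverage)   # alpha cuts the density ratio, not the posterior mass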
Example 11
 def bisection(self):
     def f(sigma_hat):
         gamma = 1/(2*self.sigma**2) - 1/(2*sigma_hat**2)
         v = self.cumulants(gamma)[1] - (self.Y**2).sum()
         print('here', v, sigma_hat)
         return v
     factor = 3
     try:
         return bisect(f, self.sigma/factor, factor*self.sigma, maxiter=20)
     except ValueError:
         factor *= 2
         return bisect(f, self.sigma/factor, factor*self.sigma, maxiter=20)
Example 12
 def marginalRayAngle(self, fieldPoint, wavelength=None, eps=1e-5):
     """Compute the angle to graze the edge of the pupil."""
     apInd = self.surfaces.index(self.apertureStop)
     frontSys = System(self.surfaces[:apInd+1])
     stopy = self.apertureStop.semidiam
     def residual(theta, stopy):
         """Compute the residual of hitting the top edge of the aperture."""
         if not np.isscalar(theta): theta = theta.flatten()[0] # Make theta be a scalar.
         direction = np.zeros((self.ndim, 1));
         direction[-2:,0] = [np.tan(theta), 1.0]
         ray = Rays(np.reshape(fieldPoint, (-1,1)), direction, wavelength=wavelength)
         pts, rayOut = frontSys.cast(ray, settings=RaytraceSettings(False))
         #import pdb;pdb.set_trace()
         #print '[{},{}],'.format(theta, rayOut.origins[-2,0] - stopy),
         result = rayOut.origins[-2,0] - stopy
         if np.isnan(result): result = np.inf
         return result
     def residFudged(theta, stopy):
         return residual(theta, stopy) + eps * stopy
     from scipy.optimize import fmin, bisect
     theta0 = self.marginalRayAngleRTM(fieldPoint[-2:][::-1], wavelength)
     results = [None, None]
     for i, pm1 in enumerate((-1, 1)):
         if True:
             try:
                 results[i] = bisect(residFudged, pm1 * 0.5 * theta0, pm1 * 1.5 * theta0, args=(pm1 * stopy,))
             except ValueError:
                 pass
         if results[i] is None:
             thetas = np.pi * 0.1 * np.linspace(-1, 1, 21)
             resids = np.array([residFudged(theta, pm1*stopy) for theta in thetas])
             th0 = thetas[np.argmin(resids**2)]
             mask = np.isfinite(resids)
             #print 'thetas', thetas
             #print 'resids', resids
             resids = resids[mask]
             thetas = thetas[mask]
             try:
                 pos = thetas[resids > 0][np.argmin(resids[resids > 0])]
                 neg = thetas[resids < 0][np.argmax(resids[resids < 0])]
             except ValueError:
                 # We can get here if the lens focuses all points at this field point to the same point at the aperture plane.
                 raise
             #print '------'
             #print pos, residFudged(pos, pm1*stopy), resids[resids > 0][np.argmin(resids[resids > 0])]
             #print neg, residFudged(neg, pm1*stopy), resids[resids < 0][np.argmax(resids[resids < 0])]
             try:
                 results[i] = bisect(residFudged, pos, neg, args=(pm1 * stopy,))
             except ValueError:
                 results[i] = fmin(lambda *a: residFudged(*a)**2, x0=np.mean(thetas), args=(pm1 * stopy,))[0]
                 print('resid:', results[i], residFudged(results[i], pm1*stopy))
     return results
Example 13
def sunrise_sunset(date, longitude, latitude, solar_angular_radius=0.0):
    """Interface to the scipy.optimize.
    Using the date (j2000 offset) and location, start by finding the local 
    midnights. the local mid-day is then (roughly) at the center of the two 
    midnights. Sunrise must occur between midnight and midday, sunset between 
    midday and midnight (except for polar night).
    
    This method uses Ian's method, which is less annoying than my method that required
    a conditional depending on whether 'date' was in the daytime or nighttime."""
    
    mid1,mid2=midnight(date, longitude, latitude)
    noon = 0.5*(mid1+mid2)
    sunrise = so.bisect(solelev, mid1, noon, args=(longitude, latitude, solar_angular_radius))
    sunset = so.bisect(solelev, noon, mid2, args=(longitude, latitude, solar_angular_radius))
    return sunrise, sunset
Example 14
  def helper(key, value):
    def f(v):
      x = parameters.copy()
      x[key] = v
      return cost(hypothesis, x, data) - (min_S+1.)

    # Find bracketing intervals for the roots of the S+1 objective function.
    left, right = getBrackets(f, value)

    # Use a bisection search to find the left and right roots within the
    # intervals.
    left = opt.bisect(f, *left, xtol=1.0e-5)
    right = opt.bisect(f, *right, xtol=1.0e-5)

    return (key, (abs(left - value), abs(right - value)))
Example 15
 def generate():
     to_begin = Line(nearest, line.begin)
     if to_begin.length() > 0:
         def func(parameter):
             return (self.position.distance(to_begin(parameter)) -
                     self.radius)
         if sign(func(0)) != sign(func(1)):
             yield to_begin(bisect(func, 0, 1))
     to_end = Line(nearest, line.end)
     if to_end.length() > 0:
         def func(parameter):
             return (self.position.distance(to_end(parameter)) -
                     self.radius)
         if sign(func(0)) != sign(func(1)):
             yield to_end(bisect(func, 0, 1))
Example 16
def P(phi, phib, df):
	r""" Numerically solve for the partition coefficient as a
	    function of \phi_s """
	if f(0,phi,phib,df)*f(1,phi,phib,df) < 0:
		return opt.bisect(f, 0, 1, args=(phi,phib,df), maxiter=500) # Bisection method
	else:
		return opt.newton(f, 1.0, args=(phi,phib,df), maxiter=5000) # Newton-Raphson
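The residual f(x, phi, phib, df) is external here; the bisect-or-newton fallback itself can be sketched on a toy residual (purely illustrative, with a single stand-in parameter s):

import scipy.optimize as opt

f = lambda x, s: x**3 - s        # toy residual with a root at s**(1/3)
s = 0.5
if f(0, s) * f(1, s) < 0:
    root = opt.bisect(f, 0, 1, args=(s,), maxiter=500)   # bisection
else:
    root = opt.newton(f, 1.0, args=(s,), maxiter=5000)   # Newton-Raphson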
Example 17
    def get_next_time(self, t_0, pixel_size, xtol=1e-12):
        """
        Get next time at which the body will have traveled *pixel_size*, the starting time is *t_0*.
        *xtol* is the absolute tolerance for bisection passed to :py:func:`scipy.optimize.bisect`.
        """
        def func(t):
            """Objective function for time bisection. The
            maximum dt obtained from individual trajectories might not be precise enough, but is
            precise enough to determine when the *pixel_size* is overstepped. Thus, we compute the
            time when the overall trajectory doesn't move more than *pixel_size*, then the time it
            does move more than the *pixel_size* and bisect to obtain the movement by exactly
            *pixel_size*.
            """
            t = t * q.s
            # scipy's bisection gets the root at 0, thus we need to shift by *pixel_size*
            return (self.get_distance(t_0, t) - pixel_size).simplified.magnitude

        self.bind_trajectory(pixel_size)
        if self._dt is None:
            self.get_maximum_dt(pixel_size)

        if self._dt is None:
            # All bodies are stationary
            return np.inf * q.s

        for current_time in np.arange(t_0.simplified.magnitude, self.time.simplified.magnitude,
                                      self._dt.simplified.magnitude) * q.s:
            if self.moved(t_0, current_time, pixel_size):
                return bisect(func, t_0, current_time, xtol=xtol) * q.s

        return np.inf * q.s
Example 18
def binomial_ci(mle, N, alpha=0.05):
    """ One sided confidence interval for a binomial test.
    To find the two sided interval, call with (1-alpha/2) and alpha/2 as arguments

    Parameters
    ----------
    mle : float
      Fraction of successes
    N : int
      Number of trials

    If after N trials we obtain mle as the proportion of those
    trials that resulted in success, find c such that

    P(k/N < mle; theta = c) = alpha

    where k/N is the proportion of successes in the set of trials,
    and theta is the success probability for each trial.
    """
    from scipy.stats import binom
    from scipy.optimize import bisect


    to_minimise = lambda c: binom.cdf(mle*N,N,c)-alpha
    return bisect(to_minimise,0,1)
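Per the docstring, a two-sided 95% interval comes from two one-sided calls; a usage sketch with illustrative numbers:

lower = binomial_ci(0.3, 100, alpha=1 - 0.05 / 2)   # alpha = 0.975
upper = binomial_ci(0.3, 100, alpha=0.05 / 2)       # alpha = 0.025
print(lower, upper)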
Example 19
    def solve_for_fermi_energy(self, temperature, chemical_potentials, bulk_dos):
        """
        Solve for the Fermi energy self-consistently as a function of
        temperature and chemical potentials, balancing defect, electron,
        and hole concentrations.
        Args:
            temperature: temperature at which to evaluate concentrations
            chemical_potentials: chemical potentials to use
            bulk_dos: bulk system dos (pymatgen Dos object)
        Returns:
            Fermi energy
        """

        fdos = FermiDos(bulk_dos, bandgap=self.band_gap)

        def _get_total_q(ef):

            qd_tot = sum([
                d['charge'] * d['conc']
                for d in self.list_defect_concentrations(
                    chemical_potentials=chemical_potentials, temperature=temperature, fermi_level=ef)
            ])
            qd_tot += fdos.get_doping(fermi=ef, T=temperature)
            return qd_tot

        return bisect(_get_total_q, -1., self.band_gap + 1.)
Example 20
def velocity_law_U_of_R(R):
    "Explicit velocity law U(R) requires numerically solving implicit equation"
    def f(U):
        "Function to be zeroed: f(U) = 0"
        return R - velocity_law_R_of_U(U)
    U1, U2 = 1.0, 10.0
    return bisect(f, U1, U2)
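The implicit law velocity_law_R_of_U is defined elsewhere; the inversion pattern itself, sketched with a hypothetical monotone stand-in:

from scipy.optimize import bisect

R_of_U = lambda U: U**2                  # hypothetical stand-in law
U_of_R = lambda R: bisect(lambda U: R - R_of_U(U), 1.0, 10.0)
print(U_of_R(25.0))                      # -> 5.0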
Example 21
def calc_dh(area, h0, r):
    if _debug: print(area, h0)

    def func(h):
        return area_above_segment(r, h) - area_above_segment(r, h0) - area

    return h0 - bisect(func, 0, 2*r)
Example 22
def setup_smearing( eigs, n_electron, width = 0.1, exponent = 2.0 ):

    def objective( e_f ):
        return nm.sum( smear( eigs, e_f, width, exponent ) ) - n_electron

    try:
        e_f = bisect(objective, eigs[0], eigs[-1], xtol=1e-12)
    except ValueError:
        # Raised when objective(eigs[0]) and objective(eigs[-1]) have the
        # same sign, i.e. the bracket does not contain the Fermi level.
        e_f = None

    def smear_tuned( energies ):
        return smear( energies, e_f, width, exponent )

    return e_f, smear_tuned
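smear() above is external; a minimal self-contained sketch of the same Fermi-level bisection, with a Fermi-Dirac-style stand-in for smear (illustration only, not the original smearing function):

import numpy as nm
from scipy.optimize import bisect

def smear(energies, e_f, width, exponent=2.0):
    # Fermi-Dirac-style occupation; the real smear() may differ.
    return 1.0 / (1.0 + nm.exp((energies - e_f) / width))

eigs = nm.linspace(-5.0, 5.0, 11)
n_electron = 6
e_f = bisect(lambda e: nm.sum(smear(eigs, e, 0.1)) - n_electron,
             eigs[0], eigs[-1], xtol=1e-12)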
Example 23
def CalculatePTime(vels = [   3500,   3500,   3500,   3500,   3500,   3500],
                   depths = [ 2000,   3000,   4000,   5000,   6000],
                   rhos = [2.32,2.55,2.75,2.32,2.55,2.75],
                   source_depth = 5000,
                   source_offset = 0, costFunc=costFunc):
    
    # vels = np.array([1550, 3100, 6200])
    # depths = np.array([2000, 4000])
    # rhos = np.array([2.3, 2.3, 2.7])

    # Velocities for the segments v_j
    # Thicknesses Hj
    R=source_offset
    Hi,Vi,Rhoi = GetHjVjRhoj(vels,rhos,depths,source_depth) 

    res,r = opt.bisect(f=costFunc,a=0,b=1E-3,args=(Hi,Vi,R),full_output=True,disp=True)
    p=res

    #import pdb; pdb.set_trace()
    # create an array of cosines:
    cosV = np.sqrt(1 - (p**2) * Vi**2)
    # create an array of times per segment:
    t_int = Hi / (Vi * cosV)

    t_total = np.sum(t_int)

    return t_total,r
Example 24
def roundCorner(curveXy, curveXyt, rReq):
    def getOffsetPt(ptX,curveXy,rad):
        tanDir1 = normalToXyCurve(curveXy,ptX)
        curve = interp.interp1d(curveXy[:,0],curveXy[:,1],'cubic')
        x = ptX + tanDir1[0]*rad
        y = curve(ptX) + tanDir1[1]*rad
        return [x,y]
    class objFcn:
        def __init__(self,curveXy,curveXyt,rad):
            self.curveXy = curveXy
            self.curveXyt = curveXyt
            self.rad = rad
        def __call__(self,ptX):
            pt1 = getOffsetPt(ptX,self.curveXy,rReq)
            return getDist(self.curveXyt, pt1)[1]-self.rad
        def getPt(self,ptX):
            pt1 = getOffsetPt(ptX,self.curveXy,rReq)
            return getDist(self.curveXyt, pt1)[0]
            
    objFcn1 = objFcn(curveXy, curveXyt, rReq)
    xReq = bisect(objFcn1,curveXy[0,0]+.001,curveXy[-1,0]-.001)
    center = getOffsetPt(xReq,curveXy,rReq)
    curve1 = interp.interp1d(curveXy[:,0],curveXy[:,1])
    yReq = curve1(xReq)
    pt2 = objFcn1.getPt(xReq)
    curve3 = xytCurveInterp(curveXyt)
    pt3 = curve3(pt2)[0:2]
    theta1 = getAngle(center,[xReq,yReq])
    theta2 = getAngle(center,pt3)
    circle1 = getCircle(center, rReq,theta1,theta2,3)
    curveRes1 = xyCurveSplit(curveXy,[],xReq)
    curveRes2 = xytCurveSplit(curveXyt,[],pt2)
    return curveRes1, circle1, curveRes2
Example 25
def meco_velocity(m1, m2, chi1, chi2):
    """ 
    Returns the velocity of the minimum energy cutoff for 3.5pN (2.5pN spin)

    Parameters
    ----------
    m1 : float
        First component mass in solar masses
    m2 : float
        Second component mass in solar masses
    chi1 : float
        First component dimensionless spin S_1/m_1^2 projected onto L
    chi2 : float
        Second component dimensionless spin S_2/m_2^2 projected onto L

    Returns
    -------
    v : float
        Velocity (dimensionless)
    """
    energy0, energy2, energy3, energy4, energy5, energy6 = \
        _energy_coeffs(m1, m2, chi1, chi2)
    def eprime(v):
        return 2. + v * v * (4.*energy2 + v * (5.*energy3 \
                + v * (6.*energy4
                + v * (7.*energy5 + 8.*energy6 * v))))
    return bisect(eprime, 0.05, 1.0)
Example 26
def gentab(N,amin):
    " Generate the arrays x,y,d,delta, assuming a partition in n boxes for [0,+infty) " 
    
    area_low = 0.5
    area_up = 0.7
    area_zig = optimize.bisect(f,area_low,area_up,args=(N))
    xpos = f(area_zig,N,stats.norm,return_x=True)
    # don't store x_i such that x_i<a_min
    j = 0
    while (j < N) and (xpos[j] < -amin):
        j += 1
    x = concatenate((-xpos[j:0:-1],xpos)) 
    nd = x.size-1 # number of intervals [xi,x(i+1)]
    d = empty(nd,dtype="double")
    delta = empty(nd,dtype="double")
    yl = empty(nd,dtype="double")
    yu = empty(nd,dtype="double")
    for i in range(nd):
        if x[i]<0.:
            yl[i] = stats.norm.pdf(x[i])
            yu[i] = stats.norm.pdf(x[i+1])
        else:
            yl[i] = stats.norm.pdf(x[i+1])
            yu[i] = stats.norm.pdf(x[i])
        d[i] = x[i+1] - x[i]
        delta[i] = d[i]*yu[i]/yl[i]
    return (x,yl,yu,d,delta)
Example 27
	def inverse(self, data, max_iter=100):
		# make sure data has right shape
		data = asarray(data).reshape(1, -1)

		# apply Gaussian CDF
		data = norm.cdf(data)

		# apply inverse model CDF
		val_max = mean(self.mog.means) + 1.
		val_min = mean(self.mog.means) - 1.

		for t in range(data.shape[1]):
			# make sure root lies between val_min and val_max
			while float(self.mog.cdf(val_min)) > data[0, t]:
				val_min -= 1.
			while float(self.mog.cdf(val_max)) < data[0, t]:
				val_max += 1.

			# find root numerically
			data[0, t] = bisect(
			    f=lambda x: float(self.mog.cdf(x)) - data[0, t],
			    a=val_min,
			    b=val_max,
			    maxiter=max_iter,
			    disp=False)

		return data
Example 28
def VariableHeight(Moment):
	
	#Returns the ideal height of the beam based on the input moment profile. Mass is not considered unless mass is included in the moment profile.
	h = np.zeros((50)) #Creating container for optimized beam heights

	 #The optimize function loses its mind when the moment drops below 2980:

	for i in range(0,50):
		if abs(Moment[i]) < 2980:
			Moment[i] = -2980
	
	Yield_steel = 415e6 #MPa
	Yield_iron = 98e6 #MPa
	Safety_factor = 5 #Adjust this value as needed

	Max_stress_steel = Yield_steel / Safety_factor 
	Max_stress_iron = Yield_iron / Safety_factor
	def BeamHeight(h):
		x = 6*abs(Moment[i])*h/(fw*h**3-(fw-wt)*(h-2*ft)**3) - Max_stress_steel
		return x

	for i in range(0,50):	
		h[i] = sp.bisect(BeamHeight, 0.02, 10,xtol = 0.001)	
		if h[i] < 3*ft:
			h[i] = 3*ft #Preventing the I-beam from being unrealistically small. Minimum height is 3 times the flange thickness
	h = 1.01*h #The bisect function is not perfect. Small errors allow for under-designing for stress. Here the height is inflated by 1% to ensure the beam is sufficiently designed.
	return h
Example 29
def lambdavalue(y,p,alpha,gamma,phi,NegativeDemands=True,ub=10,method='bisect'):
    """
    Given income y, prices p and preference parameters
    (alpha,gamma,phi), find the marginal utility of income lbda.
    Elliott: Changed default method from 'root_with_precision' to 'bisect'.
    I think Rw/P was hanging up somehow because the fake_data simulation freezes when direct==False
    """

    n,alpha,gamma,phi = check_args(p,alpha,gamma,phi)

    if NegativeDemands:
        subsistence=sum([p[i]*phi[i] for i in range(n)])
    else:
        subsistence=sum([p[i]*phi[i] for i in range(n) if phi[i]<0])
        
    if y+subsistence<0: # Income too low to satisfy subsistence demands
        warnings.warn('Income too small to cover subsistence phis (%f < %f)' % (y,subsistence))
        return nan

    f = excess_expenditures(y,p,alpha,gamma,phi,NegativeDemands=NegativeDemands)

    if method=='bisect':
        try:
            return optimize.bisect(f,1e-20,ub)
        except ValueError:
            return lambdavalue(y,p,alpha,gamma,phi,NegativeDemands=NegativeDemands,ub=ub*2.0)
    elif method=='newton':
        df = excess_expenditures_derivative(p,alpha,gamma,phi)
        return optimize.newton(f,ub/2.,fprime=df)
    elif method=='root_with_precision':
        return root_with_precision(f,[0,ub,Inf],1e-12,open_interval=True)
    else:
        raise ValueError("Method not defined.")
Example 30
def get_lambda_spatialdistrib(A, A0, n0):
    """Solve for lambda_Pi from Harte 2011 equ. 7.50 and 7.51

    Arguments:
    A = the spatial scale of interest
    A0 = the maximum spatial scale under consideration
    n0 = the number of individuals of the focal species at scale A0

    """
    assert type(n0) is int, "n0 must be an integer"
    assert A > 0 and A0 > 0, "A and A0 must be greater than 0"
    assert A <= A0, "A must be less than or equal to A0"
    y = lambda x: x / (1 - x) - (n0 + 1) * x ** (n0 + 1) / (1 - x ** (n0 + 1)) - n0 * A / A0
    if A < A0 / 2:
        # Set the distance from the undefined boundaries of the Lagrangian multipliers
        # to set the upper and lower boundaries for the numerical root finders
        BOUNDS = [0, 1]
        DIST_FROM_BOUND = 10 ** -15
        exp_neg_lambda = bisect(y, BOUNDS[0] + DIST_FROM_BOUND,
                                   BOUNDS[1] - DIST_FROM_BOUND)
    elif A == A0 / 2:
        #Special case from Harte (2011). See text between Eq. 7.50 and 7.51
        exp_neg_lambda = 1
    else:
        # x can potentially go up to infinity
        # thus use solution of a logistic equation as the starting point
        exp_neg_lambda = (fsolve(y, - log(A0 / A - 1)))[0]
    lambda_spatialdistrib = -1 * log(exp_neg_lambda)
    return lambda_spatialdistrib
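A usage sketch with illustrative values (A < A0/2, so the bisection branch runs), with the imports the snippet expects made explicit:

from math import log
from scipy.optimize import bisect, fsolve

lam = get_lambda_spatialdistrib(A=1.0, A0=4.0, n0=10)
print(lam)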
Example 31
def radiation(a):
    global E
    global L
    print('PROCEED WITH CAUTION!')
    print('Radiation in Progress')
    Ra_array = np.zeros(1)
    Rp_array = np.zeros(1)
    t_gw_array = np.zeros(1)
    t_array = np.zeros(1)
    E_array = np.zeros(1)
    E_array[0] = E
    L_array = np.zeros(1)
    L_array[0] = L
    e_array = np.zeros(1)
    a_array = np.zeros(1)
    Q_array = np.zeros(1)
    Sol_Array = np.zeros(shape=(3, 1))
    XY = np.zeros(shape=(2, 1))
    I = np.zeros(shape=(2, 1))
    count = 0
    # Fitting Numbers
    A_E = -0.141421
    B_E = 0.752091
    C_E = -4.634643
    A_L = -1.13137
    B_L = 1.31899
    C_L = -4.149103
    # temp r for switching between orbits
    temp_r = r_i
    # Integration sequence to reach first apoapsis given that initial conditions start at periapsis
    start_y0 = [temp_r, rdot_i, phi_i]
    start_sol = solve_ivp(deriv,
                          y0=start_y0,
                          t_span=[t_array[0], 1000000000],
                          rtol=1e-8,
                          atol=1e-8,
                          events=apoapsis)
    a_array[0] = (start_sol.y_events[0][0][0] - temp_r) / 2
    e_array[0] = get_e(temp_r, start_sol.y_events[0][0][0])
    start_y0 = [
        start_sol.y_events[0][0][0], start_sol.y_events[0][0][1],
        start_sol.y_events[0][0][2]
    ]
    t_gw_array[0] = start_sol.t_events[0][0]
    XY[0] = start_y0[0] * np.cos(start_y0[2])
    XY[1] = start_y0[0] * np.sin(start_y0[2])
    I[0] = m * (XY[0][-1]**2 - (1 / 3) * (start_y0[0]**2))
    I[1] = m * (XY[0][-1]**2 + XY[1][-1]**2)
    t_array[0] = start_sol.t_events[0][0]
    Ra_array[0] = start_sol.y_events[0][0][0]
    Rp_array[0] = temp_r
    # Radiation integration sequence
    try:
        for i in range(a):
            temp_sol_1 = solve_ivp(deriv,
                                   y0=start_y0,
                                   t_span=[t_array[-1], t_array[-1] + .001],
                                   rtol=1e-8,
                                   atol=1e-8)
            temp_y0_2 = [
                temp_sol_1.y[0][1], temp_sol_1.y[1][1], temp_sol_1.y[2][1]
            ]
            temp_sol_2 = solve_ivp(deriv,
                                   y0=temp_y0_2,
                                   t_span=[temp_sol_1.t[-1], 1000000000],
                                   rtol=1e-8,
                                   atol=1e-8,
                                   events=apoapsis)
            s = temp_sol_2.t.size
            # Plot of apoapsis to apoapsis
            Sol_Array = np.append(
                Sol_Array, [temp_sol_2.y[0], temp_sol_2.y[1], temp_sol_2.y[2]],
                axis=1)
            # Levin's Q
            dPhi = np.abs((temp_sol_2.y_events[0][0][2] % (2 * np.pi)) -
                          (start_y0[2] % (2 * np.pi)))
            w = int(
                np.abs((temp_sol_2.y_events[0][0][2] - start_y0[2])) /
                (2 * np.pi))
            Q_array = np.append(Q_array, w + (dPhi / (2 * np.pi)))
            # Gravitational Wave calculation
            for j in range(s):
                XY = np.append(
                    XY, [[temp_sol_2.y[0][j] * np.cos(temp_sol_2.y[2][j])],
                         [temp_sol_2.y[0][j] * np.sin(temp_sol_2.y[2][j])]],
                    axis=1)
                I = np.append(
                    I,
                    [[m * (XY[0][-1]**2 - (1 / 3) * (temp_sol_2.y[0][j])**2)],
                     [m * (XY[0][-1]**2 + XY[1][-1]**2)]],
                    axis=1)
                t_gw_array = np.append(t_gw_array, temp_sol_2.t[j])
            t_array = np.append(t_array, temp_sol_2.t_events[0][0])
            # Radiation calculations
            E_Rad = \
                (m / M) * (A_E * np.arccosh(1 + B_E * (4 * M / temp_r)**6 * (M / (temp_r - 4 * M))) + C_E * (temp_r / M - 4) * (M / temp_r)**(9/2))
            L_Rad = m * (A_L * np.arccosh(1 + B_L * (4 * M / temp_r)**3 *
                                          (M / (temp_r - 4 * M))) + C_L *
                         (temp_r / M - 4) * (M / temp_r)**3)
            # Updates to global variables affected by radiation
            E = E + E_Rad
            E_array = np.append(E_array, E)
            L = L + L_Rad
            L_array = np.append(L_array, L)
            global root1
            root1 = sp.lambdify(
                r, E + G * M / r - L**2 / (2 * r**2) + G * M * (L**2) / r**3 -
                (1 / 2) * rdot_i**2)
            global U_Eff
            U_Eff = (-G * M / r + L**2 / (2 * r**2) - G * M * (L**2) / r**3)
            global U_Eff_Func
            U_Eff_Func = sp.lambdify(r, U_Eff)
            global Eff_Force
            Eff_Force = -sp.diff(U_Eff, r)
            global Eff_Force_Func
            Eff_Force_Func = sp.lambdify(r, Eff_Force)
            global Phi_dot
            Phi_dot = L / r**2
            global Phi_dot_Func
            Phi_dot_Func = sp.lambdify(r, Phi_dot)
            global ISCO
            ISCO = (6 * G * M) / (1 + np.sqrt(1 - 12 * (G * M / L)**2))
            global IUCO
            IUCO = (6 * G * M) / (1 - np.sqrt(1 - 12 * (G * M / L)**2))
            global r_p
            r_p = bisect(root1, a=IUCO, b=ISCO, disp=True)
            # Update to initial conditions
            start_y0 = [r_p, rdot_i, phi_i]
            start_sol = solve_ivp(deriv,
                                  y0=start_y0,
                                  t_span=[0, 1000000000],
                                  rtol=1e-8,
                                  atol=1e-8,
                                  events=apoapsis)
            a_array = np.append(a_array,
                                (start_sol.y_events[0][0][0] - r_p) / 2)
            e_array = np.append(e_array, get_e(r_p,
                                               start_sol.y_events[0][0][0]))
            start_y0 = [
                start_sol.y_events[0][0][0], start_sol.y_events[0][0][1],
                temp_sol_2.y_events[0][0][2]
            ]
            Ra_array = np.append(Ra_array, start_sol.y_events[0][0][0])
            Rp_array = np.append(Rp_array, r_p)
            count = count + 1
            print(str(count) + '/' + str(a))
        I_dot = np.zeros(shape=(2, I[0].size - 2))
        I_ddot = np.zeros(shape=(2, I_dot[0].size - 2))
        for i in range(I_dot[0].size):
            I_dot[0][i] = (I[0][i + 2] - I[0][i]) / (t_gw_array[i + 2] -
                                                     t_gw_array[i])
            I_dot[1][i] = (I[1][i + 2] - I[1][i]) / (t_gw_array[i + 2] -
                                                     t_gw_array[i])
        t_gw_array = np.delete(t_gw_array, 0)
        t_gw_array = np.delete(t_gw_array, -1)
        for i in range(I_ddot[0].size - 2):
            I_ddot[0][i] = (I_dot[0][i + 2] -
                            I_dot[0][i]) / (t_gw_array[i + 2] - t_gw_array[i])
            I_ddot[1][i] = (I_dot[1][i + 2] -
                            I_dot[1][i]) / (t_gw_array[i + 2] - t_gw_array[i])
        t_gw_array = np.delete(t_gw_array, 0)
        t_gw_array = np.delete(t_gw_array, -1)
        dEdt_array = np.zeros(E_array.size - 2)
        dLdt_array = np.zeros(L_array.size - 2)
        for n in range(t_array.size - 2):
            dEdt_array[n] = (E_array[n + 2] - E_array[n]) / (t_array[n + 2] -
                                                             t_array[n])
            dLdt_array[n] = (L_array[n + 2] - L_array[n]) / (t_array[n + 2] -
                                                             t_array[n])
        fig1 = plt.figure(figsize=(16, 9))
        plt.subplots_adjust(bottom=.25)
        spec = gridspec.GridSpec(nrows=2, ncols=2, figure=fig1)
        ax1 = fig1.add_subplot(spec[0, 0])
        # plt.scatter(abs(e_array[:]), a_array[:])
        plt.scatter(Rp_array[1:a], np.abs(dEdt_array[:]), s=4)
        plt.xlabel('Radius of Periapsis')
        plt.ylabel('|dE/dt|')
        ax2 = fig1.add_subplot(spec[1, 0])
        # plt.plot(t_gw_array[:], 2 * M * I_ddot[0][:])
        # plt.scatter(t_array[:], E_array[:])
        # plt.scatter(Rp_array[1:a], np.abs(dEdt_array[:]))
        plt.scatter(Rp_array[1:a], np.abs(dLdt_array[:]), s=4)
        plt.xlabel('Radius of Periapsis')
        plt.ylabel('|dL/dt|')
        ax3 = fig1.add_subplot(spec[1, 1])
        # plt.scatter(XY[0], XY[1])
        # plt.scatter(Sol_Array[2][1:], Sol_Array[0][1:])
        # plt.scatter(E_array[:-1], Q_array[1:])
        plt.scatter(Rp_array[:], e_array[:], s=4)
        plt.xlabel('r_p')
        plt.ylabel('e')
        ax4 = fig1.add_subplot(spec[0, 1])
        # plt.polar(Sol_Array[2][1:], Sol_Array[0][1:])
        plt.plot(E_array[:], L_array[:])
        # plt.xlabel('Energy')
        # plt.ylabel('Angular Momentum')
        # plt.plot(t_gw_array[:], 2 * M * I_ddot[0][:], alpha=.7)
        # plt.plot(sol.t[10: - 4], 2 * get_H()[0][0][10:], alpha=.7)
        # plt.xlim(1000, 2000)
        # plt.ylim(-.000025, .000025)
    except (RuntimeError, ValueError):
        I_dot = np.zeros(shape=(2, I[0].size - 2))
        I_ddot = np.zeros(shape=(2, I_dot[0].size - 2))
        for i in range(I_dot[0].size):
            I_dot[0][i] = (I[0][i + 2] - I[0][i]) / (t_gw_array[i + 2] -
                                                     t_gw_array[i])
            I_dot[1][i] = (I[1][i + 2] - I[1][i]) / (t_gw_array[i + 2] -
                                                     t_gw_array[i])
        t_gw_array = np.delete(t_gw_array, 0)
        t_gw_array = np.delete(t_gw_array, -1)
        for i in range(I_ddot[0].size - 2):
            I_ddot[0][i] = (I_dot[0][i + 2] -
                            I_dot[0][i]) / (t_gw_array[i + 2] - t_gw_array[i])
            I_ddot[1][i] = (I_dot[1][i + 2] -
                            I_dot[1][i]) / (t_gw_array[i + 2] - t_gw_array[i])
        t_gw_array = np.delete(t_gw_array, 0)
        t_gw_array = np.delete(t_gw_array, -1)
        dEdt_array = np.zeros(E_array.size - 2)
        dLdt_array = np.zeros(L_array.size - 2)
        for n in range(t_array.size - 2):
            dEdt_array[n] = (E_array[n + 2] - E_array[n]) / (t_array[n + 2] -
                                                             t_array[n])
            dLdt_array[n] = (L_array[n + 2] - L_array[n]) / (t_array[n + 2] -
                                                             t_array[n])
        fig1 = plt.figure(figsize=(16, 9))
        plt.subplots_adjust(bottom=.25)
        spec = gridspec.GridSpec(nrows=2, ncols=2, figure=fig1)
        ax1 = fig1.add_subplot(spec[0, 0])
        # plt.scatter(abs(e_array[:]), a_array[:])
        plt.scatter(Rp_array[1:a], np.abs(dEdt_array[:]), s=4)
        plt.xlabel('Radius of Periapsis')
        plt.ylabel('|dE/dt|')
        ax2 = fig1.add_subplot(spec[1, 0])
        # plt.plot(t_gw_array[:], 2 * M * I_ddot[0][:])
        # plt.scatter(t_array[:], E_array[:])
        # plt.scatter(Rp_array[1:a], np.abs(dEdt_array[:]))
        plt.scatter(Rp_array[1:a], np.abs(dLdt_array[:]), s=4)
        plt.xlabel('Radius of Periapsis')
        plt.ylabel('|dL/dt|')
        ax3 = fig1.add_subplot(spec[1, 1])
        # plt.scatter(XY[0], XY[1])
        # plt.scatter(Sol_Array[2][1:], Sol_Array[0][1:])
        # plt.scatter(E_array[:-1], Q_array[1:])
        plt.scatter(Rp_array[:], e_array[:], s=4)
        plt.xlabel('r_p')
        plt.ylabel('e')
        ax4 = fig1.add_subplot(spec[0, 1])
        # plt.polar(Sol_Array[2][1:], Sol_Array[0][1:])
        plt.scatter(E_array[:], L_array[:], s=4)
        plt.xlabel('Energy')
        plt.ylabel('Angular Momentum')
Example 32
gamma1 = entEn/elResEn
gamma = ((3/(16*np.pi))*deltV0*bendRad*beamPow/(radEl**2*massEl*(lght**2)*(lee)*betaE))**(1/3)
effCrossVol = np.pi*beamWid**3*np.cos(incRad)/(2*np.sin(incRad)) # m^3
lenInt = 2*beamWid*np.cos(incRad)/np.sin(incRad) # m

kEn = entEn - elResEn*lght**2 # MeV

# solve for velocity of the particles
entEnBeam = entEn/2
veloc = np.arange(2.22e5,2.3e5,1) # range narrowed through experimentation
def func(veloc):    
    return np.sqrt(entEnBeam/((1/np.sqrt(1-veloc**2/lght**2))-1)/elResEn) - veloc

funcOut = np.array([func(veloc[i]) for i in range(len(veloc))])
plt.plot(veloc,funcOut)
solVeloc = bisect(func,226335,226336,maxiter = 10000)



# calculate momentum
loren = 1/np.sqrt(1-solVeloc**2/lght**2)
momPartMag = loren*elResEn*solVeloc
# the two particles have opp x momentums but similar y 
momElX = momPartMag*np.cos(incRad)
momElY = momPartMag*np.sin(incRad)

momPosX = momPartMag*np.cos(-incRad)
momPosY = momPartMag*np.sin(incRad)


Example 33
def get_fdr_threshold_estimate(null,
                               nonnull,
                               region_list,
                               alpha,
                               maxima=False,
                               n_proc=None,
                               verbose=False):
    '''
    Calculate FDR-based detection threshold from given data for FDR=alpha.

    Operates across regions and returns numpy array with threshold for each
    region.

    Uses direct estimation technique for FDR as in Storey (2002).

    Estimates p-values using Gaussian KDE on null sample.
    '''
    # Setup pool if needed
    if n_proc is not None:
        pool = multiprocessing.Pool(processes=n_proc)
    else:
        pool = None

    thresh_list = []
    for region in region_list:
        if verbose:
            print("Limits\t=\t%d\t%d" % (region[0], region[-1]), file=sys.stderr)

        null_region = null[region]
        if maxima:
            subset = find_maxima(null_region)
            if subset.size > 1:
                null_region = null_region[subset]
            else:
                if verbose:
                    err_msg = "Region [%d, %d] had only %d local maxima" % (
                        region[0], region[-1], subset.size)
                    print(err_msg, file=sys.stderr)
        null_region.sort()

        nonnull_region = nonnull[region]
        nonnull_max = nonnull.max()
        if maxima:
            nonnull_region = nonnull_region[find_maxima(nonnull_region)]
        nonnull_region.sort()

        # Handle case of no local maxima
        if nonnull_region.size < 1:
            thresh_list.append(nonnull_max * 2)
            continue

        # Transform data
        log_null = np.log(null_region)
        log_nonnull = np.log(nonnull_region)

        # Calculate p-values
        try:
            p = estimate_pvalues(log_null, log_nonnull, pool, n_proc)
        except Exception:
            # Empirical CDF fall-back for singular cases
            p = np.searchsorted(log_null, log_nonnull).astype(float)
            p /= log_null.size

        # Check for pathological case
        # Both cases have same sign -> give up
        if estimate_fdr_direct(0.0, alpha, p) * estimate_fdr_direct(
                1.0, alpha, p) > 0:
            thresh_p = p.min() * (1 - np.sqrt(np.spacing(1)))
            if verbose:
                print("Pathological case", file=sys.stderr)
        else:
            # Otherwise, get threshold via bisection
            thresh_p = optimize.bisect(estimate_fdr_direct,
                                       0.0,
                                       1.0,
                                       args=(alpha, p))
        thresh_ind = np.searchsorted(np.sort(p), thresh_p)
        if thresh_ind > 0:
            thresh_coef = np.sort(log_nonnull)[::-1][thresh_ind -
                                                     1:thresh_ind + 1]
            thresh_coef = thresh_coef.mean()
        else:
            thresh_coef = log_nonnull.max() + np.log(2)
        thresh = np.exp(thresh_coef)

        thresh_list.append(thresh)

    return np.array(thresh_list)
Example 34
import numpy as np
import timeit
from scipy import optimize

start = timeit.default_timer()


# combined function
def f(x):
    return (x**2 + (np.sqrt(3) * x)**2)**3 - 4 * x**2 * (np.sqrt(3) * x)**2


# roots are labeled from x_0 to x_2 from left to right

# root of x_0 using bisection method
x0_root = optimize.bisect(f, -1, -0.4)
x0_root = np.format_float_positional(x0_root,
                                     precision=4,
                                     unique=False,
                                     fractional=False,
                                     trim='k')
y = np.sqrt(3) * float(x0_root)
int0 = (x0_root, y)
print("Intersection 1 is {}".format(int0))

# root of x_1 using newton method
x1_root = optimize.newton(f, -0.1, tol=1.48e-06)
if abs(x1_root) < 0.0001:   # snap a near-zero result to the exact middle root
    x1_root = 0
y = np.sqrt(3) * float(x1_root)
int1 = (x1_root, y)
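A quick analytic check of the bracket choices (sketch): the combined function simplifies to f(x) = 64 x**6 - 12 x**4, whose nonzero roots are x = +/- sqrt(3)/4, about +/- 0.4330, so the outer roots indeed lie inside [-1, -0.4] and its mirror.

x_exact = np.sqrt(3) / 4
print(x_exact, f(-x_exact))   # ~0.4330, and f(-x_exact) ~ 0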
Example 35
def solve_self_consistent_real_space\
(Nx, Ny, nOrb, nHole, invTemp, betaStart, betaSpeed, betaThreshold,\
anneal_or_not, t, U, itMax, dampFreq, dyn, singleExcitationFreq, osc,\
K, abs_t0, delta, nUp, nDown):
    '''
    Solves the self-consistent equation in real space.
    '''
    nUpBest, nDownBest = nUp, nDown
    # Total number of sites + orbitals
    N = nOrb * Nx * Ny
    # Initialize deltas for the tolerance check (ensure that it does not stop
    # at the first step)
    deltaUp = delta + 1
    deltaDown = delta + 1
    # Initialize inverse temperature for the annealing
    beta = betaStart
    # Initialize energies
    energies = np.zeros(itMax)
    bestGrandpotential = 1e100
    # Initialize iteration
    it = 0
    # Initialize iteration at which we finish annealing
    itSwitch = 0
    # How many iterations to wait between dynamic kicks
    itWait = 3
    # lbda is a parameter that reduces the weight
    # on the density obtained in the previous iteration.
    # the factor multiplied by itMax impedes that P ( I ) < delta
    # initially, we give equal weights and then progressively more
    # to the new configuration
    factor = 1.2
    lbda = 0.5 / (factor * itMax)
    # This ensures that we do not stop at the first step
    energies[-1] = 1e100
    # Print frequency
    printFreq = 10

    if anneal_or_not:
        print("Started annealing.\n")

    while loopCondition(it, itMax, deltaUp, deltaDown,\
                        delta, beta, invTemp, betaThreshold):

        # Annealing
        if anneal_or_not:
            beta = anneal(invTemp, betaStart, betaSpeed, beta,\
            betaThreshold, it, osc)
        else:
            beta = noAnneal(invTemp, beta)

        # Define the MF Hamiltonian for this iteration
        C, Hup, Hdown = hamiltonian(nUp, nDown, K, U, N)

        # Diagonalize
        eUp, wUp = la.eigh(Hup)
        eDown, wDown = la.eigh(Hdown)

        # Save the previous fields to compare to test convergence
        nUpOld = nUp.copy()
        nDownOld = nDown.copy()

        # Compute the chemical potential implicitly
        interval_limits = 50
        mu = bisect(rootToChem, -interval_limits, interval_limits, \
        args = (eUp, eDown, beta, Nx, Ny, nHole) )

        # Update fields
        nUp, nDown = update(nUp, nDown, N, wUp, wDown, eUp, eDown, mu, beta)

        # Grandpotential per site
        energy = grandpotential(U, nUp, nDown, nUpOld, nDownOld, \
        nOrb, Nx, Ny, mu, invTemp, eUp, eDown, abs_t0)

        # Damping
        nUp, nDown = damp(it, dampFreq, nUp, nDown, nUpOld, nDownOld, lbda)

        # Relative difference between current and previous fields
        deltaUp = np.dot(nUp - nUpOld, nUp - nUpOld) \
        / np.dot(nUp, nUp)
        deltaDown = np.dot(nDown - nDownOld, nDown - nDownOld) \
        / np.dot(nDown, nDown)

        if it % printFreq == 0:
            print("\niteration: ", it)
            print("deltaUp: ", deltaUp)
            print("deltaDown: ", deltaDown, "\n")

        if ( it + 1 ) % singleExcitationFreq == 0 :
            if dyn == 'local' or dyn == 'wait':
                for attempt in range(nOrb * N):
                    i = int( np.random.random() * N )
                    nUpTmp, nDownTmp = nUp[i], nDown[i]
                    nUp[i], nDown[i] = updateLocal(i, nUp, nDown, N, wUp, wDown,\
                    eUp, eDown, mu, beta)

                    # Define the MF Hamiltonian for this iteration
                    C, Hup, Hdown = hamiltonian(nUp, nDown, K, U, N)

                    # Diagonalize
                    eUp, wUp = la.eigh(Hup)
                    eDown, wDown = la.eigh(Hdown)

                    # Save the previous fields to compare to test convergence
                    nUpOld = nUp.copy()
                    nDownOld = nDown.copy()

                    # Compute the chemical potential implicitly
                    interval_limits = 50
                    mu = bisect(rootToChem, -interval_limits, interval_limits, \
                    args = (eUp, eDown, beta, Nx, Ny, nHole) )

                    # Update fields
                    nUp, nDown = update(nUp, nDown, N, wUp, wDown, eUp, eDown, mu, beta)

                    # Grandpotential per site
                    energyTmp = grandpotential(U, nUp, nDown, nUpOld, nDownOld, \
                    nOrb, Nx, Ny, mu, invTemp, eUp, eDown, abs_t0)

                    if energyTmp > energy:
                        nUp[i], nDown[i] = nUpTmp, nDownTmp
                    else:
                        energy = energyTmp

            elif dyn == 'kick':
                for attempt in range(nOrb * N):
                    i = int( np.random.random() * N )
                    nUpTmp, nDownTmp = nUp[i], nDown[i]
                    nUp[i], nDown[i] = singleExcitation(nUp, nDown, i)

                    # Define the MF Hamiltonian for this iteration
                    C, Hup, Hdown = hamiltonian(nUp, nDown, K, U, N)

                    # Diagonalize
                    eUp, wUp = la.eigh(Hup)
                    eDown, wDown = la.eigh(Hdown)

                    # Save the previous fields to compare to test convergence
                    nUpOld = nUp.copy()
                    nDownOld = nDown.copy()

                    # Compute the chemical potential implicitly
                    interval_limits = 50
                    mu = bisect(rootToChem, -interval_limits, interval_limits, \
                    args = (eUp, eDown, beta, Nx, Ny, nHole) )

                    # Update fields
                    nUp, nDown = update(nUp, nDown, N, wUp, wDown, eUp, eDown, mu, beta)

                    # Grandpotential per site
                    energyTmp = grandpotential(U, nUp, nDown, nUpOld, nDownOld, \
                    nOrb, Nx, Ny, mu, invTemp, eUp, eDown, abs_t0)

                    if energyTmp > energy:
                        nUp[i], nDown[i] = nUpTmp, nDownTmp
                    else:
                        energy = energyTmp

            elif dyn == 'mixed':
                for attempt in range(nOrb * N):
                    i = int( np.random.random() * N )
                    nUpTmp, nDownTmp = nUp[i], nDown[i]
                    nUp[i], nDown[i] = updateLocal(i, nUp, nDown, N, wUp, wDown,\
                    eUp, eDown, mu, beta)
                    nUp[i], nDown[i] = singleExcitation(nUp, nDown, i)

                    # Define the MF Hamiltonian for this iteration
                    C, Hup, Hdown = hamiltonian(nUp, nDown, K, U, N)

                    # Diagonalize
                    eUp, wUp = la.eigh(Hup)
                    eDown, wDown = la.eigh(Hdown)

                    # Save the previous fields to compare to test convergence
                    nUpOld = nUp.copy()
                    nDownOld = nDown.copy()

                    # Compute the chemical potential implicitly
                    interval_limits = 50
                    mu = bisect(rootToChem, -interval_limits, interval_limits, \
                    args = (eUp, eDown, beta, Nx, Ny, nHole) )

                    # Update fields
                    nUp, nDown = update(nUp, nDown, N, wUp, wDown, eUp, eDown, mu, beta)

                    # Grandpotential per site
                    energyTmp = grandpotential(U, nUp, nDown, nUpOld, nDownOld, \
                    nOrb, Nx, Ny, mu, invTemp, eUp, eDown, abs_t0)

                    if energyTmp > energy:
                        nUp[i], nDown[i] = nUpTmp, nDownTmp
                    else:
                        energy = energyTmp
        if dyn == 'wait':
            if ( it + 1 + itWait ) % singleExcitationFreq == 0 :
                for attempt in range(nOrb * N):
                    i = int( np.random.random() * N )
                    nUpTmp, nDownTmp = nUp[i], nDown[i]
                    nUp[i], nDown[i] = singleExcitation(nUp, nDown, i)

                    # Define the MF Hamiltonian for this iteration
                    C, Hup, Hdown = hamiltonian(nUp, nDown, K, U, N)

                    # Diagonalize
                    eUp, wUp = la.eigh(Hup)
                    eDown, wDown = la.eigh(Hdown)

                    # Save the previous fields to compare to test convergence
                    nUpOld = nUp.copy()
                    nDownOld = nDown.copy()

                    # Compute the chemical potential implicitly
                    mu = -1
                    # interval_limits = 50
                    # mu = bisect(rootToChem, -interval_limits, interval_limits, \
                    # args = (eUp, eDown, beta, Nx, Ny, nHole) )

                    # Update fields
                    nUp, nDown = update(nUp, nDown, N, wUp, wDown, eUp, eDown, mu, beta)

                    # Grandpotential per site
                    energyTmp = grandpotential(U, nUp, nDown, nUpOld, nDownOld, \
                    nOrb, Nx, Ny, mu, invTemp, eUp, eDown, abs_t0)

                    if energyTmp > energy:
                        nUp[i], nDown[i] = nUpTmp, nDownTmp
                    else:
                        energy = energyTmp

        energies[it] = energy

        if invTemp == 'infty':
            if energy < bestGrandpotential and beta >= betaThreshold:
                bestGrandpotential = energy
                nUpBest, nDownBest = nUp, nDown
                eUpBest, eDownBest = eUp, eDown
                wUpBest, wDownBest = wUp, wDown
        else:
            if energy < bestGrandpotential and beta == invTemp:
                bestGrandpotential = energy
                nUpBest, nDownBest = nUp, nDown
                eUpBest, eDownBest = eUp, eDown
                wUpBest, wDownBest = wUp, wDown

        # Move to the next iteration
        it += 1

    # Save the last iteration
    lastIt = it
    print("\nTotal number of iterations: ", lastIt, "\n")
    return nUpBest, nDownBest, energies, bestGrandpotential,\
    itSwitch, lastIt, mu, eUpBest, eDownBest,\
    np.absolute(wUpBest.flatten('C'))**2, np.absolute(wDownBest.flatten('C'))**2
Example 36
    while theta <= 2.0000001 * np.pi:

        k = e0 / (h * vF)
        #define the values of the limits for the root finding function.
        valuea = 0.3 * k
        valueb = 1.5 * k
        #next use bisect on the Rotation value function to find the k values

        #print(theta, Dispersion(valuea,args = ([a, t, vF, 1, theta, e0])), Dispersion(valueb,args = ([a, t, vF, -1, theta, e0])))
        kx = k * np.cos(theta)
        ky = k * np.sin(theta)

        #We use bisect to determine the values of kplus around the fermi surface
        kplus = so.bisect(Dispersion,
                          valuea,
                          valueb,
                          args=([a, t, vF, 1, theta, e0],))
        #we then repeat the procedure for the negative spin values
        kminus = so.bisect(Dispersion,
                           valuea,
                           valueb,
                           args=([a, t, vF, -1, theta, e0],))
        ###
        #Next we determine what the x and y positions are.
        kxp1, kyp1, kxm1, kym1 = kplus * np.cos(theta) / k, kplus * np.sin(
            theta) / k, kminus * np.cos(theta) / k, kminus * np.sin(theta) / k

        # Then we print the particular values of kplus and kminus to a file.
        ustring = theta, kminus, kplus, k
        file1.write(' '.join(map(str, ustring)))
        file1.write('\n')

        # Advance theta, otherwise the while loop never terminates.
        # (The step size dtheta is assumed to be defined before the loop.)
        theta += dtheta
Esempio n. 37
0
Eb = np.arange(0, 10, 0.1)
y = (np.sqrt(10 - Eb) * np.tan(np.sqrt(10 - Eb)) - np.sqrt(Eb))

plt.plot(Eb, y)
plt.ylim(-50, 50)
plt.title("Even function with binding potential of 10")
plt.grid()
plt.show()


#Now use the Bisection method to determine where F(Eb) = 0
def funct(Eb):
    return (np.sqrt(10 - Eb) * np.tan(np.sqrt(10 - Eb)) - np.sqrt(Eb))


guess = optimize.bisect(funct, 8, 9)
print("Guess for the Bisection Method:", guess)

#Now use the Newton / Raphson Method to determine where F(Eb) = 0
x0 = 8.5

newguess = optimize.newton(funct, x0)
print("Guess for the Newton/Raphson Method:", newguess)

#Comparing the two results, they agree to about the 10**-8 decimal place.
#The question of which is more accurate is moot here: both are precise to 10**-8.

#Now we can check the values returned for each method within our function

test1 = funct(guess)
test2 = funct(newguess)
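
#As a further cross-check (a sketch reusing funct and the bracket above),
#Brent's method keeps bisection's bracketing guarantee but usually needs
#fewer iterations.
brentguess = optimize.brentq(funct, 8, 9)
print("Guess for Brent's Method:", brentguess)
print("Residuals:", test1, test2, funct(brentguess))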
Esempio n. 38
0
    result = (np.newton(f, 3.0, tol=0.001, rtol=0.01, maxiter=100))
    print(math.isclose(result, ref_result, rel_tol=1E-6, abs_tol=1E-6))

    ref_result = 1.4142135623715149
    result = (np.bisect(f, 1.0, 3.0))
    print(math.isclose(result, ref_result, rel_tol=1E-6, abs_tol=1E-6))

    ref_result = -7.105427357601002e-15
    result = np.fmin(f, 3.0, fatol=1e-15)
    print(math.isclose(result, ref_result, rel_tol=1E-6, abs_tol=1E-6))

    ref_result = -7.105427357601002e-15
    result = np.fmin(f, 3.0, xatol=1e-8, fatol=1e-15, maxiter=500)
    print(math.isclose(result, ref_result, rel_tol=1E-6, abs_tol=1E-6))
else:
    ref_result = 1.41421826342255
    result = optimize.newton(f, 3., tol=0.001, rtol=0.01)
    print(math.isclose(result, ref_result, rel_tol=1E-9, abs_tol=1E-9))
    result = optimize.newton(f, 3., tol=0.001, rtol=0.01, maxiter=100)
    print(math.isclose(result, ref_result, rel_tol=1E-9, abs_tol=1E-9))

    ref_result = 1.4142135623715149
    result = optimize.bisect(f, 1.0, 3.0)
    print(math.isclose(result, ref_result, rel_tol=1E-9, abs_tol=1E-9))

    ref_result = -7.105427357601002e-15
    result = optimize.fmin(f, 3.0, disp=0)
    print(math.isclose(result[0], ref_result, rel_tol=1E-9, abs_tol=1E-9))
    result = optimize.fmin(f, 3.0, xtol=0.0001, ftol=0.0001, disp=0)
    print(math.isclose(result[0], ref_result, rel_tol=1E-9, abs_tol=1E-9))
Esempio n. 39
0
            return ["Wynik: ", x, "Iteracje: ", iteracje]
        x = x - f(x) / pochodna_f(x)
    return ["Wynik: ", x, "Iteracje: ", iteracje]


def secant(x0, x1, n):
    iteracje = 0
    for i in range(n):
        iteracje += 1
        if f(x1) - f(x0) == 0:
            return ["Wynik: ", x1, "Iteracje: ", iteracje]
        x_temp = x1 - (f(x1) * (x1 - x0) * 1.0) / (f(x1) - f(x0))
        x0 = x1
        x1 = x_temp
        #print(x1)

    return ["Wynik: ", x1, "Iteracje: ", iteracje]


print("Metoda Brenta")
m_brent = optimize.brenth(f, a, b, full_output=True)
print(m_brent)

print("\n Metoda Bisekscji")
m_bisekcja = optimize.bisect(f, a, b, full_output=True)
print(m_bisekcja)

print("\n Metoda Siecznych")
m_sieczne = secant(a, b, 100)
print(m_sieczne)
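
#An apples-to-apples iteration count (sketch; f, a, b as above): with
#full_output=True the scipy solvers return a RootResults object whose
#.iterations field can be compared with the hand-rolled secant's counter.
root, info = optimize.bisect(f, a, b, full_output=True)
print("Bisection iterations:", info.iterations)
root, info = optimize.brenth(f, a, b, full_output=True)
print("Brent iterations:", info.iterations)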
Esempio n. 40
0
def printout_tcrit(mec):
    """
    Output calculations based on division into bursts by critical time (tcrit).

    Parameters
    ----------
    mec : dcpyps.Mechanism
        The mechanism to be analysed.

    Returns
    -------
    str : string
        Text with the formatted results.
    """

    str = ('\n\n*******************************************\n' +
           'CALCULATIONS BASED ON DIVISION INTO BURSTS BY' +
           ' tcrit- CRITICAL TIME.\n')
    # Ideal shut time pdf
    eigs, w = ideal_dwell_time_pdf_components(mec.QII, qml.phiF(mec))
    str += ('\nIDEAL SHUT TIME DISTRIBUTION\n')
    str += pdfs.expPDF_printout(eigs, w)
    taus = 1 / eigs
    areas = w / eigs
    taus, areas = sortShell2(taus, areas)

    comps = taus.shape[0] - 1
    tcrits = np.empty((3, comps))
    for i in range(comps):
        str += ('\nCritical time between components {0:d} and {1:d}\n'.format(
            i + 1, i + 2) + '\nEqual % misclassified (DC criterion)\n')
        try:
            tcrit = so.bisect(pdfs.expPDF_tcrit_DC,
                              taus[i],
                              taus[i + 1],
                              args=(taus, areas, i + 1))
            enf, ens, pf, ps = pdfs.expPDF_misclassified(
                tcrit, taus, areas, i + 1)
            str += pdfs.expPDF_misclassified_printout(tcrit, enf, ens, pf, ps)
        except Exception:
            str += ('Bisection with DC criterion failed.\n')
            tcrit = None
        tcrits[0, i] = tcrit

        str += ('\nEqual # misclassified (Clapham & Neher criterion)\n')
        try:
            tcrit = so.bisect(pdfs.expPDF_tcrit_CN,
                              taus[i],
                              taus[i + 1],
                              args=(taus, areas, i + 1))
            enf, ens, pf, ps = pdfs.expPDF_misclassified(
                tcrit, taus, areas, i + 1)
            str += pdfs.expPDF_misclassified_printout(tcrit, enf, ens, pf, ps)
        except Exception:
            str += ('Bisection with Clapham & Neher criterion failed.\n')
            tcrit = None
        tcrits[1, i] = tcrit

        str += ('\nMinimum total # misclassified (Jackson et al criterion)')
        try:
            tcrit = so.bisect(pdfs.expPDF_tcrit_Jackson,
                              taus[i],
                              taus[i + 1],
                              args=(taus, areas, i + 1))
            enf, ens, pf, ps = pdfs.expPDF_misclassified(
                tcrit, taus, areas, i + 1)
            str += pdfs.expPDF_misclassified_printout(tcrit, enf, ens, pf, ps)
        except Exception:
            str += ('\nBisection with Jackson et al criterion failed.')
            tcrit = None
        tcrits[2, i] = tcrit

    str += ('\nSUMMARY of tcrit values:\n' + 'Components  DC\tC&N\tJackson\n')
    for i in range(comps):
        str += ('{0:d} to {1:d} '.format(i + 1, i + 2) +
                '\t{0:.5g}'.format(tcrits[0, i] * 1000) +
                '\t{0:.5g}'.format(tcrits[1, i] * 1000) +
                '\t{0:.5g}\n'.format(tcrits[2, i] * 1000))

    return str
Esempio n. 41
0
 def _solve_electroneutrality_equation(self, equation, T) -> float:
     return bisect(equation, -self.Eg, 2 * self.Eg, T,
                   xtol=1e-6 * self.Eg)  # noqa
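     # Note: scipy wraps a non-tuple ``args`` into a 1-tuple, so the call
     # above evaluates equation(E, T); writing args=(T,) makes that explicit.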
Esempio n. 42
0
 transitABlist = []
 transitBAlist = []
 oldt = time[0]
 for i in range(len(time)):
     t = time[i]
     sim.integrate(t)
     p = sim.particles
     olddyAB = DyAB(oldt)
     dyAB = DyAB(t)
     #if i%10==0:
     #	pl.scatter(-p[0].x,-p[0].z,s=1,c='k')
     #	pl.scatter(-p[1].x,-p[1].z,s=1,c='k')
     #	pl.scatter(-p[2].x,-p[2].z,s=1,c='k')
     if dyAB > 0 and olddyAB < 0 and p[
             0].z < 0:  # and np.abs(p[0].x)<0.05:
         transitAB = optimize.bisect(DyAB, oldt, t)
         transitABlist.append(transitAB)
         sim.integrate(transitAB)
         p = sim.particles
         #pl.scatter(-p[0].x,-p[0].z,s=50,c='b')
         #pl.scatter(-p[1].x,-p[1].z,s=50,c='b')
     if dyAB > 0 and olddyAB < 0 and p[
             0].z > 0:  # and np.abs(p[0].x)<0.1:
         transitBA = optimize.bisect(DyAB, oldt, t)
         transitBAlist.append(transitBA)
         sim.integrate(transitBA)
         p = sim.particles
         #pl.scatter(-p[0].x,-p[0].z,s=50,c='r')
         #pl.scatter(-p[1].x,-p[1].z,s=50,c='r')
     oldt = t
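 # (Pattern note: each sign change of DyAB between oldt and t brackets a
 # transit time, which bisect then refines; the sign of p[0].z is assumed
 # to distinguish A-in-front-of-B from B-in-front-of-A events.)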
 #pl.xlim([-1.5,1.5])
Esempio n. 43
0
    def _likelihood_ratio_confint(self, alpha: float) -> List[float]:
        """Compute the likelihood ratio confidence interval for the MLE of the previous run.

        Args:
            alpha: Specifies the (1 - alpha) confidence level (0 < alpha < 1).

        Returns:
            The likelihood ratio confidence interval.
        """
        # Compute the two intervals in which we look for values above
        # the likelihood ratio: the two bubbles next to the QAE estimate
        M = 2**self._m
        qae = self._ret['value']

        y = int(np.round(M * np.arcsin(np.sqrt(qae)) / np.pi))
        if y == 0:
            right_of_qae = np.sin(np.pi * (y + 1) / M)**2
            bubbles = [qae, right_of_qae]

        elif y == int(M / 2):  # remember, M = 2^m is a power of 2
            left_of_qae = np.sin(np.pi * (y - 1) / M)**2
            bubbles = [left_of_qae, qae]

        else:
            left_of_qae = np.sin(np.pi * (y - 1) / M)**2
            right_of_qae = np.sin(np.pi * (y + 1) / M)**2
            bubbles = [left_of_qae, qae, right_of_qae]

        # likelihood function
        ai = np.asarray(self._ret['values'])
        pi = np.asarray(self._ret['probabilities'])
        m = self._m
        shots = self._ret['shots']

        def loglikelihood(a):
            return np.sum(shots * pi * np.log(pdf_a(ai, a, m)))

        # The threshold above which the likelihoods are in the
        # confidence interval
        loglik_mle = loglikelihood(self._ret['ml_value'])
        thres = loglik_mle - chi2.ppf(1 - alpha, df=1) / 2

        def cut(x):
            return loglikelihood(x) - thres

        # Store the boundaries of the confidence interval
        # It's valid to start off with the zero-width confidence interval, since the maximum
        # of the likelihood function is guaranteed to be over the threshold, and if alpha = 0
        # that's the valid interval
        lower = upper = self._ret['ml_value']

        # Check the two intervals/bubbles: check if they surpass the
        # threshold and if yes add the part that does to the CI
        for a, b in zip(bubbles[:-1], bubbles[1:]):
            # Compute local maximum and perform a bisect search between
            # the local maximum and the bubble boundaries
            locmax, val = bisect_max(loglikelihood, a, b, retval=True)
            if val >= thres:
                # Bisect pre-condition is that the function has different
                # signs at the boundaries of the interval we search in
                if cut(a) * cut(locmax) < 0:
                    left = bisect(cut, a, locmax)
                    lower = np.minimum(lower, left)
                if cut(locmax) * cut(b) < 0:
                    right = bisect(cut, locmax, b)
                    upper = np.maximum(upper, right)

        # Put together CI
        ci = [lower, upper]
        return [self.a_factory.value_to_estimation(bound) for bound in ci]
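
    # A minimal, self-contained sketch of the same pattern (toy names, not
    # Qiskit code): around a unimodal log-likelihood's maximum, bracket each
    # crossing of the threshold and bisect on the shifted function:
    #
    #     def loglik(x):
    #         return -(x - 0.3) ** 2            # toy model, maximum at 0.3
    #     thres = -0.01                          # likelihood-ratio cutoff
    #     lower = bisect(lambda x: loglik(x) - thres, 0.0, 0.3)
    #     upper = bisect(lambda x: loglik(x) - thres, 0.3, 1.0)
    #     # ci == [lower, upper] ~ [0.2, 0.4]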
Esempio n. 44
0
def teardrop_angle(a1, a2, y):
    a = optimize.bisect(angle_func, a1, a2, args=(y,), maxiter=1000)
    return a
Esempio n. 45
0

print(newton2(5, 1e-5))


def newton3(f, fp, a, eps):
    x = a - f(a) / fp(a)
    x2 = a
    while abs(f(x) - f(x2)) > eps:
        x2 = x
        x = x - f(x) / fp(x)
    return x


f = lambda x: (x - pi)
print(opt.bisect(f, 0, 4))
f = lambda x: x**2 - 2
print(opt.newton(f, 2))
print(np.roots([5, 6, 0, 0, 2]))


def D0(f, x, h=1e-3):
    # Forward difference, first-order accurate: error O(h).
    return (f(x + h) - f(x)) / h


def D1(f, x, h=1e-3):
    # Central difference, second-order accurate: error O(h**2).
    return (f(x + h) - f(x - h)) / (2 * h)


print(D0(f, 5), D1(f, 5))
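
#A quick error check (sketch; f = x**2 - 2 from above, so f'(5) = 10):
#for a quadratic the forward difference D0 has error exactly h, while the
#central difference D1 is exact up to floating-point roundoff.
for h in (1e-2, 1e-3, 1e-4):
    print(h, abs(D0(f, 5, h) - 10), abs(D1(f, 5, h) - 10))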
Esempio n. 46
0
argument. It scales the rate with the size of the inhibitory
population and configures the inhibitory Poisson generator
(``noise[1]``) accordingly. Then, the spike counter of the
`spike_detector` is reset to zero. The network is simulated using
`Simulate`, which takes the desired simulation time in milliseconds
and advances the network state by this amount of time. During
simulation, the `spike_detector` counts the spikes of the target
neuron and the total number is read out at the end of the simulation
period. The return value of ``output_rate()`` is the firing rate of
the target neuron in Hz.

Second, the scipy function ``bisect`` is used to determine the optimal
firing rate of the neurons of the inhibitory population.
'''

in_rate = bisect(lambda x: output_rate(x) - r_ex, lower, upper, xtol=prec)
print("Optimal rate for the inhibitory population: %.2f Hz" % in_rate)
'''
The function ``bisect`` takes four arguments: first a function whose
zero crossing is to be determined. Here, the firing rate of the target
neuron should equal the firing rate of the neurons of the excitatory
population. Thus we define an anonymous function (using ``lambda``)
that returns the difference between the actual rate of the target
neuron and the rate of the excitatory Poisson generator, given a rate
for the inhibitory neurons. The next two arguments are the lower and
upper bound of the interval in which to search for the zero
crossing. The fourth argument, ``xtol``, is the desired absolute
precision of the zero crossing.

Finally, we plot the target neuron's membrane potential as a function
of time.
'''
Esempio n. 47
0
    tau = kap * (rho * dr).sum()
    return tau - tau0


sigma_smalldust = sigma_dust_2d[0]
astr = ("{0:" + sizeformat + "}").format(a_grains[0] * 1e4)
osmall = readOpac(ext=astr, scatmat=True)
lamstar = 0.45  # Representative wavelength for stellar radiation
kappa = np.interp(lamstar, osmall.wav[0], osmall.kabs[0] + osmall.ksca[0])

thetaupp = np.zeros(nphi)
for iphi in range(nphi):
    args = (0.01, ri, sigma_smalldust[:, iphi], hpr, kappa)
    thetaupp[iphi] = bisect(ftauroot,
                            0.2,
                            np.pi / 2,
                            args=args,
                            xtol=1e-6,
                            rtol=1e-6)
thetaup = thetaupp.min()  # Min, because pi/2-thetaup must be max

#
# Make the theta coordinate, and refine near the midplane
# (to resolve the dust layer)
#
# NOTE: If your grains have not-so-large Stokes numbers,
#       you can save computing time by reducing nlev_zr.
#       Just make sure that the vertical structure of the
#       distribution of the largest grains remains resolved
#       by the theta-grid.
#
ntheta = 32
Esempio n. 48
0
import timeit
import numpy as np
import scipy.optimize as optimize

import time


def f(x):
    return np.cos(x) / (1. + x**2)


def fprime(x):
    return (-(x**2 + 1.) * np.sin(x) - 2. * x * np.cos(x)) / (x**2 + 1)**2


start_time = time.time()
brentq_x = optimize.brentq(f, 0.1, 2.4)
print("--- %s seconds ---" % (time.time() - start_time))

start_time = time.time()
bisect_x = optimize.bisect(f, 0.1, 2.4)
print("--- %s seconds ---" % (time.time() - start_time))

start_time = time.time()
newton_x = optimize.newton(f, 1.3)
print("--- %s seconds ---" % (time.time() - start_time))

start_time = time.time()
newtonx2_x = optimize.newton(f, 1.3, fprime)
print("--- %s seconds ---" % (time.time() - start_time))
Esempio n. 49
0
def solve_model(rho,
                gam,
                T,
                phi1,
                phi2,
                verbose=False,
                transform=False,
                empirical=1,
                calibrated=True,
                risk_free_adj=1,
                shock=1,
                verbose_ss=False):

    #######################################################
    #                Section 1: Calibration               #
    #######################################################

    if empirical == 0 or empirical == 0.5:
        beta2 = 0.0022
        zeta = 0.014
        # Calibrated params old model
        # a_k = 0.03191501061172916
        # phi = 13.353240248981844
        # A = 0.26314399999999993
        # delta = 0.007 * risk_free_adj - alpha_c * rho

        # Original, randomly selected params
        # a_k = 0.017
        # phi = 13.807 / 2
        # A = 0.052
        # delta = 0.025

        if empirical == 0:
            # Eberly Wang annual params
            # a_k = .1
            # phi2 = 100
            # phi1 = .05
            # # A = 0.1 + .004
            # A = 0.1 + .042
            # delta = .02

            # Eberly Wang quarterly params
            a_k = .1 / 4
            # phi2 = 100 * 4
            # phi1 = .05 / 4
            A = (.1 + .042) / 4
            delta = .02 / 4

        if empirical == 0.5:
            # Low adjustment cost model annual params
            # a_k = .05
            # phi2 = 3.
            # phi1 = 1. / phi1
            # A = .14
            # delta = .05

            # Low adjustment cost model quarterly params
            a_k = .05 / 4
            # phi2 = 3. * 4
            # phi1 = 1. / phi1
            A = .14 / 4
            delta = .05 / 4

        def f(c):
            Phi = (1 + phi2 * (A - np.exp(c)))**(phi1)
            Phiprime = phi2 * phi1 * (1 + phi2 * (A - np.exp(c)))**(phi1 - 1)
            k = np.log(Phi) - a_k

            if rho == 1:
                v = c + k * np.exp(-delta) / (1 - np.exp(-delta))
            else:
                v = np.log((1 - np.exp(-delta)) * np.exp(c * (1 - rho)) /
                           (1 - np.exp(-delta + k * (1 - rho)))) / (1 - rho)

            r1 = Phiprime - (np.exp(delta) - 1) * Phi * np.exp(c * -rho +
                                                               (v + k) *
                                                               (rho - 1))
            return r1

        sol = opt.bisect(f, -10, np.log(A), disp=True)
        cstar = sol
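
        # Note: disp=True makes bisect raise a RuntimeError if the bracket
        # [-10, log(A)] fails to converge, rather than silently returning
        # the last midpoint.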

        # print(f(cstar))
        def v(c):
            Phi = (1 + phi2 * (A - np.exp(c)))**(phi1)
            # Phiprime = phi2 * phi1 * (1 + phi2 * (A - np.exp(c)))**(phi1 - 1)
            k = np.log(Phi) - a_k

            if rho == 1:
                v = c + k * np.exp(-delta) / (1 - np.exp(-delta))
            else:
                v = np.log((1 - np.exp(-delta)) * np.exp(c * (1 - rho)) /
                           (1 - np.exp(-delta + k * (1 - rho)))) / (1 - rho)

            # r1 = Phiprime - (np.exp(delta) - 1) * Phi * np.exp(c * -rho + (v + k) * (rho - 1))
            return v

        def denom(c):
            Phi = (1 + phi2 * (A - np.exp(c)))**(phi1)
            # Phiprime = phi2 * phi1 * (1 + phi2 * (A - np.exp(c)))**(phi1 - 1)
            k = np.log(Phi) - a_k
            return (1 - np.exp(-delta + k * (1 - rho)))

        def k(c):
            Phi = (1 + phi2 * (A - np.exp(c)))**(phi1)
            # Phiprime = phi2 * phi1 * (1 + phi2 * (A - np.exp(c)))**(phi1 - 1)
            k = np.log(Phi) - a_k
            return k

        dom = np.linspace(-10, np.log(A), 500)
        # plt.plot(dom, f(dom))
        # plt.plot(dom, v(dom))
        # plt.subplot(1,2,1)
        # plt.plot(dom, denom(dom), label='denominator')
        # plt.plot(dom, k(dom), label='k')
        # plt.plot(dom, np.zeros_like(dom))
        # plt.plot(dom, np.ones_like(dom) * delta / (1 - rho), label = r'$\frac{\delta}{(1 - \rho)}$')
        # plt.legend()
        # plt.subplot(1,2,2)
        # plt.plot(dom, f(dom), label = 'Root function')
        # plt.legend()
        # plt.show()

        if np.min(np.abs(cstar - np.array([-10, np.log(A)]))) < 1e-8:
            raise ValueError("Not actually solved")

        Phi = (1 + phi2 * (A - np.exp(cstar)))**(phi1)
        Phiprime = phi2 * phi1 * (1 + phi2 * (A - np.exp(cstar)))**(phi1 - 1)

        kstar = np.log(Phi) - a_k
        istar = np.log(A - np.exp(cstar))

        if rho == 1:
            vstar = cstar + kstar * np.exp(-delta) / (1 - np.exp(-delta))
        else:
            vstar = np.log(
                (1 - np.exp(-delta)) * np.exp(cstar * (1 - rho)) /
                (1 - np.exp(-delta + kstar * (1 - rho)))) / (1 - rho)

        istar = np.log(A - np.exp(cstar))

        zstar = 0
        dstar = 0

    # Calculate parameters using empirical targets
    elif empirical == 1:
        # Use all empirical targets with all parameters free
        istar = np.log(IoverK)
        cstar = np.log(CoverI * np.exp(istar))
        delta0 = 0.007 * risk_free_adj - alpha_c * rho
        A0 = np.exp(istar) + np.exp(cstar)
        kstar = alpha_c

        def f(v0):
            if rho != 1:
                r3 = np.exp(v0) ** (1 - rho) - (1 - np.exp(-delta0)) \
                     * np.exp(cstar) ** (1 - rho) - np.exp(-delta0) \
                     * (np.exp(v0) * np.exp(kstar)) ** (1 - rho)
            else:
                r3 = np.exp(v0) ** (1 - np.exp(-delta0)) \
                     - np.exp(cstar) ** (1 - np.exp(-delta0)) \
                     * np.exp(kstar) ** (np.exp(-delta0))

            return r3

        vstar = opt.root(f, -3.2).x[0]

        def g(phi):
            Phi = (1. + phi * np.exp(istar))**(1. / phi)
            PhiPrime = (1. + phi * np.exp(istar))**(1. / phi - 1)
            return np.exp(-rho * cstar + (rho - 1) * (vstar + kstar)) \
                * (np.exp(delta0) - 1) * (Phi) \
                - PhiPrime

        phi0 = opt.root(g, 700).x[0]

        a_k0 = np.log((1. + phi0 * np.exp(istar))**(1. / phi0)) - kstar

        zstar = 0

        A = A0
        delta = delta0
        a_k = a_k0
        phi = phi0

    elif empirical == 2:
        raise ValueError(
            "The specifications for C and V are not yet developed for this empirical case."
        )
        # Fix phi = 0 and free C/I
        I = IoverK
        istar = np.log(I)
        phi = 0
        delta = 0.007 * risk_free_adj - alpha_c * rho
        kstar = alpha_c
        G = np.exp(kstar)
        a_k = np.log(1 + I) - kstar

        if (-delta + (1 - rho) * kstar) >= 0:
            raise ValueError(("The constraint to solve for V is not"
                              "satisfied for rho = {}").format(rho))

        if rho != 1:
            C = (np.exp(delta) - 1) * (G ** (rho - 1) - np.exp(-delta)) \
                * (1 + I) / (1 - np.exp(-delta))
            V = ((1 - np.exp(-delta)) * C ** (1 - rho) \
                / (1 - np.exp(-delta) * G ** (1 - rho))) ** (1 / (1 - rho))
        else:
            C = (np.exp(delta) - 1) * G ** ((rho - 1) / (1 - np.exp(-delta))) \
                * (1 + I)
            V = C * G**(np.exp(-delta) / (1 - np.exp(-delta)))

        cstar = np.log(C)
        vstar = np.log(V)

        A = np.exp(cstar) + np.exp(istar)

        zstar = 0

    else:
        raise ValueError("'Empirical' must be 1 or 2.")

    #######################################################
    #               Section 2: Model Solution             #
    #######################################################

    #######################################################
    #       Section 2.1: Symbolic Model Declaration       #
    #######################################################

    # Declare necessary symbols in sympy notation
    # Note that the p represents time, so k = k_t and kp = k_{t+1}

    k, kp, c, cp, v, vp, zo, zop, zt, ztp = symbols(
        "k kp c cp v vp zo zop zt ztp")

    # k = log(K_t / K_{t-1})
    # c = log(C_t / K_t)
    # v = log(V_t / K_t)
    # zo = Z_{t,1}
    # zt = Z_{t,2}
    # d = log D_t

    # Set up the equations from the model in sympy
    # The equations come from subtracting the right side from the left of the
    # log linearized governing equations

    I = A - exp(c)  # I_t / K_t
    # NEW FUNCTION
    #     i = 1 + I - phi / 2 * I ** 2 # quadratic version of I; also I^*/K_t
    i = (1. + phi2 * I)**(phi1)
    # i = 1 + log(phi * I + 1)/phi
    #     phip = 1 - phi * I
    phip = phi2 * phi1 * (1. + phi2 * I)**(phi1 - 1)
    # phip = 1 / (phi * I + 1)
    r = vp + kp + zt

    # Equation 1: Capital Evolution
    eq1 = kp - log(i) + a_k - zo

    # Equation 2: First Order Conditions on Consumption
    eq2 = log(exp(delta) - 1) - rho * c + (rho - 1) * r + \
                    log(i) - log(phip)

    # Equation 3: Value Function Evolution: rho == 1 is separate case
    if rho != 1:
        eq3 = exp(v * (1 - rho))  - ((1 - exp(-delta)) * exp((1 - rho) * c) \
                            + exp(-delta) * exp((1 - rho) * r))
    else:
        eq3 = v - (1 - exp(-delta)) * c - exp(-delta) * r

    # Equations 4 and 5: Shock Processes Evolution
    eq4 = zop - exp(-zeta) * zo
    eq5 = ztp - exp(-beta2) * zt

    eqs = [eq1, eq2, eq3, eq4, eq5]
    lead_vars = [kp, cp, vp, zop, ztp]
    current_vars = [k, c, v, zo, zt]

    substitutions = {
        k: kstar,
        kp: kstar,
        c: cstar,
        cp: cstar,
        v: vstar,
        vp: vstar,
        zo: zstar,
        zop: zstar,
        zt: zstar,
        ztp: zstar
    }
    # print(substitutions)

    #######################################################
    #Section 2.2: Generalized Schur Decomposition Solution#
    #######################################################

    # Take the appropriate derivatives and evaluate at steady state
    Amat = np.array([[eq.diff(var).evalf(subs=substitutions) for \
                      var in lead_vars] for eq in eqs]).astype(float)
    B = -np.array([[eq.diff(var).evalf(subs=substitutions) for var in \
                       current_vars] for eq in eqs]).astype(float)

    # print(Amat)
    # print(B)
    # print(la.inv(Amat[:3, :3]))

    # Substitute for k and c to reduce A and B to 2x2 matrices, noting that:
    # A[0,0]kp - B[0,1]c = zo
    # A[1,0]kp - B[1,1]c = B[1,4]zt - A[1,2]vp

    M = np.array([[Amat[0, 0], -B[0, 1]], [Amat[1, 0], -B[1, 1]]])
    Minv = la.inv(M)

    # kp = Minv[0,0] * zo + Minv[0,1] * (B[1,4]zt - A[1,2]vp)      (1)
    # c  = Minv[1,0] * zo + Minv[1,1] * (B[1,4]zt - A[1,2]vp)      (2)

    # So the system can be reduced in the following way:
    Anew = np.copy(Amat[2:, 2:])
    Bnew = np.copy(B[2:, 2:])

    # Update the column of Anew corresponding to vp, subbing in with (1)
    Anew[:, 0] += Minv[0, 1] * Amat[2:, 0] * (-Amat[1, 2])
    # Update the column of Bnew corresponding to zo, subbing in with (1)
    Bnew[:, 1] -= Minv[0, 0] * Amat[2:, 0]
    # Update the column of Bnew corresponding to zt, subbing in with (1)
    Bnew[:, 2] -= Minv[0, 1] * Amat[2:, 0] * B[1, 4]

    # Update the column of Anew corresponding to vp, subbing in with (2)
    Anew[:, 0] -= Minv[1, 1] * B[2:, 1] * (-Amat[1, 2])
    # Update the column of Bnew corresponding to zo, subbing in with (2)
    Bnew[:, 1] += Minv[1, 0] * B[2:, 1]
    # Update the column of Bnew corresponding to zt, subbing in with (2)
    Bnew[:, 2] += Minv[1, 1] * B[2:, 1] * B[1, 4]

    # Compute the generalized Schur decomposition of the reduced A and B,
    # sorting so that the explosive eigenvalues are in the bottom right

    BB, AA, a, b, Q, Z = la.ordqz(Bnew, Anew, sort='iuc')

    total_dim = len(Anew)
    # a/b is a vector of the generalized eigenvalues
    exp_dim = len(a[np.abs(a / b) > 1])
    stable_dim = total_dim - exp_dim

    if verbose:
        print("Rho = {}".format(rho))
        # print(-delta + (1 - rho) * kstar)
        print(("{} out of {} eigenvalues were found to be"
               " unstable.").format(exp_dim, total_dim))

    J1 = Z.T[stable_dim:, :exp_dim][0][0]
    J2 = Z.T[stable_dim:, exp_dim:][0]

    # J1v = J2 @ [zo, zt]
    v_loading = -(J2 / J1)
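
    # (Saddle-path reasoning, briefly: with sort='iuc' the stable generalized
    # eigenvalues come first, so the trailing rows of Z.T span the explosive
    # directions; requiring the solution to have no component along them
    # expresses v as a linear function of the states zo and zt.)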

    # Recall the following identities:
    # kp = Minv[0,0] * zo + Minv[0,1] * (B[1,4]zt - A[1,2]vp)      (1)
    # c  = Minv[1,0] * zo + Minv[1,1] * (B[1,4]zt - A[1,2]vp)      (2)

    # Rewrite as
    # kp = -Minv[0,1]*A[1,2]vp + Minv[0,0]zo + Minv[0,1]*B[1,4]zt  (1)
    # c  = -Minv[1,1]*A[1,2]vp + Minv[1,0]zo + Minv[1,1]*B[1,4]zt  (2)

    k_loading = -Minv[0, 1] * Amat[1, 2] * v_loading
    c_loading = -Minv[1, 1] * Amat[1, 2] * v_loading * np.exp(-zeta)

    # Add the zo and zt specific dependencies to each entry of each vector
    k_loading += np.array([Minv[0, 0], Minv[0, 1] * B[1, 4]]) * np.exp(zeta)
    c_loading += np.array([Minv[1, 0], Minv[1, 1]])

    istar = np.log(A - np.exp(cstar))
    i_loading = (-exp(cstar) * c_loading / exp(istar)).astype(float)

    slopes1 = [k_loading[0], c_loading[0], i_loading[0], v_loading[0]]
    slopes2 = [k_loading[1], c_loading[1], i_loading[1], v_loading[1]]

    #######################################################
    #          Section 3: First Order Adjustments         #
    #######################################################

    # sigz = np.array([.00011, .00025, 0.0])
    # scriptB = np.array([[0.012, 0.027, 0, 0],[0, 0, 0.132, 0]]) * 0.01
    # sigk = np.array([.00477, 0.0, 0.0, 0.0])
    # scriptK = np.array([0.481, 0, 0, 0]) * 0.01

    # Create first order adjustments on constant terms
    # if verbose:
    #     print("Making first order adjustments to constants.")

    if transform:
        sig = np.vstack((sigk, sigz))
        s1 = sigk + sigz / (1 - np.exp(-zeta))
        s1 = s1 / la.norm(s1)
        s2 = s1[::-1] * np.array([-1, 1])
        s = np.column_stack((s2, s1))
        snew = sig @ s

        sigk = snew[0][::-1] * np.array([1, -1])
        sigz = snew[1][::-1] * np.array([1, -1])

        #print(np.array([sigk,sigz]) * 100)

    selector = np.zeros(4)
    selector[shock - 1] = 1
    B = np.array([[0.011, 0.025, 0, 0], [0, 0, 0.119, 0]])
    sigk = np.array([0.477, 0, 0, 0])
    # sigk = np.array([0.477, 0, 0, 0]) * 0.01
    sigd = np.array([0, 0, 0, 0])

    # adjustment = - (1 - gam) / 2 * la.norm(v_loading * sigz + sigk) ** 2
    # adjustment = - (1 - gam) / 2 * la.norm(v_loading @ scriptB + scriptK) ** 2
    # adjustments = la.solve((Amat - B)[:3,:3], np.array([0,0,adjustment]))

    # print(adjustments)
    # kstar += adjustments[0]
    # cstar += adjustments[1]
    # vstar += adjustments[2]
    # istar = log(A - np.exp(cstar))

    levels = [kstar, cstar, istar, vstar]

    if verbose:
        print("Log Levels: k, c, i, v")
        print(levels)
        print("Log slopes, growth shock: k, c, i, v")
        print(slopes1)
        print("Log slopes, preference shock: k, c, i, v")
        print(slopes2)
        print("\n")

    if verbose_ss:
        print("Rho = {}:".format(rho))
        print("\tLog capital growth: \t\t{0:.3f}".format(levels[0], 3))
        print("\tConsumption to capital: \t{0:.3f}".format(
            np.exp(levels[1]), 3))
        print("\tInvestment to capital: \t\t{0:.3f}".format(
            np.exp(levels[2]), 3))
        print("\tQ: \t\t\t\t{0:.3f}".format(
            (1 - np.exp(-delta)) * np.exp(levels[3] * (rho - 1)) *
            np.exp(levels[1] * -rho), 3))

    Atransition = np.array([[np.exp(-zeta), 0], [0, np.exp(-beta2)]])

    #######################################################
    #       Section 4: Impulse Response Generation        #
    #######################################################

    Z = np.zeros((2, T))
    Z[:, 0] = B @ selector
    for i in range(1, T):
        Z[:, i] = Atransition @ Z[:, i - 1]

    if shock == 1 or shock == 2:
        pass
        # bool_marker = np.array([1,0])
    elif shock == 3:
        pass
        # bool_marker = np.array([0, 1])
    else:
        raise ValueError("'shock' parameter must be set to 1, 2, or 3.")

    K = np.zeros(T)
    S = np.zeros(T)
    C = np.zeros(T)
    I = np.zeros(T)

    K[0] = sigk @ selector
    for p in range(1, T):
        K[p] = K[p - 1] + k_loading @ Z[:, p]

    S[0] = -rho * c_loading @ Z[:,0] + (1 - rho) * Z[1,0] + \
                 (rho - gam) * ((v_loading @ B) + sigk + sigd)[shock - 1] \
                  - rho * K[0]

    for p in range(1, T):
        S[p] = S[p-1] - rho * c_loading @ (Z[:,p] - Z[:,p-1]) \
                - rho * k_loading @ Z[:,p] + (1 - rho) * Z[1,p]

    C = c_loading @ Z + K

    I = i_loading @ Z + K

    return levels, slopes1, slopes2, np.array([
        -S.astype(float),
        K.astype(float),
        C.astype(float),
        I.astype(float)
    ])
Esempio n. 50
0
    def power_law_graph(self, N, exp, dmin, expect_n_edges,
                        num_trial=100, tol=100):
        '''
        Generate a power-law [connected] graph with a given number of
        nodes and edges and a given exponent.

        :param N: number of nodes.
        :param exp: [float] negative exponent of the power-law degree distribution; typical values are -2.0 to -3.0
        :param dmin: minimum degree of a node.
        :param expect_n_edges: expected number of edges in the graph
        :param num_trial: number of trials to find a connected graph
        :param tol: tolerance on the difference between the actual and the expected number of edges.
        :return: networkx graph
        '''

        def tune_dmin(x, N, exp, expect_n_edges, max_num_try=50):
            # Rebind G in the enclosing scope so the caller sees the graph
            # built here (passing G as an argument only rebound a local name).
            nonlocal G

            for i in range(max_num_try):
                sequence = generate_power_law_dist(N, exp, x, N-1)

                G = nx.configuration_model(sequence)
                G.remove_edges_from(nx.selfloop_edges(G))
                G = nx.Graph(G)

                simple_seq = np.asarray([deg for (node, deg) in G.degree()])
                num_edges = int(np.sum(simple_seq)/2.0)

                if nx.is_connected(G):
                    # print "n = ", num_edges
                    break

            if i == (max_num_try-1):
                print("G is not connected")
                exit(0)

            return num_edges - expect_n_edges

        G = nx.Graph()
        dmax = N-1

        for itr in range(num_trial):

            # tune minimum degree of the node
            d_tuned = bisect(tune_dmin,
                             dmin-3, dmin+10, # sweep the interval around dmin
                             args=(N, exp, expect_n_edges),
                             xtol=0.01)

            simple_seq = np.asarray([deg for (node, deg) in G.degree()])
            num_edges = np.sum(simple_seq)/2.0
            error = np.abs(num_edges - expect_n_edges)
            
            if verbocity:
                print ("dmin = %.2f, error = %d, n_edges = %d " % (
                    d_tuned, error, num_edges))

            # check if number of edges is close to the expected number of edges
            if error < tol:
                return G

        if itr == num_trial-1:
            print("could not find a proper graph with given properties!")
            exit(0)
Esempio n. 51
0
import matplotlib.pyplot as plt
import numpy as np
from scipy import optimize as op


def f(x):
    y = ((np.exp(-x)) * (x + 1)) - 0.5
    return y


x1 = 0.0
x2 = 2.0

print(f(x1), f(x2))

root = op.bisect(f, x1, x2)

print(root)

#3c

h = 5.4
I0 = 1.38e8


def L(x):
    y = 2 * np.pi * I0 * h * (h - (np.exp(-x / h) * (x + h)))
    return y


r = np.arange(0, 100, 0.01)
Esempio n. 52
0
    
fa.set_xlabel(r'$k\alpha$',fontsize=20)
fa.set_ylabel(r'$m$',fontsize=20)
fa.text(2.5,0.1,'a',fontsize=24,backgroundcolor='w')

fb.set_xlabel(r'$k\alpha$',fontsize=20)
fb.set_ylabel(r'$v_m$',fontsize=20)
fb.text(2.5,0.025,'b',fontsize=24,backgroundcolor='w')

#***********************************************************************
fc = fig.add_subplot(2,2,3)
delta=arange(0.,1.01,0.01)
kalpha=[]
k0=20.
for d in delta:
       kn=optimize.bisect((lambda x:mr(x,d)[0][0]-0.0001),0.,k0)
       kalpha.append(kn)
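       # Reuse the previous root as the next upper bracket (a simple
       # continuation; this assumes the root shrinks as delta grows).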
       k0=kn 
       #print d,kn 
fc.plot(delta,kalpha)
fc.set_ylabel(r'$k\alpha$',fontsize=20)
fc.set_xlabel(r'$\delta$',fontsize=20)
fc.set_xlim((0.,1.))
fc.set_ylim((0.,20.))
fc.grid(True)
fc.text(.1,2.,'c',fontsize=24,backgroundcolor='w')
fc.text(.3,2.5,r'$m=0$',fontsize=24,backgroundcolor='w')
fc.text(.5,12,r'$m>0$',fontsize=24,backgroundcolor='w')

#***********************************************************************
fd = fig.add_subplot(2,2,4)
Esempio n. 53
0
def write_layered_dx(x):
    '''dxx = write_layered_dx(x)

    Returns dxx, a list of grid-spacing increments for a series of layers
    with set thicknesses, desired node counts, and bias flags (see the
    layer class). Requires x, a list of layer objects with defined
    thickness, number of nodes, and bias type. Layers should be ordered
    in pflotran natural ordering, i.e. starting at the lower southwest
    corner.'''
    dx = n.array([])
    l_count = 0
    txx = 0.
    for l in x:
        txx = txx + l.h
        bias_l = l.bias
        nx = l.n_step
        x1 = l.h / l.n_step  # just a guess for now
        if l.bias == 0:
            px = sum(dx)
            tx = px + l.h
            for i in range(nx):
                dx = n.append(dx, x1)
            #hit end of layer on the nose
            dx[-1] = tx - sum(dx[0:-1])
        if l.bias == 1:  #single bias decreasing in positive direction
            if x[l_count + 1].bias == 0:
                x1 = x[l_count + 1].h / x[
                    l_count +
                    1].n_step  # want to end with the next grid spacing
            else:
                print('not sure what to do with bias min x distance, figure it out')
            args = (l.h, nx, x1)
            bx = optimize.bisect(txb_diff_single_bias, 1., 2.5, args=args)
            px = sum(dx)
            tx = px + l.h
            #construct the biased vector
            for i in range(1, int(nx)):
                dx = n.append(dx, x1 * bx**(nx - (i + 1)))
            #hit end of bias on the nose
            dx[-1] = tx - sum(dx[0:-1])
        if l.bias == 2:  #single bias increasing in positive direction
            if l.num >= 1:
                x1 = dx[-1]  #start with last grid spacing
            else:
                print('not sure what to do about min x distance, figure it out')
            bx = optimize.bisect(txb_diff_single_bias,
                                 1.,
                                 2.5,
                                 args=(l.h, nx, x1))
            px = sum(dx)
            tx = px + l.h
            #construct the biased vector
            for i in range(1, int(nx)):
                dx = n.append(dx, x1 * bx**(i - 1))

            #hit end of domain on the nose
            dx[-1] = tx - sum(dx[0:-1])
        if l.bias == 3:  #dual bias
            if l.num >= 1:
                x1 = dx[-1]  #start with last grid spacing
            else:
                print('not sure what to do about min x distance, figure it out')
            # now to figure out the correct bias (for a dual bias)
            nnx = nx // 2 - 1  # integer division keeps the exponent count an int
            px = sum(dx)
            tx = px + l.h
            bx = optimize.bisect(txb_diff_dual_bias,
                                 1.,
                                 2.5,
                                 args=(l.h, nx, x1))
            #first half of the biased vector
            for i in range(1, int(nx / 2)):
                dx = n.append(dx, x1 * bx**(i - 1))
            #second half of the biased x vector
            for i in range(1, int(nx / 2)):
                dx = n.append(dx, x1 * bx**(nnx - (i)))
            #hit end of bias on the nose
            dx[-1] = tx - sum(dx[0:-1])
        tl = n.sum(dx) - px
        print('Total thickness of layer ' + l.name + ' = %1.2g units' % tl)
        l_count += 1

    #make the ending length get us to the desired total length.
    print "layer discretization error %1.5g" % (txx - tx)
    dx[-1] = txx - sum(dx[0:-1])

    dxx = list(dx)
    print('Total layered thickness ' + l.name + ' = %1.2g units' % (sum(dxx)))
    return dxx
Esempio n. 54
0
from scipy.optimize import bisect
from numpy import sin, cos, exp


def f(x):
    return sin(cos(exp(x)))


b = bisect(f, -1, 1)
print("zero of sin(cos(exp(x)))=0 is", b, ".")

q = sin(cos(exp(b)))
print("value of function at found zero is", q, ".")
Esempio n. 55
0
def getLambert(r):
    R2 = R1 + R20 * r
    a = NAN
    try:
        lambert_result = lambert_problem(R1, R2, dt)
        if lambert_result.is_reliable():
            a = lambert_result.get_a()[0]
    except:
        a = NAN
    #print " r,a ", r,a
    return a


# ============ Pre optimize

x0 = bisect(getLambert, rmin, rmax, xtol=0.00001)
#   print " x0 = ", x0
print " bisect DONE "


def fitFractionalFunc(x0, A, B):
    a = (A[1] - B[1]) / (1 / (x0 - A[0]) - 1 / (x0 - B[0]))
    b = A[1] - a / (x0 - A[0])
    return a, b


ysc, y0 = fitFractionalFunc(x0, (rmin, getLambert(rmin)),
                            (x0 * 0.9, getLambert(x0 * 0.9)))
'''
# NOT  GOOD
Gen = (y0, ysc, 0, 0, 0, 0, 0); print " Gen0 = ",Gen
'''
Esempio n. 56
0
def solve_self_consistent_k_space_2sublat\
(abs_t0, E0, E1, E2, E3, E4,\
Nk, Ny, nOrb, nHole, invTemp, betaStart, betaSpeed, betaThreshold,anneal_or_not,\
U, itMax, dampFreq, dyn, singleExcitationFreq, osc, delta, nUp, nDown):
    '''
    Solves the self-consistent equation in momentum space.
    '''
    # Initialize deltas for the tolerance check and
    # beta for the annealing.
    deltaUp = delta + 1
    deltaDown = delta + 1
    beta = betaStart
    # Initialize energies
    energies = np.zeros(itMax)
    # Initialize iteration
    it = 0
    # Initialize iteration at which we finish annealing
    itSwitch = 0
    # How many iterations to wait between kicks
    itWait = 3
    # lbda is a damping parameter that reduces the weight given to the
    # density obtained in the previous iteration. The factor multiplying
    # itMax keeps the damping from forcing the relative change below
    # delta prematurely: initially old and new configurations get equal
    # weight, and progressively more goes to the new configuration.
    factor = 1.2
    lbda = 0.5 / (factor * itMax)
    # This ensures that we do not stop at the first step
    energies[-1] = 1e100
    # Print frequency
    printFreq = 10

    # Initialize arrays to store energies and eigenstates
    eUp = np.zeros((Nk, 2 * nOrb * Ny))
    wUp = np.zeros((Nk, 2 * nOrb * Ny, 2 * nOrb * Ny), dtype=np.complex64)
    eDown = np.zeros((Nk, 2 * nOrb * Ny))
    wDown = np.zeros((Nk, 2 * nOrb * Ny, 2 * nOrb * Ny), dtype=np.complex64)
    eUpBest = np.zeros((Nk, 2 * nOrb * Ny))
    wUpBest = np.zeros((Nk, 2 * nOrb * Ny, 2 * nOrb * Ny), dtype=np.complex64)
    eDownBest = np.zeros((Nk, 2 * nOrb * Ny))
    wDownBest = np.zeros((Nk, 2 * nOrb * Ny, 2 * nOrb * Ny), dtype=np.complex64)
    ks = np.linspace(-np.pi / 2,np.pi / 2, num=Nk, endpoint=False)

    if anneal_or_not == True:
        print("Started annealing.\n")

    nUpBest, nDownBest = nUp, nDown
    bestGrandpotential = 1e100

    while loopCondition(it, itMax, deltaUp, deltaDown,\
                        delta, beta, invTemp, betaThreshold):

        # Annealing
        if anneal_or_not == True:
            beta = anneal(invTemp, betaStart, betaSpeed, beta,\
            betaThreshold, it, osc)
        else:
            beta = noAnneal(invTemp, beta)

        for kCount, k in enumerate(ks):
            # Define the MF Hamiltonian for this iteration and k-point
            K = HribbonKSpace2sublat(k, nOrb, Ny, E0, E1, E2, E3, E4)
            Hup = K + U * np.eye(2 * nOrb * Ny) * nDown
            Hdown = K + U * np.eye(2 * nOrb * Ny) * nUp

            # Diagonalize
            eUp[kCount, :], wUp[kCount, :, :] = la.eigh(Hup)
            eDown[kCount, :], wDown[kCount, :, :] = la.eigh(Hdown)

        # Save the previous fields to test for convergence
        nUpOld = nUp.copy()
        nDownOld = nDown.copy()

        # Compute the chemical potential implicitly
        mu = bisect(rootToChem2sublat, -50, 50,\
         args = (eUp, eDown, beta, Nk, Ny, nHole) )
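
        # (rootToChem2sublat is assumed to return the particle-number
        # mismatch at chemical potential mu, so its root pins mu to the
        # target filling nHole at inverse temperature beta.)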

        # Update fields
        nUp, nDown = update2sublat(nUp, nDown, Nk, Ny,\
         wUp, wDown, eUp, eDown, mu, beta, nOrb)

        # Damping
        nUp, nDown = damp(it, dampFreq, nUp, nDown, nUpOld, nDownOld, lbda)

        # Relative difference between current and previous fields
        deltaUp = np.dot(nUp - nUpOld, nUp - nUpOld)\
         / np.dot(nUpOld, nUpOld)
        deltaDown = np.dot(nDown - nDownOld, nDown - nDownOld)\
         / np.dot(nDownOld, nDownOld)

        if it % printFreq == 0:
            print("iteration: ", it)
            print("deltaUp: ", deltaUp)
            print("deltaDown: ", deltaDown)

        # Grandpotential per site
        energy = grandpotentialKSpace2sublat(U, nUp, nDown, nUpOld, nDownOld,\
         invTemp, Nk, Ny, eUp, eDown, mu, abs_t0)
        if it % singleExcitationFreq == 0 :
            for attempt in range(2 * nOrb * Ny):
                i = int( np.random.random() * 2 * nOrb * Ny )
                nUpTmp, nDownTmp = nUp[i], nDown[i]
                nUp[i], nDown[i] = updateLocal2sublat(i, nUp, nDown, Nk, Ny,\
                wUp, wDown, eUp, eDown, mu, beta, nOrb)
                energyTmp = grandpotentialKSpace2sublat(U, nUp, nDown,\
                nUpOld, nDownOld, invTemp, Nk, Ny, eUp, eDown, mu, abs_t0)
                if energyTmp > energy:
                    nUp[i], nDown[i] = nUpTmp, nDownTmp
                else:
                    energy = energyTmp

        if ( it + 1 ) % singleExcitationFreq == 0 :
            for attempt in range(nOrb * 2 * Ny):
                i = int( np.random.random() * 2 * nOrb * Ny )
                nUpTmp, nDownTmp = nUp[i], nDown[i]
                if dyn == 'local' or dyn == 'wait':
                    nUp[i], nDown[i] = updateLocalKSpace(i, nUp, nDown, Nk, Ny,\
                    wUp, wDown, eUp, eDown, mu, beta, nOrb)
                    # We do not take steps that increase energy
                    energyTmp = grandpotentialKSpace2sublat(U, nUp, nDown,\
                    nUpOld, nDownOld, invTemp, Nk, Ny, eUp, eDown, mu, abs_t0)
                    if energyTmp > energy:
                        nUp[i], nDown[i] = nUpTmp, nDownTmp
                    else:
                        energy = energyTmp
                if dyn == 'kick':
                    nUp[i], nDown[i] = singleExcitation(nUp, nDown, i)
                    # We do not take steps that increase energy
                    energyTmp = grandpotentialKSpace2sublat(U, nUp, nDown,\
                    nUpOld, nDownOld, invTemp, Nk, Ny, eUp, eDown, mu, abs_t0)
                    if energyTmp > energy:
                        nUp[i], nDown[i] = nUpTmp, nDownTmp
                    else:
                        energy = energyTmp
                if dyn == 'mixed':
                    nUp[i], nDown[i] = updateLocalKSpace(i, nUp, nDown, Nk, Ny,\
                    wUp, wDown, eUp, eDown, mu, beta, nOrb)
                    # We do not take steps that increase energy
                    energyTmp = grandpotentialKSpace2sublat(U, nUp, nDown,\
                    nUpOld, nDownOld, invTemp, Nk, Ny, eUp, eDown, mu, abs_t0)
                    if energyTmp > energy:
                        nUp[i], nDown[i] = nUpTmp, nDownTmp
                    else:
                        energy = energyTmp
                    nUp[i], nDown[i] = singleExcitation(nUp, nDown, i)
                    # We do not take steps that increase energy
                    energyTmp = grandpotentialKSpace2sublat(U, nUp, nDown,\
                    nUpOld, nDownOld, invTemp, Nk, Ny, eUp, eDown, mu, abs_t0)
                    if energyTmp > energy:
                        nUp[i], nDown[i] = nUpTmp, nDownTmp
                    else:
                        energy = energyTmp
        if dyn == 'wait':
            if ( it + 1 + itWait ) % singleExcitationFreq == 0 :
                for attempt in range(nOrb * 2 * Ny):
                    i = int( np.random.random() * 2 * nOrb * Ny )
                    nUpTmp, nDownTmp = nUp[i], nDown[i]
                    nUp[i], nDown[i] = singleExcitation(nUp, nDown, i)
                    # We do not take steps that increase energy
                    energyTmp = grandpotentialKSpace2sublat(U, nUp, nDown,\
                    nUpOld, nDownOld, invTemp, Nk, Ny, eUp, eDown, mu, abs_t0)
                    if energyTmp > energy:
                        nUp[i], nDown[i] = nUpTmp, nDownTmp
                    else:
                        energy = energyTmp


        if ( it + 1 ) % singleExcitationFreq == 0 :
            if dyn == 'local' or dyn == 'wait':
                for attempt in range(2 * nOrb * Ny):
                    i = int( np.random.random() * 2 * nOrb * Ny )
                    nUpTmp, nDownTmp = nUp[i], nDown[i]
                    nUp[i], nDown[i] = updateLocal2sublat(i, nUp, nDown, Nk, Ny,\
                    wUp, wDown, eUp, eDown, mu, beta, nOrb)
                    for kCount, k in enumerate(ks):
                        # Define the MF Hamiltonian for this iteration and k-point
                        K = HribbonKSpace2sublat(k, nOrb, Ny, E0, E1, E2, E3, E4)
                        Hup = K + U * np.eye(2 * nOrb * Ny) * nDown
                        Hdown = K + U * np.eye(2 * nOrb * Ny) * nUp

                        # Diagonalize
                        eUp[kCount, :], wUp[kCount, :, :] = la.eigh(Hup)
                        eDown[kCount, :], wDown[kCount, :, :] = la.eigh(Hdown)

                    # Save the previous fields to test for convergence
                    nUpOld = nUp.copy()
                    nDownOld = nDown.copy()

                    # Compute the chemical potential implicitly
                    mu = bisect(rootToChem2sublat, -50, 50,\
                     args = (eUp, eDown, beta, Nk, Ny, nHole) )

                    # Update fields
                    nUp, nDown = update2sublat(nUp, nDown, Nk, Ny,\
                     wUp, wDown, eUp, eDown, mu, beta, nOrb)

                    # Grandpotential per site
                    energyTmp = grandpotentialKSpace2sublat(U, nUp, nDown,\
                    nUpOld, nDownOld, invTemp, Nk, Ny, eUp, eDown, mu, abs_t0)

                    if energyTmp > energy:
                        nUp[i], nDown[i] = nUpTmp, nDownTmp
                    else:
                        energy = energyTmp

            elif dyn == 'kick':
                for attempt in range(2 * nOrb * Ny):
                    i = int( np.random.random() * 2 * nOrb * Ny )
                    nUpTmp, nDownTmp = nUp[i], nDown[i]
                    nUp[i], nDown[i] = singleExcitation(nUp, nDown, i)
                    for kCount, k in enumerate(ks):
                        # Define the MF Hamiltonian for this iteration and k-point
                        K = HribbonKSpace2sublat(k, nOrb, Ny, E0, E1, E2, E3, E4)
                        Hup = K + U * np.eye(2 * nOrb * Ny) * nDown
                        Hdown = K + U * np.eye(2 * nOrb * Ny) * nUp

                        # Diagonalize
                        eUp[kCount, :], wUp[kCount, :, :] = la.eigh(Hup)
                        eDown[kCount, :], wDown[kCount, :, :] = la.eigh(Hdown)

                    # Save the previous fields to test for convergence
                    nUpOld = nUp.copy()
                    nDownOld = nDown.copy()

                    # Compute the chemical potential implicitly
                    mu = bisect(rootToChem2sublat, -50, 50,\
                     args = (eUp, eDown, beta, Nk, Ny, nHole) )

                    # Update fields
                    nUp, nDown = update2sublat(nUp, nDown, Nk, Ny,\
                     wUp, wDown, eUp, eDown, mu, beta, nOrb)

                    # Grandpotential per site
                    energyTmp = grandpotentialKSpace2sublat(U, nUp, nDown,\
                    nUpOld, nDownOld, invTemp, Nk, Ny, eUp, eDown, mu, abs_t0)

                    if energyTmp > energy:
                        nUp[i], nDown[i] = nUpTmp, nDownTmp
                    else:
                        energy = energyTmp
            elif dyn == 'mixed':
                for attempt in range(2 * nOrb * Ny):
                    i = int( np.random.random() * 2 * nOrb * Ny )
                    nUpTmp, nDownTmp = nUp[i], nDown[i]
                    nUp[i], nDown[i] = updateLocal2sublat(i, nUp, nDown, Nk, Ny,\
                    wUp, wDown, eUp, eDown, mu, beta, nOrb)
                    nUp[i], nDown[i] = singleExcitation(nUp, nDown, i)
                    for kCount, k in enumerate(ks):
                        # Define the MF Hamiltonian for this iteration and k-point
                        K = HribbonKSpace2sublat(k, nOrb, Ny, E0, E1, E2, E3, E4)
                        Hup = K + U * np.eye(2 * nOrb * Ny) * nDown
                        Hdown = K + U * np.eye(2 * nOrb * Ny) * nUp

                        # Diagonalize
                        eUp[kCount, :], wUp[kCount, :, :] = la.eigh(Hup)
                        eDown[kCount, :], wDown[kCount, :, :] = la.eigh(Hdown)

                    # Save the previous fields to test for convergence
                    nUpOld = nUp.copy()
                    nDownOld = nDown.copy()

                    # Compute the chemical potential implicitly
                    mu = bisect(rootToChem2sublat, -50, 50,\
                     args = (eUp, eDown, beta, Nk, Ny, nHole) )

                    # Update fields
                    nUp, nDown = update2sublat(nUp, nDown, Nk, Ny,\
                     wUp, wDown, eUp, eDown, mu, beta, nOrb)

                    # Grandpotential per site
                    energyTmp = grandpotentialKSpace2sublat(U, nUp, nDown,\
                    nUpOld, nDownOld, invTemp, Nk, Ny, eUp, eDown, mu, abs_t0)

                    if energyTmp > energy:
                        nUp[i], nDown[i] = nUpTmp, nDownTmp
                    else:
                        energy = energyTmp
        if dyn == 'wait':
            if ( it + 1 + itWait ) % singleExcitationFreq == 0 :
                for attempt in range(nOrb * Ny * 2):
                    i = int( np.random.random() * nOrb * Ny * 2 )
                    nUpTmp, nDownTmp = nUp[i], nDown[i]
                    nUp[i], nDown[i] = singleExcitation(nUp, nDown, i)
                    for kCount, k in enumerate(ks):
                        # Define the MF Hamiltonian for this iteration and k-point
                        K = HribbonKSpace2sublat(k, nOrb, Ny, E0, E1, E2, E3, E4)
                        Hup = K + U * np.eye(2 * nOrb * Ny) * nDown
                        Hdown = K + U * np.eye(2 * nOrb * Ny) * nUp

                        # Diagonalize
                        eUp[kCount, :], wUp[kCount, :, :] = la.eigh(Hup)
                        eDown[kCount, :], wDown[kCount, :, :] = la.eigh(Hdown)

                    # Save the previous fields to test for convergence
                    nUpOld = nUp.copy()
                    nDownOld = nDown.copy()

                    # Compute the chemical potential implicitly
                    mu = bisect(rootToChem2sublat, -50, 50,\
                     args = (eUp, eDown, beta, Nk, Ny, nHole) )

                    # Update fields
                    nUp, nDown = update2sublat(nUp, nDown, Nk, Ny,\
                     wUp, wDown, eUp, eDown, mu, beta, nOrb)

                    # Grandpotential per site
                    energyTmp = grandpotentialKSpace2sublat(U, nUp, nDown,\
                    nUpOld, nDownOld, invTemp, Nk, Ny, eUp, eDown, mu, abs_t0)

                    if energyTmp > energy:
                        nUp[i], nDown[i] = nUpTmp, nDownTmp
                    else:
                        energy = energyTmp

        energies[it] = energy
        if invTemp == 'infty':
            if energy < bestGrandpotential and beta >= betaThreshold:
                bestGrandpotential = energy
                # Copy: nUp and eUp/wUp are modified in place later, which
                # would otherwise corrupt the saved best configuration.
                nUpBest, nDownBest = nUp.copy(), nDown.copy()
                eUpBest, eDownBest = eUp.copy(), eDown.copy()
                wUpBest, wDownBest = wUp.copy(), wDown.copy()
        else:
            if energy < bestGrandpotential and beta == invTemp:
                bestGrandpotential = energy
                nUpBest, nDownBest = nUp.copy(), nDown.copy()
                eUpBest, eDownBest = eUp.copy(), eDown.copy()
                wUpBest, wDownBest = wUp.copy(), wDown.copy()

        # Move to the next iteration
        it += 1

    # Save the last iteration
    lastIt = it
    return nUpBest, nDownBest, energies, bestGrandpotential, itSwitch,\
     lastIt, mu, abs_t0, eUpBest, eDownBest,\
     np.absolute(wUpBest.flatten('C'))**2, np.absolute(wDownBest.flatten('C'))**2
Esempio n. 57
0
def solveForPrice(ap, goal):
    fun = lambda p: sum(analyze(ap, price=p)[1].values()) - goal
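    # bisect requires opposite signs at the bracket ends; if the goal is
    # not bracketed in [0, 1.5 * startPrice], report failure with 0.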
    if not fun(0) * fun(ap['startPrice'] * 1.5) < 0:
        return 0
    return bisect(fun, 0, ap['startPrice'] * 1.5)
Esempio n. 58
0
#!/usr/bin/env python3
import math
from scipy.optimize import bisect 

refr_idx = [1.0, 10/9, 10/8, 10/7, 10/6, 10/5]
delta_ys = [100/math.sqrt(2)-50] + [10]*5
total_delta_x_target = 100.0 / math.sqrt(2)

def total_delta_x_error(alpha0):
    global delta_xs # save for time calculations
    sin0 = math.sin(alpha0)
    delta_xs = [delta_x(sin0, refr_idx[i], delta_ys[i]) for i in range(6)]
    return sum(delta_xs) - total_delta_x_target

def delta_x(sin0, ior, dy):
    # dy * tan(alpha_i), with sin(alpha_i) = sin0 / ior by Snell's law
    return dy * (sin0 / ior) / math.sqrt(1 - (sin0 / ior)**2)

alpha = bisect(total_delta_x_error, 0.1, math.pi/2-0.1) 
sin = math.sin(alpha)

velocities = (10/refr_idx[i] for i in range(6))
lengths = ((dx**2 + dy **2) ** 0.5 for dx, dy in zip(delta_xs, delta_ys))
total_time = sum(l/v for l, v in zip(lengths, velocities))

print(f"{total_time:.10f}")
Esempio n. 59
0
 def get_energy(self, k, m, bracket, xtol):
     start, stop = bracket
     return bisect(self.equation, start, stop, args=(k, m),
                   xtol=xtol)  # noqa
Esempio n. 60
0
#!/usr/bin/env python3
# ----------------------------------------- #
#
import numpy as np
from scipy.linalg import solve
from scipy.sparse import diags
from scipy.optimize import bisect

L = 16
beta = bisect(f=(lambda x: np.cosh(x) * np.cos(x) + 1), a=5, b=10)
k = beta / L
print(k, k * L, np.cosh(k * L) * np.cos(k * L))
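# x = k*L solves the clamped-free (cantilever) beam characteristic equation
# cosh(kL)*cos(kL) = -1; the bracket [5, 10] selects the third root,
# kL ~ 7.8548.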

R = (np.cos(k * L) + np.cosh(k * L)) / (np.sin(k * L) + np.sinh(k * L))
print(R)


def y(x):
    return 0.5 * ((np.cosh(k * x) - np.cos(k * x)) - R *
                  (np.sinh(k * x) - np.sin(k * x)))


print(y(L))

n = L
x = np.linspace(0.5, n - 0.5, n)
A4 = np.array([
    np.ones(n - 2),
    -4 * np.ones(n - 1),
    6 * np.ones(n),
    -4 * np.ones(n - 1),