Example #1
def compute_k2_vals(l_max, num_vals):
    """
    Compute hyper-radial infinite-well K^2 eigenvalues for a well of unit radial width.  The eigenvalues for a well in
    dimension D with hyper angular momentum L correspond to the Bessel order index l = L + (D - 3) / 2.

    Compute square of zeros of J_{l+1/2}(x) for l = 0, 1/2, 1, ..., l_max - 1/2, l_max
    :param l_max:  Max l to find zeros of
    :param num_vals:  Total number of zeros to find for each l
    :return K2:  A (2*l_max + 1) by num_vals ndarray containing the computed squared zeros.  K2[2*L + D - 3] are the
    eigenvalues for dimension D and hyper angular momentum L
    """

    from numpy import arange, pi, zeros, zeros_like
    from scipy.optimize import brentq
    from scipy.special import jv

    zro = zeros((2 * l_max + 1, num_vals), dtype=float)

    z_l_m_1 = pi * arange(1,num_vals + l_max + 1)
    z_l = zeros_like(z_l_m_1)
    zz_l = zeros_like(z_l_m_1)

    zro[0] = z_l_m_1[:num_vals]

    for l in range(1, l_max + 1):
        for i in range(num_vals + l_max - l):
            zz_l[i] = brentq(lambda x: jv(l, x), z_l_m_1[i], z_l_m_1[i + 1])
            z_l[i] = brentq(lambda x: jv(l + .5, x), z_l_m_1[i], z_l_m_1[i + 1])

        z_l_m_1[:] = z_l[:]
        zro[2 * l] = z_l[:num_vals]
        zro[2 * l - 1] = zz_l[:num_vals]
    if num_vals == 1:
        zro = zro[:,0]
    return zro**2
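A quick sanity check of this routine (a sketch, assuming compute_k2_vals is importable as defined above): the first row corresponds to J_{1/2}, whose zeros are the integer multiples of pi, so the squared zeros should be (n*pi)**2.

import numpy as np
K2 = compute_k2_vals(l_max=2, num_vals=4)
print(K2.shape)                                           # (5, 4)
print(np.allclose(K2[0], (np.pi * np.arange(1, 5))**2))   # True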
Example #2
    def profile_CI_1d(self,param,CL):
        """
        Calculate approximate Wilks' confidence intervals

        :param param: parameter name
        :param CL: confidence level
        """
        if param not in self.minuit.limits.keys():
            raise RuntimeError('You must specify parameter limits.')

        values0 = self.minuit.values.copy()
        fixed0 = self.minuit.fixed.copy()
        delta_llh = stats.chi2.ppf(CL,1)
        self.minuit.migrad()
        best_llh  = self.minuit.fval
        best_param = self.minuit.values[param]

        if self.profile(**{param:self.minuit.limits[param][0]}) - best_llh > delta_llh:
            lo_lim = optimize.brentq(lambda x: self.profile(**{param:x}) - best_llh - delta_llh,self.minuit.limits[param][0],best_param)
        else:
            lo_lim = self.minuit.limits[param][0]
        if self.profile(**{param:self.minuit.limits[param][1]}) - best_llh > delta_llh:
            up_lim = optimize.brentq(lambda x: self.profile(**{param:x}) - best_llh - delta_llh,best_param,self.minuit.limits[param][1])
        else:
            up_lim = self.minuit.limits[param][1]

        self.minuit.values = values0.copy()
        self.minuit.fixed = fixed0.copy()
        return np.array([lo_lim,up_lim])
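The construction above is generic: brentq finds where the profile likelihood rises by the chi2 quantile on either side of the best fit. A self-contained sketch of the same idea (a hypothetical toy, not tied to the iminuit class above), using a Gaussian mean with unit sigma so that the profile -2*log-likelihood is simply (mu - muhat)**2:

from scipy import stats, optimize

muhat, CL = 1.3, 0.68
delta_llh = stats.chi2.ppf(CL, 1)
profile = lambda mu: (mu - muhat)**2   # -2 log likelihood ratio
lo = optimize.brentq(lambda mu: profile(mu) - delta_llh, muhat - 10., muhat)
hi = optimize.brentq(lambda mu: profile(mu) - delta_llh, muhat, muhat + 10.)
print(lo, hi)   # muhat -/+ sqrt(stats.chi2.ppf(0.68, 1)) ~ muhat -/+ 0.994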
Example #3
def Nsrc_gal(magg,magr,zred,q):
  global boost_csect_gal,init_nsmlim_gal;

  listmag=[];
  listz=[];
  rands=[];

  if(zred>zmax_gal):
      return 0,listmag,listz,rands,0.;

  ## Initialize ns(mlim) if not done before
  if(init_nsmlim_gal==0):
      initnsmlim();

  ## Get the velocity dispersion
  vdisp=svd.getsigma(magg,magr,zred);
  bsist=slp.getreinst(zred,zmax_gal,vdisp)*206264.8;
  
  if (bsist<1.2):
      return 0,0,0,0,0.;
  
  else:
      ## Calculate the number of galaxies around this object within its
      ## lensing cross-section. 
      ## This is given by the following integral
      ## \int dz dV/dz P(zs,mlim) \sigma(zl,zs,q)
      #
      Nsrcmean=sci.quad(lambda zz: dNbydz_gal(zz,vdisp,zred,q),zmin_gal,zmax_gal)[0];
      Nsrcmean_boost=Nsrcmean*nsmlim_gal_spl(mmax_gal)*boost_csect_gal;

      ## Return a Poisson deviate
      Nreal=np.random.poisson(Nsrcmean_boost);

      fp1.write("This lens has %f galaxies behind it on average and Poisson deviate is %d\n"%(Nsrcmean_boost,Nreal));

      for ii in range(Nreal):
	  galzsrc=0.0;
	  trials=0
	  while(galzsrc<zred):
	      rr=np.random.random();
	      Ntarg=rr*Nsrcmean;
	      ## Need a root finder to get the redshift
	      galzsrc=brentq(findzgal,zmin_gal,zmax_gal,args=(vdisp,zred,q,Ntarg),xtol=1.e-3);
	  rands=np.append(rands,rr);
	  listz=np.append(listz,galzsrc);
	  done=False;
	  while(not done):
	      try:
		  rr=np.random.random();
		  Phitarg=rr*P_gal_spl(galzsrc)*nsmlim_gal_spl(mmax_gal);
		  rands=np.append(rands,rr);
		  galmag=brentq(findmaggal,mmin_gal,mmax_gal,args=(galzsrc,Phitarg),xtol=1.e-3);
		  done=True;
	      except ValueError:
		  print "checking", findmaggal(mmin_gal,galzsrc,Phitarg), findmaggal(mmax_gal,galzsrc,Phitarg),mmin_gal, mmax_gal, Phitarg;
		  print "Done is:",done;
	  listmag=np.append(listmag,galmag);
	  

      return Nreal,listmag,listz,rands,vdisp;
Example #4
def get_optimal_mix_balancing(L, GW, GS, gamma=1., returnall=True, normalized=False):
    L, GW, GS = array(L,ndmin=2), array(GW,ndmin=2), array(GS,ndmin=2) 
    weighed_sum = lambda x: sum(x,axis=0)/mean(sum(x,axis=0))

    l = weighed_sum(L)
    Gw = weighed_sum(GW)	
    Gs = weighed_sum(GS)

    mismatch = lambda alpha_w: gamma*(alpha_w*Gw + (1.-alpha_w)*Gs) - l
    res_load = lambda alpha_w: sum(get_positive(-mismatch(alpha_w)))

    alpha_w_opt = fmin(res_load,0.5,disp=False)
    res_load_1p = lambda alpha_w: res_load(alpha_w)-(res_load(alpha_w_opt)+.01*sum(l))

    alpha_w_opt_1p_interval = array([brentq(res_load_1p, 0, alpha_w_opt),brentq(res_load_1p, alpha_w_opt, 1)])

    if normalized:
        mismatch_opt = mismatch(alpha_w_opt)
    else:
        mismatch_opt = mismatch(alpha_w_opt)*mean(sum(L,axis=0))
    res_load_opt = sum(get_positive(-mismatch_opt))

    if returnall:
    #Returns: alpha_w_opt, alpha_w_opt_1p_interval, res_load_sum_opt, mismatch_opt
        return alpha_w_opt, alpha_w_opt_1p_interval, res_load_opt, mismatch_opt
    else:
        return alpha_w_opt
Example #5
def _boxcox_conf_interval(x, lmax, alpha):
    # Need to find the lambda for which
    #  f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1
    fac = 0.5*distributions.chi2.ppf(1-alpha,1)
    target = boxcox_llf(lmax,x)-fac
    def rootfunc(lmbda,data,target):
        return boxcox_llf(lmbda,data) - target
    # Find positive endpoint
    newlm = lmax+0.5
    N = 0
    while (rootfunc(newlm,x,target) > 0.0) and (N < 500):
        newlm += 0.1
        N +=1
    if N == 500:
        raise RuntimeError("Could not find endpoint.")
    lmplus = optimize.brentq(rootfunc,lmax,newlm,args=(x,target))
    newlm = lmax-0.5
    N = 0
    while (rootfunc(newlm,x,target) > 0.0) and (N < 500):
        newlm -= 0.1
        N +=1
    if N == 500:
        raise RuntimeError("Could not find endpoint.")
    lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x,target))
    return lmminus, lmplus
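This helper is scipy's internal routine behind scipy.stats.boxcox; passing alpha to the public function returns this interval. A usage sketch:

from scipy import stats

data = stats.loggamma.rvs(5, size=500) + 5   # positive, skewed sample
xt, lmax_mle, ci = stats.boxcox(data, alpha=0.05)
print(lmax_mle, ci)   # MLE of lambda and its 95% confidence interval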
Example #6
def get_optimal_mix_balancing(L, GW, GS, gamma=1., returnall=False, normalized=True):

	L, GW, GS = array(L,ndmin=2), array(GW,ndmin=2), array(GS,ndmin=2)  #Ensure a minimum dimension of 2 so the weighed sum is calculated correctly.
	weighed_sum = lambda x: sum(x,axis=0)/mean(sum(x,axis=0))

	l = weighed_sum(L)
	Gw = weighed_sum(GW)	
	Gs = weighed_sum(GS)
	
	mismatch = lambda alpha_w: gamma*(alpha_w*Gw + (1.-alpha_w)*Gs) - l
	res_load_sum = lambda alpha_w: sum(get_positive(-mismatch(alpha_w)))
	
	alpha_w_opt = fmin(res_load_sum,0.5,disp=False)
	res_load_sum_1p_interval = lambda alpha_w: res_load_sum(alpha_w)-(res_load_sum(alpha_w_opt)+.01*sum(l))
	
	alpha_w_opt_1p_interval = array([brentq(res_load_sum_1p_interval, 0, alpha_w_opt),brentq(res_load_sum_1p_interval, alpha_w_opt, 1)])
	
	if normalized:
		mismatch_opt = mismatch(alpha_w_opt)
	else:
		mismatch_opt = mismatch(alpha_w_opt)*mean(sum(L,axis=0))
	res_load_sum_opt = sum(get_positive(-mismatch_opt))
	
	if returnall:
		#Returns: alpha_w_opt, alpha_w_opt_1p_interval, res_load_sum_opt, mismatch_opt
		return alpha_w_opt, alpha_w_opt_1p_interval, res_load_sum_opt, mismatch_opt
	else:
		return alpha_w_opt
Example #7
    def CJ(self, vol_0):
        '''Find CJ conditions using two nested line searches.
        '''
        # Search for det velocity between 1 and 10 km/sec
        d_min = 1.0e5
        d_max = 1.0e6
        v_min = magic.spline_min
        v_max = magic.spline_max

        # R is Rayleigh line
        R = lambda vel, vol: vel*vel*(vol_0-vol)/(vol_0*vol_0)
        # F is self - R
        F = lambda vel, vol, self : self(vol) - R(vel, vol)
        # d_F is derivative of F wrt vol
        d_F = lambda vol, vel, self: self.derivative(1)(vol) + (
            vel/vol_0)**2
        
        # arg_min(vel, self) finds volume that minimizes self(v) - R(v)
        arg_min = lambda vel,self: brentq(d_F,v_min,v_max,args=(vel,self))
        
        E = lambda vel, self: F(vel, arg_min(vel,self), self)
        
        vel_CJ = brentq(E, d_min, d_max, args=(self,))
        vol_CJ = arg_min(vel_CJ,self)
        p_CJ = self(vol_CJ)
        return vel_CJ, vol_CJ, p_CJ
Example #8
 def __init__(self,a,b):
     if a.ref != b.ref:
         raise AttributeError("Coordinate conversion not implemented yet")
     if not a.isFlying or not b.isFlying:
         raise AttributeError("Both vessels must be orbiting.")
     
     
     self.figure = figure(figsize=(8,8))
     self.axis = self.figure.gca(projection="rectilinear")#,aspect='equal')
     
     
     t = time.time() # Temp
     ts = a.orbit.synodicPeriod(b.orbit)
     
     boundary = [t,t+ts]
     
     X = linspace(boundary[0],boundary[1],1000) #arange(boundary[0],boundary[1],100)
     Y = []
     for i in X:
         Y.append(a.distanceTo(i,b))
     
     self.axis.plot(X,Y)
     d=lambda x: a.distanceTo(x,b)
     closest = so.minimize_scalar(d,method="bounded",bounds=boundary)
     
     self.axis.scatter(closest.x,closest.fun)
     if closest.fun < b.SoI:
         self.axis.plot(boundary,[b.SoI,b.SoI],label="%s SoI"%b.name)
         
         d=lambda x: a.distanceTo(x,b) - b.SoI
         encounter = so.brentq(d,t,closest.x)
         escape = so.brentq(d,closest.x,t+ts)
         
         self.axis.scatter(encounter,b.SoI)
         self.axis.scatter(escape,b.SoI)
Example #9
    def find_intersect(self,ray): # does the ray intercept the surface? if so, how far along the ray? if not, return -1
        s0 = 1e-5 #start this distance along the ray so that we don't get confused by the surface we have just passed through
        
#        print(ray.r0)
#        print(self.Ffunc(ray.r0))
        if self.Ffunc(ray.r0+s0*ray.direction)>0: #do we start off inside the sphere?
            #code for if we do:
            #if you go three radii in any direction you are definitely outside the sphere, so this point is definitely outside the sphere
            soutside = 3*self.radius
            #now we have values of s that bracket the intercept we can find it
            s = brentq(lambda x: self.Ffunc(ray.r0+x*ray.direction),s0,soutside)
            return s
        else:
            #code for if we start outside sphere
            #s for closest point to the center
            s_closest = dot(self.center-ray.r0,ray.direction)
            if s_closest < s0:
                return -1 #we are outside and heading away from center
            #f value for closest point to the center
            F_closest = self.Ffunc(ray.r0+s_closest*ray.direction)
            if F_closest<0: #ray never hits sphere
                return -1
            else:
                #s_closest and 0 are on different sides of the sphere
                #print(s_closest)
                #print(ray.r0)
                #print(ray.direction)
                #print(F_closest)
                #print(self.Ffunc(ray.r0))
                s = brentq(lambda x: self.Ffunc(ray.r0+x*ray.direction),0,s_closest)
                return s

        #we should never reach this point
        #this is a way to make sure we know if we do
        assert(False)
Example #10
def find_vmin(vorb, uorb, E, Jphi, I3V, Phi, delta):
    """Find the lower limit of integration for the vertical action"""
    eps = 1e-8
    if np.abs(pv_squared(vorb, uorb, E, Jphi, I3V, Phi, delta)) < 1e-7:
        # we are at either vmin or vmax
        peps = pv_squared(vorb + eps, uorb, E, Jphi, I3V, Phi, delta)
        meps = pv_squared(vorb - eps, uorb, E, Jphi, I3V, Phi, delta)
        if peps < 0.0 and meps > 0.0:
            # we're at vmax
            rstart = findstart_vmin(vorb, uorb, E, Jphi, I3V, Phi, delta)
            if rstart == 0.0:
                return 0.0
            else:
                try:
                    return brentq(pv_squared, rstart, vorb - eps, (uorb, E, Jphi, I3V, Phi, delta), maxiter=200)
                except ValueError:
                    print "This orbit has either zero or very small binding energy. Can't compute the vertical action."
                    return BRENTERROR
        elif peps > 0.0 and meps < 0.0:
            # we're at vmin
            return vorb
        else:
            # orbit is in the equatorial plane
            return vorb
    else:
        rstart = findstart_vmin(vorb, uorb, E, Jphi, I3V, Phi, delta)
        if rstart == 0.0:
            return 0.0
        else:
            try:
                return brentq(pv_squared, rstart, vorb - eps, (uorb, E, Jphi, I3V, Phi, delta), maxiter=200)
            except ValueError:
                print "This orbit either has either zero or very small binding energy. Can't compute the vertical action."
    return None
Example #11
def kerr_lightring_velocity(chi):
    """Return the velocity at the Kerr light ring"""
    # If chi > 0.9996, the algorithm cannot solve the function
    if chi >= 0.9996:
        return brentq(kerr_lightring, 0, 0.8, args=(0.9996,))
    else:
        return brentq(kerr_lightring, 0, 0.8, args=(chi,))
Example #12
    def calc_ci(self, para, direction):
        """
        Calculate the ci for a single parameter for a single direction.
        """
        if isinstance(para, str):
            para = self.minimizer.params[para]

        calc_prob = lambda val, prob: self.calc_prob(para, val, prob)
        if self.trace:
            x = [i.value for i in self.minimizer.params.values()]
            self.trace_dict[para.name].append(x + [0])

        para.vary = False
        self.minimizer.prepare_fit(self.params)
        limit = self.find_limit(para, direction)
        start_val = para.value
        a_limit = start_val
        ret = []

        for prob in self.sigmas:
            try:
                val = brentq(calc_prob, a_limit, limit, rtol=.5e-4, args=prob)
            except ValueError:
                self.reset_vals()
                val = brentq(calc_prob, start_val, limit, rtol=.5e-4, args=prob)
            a_limit = val
            ret.append((prob, val))

        para.vary = True
        self.reset_vals()
        return ret
Example #13
def YnVn(v0,eta,w,n,t0):
    def Y(t):
        return ((yn)+(vn*t)-((1./2.)*g*(t**2.)))
    def Z(t):
        return (A*math.sin(w*t))
    def H(t):
        return Y(t)-Z(tiempo+t)
    tiempo=t0
    vn=v0
    yn=Z(t0)
    print "v 0 =",vn
    print "y 0 =",yn
    while (Y(t0)-Z(t0))>=0:
        t0+=0.1
    raiz=brentq(H,t0-0.1,t0)
    print "raiz 1 =",raiz
    i=1
    tiempo=raiz
    while i<=n:
        yn=(A*math.sin(w*tiempo))
        vn=(1+eta)*(w*math.cos(w*tiempo))-eta*(vn-g*raiz)
        print "v",i,"=",vn
        print "y",i,"=",yn        
        ti=0
        while H(ti)>=0:
            ti+=0.1
        raiz=brentq(H,ti-0.1,ti)
        tiempo+=raiz
        i+=1
        print "raiz",i,"=",tiempo
        
    return np.array([vn,yn])
Example #14
    def binomial(self,ncounts,fraction=0.68):

        ncounts_cum = np.cumsum(ncounts)
        ncounts_tot = ncounts_cum[-1]

        fn_ncdf = UnivariateSpline(self._x,ncounts_cum,s=0,k=1)        
        xq = opt.brentq(lambda t: fn_ncdf(t)-ncounts_tot*fraction,
                        self._x[0],self._x[-1])

        eff  = fn_ncdf(xq)/ncounts_tot
        nerr = np.sqrt(ncounts_tot*eff*(1-eff))

        nerr_hi = ncounts_tot*fraction+nerr
        nerr_lo = ncounts_tot*fraction-nerr

        xq_hi = self._x[-1]
        xq_lo = self._x[0]

        if nerr_hi < ncounts_tot:
            xq_hi = opt.brentq(lambda t: fn_ncdf(t)-nerr_hi,
                               self._x[0],self._x[-1])

        if nerr_lo > 0:
            xq_lo = opt.brentq(lambda t: fn_ncdf(t)-nerr_lo,
                               self._x[0],self._x[-1])

        xq_err = 0.5*(xq_hi-xq_lo)
        
        return xq, xq_err
Example #15
  def period(self):
    '''Analytically calculate the period of EKM oscillations.'''

    # First calculate the limits. 
    xcrit = brentq(lambda x: ellipk(x) - 2 * ellipe(x), 0, 1)
    phicrit = 3 * (1 - xcrit) / (3 + 2 * xcrit)

    if self.phiq < phicrit:
      CKLmin = brentq(lambda CKL: self.chi - self.epsoct - F(CKL), self.tol, self.phiq)
    else:
      # Check if flips occur for Omega = Pi or 0
      if (np.sign(self.chi - self.epsoct - F(self.tol)) != 
          np.sign(self.chi - self.epsoct - F(self.phiq))):
        CKLmin = brentq(lambda CKL: self.chi - self.epsoct - F(CKL), self.tol, self.phiq)
      else:
        CKLmin = brentq(lambda CKL: self.chi + self.epsoct - F(CKL), self.tol, self.phiq)
    if self.doesflip():
      CKLmax = self.phiq
    else:
      CKLmax = brentq(lambda CKL: self.chi + self.epsoct - F(CKL), 0, 1)

    prefactor = 256 * np.sqrt(10) / (15 * np.pi) / self.epsoct
    P = quad(lambda CKL: (prefactor * ellipk((3 - 3*CKL)/(3 + 2*CKL)) / 
      (4 - 11*CKL) / np.sqrt(6 + 4*CKL) / np.sqrt(1 - 1/self.epsoct**2 *
      (F(CKL) - self.chi)**2) / np.sqrt(2* np.fabs(self.phiq - CKL))), 
      CKLmin, CKLmax, epsabs=1e-12, epsrel=1e-12, limit=100)

    return P[0]
Example #16
	def value_dp(self,lb=-10.,ub=10.):
		"""
		Method that calls the dynamic programming method that computes
		the value of beliefs and the optimal bounds for decisions,
		adjusting the predicted average reward (rho)
		"""
		if self.store_p:
			f = lambda x: self.backpropagate_value(x)
		else:
			f = lambda x: self.memory_efficient_backpropagate_value(x)
		try:
			self.rho = optimize.brentq(f,lb,ub)
		except ValueError as er:
			if er.args[0]=="f(a) and f(b) must have different signs":
				m = f(lb)
				M = f(ub)
				if m!=0 and M!=0:
					while np.sign(m)==np.sign(M):
						if (m<M and m<0) or (m>M and m>0):
							lb = ub
							ub = ub*10
							M = f(ub)
						elif (m>M and m<0) or (m<M and m>0):
							ub = lb
							lb = lb*10
							m = f(lb)
					self.rho = optimize.brentq(f,lb,ub)
				else:
					if m==0:
						self.rho = lb
					else:
						self.rho = ub
			else:
				raise er
		self.decision_bounds()
Example #17
def find_zeros_tophat():
    """ Find the zeros of the fourier transform of a tophat function.

    (i.e. the x values satisfying

     sin(x) - x * cos(x) == 0

     for 0 < x < 1e5)

    The first ~30000 zeros (for integration purposes).
    """
    x = np.arange(0, 1e5, 1e-2)

    def f(x):
        return (np.sin(x) - x * np.cos(x)) / x 

    y = f(x)
    c0 = find_roots(y)


    #Now find roots with better accuracy

    ind = np.flatnonzero(c0)
    
    from scipy import optimize
    xroots = []
    xroots.append(optimize.brentq(f, 1, x[ind[0]+1]))
    i = 0
    while i < len(ind) - 1:
        xroots.append(optimize.brentq(f, x[ind[i]+1], x[ind[i+1]+1]))
        i += 1

    xroots = np.array(xroots)

    np.savez('zeros.npz', xroots=xroots)
Example #18
def curve_intersect(fun1, fun2, xmin, xmax):
	'''
	Finds the intersection of two curves, bounded in [xmin, xmax]
	Returns an array of x values
	'''
	diff = lambda x: fun1(x)-fun2(x)
	x_range = np.linspace(xmin, xmax, 100)
	m_sign = np.sign(diff(x_range)).astype(int)
	roots = x_range[np.where(m_sign[1:] - m_sign[:-1] != 0)[0] + 1]
	
	# If they don't cross, return None
	if len(roots) == 0:
		return np.array([])
	
	# If they cross exactly once, find the global solution
	elif len(roots) == 1:
		return np.array([brentq(diff, xmin, xmax)])
	
	# If they cross multiple times, find the local solution between each root
	else:
		limits = np.concatenate(([xmin], roots, [xmax]))
		intersections = np.empty(len(limits)-2)
		for ix in range(len(intersections)):
			intersections[ix] = brentq(diff, limits[ix], limits[ix+1])
		return intersections
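Usage sketch (assuming numpy and brentq are imported as the function expects): the three crossings of sin(x) with the line 0.1*x on [0.5, 10] are recovered one bracket at a time.

import numpy as np
crossings = curve_intersect(np.sin, lambda x: 0.1 * x, 0.5, 10.0)
print(crossings)   # approximately [2.85, 7.07, 8.42]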
Example #19
    def compute_fwhm_1(U, V, fun, center):

        cp = int(math.floor(center + 0.5))

        # Min on the right
        r_idx = V[cp:].argmin()
        u_r = U[cp + r_idx]

        if V[cp + r_idx] > 0.5 * peak:
            # FIXME: we have a problem
            # brentq will raise anyway
            pass

        sol_r = opz.brentq(fun, center, u_r)

        # Min on the left
        rV = V[cp - 1::-1]
        rU = U[cp - 1::-1]
        l_idx = rV.argmin()
        u_l = rU[l_idx]
        if rV[l_idx] > 0.5 * peak:
            # FIXME: we have a problem
            # brentq will raise anyway
            pass

        sol_l = opz.brentq(fun, u_l, center)
        fwhm = sol_r - sol_l
        return fwhm
Example #20
def FWHM(series,norm=True,simple=False,notcentered=False):
    if norm:
        series=normalize(series) #assumes these are floats, not integers!
    y=np.abs(series-0.5)
    
    N=len(series)
    half=N//2

    wL = 0
    wR = N-1

    
    #initial solution
    if notcentered:
        series = center_max(series)
#        half=np.argmax(series)
    iL=np.argmin(y[:half])
    iR=np.argmin(y[half:])+half
    if not simple:
        x=np.arange(len(series))
        f=interp.interp1d(x,series-0.5)

        negindsL = np.where(np.logical_and(series<0.5,x<half))[0]
        negindsR = np.where(np.logical_and(series<0.5,x>half))[0]
        iL=optimize.brentq(f,negindsL[-1],negindsL[-1]+1)#half)
        iR=optimize.brentq(f,negindsR[0]-1,negindsR[0])#half,wR)
    return iR-iL
Example #21
def guss_gaussian(x):
    """
    Find a set of better starting parameters for Gaussian function fitting

    Parameters
    ----------
    x : 1D np.array
        1D profile of your data

    Returns
    -------
    out : tuple of float
        estimated value of (a, mu, sigma, c)
    """
    c_guess = (x[0] + x[-1]) / 2
    a_guess = x.max() - c_guess
    mu_guess = x.argmax()
    x_inter = interp1d(range(len(x)), x)

    def _(i):
        return x_inter(i) - a_guess / 2 - c_guess

    try:
        sigma_l_guess = brentq(_, 0, mu_guess)
    except ValueError:
        sigma_l_guess = len(x) / 4
    try:
        sigma_r_guess = brentq(_, mu_guess, len(x) - 1)
    except ValueError:
        sigma_r_guess = 3 * len(x) / 4
    return a_guess, mu_guess, (sigma_r_guess -
                               sigma_l_guess) / 2.35482, c_guess
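The 2.35482 divisor above is the Gaussian FWHM-to-sigma conversion factor, FWHM = 2*sqrt(2*ln(2))*sigma; a one-line check:

import math
print(2 * math.sqrt(2 * math.log(2)))   # 2.3548200450309493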
Example #22
def test_apocenter_pericenter():
    pot = KeplerPotential(m=1., units=solarsystem)
    w0 = CartesianPhaseSpacePosition(pos=[1,0,0.]*u.au,
                                     vel=[0.,1.5*np.pi,0.]*u.au/u.yr)

    w = pot.integrate_orbit(w0, dt=0.01, n_steps=10000, Integrator=DOPRI853Integrator)
    apo = w.apocenter()
    per = w.pericenter()

    assert apo.unit == u.au
    assert per.unit == u.au
    assert apo > per

    # see if they're where we expect
    E = np.mean(w.energy()).decompose(pot.units).value
    # Phi = np.mean(w.potential_energy()).value
    L = np.mean(np.sqrt(np.sum(w.angular_momentum()**2, axis=0))).decompose(pot.units).value
    def func(r):
        val = 2*(E-pot.value([r,0,0]).value[0]) - L**2/r**2
        return val

    pred_apo = so.brentq(func, 0.9, 1.0)
    pred_per = so.brentq(func, 0.3, 0.5)

    assert np.allclose(apo.value, pred_apo, rtol=1E-2)
    assert np.allclose(per.value, pred_per, rtol=1E-2)
Example #23
    def spike_v_dvdt(self, num_samples, dvdt2v_scale=0.25, interp_order=3,
                     start_thresh=10.0, stop_thresh=-10.0, index_buffer=5):
        """
        Cuts out loops (either spikes or sub-threshold oscillations) from the
        v-dV/dt trace based on the provided start and stop threshold values

        `num_samples`     -- the number of samples to place around the V-dV/dt
                             spike
        `interp_order`    -- the order of the spline interpolation used
        `start_thresh`    -- the start dV/dt threshold
        `stop_thresh`     -- the stop dV/dt threshold
        `index_buffer`    -- the number of indices either side of the spike
                             period to include in the fitting of the spline
                             to avoid boundary effects
        """
        # Cut up the traces in between where the interpolated curve exactly
        # crosses the start and end thresholds
        spikes = []
        for start_i, stop_i in self._spike_period_indices(
                                  threshold='dvdt', start=start_thresh,
                                  stop=stop_thresh, index_buffer=index_buffer):
            v_spl, dvdt_spl, s = self._interpolate_v_dvdt(
                                                    self[start_i:stop_i],
                                                    self.dvdt[start_i:stop_i],
                                                    dvdt2v_scale, interp_order)
            start_s = brentq(lambda x: (dvdt_spl(x) - start_thresh),
                             s[0], s[index_buffer * 2])
            end_s = brentq(lambda x: (dvdt_spl(x) - stop_thresh),
                           s[-index_buffer * 2], s[-1])
            # Over the loop length interpolate the splines at a fixed number of
            # points
            spike_s = numpy.linspace(start_s, end_s, num_samples)
            spike = numpy.array((v_spl(spike_s), dvdt_spl(spike_s)))
            spikes.append(spike)
        return spikes
Example #24
def rho_calculator(av_w,agg=False,indist=False,M=1,**kwargs):
    """
        Computes the lagrange multiplier for the case of fixed k and a desired average existing weight av_w = T/<E> (where T is the total number of events).
        It does so by inverting the equation:
            <t | t > 0>(rho) -> rho(av_w) according to each case.
            
        Input:
            av_w: total number of events divided by total number of binary edges.
            agg: Set to true to analyze the case of aggregation of binary networks
            indist: Set to true to analyze the case of weighted networks 
            M: Number of aggregated layers (ignored for the ME case)
        Output:
            rho: Lagrange multiplier for the generation of random network ensembles.
    """
    av_w = 1.0*av_w
    if not indist and not agg: # case ME, fully analytical
        from scipy import special
        x = -av_w*np.exp(-av_w)
        rho = (special.lambertw(x,0)+av_w).real
    else:
        from scipy import optimize as opt
        if indist:
            if M==1: # fully analytical
                rho = 1. - 1/av_w
            else: # must solve equation numerically
                rho = opt.brentq(rho_ZINB,1e-16,1-1e-14,args=(av_w,M),**kwargs)
        else:
            if M==1:  # fully analytical
                rho = 1 #pretty absurd, it is a binary network!
            else:  # must solve equation numerically
                rho = opt.brentq(rho_ZIB,1e-12,2e20,args=(av_w,M),**kwargs)
    return rho
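In the ME branch, the Lambert-W expression appears to invert the zero-truncated mean relation av_w = rho / (1 - exp(-rho)); a sanity check under that reading:

import numpy as np
rho = rho_calculator(2.5)
print(rho / (1 - np.exp(-rho)))   # ~2.5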
Example #25
    def pressure(self, T,wc):
        func = self._wc_functor(T,wc)

        dp = 101325
        a = 101325
        fa = func(a)
        if (fa < 0):
            fb = fa
            b = a
            nits = 0
            while fb <= 0 and nits < 1000:
                a = b
                b = b - dp
                fb = func(b)
                nits = nits + 1

            p,r = optimize.brentq(func,a,b, full_output=True)
            assert r.converged
        else:
            fb = fa
            b = a
            nits = 0
            while fb >= 0 and nits < 1000:
                a = b
                b = b + dp
                fb = func(b)
                nits = nits + 1

            p,r = optimize.brentq(func,b,a,full_output=True)
            assert r.converged

        return p
Example #26
def find_effective_Fisher_region(P, IP, target_match, param_names,param_bounds):
    """
    Example Usage:
        find_effective_Fisher_region(P, IP, 0.9, ['Mc', 'eta'], [[mchirp(P.m1,P.m2)-lal.MSUN_SI,mchirp(P.m1,P.m2)+lal.MSUN_SI], [0.05, 0.25]])
    Arguments:
        - P: a ChooseWaveformParams object describing a target signal
        - IP: An inner product class to compute overlaps.
                Should have deltaF and length consistent with P
        - target_match: find parameter variation where overlap is target_match.
                Should be a real number in [0,1]
        - param_names: array of string names for members of P to vary.
                Should have length N for N params to be varied
                e.g. ['Mc', 'eta']
        - param_bounds: array of max variations of each param in param_names
                Should be an Nx2 array for N params to be varied
    
    Returns:
        Array of boundaries of a hypercube meant to encompass region where
                match is >= target_match.
                e.g. [ [3.12,3.16] , [0.12, 0.18] ]

    N.B. Only checks variations along parameter axes. If params are correlated,
    may get better matches off-axis, and the hypercube won't fully encompass
    region where target_match is achieved. Therefore, allow a generous
    safety factor in your value of 'target_match'.
    """
    TOL = 1.e-3 # Don't need to be very precise for this...
    Nparams = len(param_names)
    assert len(param_bounds) == Nparams
    param_cube = []
    hfSIG = lsu.norm_hoff(P, IP)
    for i, param in enumerate(param_names):
        PT = P.copy()
        if param=='Mc':
            param_peak = lsu.mchirp(P.m1, P.m2)
        elif param=='eta':
            param_peak = lsu.symRatio(P.m1, P.m2)
        else:
            param_peak = getattr(P, param)
        func = lambda x: update_params_ip(hfSIG, PT, IP, [param], [x]) - target_match
        try:
            min_param = brentq(func, param_peak, param_bounds[i][0], xtol=TOL)
        except ValueError:
            print "\nWarning! Value", param_bounds[i][0], "of", param,\
                    "did not bound target match", target_match, ". Using",\
                    param_bounds[i][0], "as the lower bound of", param,\
                    "range for the effective Fisher region.\n"
            min_param = param_bounds[i][0]
        try:
            max_param = brentq(func, param_peak, param_bounds[i][1], xtol=TOL)
        except ValueError:
            print "\nWarning! Value", param_bounds[i][1], "of", param,\
                    "did not bound target match", target_match, ". Using",\
                    param_bounds[i][1], "as the upper bound of", param,\
                    "range for the effective Fisher region.\n"
            max_param = param_bounds[i][1]
        param_cube.append( [min_param, max_param] )

    return param_cube
Example #27
 def apply_load( self, sigma ):
     if sigma > self.sigma_c_max[0]:
         raise ValueError( 'applied load ', sigma , 'MPa is larger than composite strength ', self.sigma_c_max[0], 'MPa' )
     else:
         def residuum( w ):
             self.model.w = float( w )
             return sigma - self.sigma_c
         brentq( residuum, 0.0, min( self.sigma_c_max[1], 20. ) )
Example #28
 def get_eta_from_t(self,t):
     try:
         return brentq(lambda eta: t-self.cycloidt(eta),0,2*np.pi)
     except TypeError:
         return [brentq(lambda eta: tt-self.cycloidt(eta),0,2*np.pi) for tt in t]
     except ValueError:
         print "Error: t out of bounds for this B. (t,B) = ("+str(t)+","+str(self.B)+")"
         raise
Example #29
def one_parameter_interval(lf, target, bound,
                           confidence_level=0.9, kind='upper',
                           bestfit_routine=None,
                           t_ppf=None,
                           **kwargs):
    """Set a confidence_level interval of kind (central, upper, lower) on the parameter target of lf.
    This assumes the likelihood ratio is asymptotically chi2(1) distributed (Wilks' theorem)
    target: parameter of lf to constrain
    bound: bound(s) for the line search. For upper and lower: single value, for central: 2-tuple.
    t_ppf: function (hypothesis, level) -> test statistic (-2 Log[ L(test)/L(bestfit) ])
           must return value at which test statistic reaches level'th quantile if hypothesis is true.
           If not specified, Wilks' theorem will be used.
    kwargs: dictionary with arguments to bestfit
    """
    bestfit_routine = _get_bestfit_routine(bestfit_routine)
    if target is None:
        target = lf.source_list[-1] + '_rate_multiplier'

    # Find the likelihood of the global best fit (denominator of likelihood ratio)
    result, max_loglikelihood = bestfit_routine(lf, **kwargs)
    global_best = result[target]

    def t(hypothesis, critical_quantile):
        """(profile) likelihood ratio test statistic, with critical_value subtracted
        critical_quantile: fraction (percentile/100) of the test statistic distribution you want to find
        """
        if t_ppf is None:
            # Use Wilks' theorem
            # "But I thought there was a chi2 in Wilks' theorem!" Quite right, but
            # stats.norm.ppf(CL)**2 = stats.chi2(1).ppf(2*CL - 1)
            # So the chi2 formula is often quoted for central CIs, the normal one for bounds...
            # This cost me hours of confusion. Please explain this to your students if you're a statistics professor.
            critical_value = stats.norm.ppf(critical_quantile) ** 2
        else:
            # Use user-specified function
            critical_value = t_ppf(hypothesis, critical_quantile)

        if kind == 'upper' and hypothesis <= global_best:
            result = 0
        elif kind == 'lower' and hypothesis >= global_best:
            result = 0
        else:
            # Find the best fit assuming the hypothesis (numerator of likelihood ratio)
            lf_kwargs = {target: hypothesis}
            lf_kwargs.update(kwargs)
            fitresult, ll = bestfit_routine(lf, **lf_kwargs)
            result = 2*(max_loglikelihood - ll)

        return result - critical_value

    if kind == 'central':
        a = brentq(t, bound[0], global_best, args=((1 - confidence_level) / 2,))
        b = brentq(t, global_best, bound[1], args=(1 - (1 - confidence_level) / 2,))
        return a, b
    elif kind == 'lower':
        return brentq(t, bound, global_best, args=(1 - confidence_level,))
    elif kind == 'upper':
        return brentq(t, global_best, bound, args=(confidence_level,))
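A quick numerical check of the identity quoted in the comments above:

from scipy import stats
CL = 0.9
print(stats.norm.ppf(CL) ** 2)        # 1.6424...
print(stats.chi2(1).ppf(2 * CL - 1))  # 1.6424... (same value)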
Example #30
    def calcRapRperi(self,**kwargs):
        """
        NAME:
           calcRapRperi
        PURPOSE:
           calculate the apocenter and pericenter radii
        INPUT:
        OUTPUT:
           (rperi,rap)
        HISTORY:
           2010-12-01 - Written - Bovy (NYU)
        """
        if hasattr(self,'_rperirap'):
            return self._rperirap
        EL= self.calcEL(**kwargs)
        E, L= EL
        if self._vR == 0. and m.fabs(self._vT - vcirc(self._pot,self._R)) < _EPS: #We are on a circular orbit
            rperi= self._R
            rap = self._R
        elif self._vR == 0. and self._vT > vcirc(self._pot,self._R): #We are exactly at pericenter
            rperi= self._R
            if self._gamma != 0.:
                startsign= _rapRperiAxiEq(self._R,E,L,self._pot)
                startsign/= m.fabs(startsign)
            else: startsign= 1.
            rend= _rapRperiAxiFindStart(self._R,E,L,self._pot,rap=True,
                                        startsign=startsign)
            rap= optimize.brentq(_rapRperiAxiEq,rperi+0.00001,rend,
                                 args=(E,L,self._pot))
#                                   fprime=_rapRperiAxiDeriv)
        elif self._vR == 0. and self._vT < vcirc(self._pot,self._R): #We are exactly at apocenter
            rap= self._R
            if self._gamma != 0.:
                startsign= _rapRperiAxiEq(self._R,E,L,self._pot)
                startsign/= m.fabs(startsign)
            else: startsign= 1.
            rstart= _rapRperiAxiFindStart(self._R,E,L,self._pot,
                                          startsign=startsign)
            if rstart == 0.: rperi= 0.
            else:
                rperi= optimize.brentq(_rapRperiAxiEq,rstart,rap-0.000001,
                                       args=(E,L,self._pot))
#                                   fprime=_rapRperiAxiDeriv)
        else:
            if self._gamma != 0.:
                startsign= _rapRperiAxiEq(self._R,E,L,self._pot)
                startsign/= m.fabs(startsign)
            else:
                startsign= 1.
            rstart= _rapRperiAxiFindStart(self._R,E,L,self._pot,
                                          startsign=startsign)
            if rstart == 0.: rperi= 0.
            else: 
                try:
                    rperi= optimize.brentq(_rapRperiAxiEq,rstart,self._R,
                                           (E,L,self._pot),
                                           maxiter=200)
                except RuntimeError:
                    raise UnboundError("Orbit seems to be unbound")
            rend= _rapRperiAxiFindStart(self._R,E,L,self._pot,rap=True,
                                        startsign=startsign)
            rap= optimize.brentq(_rapRperiAxiEq,self._R,rend,
                                 (E,L,self._pot))
        self._rperirap= (rperi,rap)
        return self._rperirap
Example #31
def evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder,
             phase_train_placeholder, batch_size_placeholder,
             control_placeholder, embeddings, labels, image_paths,
             actual_issame, batch_size, nrof_folds, distance_metric,
             subtract_mean, use_flipped_images,
             use_fixed_image_standardization):
    # Run forward pass to calculate embeddings
    print('Running forward pass on LFW images')

    # Enqueue one epoch of image paths and labels
    nrof_embeddings = len(
        actual_issame) * 2  # nrof_pairs * nrof_images_per_pair
    nrof_flips = 2 if use_flipped_images else 1
    nrof_images = nrof_embeddings * nrof_flips
    labels_array = np.expand_dims(np.arange(0, nrof_images), 1)
    image_paths_array = np.expand_dims(
        np.repeat(np.array(image_paths), nrof_flips), 1)
    control_array = np.zeros_like(labels_array, np.int32)
    if use_fixed_image_standardization:
        control_array += np.ones_like(
            labels_array) * facenet.FIXED_STANDARDIZATION
    if use_flipped_images:
        # Flip every second image
        control_array += (labels_array % 2) * facenet.FLIP
    sess.run(
        enqueue_op, {
            image_paths_placeholder: image_paths_array,
            labels_placeholder: labels_array,
            control_placeholder: control_array
        })

    embedding_size = int(embeddings.get_shape()[1])
    assert nrof_images % batch_size == 0, 'The number of LFW images must be an integer multiple of the LFW batch size'
    nrof_batches = nrof_images // batch_size
    emb_array = np.zeros((nrof_images, embedding_size))
    lab_array = np.zeros((nrof_images, ))
    for i in range(nrof_batches):
        feed_dict = {
            phase_train_placeholder: False,
            batch_size_placeholder: batch_size
        }
        emb, lab = sess.run([embeddings, labels], feed_dict=feed_dict)
        lab_array[lab] = lab
        emb_array[lab, :] = emb
        if i % 10 == 9:
            print('.', end='')
            sys.stdout.flush()
    print('')
    embeddings = np.zeros((nrof_embeddings, embedding_size * nrof_flips))
    if use_flipped_images:
        # Concatenate embeddings for flipped and non flipped version of the images
        embeddings[:, :embedding_size] = emb_array[0::2, :]
        embeddings[:, embedding_size:] = emb_array[1::2, :]
    else:
        embeddings = emb_array

    assert np.array_equal(
        lab_array, np.arange(nrof_images)
    ) == True, 'Wrong labels used for evaluation, possibly caused by training examples left in the input pipeline'
    tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
        embeddings,
        actual_issame,
        nrof_folds=nrof_folds,
        distance_metric=distance_metric,
        subtract_mean=subtract_mean)

    print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))

    auc = metrics.auc(fpr, tpr)
    print('Area Under Curve (AUC): %1.3f' % auc)
    eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
    print('Equal Error Rate (EER): %1.3f' % eer)
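The EER line above finds where the ROC curve crosses the anti-diagonal, i.e. where the false accept and false reject rates coincide. A tiny self-contained illustration with the toy ROC tpr = sqrt(fpr), whose equal error rate solves 1 - x - sqrt(x) = 0:

import numpy as np
from scipy import interpolate
from scipy.optimize import brentq

fpr = np.linspace(0., 1., 1001)
tpr = np.sqrt(fpr)
eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
print(eer)   # ~0.38197, i.e. (3 - sqrt(5)) / 2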
Example #32
    def ci_mean(self, sig=.05, method='gamma', epsilon=10 ** -8,
                 gamma_low=-10 ** 10, gamma_high=10 ** 10):
        """
        Returns the confidence interval for the mean.

        Parameters
        ----------
        sig : float
            significance level. Default is .05

        method : str
            Root finding method,  Can be 'nested-brent' or
            'gamma'.  Default is 'gamma'

            'gamma' Tries to solve for the gamma parameter in the
            Lagrange (see Owen pg 22) and then determine the weights.

            'nested-brent' uses Brent's method to find the confidence
            intervals but must maximize the likelihood ratio on every
            iteration.

            gamma is generally much faster.  If the optimization does not
            converge, try expanding the gamma_high and gamma_low
            variables.

        gamma_low : float
            Lower bound for gamma when finding lower limit.
            If function returns f(a) and f(b) must have different signs,
            consider lowering gamma_low.

        gamma_high : float
            Upper bound for gamma when finding upper limit.
            If function returns f(a) and f(b) must have different signs,
            consider raising gamma_high.

        epsilon : float
            When using 'nested-brent', amount to decrease (increase)
            from the maximum (minimum) of the data when
            starting the search.  This is to protect against the
            likelihood ratio being zero at the maximum (minimum)
            value of the data.  If data is very small in absolute value
            (<10 ``**`` -6) consider shrinking epsilon

            When using 'gamma', amount to decrease (increase) the
            minimum (maximum) by to start the search for gamma.
            If function returns f(a) and f(b) must have different signs,
            consider lowering epsilon.

        Returns
        -------
        Interval : tuple
            Confidence interval for the mean
        """
        endog = self.endog
        sig = 1 - sig
        if method == 'nested-brent':
            self.r0 = chi2.ppf(sig, 1)
            middle = np.mean(endog)
            epsilon_u = (max(endog) - np.mean(endog)) * epsilon
            epsilon_l = (np.mean(endog) - min(endog)) * epsilon
            ulim = optimize.brentq(self._ci_limits_mu, middle,
                max(endog) - epsilon_u)
            llim = optimize.brentq(self._ci_limits_mu, middle,
                min(endog) + epsilon_l)
            return llim, ulim

        if method == 'gamma':
            self.r0 = chi2.ppf(sig, 1)
            gamma_star_l = optimize.brentq(self._find_gamma, gamma_low,
                min(endog) - epsilon)
            gamma_star_u = optimize.brentq(self._find_gamma, \
                         max(endog) + epsilon, gamma_high)
            weights_low = ((endog - gamma_star_l) ** -1) / \
                np.sum((endog - gamma_star_l) ** -1)
            weights_high = ((endog - gamma_star_u) ** -1) / \
                np.sum((endog - gamma_star_u) ** -1)
            mu_low = np.sum(weights_low * endog)
            mu_high = np.sum(weights_high * endog)
            return mu_low,  mu_high
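This method belongs to the empirical likelihood code in statsmodels; a usage sketch, assuming statsmodels is installed and that this is the emplike DescStat method the code suggests:

import numpy as np
import statsmodels.api as sm

el = sm.emplike.DescStat(np.random.standard_normal(100))
print(el.ci_mean())   # empirical-likelihood 95% CI for the mean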
Example #33
 def de_poly3(self, r_):
     try:
         return brentq(lambda r: r * (self.k1 * r**2 + 1 - self.k1) - r_, 0,
                       2)
     except ValueError:
         return float("inf")
Example #34
def impliedPutVolatility(S, K, r, price, T):
    impliedVol = brentq(lambda x: price - Black76LognormalPut(S, K, r, x, T),
                        1e-6, 1)

    return impliedVol
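Black76LognormalPut is project-specific; a self-contained sketch of the same round trip using the standard Black-76 put formula (with S read as the forward price):

from math import exp, log, sqrt
from scipy.stats import norm
from scipy.optimize import brentq

def black76_put(F, K, r, sigma, T):
    d1 = (log(F / K) + 0.5 * sigma ** 2 * T) / (sigma * sqrt(T))
    d2 = d1 - sigma * sqrt(T)
    return exp(-r * T) * (K * norm.cdf(-d2) - F * norm.cdf(-d1))

price = black76_put(100., 105., 0.02, 0.2, 1.0)
iv = brentq(lambda x: price - black76_put(100., 105., 0.02, x, 1.0), 1e-6, 1)
print(iv)   # recovers 0.2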
Example #35
	## perform linear regression for the effective velocities
	#ab_p_ud = np.polyfit(H_x_list, velocity_eff_p_updown, 1)
	#ab_p_du = np.polyfit(H_x_list, velocity_eff_p_downup, 1)
	#ab_n_ud = np.polyfit(H_x_list, velocity_eff_n_updown, 1)
	#ab_n_du = np.polyfit(H_x_list, velocity_eff_n_downup, 1)

	#print(j, "-th slope for positive up-down is", ab_p_ud[0])

	#H_c = (ab_p_ud[1] / ab_p_ud[0] - ab_p_du[1] / ab_p_du[0] + ab_n_ud[1] / ab_n_ud[0] - ab_n_du[1] / ab_n_du[0]) / 4
	#H_c *= 1e-03 * 4 * pi	## Oe conversion
	f_p_ud = interpolate.interp1d(H_x_list, velocity_stat_p_updown)
	f_p_du = interpolate.interp1d(H_x_list, velocity_stat_p_downup)
	f_n_ud = interpolate.interp1d(H_x_list, velocity_stat_n_updown)
	f_n_du = interpolate.interp1d(H_x_list, velocity_stat_n_downup)

	H_DMI_p_ud = optimize.brentq(f_p_ud, -1450 * 1e+03 / (4 * pi), 1450 * 1e+03 / (4 * pi))
	H_DMI_p_du = optimize.brentq(f_p_du, -1450 * 1e+03 / (4 * pi), 1450 * 1e+03 / (4 * pi))
	H_DMI_n_ud = optimize.brentq(f_n_ud, -1450 * 1e+03 / (4 * pi), 1450 * 1e+03 / (4 * pi))
	H_DMI_n_du = optimize.brentq(f_n_du, -1450 * 1e+03 / (4 * pi), 1450 * 1e+03 / (4 * pi))
	
	H_DMI = (- H_DMI_p_ud + H_DMI_p_du - H_DMI_n_ud + H_DMI_n_du) / 4
	H_DMI *= 1e-03 * 4 * pi
	DMI[j] = H_DMI * M_s * Delta / 10	# mJ/m^2
	j += 1
#####################################
####### current sweep end ###########
#####################################


# plot DMI v.s. J
plt.figure(1)
Example #36
def brentq_onesideerror(f1, lb, ub, maxit=1000, precision=1e-11):
    """
    One side of the bracket may raise an error.
    Split the interval in two until only one endpoint raises an error.
    Both sides cannot raise an error: that case would require splitting into many sub-intervals until a working region is found, which would make this a more complicated function.
    """
    try:
        lv = f1(lb)
    except Exception:
        lv = None
    try:
        uv = f1(ub)
    except Exception:
        uv = None

    if lv is None and uv is None:
        print('Both lb and ub raise an exception. ub: ' + str(ub) + '. lb: ' +
              str(lb))
        print('Running function again on lb to show what error was.')
        f1(lb)

    # would be weird if the values equal exactly 0
    if lv == 0:
        raise ValueError('lv == 0')
    if uv == 0:
        raise ValueError('uv == 0')

    if lv is None or uv is None:
        # find whether I need the side that failed to be positive or negative
        if lv is None:
            if uv < 0:
                needmediumgt0 = True
            else:
                needmediumgt0 = False
        else:
            if lv < 0:
                needmediumgt0 = True
            else:
                needmediumgt0 = False

        # iterate until I find two numbers for which the function does not fail and returns either side of 0
        i = 0
        while i < maxit:
            medium = 0.5 * (lb + ub)
            try:
                mediumv = f1(medium)
            except Exception:
                mediumv = None
            # if value not exist, set lb as value tried
            if mediumv is None:
                # still getting a failed attempt so reduce size of range considered and try again
                if lv is None:
                    lb = medium
                else:
                    ub = medium
            elif mediumv > 0 and needmediumgt0 is True or mediumv < 0 and needmediumgt0 is False:
                # mediumv is a value we need
                # set failed side to be medium and break
                if lv is None:
                    lb = medium
                else:
                    ub = medium
                break
            else:
                # function worked for medium
                # but returned a value on the same side as the side that initially worked
                # set new bound on side that worked to be the medium value and repeat
                if lv is None:
                    ub = medium
                else:
                    lb = medium

            i += 1

            if ub - lb < precision:
                if lv is None:
                    print(
                        'Upper bound which worked from start value converged to: '
                        + str(ub) + '.')
                    print('f(ub) = ' + str(f1(ub)))
                if uv is None:
                    print(
                        'Lower bound which worked from start value converged to: '
                        + str(lb) + '.')
                    print('f(lb) = ' + str(f1(lb)))
                raise ValueError(
                    'No solution exists in the range of values given.')

        if i == maxit:
            raise ValueError(
                'Reached maximum iteration. Brentq on one side failed.')

    # adjust_lb = lb
    # adjust_ub = ub
    #
    # if lv is None:
    #     for i in range(maxit):
    #         medium = 0.5 * (adjust_lb + adjust_ub)
    #         try:
    #             mediumv = f1(medium)
    #         except Exception:
    #             mediumv = None
    #         # if value not exist, set adjust_lb as value tried
    #         if mediumv is None:
    #             adjust_lb = medium
    #         elif (mediumv > 0 and uv > 0) or (mediumv < 0 and uv < 0):
    #             adjust_ub = medium
    #         else:
    #             lb = medium
    #             break
    #
    #     if i == maxit - 1:
    #         raise ValueError('Reached maximum iteration.')
    #
    # if uv is None:
    #     for i in range(maxit):
    #         medium = 0.5 * (adjust_lb + adjust_ub)
    #         try:
    #             mediumv = f1(medium)
    #         except Exception:
    #             mediumv = None
    #         # if value not exist, set adjust_ub as value tried
    #         if mediumv is None:
    #             adjust_ub = medium
    #         elif (mediumv > 0 and uv > 0) or (mediumv < 0 and lv < 0):
    #             adjust_ub = medium
    #         else:
    #             ub = medium
    #             break
    #
    #     if i == maxit - 1:
    #         raise ValueError('Reached maximum iteration.')

    sol = brentq(f1, lb, ub, xtol=precision)

    return (sol)
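Usage sketch: here the lower endpoint raises (square root of a negative number) while the upper endpoint evaluates cleanly, so the helper bisects the bracket down to a valid sign change before handing off to brentq.

import math
root = brentq_onesideerror(lambda x: math.sqrt(x) - 1.0, -2.0, 9.0)
print(root)   # 1.0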
Example #37
    return old_pens_inc
 
#Grid  
k_nodes=5
k_min=0.5*k_prime[0]
k_max=1.5*k_prime[0]
k_grid= np.linspace(k_min,k_max, k_nodes)   

#Solving the HH problem along the Euler Equation

for k, kk in enumerate(k_grid):
    for j, z in enumerate(zeta_disc):
        def linearised_Euler(a1):
           a=beta*w(k,z)-a1-lamb*(1-tau)*old_lab_inc_func(kk)-(1-lamb)*old_pens_inc_func(z,kk) #originally wt(kk,z) #Euler after transf., III
           return (a)
        state_matrix[k,j]=optimize.brentq(linearised_Euler,0.0000001,1)
        old_pens_inc=0
        old_lab_inc=0
        sav_rate[k,j]=state_matrix[k,j]/(1-tau)*w(k,z) #Policy function for savings

        

#Simulating the economy T time periods
cons_1=np.empty([T])
cons_2=np.empty([T])
k2_tmrw=np.empty([T])
binominal=[0,1]
binominal_zeta= np.random.choice(binominal, size=T, p=[.5, .5])
#binominal_rho=np.random.choice(binominal, size=T, p=[.5, .5])
rho_T= np.random.choice(rho, size=T, p=[.5, .5])
eta_T= np.random.choice(eta, size=T)
Example #38
def tunnelFromPhase(phases,
                    start_phase,
                    V,
                    dV,
                    Tmax,
                    Ttol=1e-3,
                    maxiter=100,
                    phitol=1e-8,
                    overlapAngle=45.0,
                    nuclCriterion=lambda S, T: S / (T + 1e-100) - 140.0,
                    verbose=True,
                    fullTunneling_params={}):
    """
    Find the instanton and nucleation temperature for tunneling from
    `start_phase`.

    Parameters
    ----------
    phases : dict
        Output from :func:`traceMultiMin`.
    start_phase : Phase object
        The metastable phase from which tunneling occurs.
    V, dV : callable
        The potential V(x,T) and its gradient.
    Tmax : float
        The highest temperature at which to try tunneling.
    Ttol : float, optional
        Tolerance for finding the nucleation temperature.
    maxiter : int, optional
        Maximum number of times to try tunneling.
    phitol : float, optional
        Tolerance for finding the minima.
    overlapAngle : float, optional
        If two phases are in the same direction, only try tunneling to the
        closer one. Set to zero to always try tunneling to all available phases.
    nuclCriterion : callable
        Function of the action *S* and temperature *T*. Should return 0 for the
        correct nucleation rate, > 0 for a low rate and < 0 for a high rate.
        Defaults to ``S/T - 140``.
    verbose : bool
        If true, print a message before each attempted tunneling.
    fullTunneling_params : dict
        Parameters to pass to :func:`pathDeformation.fullTunneling`.

    Returns
    -------
    dict or None
        A description of the tunneling solution at the nucleation temperature,
        or None if there is no found solution. Has the following keys:

        - *Tnuc* : the nucleation temperature
        - *low_vev, high_vev* : vevs for the low-T phase (the phase that the
          instanton tunnels to) and high-T phase (the phase that the instanton
          tunnels from).
        - *low_phase, high_phase* : identifier keys for the low-T and high-T
          phases.
        - *action* : The Euclidean action of the instanton.
        - *instanton* : Output from :func:`pathDeformation.fullTunneling`, or
          None for a second-order transition.
        - *trantype* : 1 or 2 for first or second-order transitions.
    """
    outdict = {}  # keys are T values
    args = (phases, start_phase, V, dV, phitol, overlapAngle, nuclCriterion,
            fullTunneling_params, verbose, outdict)
    Tmin = start_phase.T[0]
    T_highest_other = Tmin
    for phase in phases.values():
        T_highest_other = max(T_highest_other, phase.T[-1])
    Tmax = min(Tmax, T_highest_other)
    assert Tmax >= Tmin
    try:
        Tnuc = optimize.brentq(_tunnelFromPhaseAtT,
                               Tmin,
                               Tmax,
                               args=args,
                               xtol=Ttol,
                               maxiter=maxiter,
                               disp=False)
    except ValueError as err:
        if err.args[0] != "f(a) and f(b) must have different signs":
            raise
        if nuclCriterion(outdict[Tmax]['action'], Tmax) > 0:
            if nuclCriterion(outdict[Tmin]['action'], Tmax) < 0:
                # tunneling *may* be possible. Find the minimum.
                # It's important to make an appropriate initial guess;
                # otherwise the minimization routine may get stuck in a
                # region where the action is infinite. Modify Tmax.
                Tmax = _maxTCritForPhase(phases, start_phase, V, Ttol)

                def abort_fmin(T, outdict=outdict, nc=nuclCriterion):
                    T = T[0]  # T is an array of size 1
                    if nc(outdict[T]['action'], T) <= 0:
                        raise StopIteration(T)

                try:
                    Tmin = optimize.fmin(_tunnelFromPhaseAtT,
                                         0.5 * (Tmin + Tmax),
                                         args=args,
                                         xtol=Ttol * 10,
                                         ftol=1.0,
                                         maxiter=maxiter,
                                         disp=0,
                                         callback=abort_fmin)[0]
                except StopIteration as err:
                    Tmin = err.args[0]
                if nuclCriterion(outdict[Tmin]['action'], Tmin) > 0:
                    # no tunneling possible
                    return None
                Tnuc = optimize.brentq(_tunnelFromPhaseAtT,
                                       Tmin,
                                       Tmax,
                                       args=args,
                                       xtol=Ttol,
                                       maxiter=maxiter,
                                       disp=False)
            else:
                # no tunneling possible
                return None
        else:
            # tunneling happens right away at Tmax
            Tnuc = Tmax
    rdict = outdict[Tnuc]
    return rdict if rdict['trantype'] > 0 else None
Example #39
def grd1(x):
    return 1 - np.e**(-x**2 /
                      1000) + 0.28 * (1 - np.e**
                                      (-.03 * x**2)) * (1 - np.cos(20 * x**.2))


def diff(x, v0, th0):
    return traj(x, v0, th0) - gr(x)  #intersectsWithGround


def diff1(x, v0, th0):
    return traj(x, v0, th0) - grd1(x)  #intersectwithGround


#Numerical root finding with brentq: where the trajectory meets the ground
r = brentq(diff, 1, 5, args=(5, 25))
r1 = brentq(diff, 7, 10, args=(10, 25))
r2 = brentq(diff, 15, 18, args=(15, 25))
solution = [r, r1, r2]
plt.subplot(2, 1, 1)

plt.scatter(r, gr(r))
plt.scatter(r1, gr(r1))
plt.scatter(r2, gr(r2))

plt.annotate('({0:.2f}, {1:.2f})'.format(r, gr(r)),
             xy=(r, gr(r)),
             xytext=(r, gr(r) + .2))
plt.annotate('({0:.2f}, {1:.2f})'.format(r1, gr(r1)),
             xy=(r1, gr(r1)),
             xytext=(r1, gr(r1) + .7))
Example #40
0
def lake_model(b=0.42,
               q=2.0,
               mean=0.02,
               stdev=0.001,
               delta=0.98,
               alpha=0.4,
               nsamples=100,
               myears=100,
               c1=0.25,
               c2=0.25,
               r1=0.5,
               r2=0.5,
               w1=0.5,
               seed=None):
    """
    
    :param b: float decay rate for P in Lake
    :param q: float recycling exponent
    :param mean: float mean of natural inflows 
    :param stdev: float standard deviation of natural inflows
    :param delta: float future utility discount rate
    :param alpha: float utility from pollution
    :param nsamples: int, optional
    :param myears: int, optional
    :param c1: float
    :param c2: float
    :param r1: float
    :param r2: float
    :param w1: float
    :param seed: int, optional for random number gen
    :return: tuple
    """
    np.random.seed(seed)
    Pcrit = brentq(lambda x: x**q / (1 + x**q) - b * x, 0.01, 1.5)

    X = np.zeros((myears, ))
    average_daily_P = np.zeros((myears, ))
    reliability = 0.0
    inertia = 0
    utility = 0

    for _ in range(nsamples):
        X[0] = 0.0
        decision = 0.1
        decisions = np.zeros(myears, )

        natural_inflows = np.random.lognormal(
            math.log(mean**2 / math.sqrt(stdev**2 + mean**2)),
            math.sqrt(math.log(1.0 + stdev**2 / mean**2)),
            size=myears)
        for t in range(1, myears):
            #decision rule is used
            decision = get_antropogenic_release(X[t - 1], c1, c2, r1, r2, w1)
            decisions[t] = decision

            X[t] = (1-b)*X[t-1] + X[t-1]**q/(1+X[t-1]**q) + decision +\
                natural_inflows[t-1]
            average_daily_P[t] += X[t] / nsamples
        reliability += np.sum(X < Pcrit) / (nsamples * myears)
        inertia += np.sum(
            np.absolute(np.diff(decisions)) < 0.02) / (nsamples * myears)
        utility += np.sum(
            alpha * decisions * np.power(delta, np.arange(myears))) / nsamples
    max_P = np.max(average_daily_P)
    return max_P, utility, inertia, reliability
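The brentq call at the top of lake_model computes Pcrit, the unstable fixed point where natural recycling x**q / (1 + x**q) exactly balances removal b*x; pushing the lake past it tips it into the eutrophic state. A quick standalone check of that root with the default b and q:

from scipy.optimize import brentq

b, q = 0.42, 2.0
f = lambda x: x**q / (1 + x**q) - b * x
Pcrit = brentq(f, 0.01, 1.5)
print(Pcrit)          # ~0.54 for these defaults
print(abs(f(Pcrit)))  # residual is ~0 at the root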
Example #41
0
def calc_cl(sj, prob):
    # If the uncertainty is very small, restrict the limits of integration
    w1, w2 = (0., np.inf) if sj > 0.01 else (1. - 6. * sj, 1. + 6. * sj)
    # Use brentq to find the x at which the integrated probability equals prob
    return brentq(myprob, 0., 500., args=(sj, prob, w1, w2))
Example #42
0
File: an.py Project: zimoun/mtf
            print('warning')
        fj = fi
    fm, I = fm[1:], I[1:]
    return fm, I


Z = []
for ii in range(m):
    d, ind = changedsign(fdet(k, ii, n, R))
    for p in range(len(ind) - 1):
        j, jj = ind[p], ind[p + 1]
        a, b = k[j], k[jj]
        x, y = fdet(a, ii, n, R), fdet(b, ii, n, R)
        if x * y > 0:
            print('bad sign', x, y, a, b, ii)
        z = brentq(fdet, a, b, args=(ii, n, R))
        Z.append(z)
Z = np.array(Z)
Z.sort()

plt.figure()
plt.plot(k, 0 * k, 'k--')
for ii in range(m):
    plt.plot(k, det(ii), label='n={}'.format(ii))
    plt.plot(Z, 0. * Z, 'gd')
    # d, ind = changedsign(fdet(k, ii, n, R))
    # plt.plot(k[ind], d, 's')

plt.plot(3.14159, 0, 'k.', label='from paper')
plt.plot(3.69245, 0, 'k.')
plt.plot(4.26168, 0, 'k.')
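This snippet is cut off at the top and calls a changedsign helper that is not shown. Reconstructed purely from how it is used here (it must return the function values at sign changes together with their indices), a hypothetical minimal implementation might look like this:

import numpy as np


def changedsign(y):
    # Hypothetical reconstruction: indices i where y changes sign between
    # consecutive samples, returned with the corresponding values of y.
    y = np.asarray(y)
    idx = np.where(np.sign(y[:-1]) * np.sign(y[1:]) < 0)[0]
    return y[idx], idx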
Example #43
0
def plotROCs():
    # NO
    scorefilename_no = 'output/scores_no.csv'
    data_no = pd.read_csv(scorefilename_no, names=['label', 'score'])
    labels_no = data_no['label']
    scores_no = data_no['score']
    labels_no = [int(e) for e in labels_no]
    scores_no = [float(e) for e in scores_no]
    auc_value_no = metrics.roc_auc_score(np.array(labels_no),
                                         np.array(scores_no))

    fpr_no, tpr_no, thresholds_no = metrics.roc_curve(labels_no,
                                                      scores_no,
                                                      pos_label=1)
    eer_no = brentq(lambda x: 1. - x - interp1d(fpr_no, tpr_no)(x), 0., 1.)
    thresh_no = interp1d(fpr_no, thresholds_no)(eer_no)

    # LINEAR
    scorefilename_linear = 'output/scores_linear.csv'
    data_linear = pd.read_csv(scorefilename_linear, names=['label', 'score'])
    labels_linear = data_linear['label']
    scores_linear = data_linear['score']
    labels_linear = [int(e) for e in labels_linear]
    scores_linear = [float(e) for e in scores_linear]
    auc_value_linear = metrics.roc_auc_score(np.array(labels_linear),
                                             np.array(scores_linear))

    fpr_linear, tpr_linear, thresholds_linear = metrics.roc_curve(
        labels_linear, scores_linear, pos_label=1)
    eer_linear = brentq(lambda x: 1. - x - interp1d(fpr_linear, tpr_linear)(x),
                        0., 1.)
    thresh_linear = interp1d(fpr_linear, thresholds_linear)(eer_linear)

    # SPLINE
    scorefilename_spline = 'output/scores_spline.csv'
    data_spline = pd.read_csv(scorefilename_spline, names=['label', 'score'])
    labels_spline = data_spline['label']
    scores_spline = data_spline['score']
    labels_spline = [int(e) for e in labels_spline]
    scores_spline = [float(e) for e in scores_spline]
    auc_value_spline = metrics.roc_auc_score(np.array(labels_spline),
                                             np.array(scores_spline))

    fpr_spline, tpr_spline, thresholds_spline = metrics.roc_curve(
        labels_spline, scores_spline, pos_label=1)
    eer_spline = brentq(lambda x: 1. - x - interp1d(fpr_spline, tpr_spline)(x),
                        0., 1.)
    thresh_spline = interp1d(fpr_spline, thresholds_spline)(eer_spline)

    plt.figure()
    lw = 2
    plt.plot(fpr_no,
             tpr_no,
             color='r',
             lw=lw,
             label='NO interp. (AUC = %0.4f)' % auc_value_no)
    plt.plot(fpr_linear,
             tpr_linear,
             color='g',
             lw=lw,
             label='Linear interp. (AUC = %0.4f)' % auc_value_linear)
    plt.plot(fpr_spline,
             tpr_spline,
             color='b',
             lw=lw,
             label='Spline interp. (AUC = %0.4f)' % auc_value_spline)

    plt.plot([0, 1], [0, 1], color='darkorange', lw=lw, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC curves ')
    plt.legend(loc="lower right")
    plt.show()
    # end plot
    return
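Each eer = brentq(...) line above uses the fact that along a ROC curve the false negative rate at operating point x is 1 - TPR(x); the equal error rate is therefore the root of 1 - x - TPR(x) in x = FPR. A self-contained sketch of the same recipe on synthetic scores (the score distributions are illustrative):

import numpy as np
from scipy.optimize import brentq
from scipy.interpolate import interp1d
from sklearn import metrics

rng = np.random.RandomState(0)
labels = np.r_[np.zeros(500), np.ones(500)]
scores = np.r_[rng.normal(0.0, 1.0, 500), rng.normal(1.5, 1.0, 500)]

fpr, tpr, _ = metrics.roc_curve(labels, scores, pos_label=1)
# EER: the point where the false positive and false negative rates agree,
# i.e. the root of 1 - x - TPR(x) with x the false positive rate.
eer = brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.)
print('EER: %.3f' % eer)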
Example #44
0
def Af(gc, ca=40):
    f1 = lambda ci: Acif(ci) - Agcf(gc, ci, ca)
    ci = optimize.brentq(f1, 0, ca)
    return Acif(ci)
Example #45
0
def findCriticalTemperatures(phases, V, start_high=False):
    """
    Find all temperatures `Tcrit` such that there is degeneracy between any
    two phases.

    Parameters
    ----------
    phases : dict
        Output from :func:`traceMultiMin`.
    V : callable
        The potential function `V(x,T)`, where `x` is the field value (which
        should be an array, not a scalar) and `T` is the temperature.
    start_high : bool, optional
        If True, only include those transitions which could be reached starting
        from the high-T phase. NOT IMPLEMENTED YET.

    Returns
    -------
    list of transitions
        Transitions are sorted in decreasing temperature. Each transition is a
        dictionary with the following keys:

        - *Tcrit* : the critical temperature
        - *low_vev, high_vev* : vevs for the low-T phase (the phase that the
          model transitions to) and high-T phase (the phase that the model
          transitions from).
        - *low_phase, high_phase* : identifier keys for the low-T and high-T
          phases.
        - *trantype* : 1 or 2 for first or second-order transitions.
    """
    transitions = []
    for i in phases.keys():
        for j in phases.keys():
            if i == j:
                continue
            # Try going from i to j (phase1 -> phase2)
            phase1, phase2 = phases[i], phases[j]
            tmax = min(phase1.T[-1], phase2.T[-1])
            tmin = max(phase1.T[0], phase2.T[0])
            if tmin >= tmax:
                # No overlap. Try for second-order.
                if phase2.key in phase1.low_trans:
                    transitions.append(
                        secondOrderTrans(phase1, phase2, 'Tcrit'))
                continue

            def DV(T):
                return V(phase1.valAt(T), T) - V(phase2.valAt(T), T)

            if DV(tmin) < 0:
                # phase1 is lower at tmin, no tunneling
                continue
            if DV(tmax) > 0:
                # phase1 is higher even at tmax, no critical temperature
                continue
            Tcrit = optimize.brentq(DV, tmin, tmax, disp=False)
            tdict = {}
            tdict['Tcrit'] = Tcrit
            tdict['high_vev'] = phase1.valAt(Tcrit)
            tdict['high_phase'] = phase1.key
            tdict['low_vev'] = phase2.valAt(Tcrit)
            tdict['low_phase'] = phase2.key
            tdict['trantype'] = 1
            transitions.append(tdict)
    if not start_high:
        return sorted(transitions, key=lambda x: x['Tcrit'])[::-1]
    start_phase = getStartPhase(phases, V)
    raise NotImplementedError("start_high=True not yet supported")
Example #46
0
def main(args):
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu  # select the visible GPU(s)

    # Limit the fraction of GPU memory that may be allocated
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
    config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)
    config.gpu_options.allow_growth = True

    with tf.Graph().as_default():
        with tf.Session(config=config) as sess:
            # Read the file containing the pairs used for testing
            #pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))

            # Get the paths for the corresponding images
            paths, actual_issame = lfw_finger.get_paths(
                os.path.expanduser(args.finger_dir), args.validate_dir)

            # Load the model
            facenet.load_model(args.model)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            # image_size = images_placeholder.get_shape()[1]  # For some reason this doesn't work for frozen graphs
            image_size = args.image_size
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Running forward pass on validation images')
            batch_size = args.lfw_batch_size
            nrof_images = len(paths)
            print("%d validation pairs" % nrof_images)
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = lfw_finger.load_data(paths_batch, False, False,
                                              image_size)
                feed_dict = {
                    images_placeholder: images,
                    phase_train_placeholder: False
                }
                emb_array[start_index:end_index, :] = sess.run(
                    embeddings, feed_dict=feed_dict)

            tpr, fpr, accuracy, val, val_std, far = lfw_finger.evaluate(
                emb_array, actual_issame, nrof_folds=args.lfw_nrof_folds)

            print('Accuracy: %1.3f+-%1.3f' %
                  (np.mean(accuracy), np.std(accuracy)))
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' %
                  (val, val_std, far))

            auc = metrics.auc(fpr, tpr)
            print('Area Under Curve (AUC): %1.3f' % auc)
            eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x),
                         0., 1.)
            print('Equal Error Rate (EER): %1.3f' % eer)
Example #47
0
        for i in range(output_dis.shape[0]):
            if output_dis[i, 1] >= output_dis[i, 0]:
                output_pred[i] = 1.0
            else:
                output_pred[i] = 0.0

        tol_label = np.concatenate((tol_label, img_label))
        tol_pred = np.concatenate((tol_pred, output_pred))

        pred_prob = torch.softmax(output_dis, dim=1)
        tol_pred_prob = np.concatenate(
            (tol_pred_prob, pred_prob[:, 1].data.numpy()))

        count += 1

    acc_test = metrics.accuracy_score(tol_label, tol_pred)
    loss_test /= count

    fpr, tpr, thresholds = roc_curve(tol_label, tol_pred_prob, pos_label=1)
    eer = brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.)

    # fnr = 1 - tpr
    # hter = (fpr + fnr)/2

    print('[Epoch %d] Test acc: %.2f   EER: %.2f' %
          (opt.id, acc_test * 100, eer * 100))
    text_writer.write('%d,%.2f,%.2f\n' % (opt.id, acc_test * 100, eer * 100))

    text_writer.flush()
    text_writer.close()
Example #48
0
def brentq_widebounderrors(f1,
                           lb,
                           ub,
                           linspace=None,
                           numlinspace=21,
                           loglinspace=False,
                           strictlyincreasing=False,
                           strictlydecreasing=False,
                           increasing=False,
                           decreasing=False,
                           printdetails=False,
                           alwaysuserange=False):
    """
    Relevant if:
    - want to do brentq on bounds when have curve that goes down then up or up then down
    - f1 sometimes returns errors at high or low values
    .
    If is always strictly strictlyincreasing or always strictly strictlydecreasing and does not return an error, can just do this with brentq in a try/except. WHAT DOES THIS MEAN???

    Often I want to solve functions that yield errors for high or low values. If so, I'll search within a linspace to try to find a better range over to use brentq.

    increasing means solve for the crossing point when the function is increasing. strictlyincreasing means return an error if the function decreases at any point.
    """
    # if it is True then we always consider a range of values so even if lb < ub we still want to do range
    if alwaysuserange is not True:
        try:
            lb_val = f1(lb)
        except Exception:
            lb_val = None
        try:
            ub_val = f1(ub)
        except Exception:
            ub_val = None

    if alwaysuserange is True or lb_val is None or ub_val is None or (
            lb_val < 0 and ub_val < 0) or (lb_val > 0 and ub_val > 0) or (
                lb_val < 0 and ub_val > 0
                and decreasing is True) or (lb_val > 0 and ub_val < 0
                                            and increasing is True):
        if strictlyincreasing is True or strictlydecreasing is True:
            if (lb_val > 0 and ub_val > 0) or (lb_val < 0 and ub_val < 0):
                # print('lb_val: ' + str(lb_val))
                # print('ub_val: ' + str(ub_val))
                raise ValueError(
                    'Both lb_val and ub_val have same value when should be strictlyincreasing/strictlydecreasing.'
                )

        if strictlyincreasing is True:
            increasing = True
        if strictlydecreasing is True:
            decreasing = True

        # parse different potential values
        if linspace is None:
            if loglinspace is False:
                xtry = np.linspace(lb, ub, numlinspace)
            else:
                xtry = np.exp(np.linspace(np.log(lb), np.log(ub), numlinspace))
        else:
            xtry = linspace
        xval = []
        yval = []
        for x in xtry:
            try:
                yval.append(f1(x))
                xval.append(x)
            except Exception:
                pass

        if len(xval) == 0:
            raise ValueError('No solutions.')

        if printdetails is True:
            print('xval:')
            print(xval)
            print('yval:')
            print(yval)
        sol = []
        for i in range(len(yval) - 1):
            if strictlyincreasing is True:
                if yval[i + 1] < yval[i]:
                    raise ValueError(
                        'yval are not strictlyincreasing as expected')
            if strictlydecreasing is True:
                if yval[i + 1] > yval[i]:
                    raise ValueError(
                        'yval are not strictlydecreasing as expected')
            if increasing is True:
                if yval[i + 1] > 0 and yval[i] < 0:
                    sol.append(i)
            elif decreasing is True:
                if yval[i + 1] < 0 and yval[i] > 0:
                    sol.append(i)
            else:
                if (yval[i + 1] > 0 and yval[i] < 0) or (yval[i + 1] < 0
                                                         and yval[i] > 0):
                    sol.append(i)
        if printdetails is True:
            print('sol:')
            print(sol)
        if len(sol) == 1:
            lb = xval[sol[0]]
            ub = xval[sol[0] + 1]
        else:
            raise ValueError(
                'Wrong number of solutions. Number of solutions: ' +
                str(len(sol)))
    else:
        if printdetails is True:
            print('lb and ub have opposite sign.')

    sol = brentq(f1, lb, ub)

    return sol
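A usage sketch for the helper above, on a function that raises for part of its domain and is non-monotonic, so a direct brentq call on the wide bounds would fail; f1 and the bounds are illustrative:

import math


def f1(x):
    # Undefined (raises ValueError) for x < 1; rises to a maximum near
    # x = 4.3 and falls again, crossing zero on the way up at x = 2.
    return math.sqrt(x - 1.0) - 0.5 * (x - 4.0) ** 2 + 1.0


root = brentq_widebounderrors(f1, 0.5, 9.0, increasing=True)
print(root)  # ~2.0; the later, decreasing crossing near x = 6.6 is ignored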
Example #49
0
def two_sample_conf_int(x,
                        y,
                        cl=0.95,
                        alternative="two-sided",
                        seed=None,
                        reps=10**4,
                        stat="mean",
                        shift=None):
    r"""
    One-sided or two-sided confidence interval for the parameter determining
    the treatment effect.  The default is the "shift model", where we are
    interested in the parameter d such that x is equal in distribution to
    y + d. In general, if we have some family of invertible functions parameterized
    by d, we'd like to find d such that x is equal in distribution to f(y, d).

    Parameters
    ----------
    x : array-like
        Sample 1
    y : array-like
        Sample 2
    cl : float in (0, 1)
        The desired confidence level. Default 0.95.
    alternative : {"two-sided", "lower", "upper"}
        Indicates the alternative hypothesis.
    seed : RandomState instance or {None, int, RandomState instance}
        If None, the pseudorandom number generator is the RandomState
        instance used by `np.random`;
        If int, seed is the seed used by the random number generator;
        If RandomState instance, seed is the pseudorandom number generator
    reps : int
        number of repetitions in two_sample
    stat : {'mean', 't'}
        The test statistic.

        (a) If stat == 'mean', the test statistic is (mean(x) - mean(y))
            (equivalently, sum(x), since those are monotonically related)
        (b) If stat == 't', the test statistic is the two-sample t-statistic--
            but the p-value is still estimated by the randomization,
            approximating the permutation distribution.
            The t-statistic is computed using scipy.stats.ttest_ind
        (c) If stat is a function (a callable object), the test statistic is
            that function.  The function should take a permutation of the pooled
            data and compute the test function from it. For instance, if the
            test statistic is the Kolmogorov-Smirnov distance between the
            empirical distributions of the two samples, $\max_t |F_x(t) - F_y(t)|$,
            the test statistic could be written:

            f = lambda u: np.max( \
                [abs(sum(u[:len(x)]<=v)/len(x)-sum(u[len(x):]<=v)/len(y)) for v in u]\
                )
    shift : float
        The relationship between x and y under the null hypothesis.

        (a) If None, the relationship is assumed to be additive (e.g. x = y+d)
        (b) A tuple containing the function and its inverse $(f, f^{-1})$, so
            $x_i = f(y_i, d)$ and $y_i = f^{-1}(x_i, d)$

    Returns
    -------
    tuple
        the estimated confidence limits

    Notes
    -----
    xtol : float
        Tolerance in brentq
    rtol : float
        Tolerance in brentq
    maxiter : int
        Maximum number of iterations in brentq
    """

    assert alternative in ("two-sided", "lower", "upper")

    if shift is None:
        shift_limit = max(abs(max(x) - min(y)), abs(max(y) - min(x)))
        # FIXME: unused observed
        # observed = np.mean(x) - np.mean(y)
    elif isinstance(shift, tuple):
        assert (callable(shift[0])), "Supply f and finverse in shift tuple"
        assert (callable(shift[1])), "Supply f and finverse in shift tuple"
        f = shift[0]
        finverse = shift[1]
        # Check that f is increasing in d; this is very ad hoc!
        assert (f(5, 1) < f(5, 2)), "f must be increasing in the parameter d"
        shift_limit = max(abs(fsolve(lambda d: f(max(y), d) - min(x), 0)),
                          abs(fsolve(lambda d: f(min(y), d) - max(x), 0)))
        # FIXME: unused observed
        # observed = fsolve(lambda d: np.mean(x) - np.mean(f(y, d)), 0)
    else:
        raise ValueError("Bad input for shift")
    ci_low = -shift_limit
    ci_upp = shift_limit

    if alternative == 'two-sided':
        cl = 1 - (1 - cl) / 2

    if alternative != "upper":
        if shift is None:
            g = lambda q: cl - two_sample_shift(x,
                                                y,
                                                alternative="less",
                                                seed=seed,
                                                shift=q,
                                                reps=reps,
                                                stat=stat)[0]
        else:
            g = lambda q: cl - two_sample_shift(
                x,
                y,
                alternative="less",
                seed=seed,
                shift=(lambda u: f(u, q), lambda u: finverse(u, q)),
                reps=reps,
                stat=stat)[0]
        ci_low = brentq(g, -2 * shift_limit, 2 * shift_limit)

    if alternative != "lower":
        if shift is None:
            g = lambda q: cl - two_sample_shift(x,
                                                y,
                                                alternative="greater",
                                                seed=seed,
                                                shift=q,
                                                reps=reps,
                                                stat=stat)[0]
        else:
            g = lambda q: cl - two_sample_shift(
                x,
                y,
                alternative="greater",
                seed=seed,
                shift=(lambda u: f(u, q), lambda u: finverse(u, q)),
                reps=reps,
                stat=stat)[0]
        ci_upp = brentq(g, -2 * shift_limit, 2 * shift_limit)

    return ci_low, ci_upp
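A usage sketch under the default additive shift model, with data generated so that x is distributed as y + 1. This assumes the companion two_sample_shift routine from the same module is available (the function appears to come from the permute package):

import numpy as np

rng = np.random.RandomState(42)
y = rng.normal(0.0, 1.0, 50)
x = rng.normal(1.0, 1.0, 50)  # true shift d = 1

lo, hi = two_sample_conf_int(x, y, cl=0.95, seed=42, reps=10**3)
print(lo, hi)  # a 95% interval that should cover d = 1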
Example #50
0
    def calculate_base_resistance_standard_diameter(self, pile_diameter, vanimpecorrection=False, hcrit=0.2):
        """
        Calculates the base resistance according to De Beer's method for a pile diameter which is a multiple of 0.2m. The calculation happens in five steps:

            - Step 1: Correct the cone resistance for the different failure surface for a pile and a CPT using Equation 62 from De Beer's paper. This correction is especially necessary for the shallow layer where the angle beta is lower than 90°
            - Step 2: Apply a correction for the different stress level for a pile compared to a CPT
            - Step 3: Account for the transition from weaker to stronger layers by working downward along the CPT trace. The increase of resistance will be slower for a pile compared to a CPT
            - Step 4: Account for the transition from stronger to weaker layers by working through the CPT trace from the bottom up. A weaker layer will be felt sooner by the model pile than by the CPT
            - Step 5: Take the average unit base resistance for one diameter below the given level. The average value should not be greater than :math:`q_{p,q+1}` at the given level.

        .. math::
            \\text{Step 1}

            q_{p,(1)} = \\frac{q_c}{\\exp \\left( 2 \\cdot ( \\beta_c - \\beta_p ) \\cdot \\tan \\varphi \\right) }

            \\text{Step 2}

            A = \\frac{1 + \\frac{\\gamma \\cdot h_{crit}^{\\prime}}{2 \\cdot p_o}}{1 + \\frac{\\gamma \\cdot h_{crit}}{2 \\cdot p_o}}

            \\text{if } A \\cdot q_{p,(1)} > q_c \\implies A \\cdot q_{p,(1)} = q_c

            \\text{Step 3}

            q_{p,j+1} = q_{p,j} + \\left[ A \\cdot q_{p,(1),j+1} - q_{p,j} \\right] \\cdot \\frac{d}{D}

            \\text{if } q_{p,j+1} > q_{p,(1),j+1} \\implies q_{p,j+1} = q_{p,(1),j+1}

            \\text{Step 4}

            q_{p,q+1} = q_{p,q} + \\left[ (q_{p,j+1})_{q+1} - q_{p,q} \\right] \\cdot \\frac{d}{D}

            \\text{According to Van Impe:}

            q_{p,q+1} = q_{p,q} + 2 \\cdot \\left[ (q_{p,j+1})_{q+1} - q_{p,q} \\right] \\cdot \\frac{d}{D}

            \\text{if } q_{p,q+1} > (q_{p,j+1})_{q+1} \\implies q_{p,q+1} = (q_{p,j+1})_{q+1}


        For numerical stability, rows with zero cone resistance at the top of the cone resistance trace are discarded.

        :param pile_diameter: Diameter of the pile as a multiple of 0.2m
        :param vanimpecorrection: Boolean determining whether the upward correction according to De Beer's original paper (default) or Van Impe (multiplier of 2) needs to be taken into account.
        :param hcrit: :math:`h_{crit}` adopted for De Beer's calculation (based on the mechanical cone). Default=0.2m

        :return: Returns a dataframe `calc` with the different correction stages
        """
        calc = deepcopy(self.calculation_data)
        calc = calc[calc['qc [MPa]'] > 0].reset_index(drop=True)

        # ----------------------------------------------------
        # Step 1: Shallow depth failure surface correction
        # ----------------------------------------------------

        # Calculate phi according to Equation 23
        calc['phi [deg]'] = np.rad2deg(
            self.phi_func()(1000 * calc['qc [MPa]'] / calc['sigma_vo_eff [kPa]']))
        # Determine the values of the normalised depths h/d and h/D
        calc['h/d [-]'] = calc['z [m]'] / self.diameter_cone
        calc['h/D [-]'] = calc['z [m]'] / pile_diameter
        # Find values of beta for cone penetration test and pile according to Equation 60
        for i, row in calc.iterrows():
            try:
                root = brentq(
                    f=self.optimisation_func,
                    a=0,
                    b=0.5 * np.pi,
                    args=(row['h/d [-]'], np.deg2rad(row['phi [deg]'])))
            except Exception:
                root = 0.5 * np.pi
            calc.loc[i, 'beta_c [rad]'] = root
            try:
                root = brentq(
                    f=self.optimisation_func,
                    a=0,
                    b=0.5 * np.pi,
                    args=(row['h/D [-]'], np.deg2rad(row['phi [deg]'])))
            except Exception:
                root = 0.5 * np.pi
            calc.loc[i, 'beta_p [rad]'] = root
        # Apply Equation 62 to obtain qp
        calc['qp [MPa]'] = calc['qc [MPa]'] / \
            (np.exp(
              2 *
              (calc['beta_c [rad]'] - calc['beta_p [rad]']) *
              np.tan(np.deg2rad(calc['phi [deg]']))))

        # ----------------------------------------------------
        # Step 2: Stress level correction
        # ----------------------------------------------------

        calc['A qp [MPa]'] = list(map(lambda _qp, _po, _gamma, _qc: min(_qc, self.stress_correction(
            qc=_qp, po=_po, diameter_pile=pile_diameter, diameter_cone=self.diameter_cone, gamma=_gamma, hcrit=hcrit)),
                           calc['qp [MPa]'], calc['sigma_vo_eff [kPa]'], calc['Unit weight [kN/m3]'], calc['qc [MPa]']))

        # --------------------------------------------------------------------
        # Step 3: Corrections for transition from weaker to stronger layers
        # --------------------------------------------------------------------
        for i, row in calc.iterrows():
            if i > 0:
                calc.loc[i, 'qp,j+1 [MPa]'] = min(
                    row['A qp [MPa]'],
                    calc.loc[i-1, 'qp,j+1 [MPa]'] +
                    (row['A qp [MPa]'] - calc.loc[i-1, 'qp,j+1 [MPa]']) *
                    (self.diameter_cone / pile_diameter))
            else:
                calc.loc[i, 'qp,j+1 [MPa]'] = 0

        # --------------------------------------------------------------------
        # Step 4: Corrections for transition from stronger to weaker layers
        # --------------------------------------------------------------------
        if vanimpecorrection:
            coefficient = 2.0
        else:
            coefficient = 1.0

        qu = np.zeros(len(calc['z [m]']))
        # Assign the last value of qd as the starting value of qu
        qu[-1] = calc['qp,j+1 [MPa]'].iloc[-1]
        for i, _qd in enumerate(calc['qp,j+1 [MPa]']):
            if i != 0:
                qu[-1 - i] = min(
                    qu[-i] +
                    coefficient * (calc['qp,j+1 [MPa]'].iloc[-1 - i] - qu[-i]) *
                    (self.diameter_cone / pile_diameter),
                    calc['qp,j+1 [MPa]'].iloc[-1 - i]
                )
        calc['qp,q+1 [MPa]'] = qu

        # --------------------------------------------------------------------
        # Step 5: Averaging to 1D below the reference level
        # --------------------------------------------------------------------
        for i, row in calc.iterrows():
            try:
                _window_data = calc[
                    (calc['z [m]'] >= row['z [m]']) &
                    (calc['z [m]'] <= (row['z [m]'] + pile_diameter))]
                calc.loc[i, "qb [MPa]"] = min(
                    row['qp,q+1 [MPa]'],
                    _window_data['qp,q+1 [MPa]'].mean())
            except Exception:
                calc.loc[i, "qb [MPa]"] = row['qp,q+1 [MPa]']

        return calc
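Steps 3 and 4 above are each a first-order lag with ratio d/D applied along the depth axis, once downward and once upward, both capped by the uncorrected value. A toy sketch of just that double smoothing on a synthetic resistance trace; the diameters and the array are illustrative, not taken from the class:

import numpy as np

d, D = 0.0357, 0.4  # cone and pile diameters [m], illustrative
Aqp = np.array([2.0, 2.0, 10.0, 10.0, 10.0, 3.0, 3.0])  # corrected qc [MPa]

# Step 3 (downward): resistance builds up slowly when entering a strong layer
qd = np.zeros_like(Aqp)
for j in range(1, len(Aqp)):
    qd[j] = min(Aqp[j], qd[j - 1] + (Aqp[j] - qd[j - 1]) * d / D)

# Step 4 (upward): a weak layer below is felt by the pile before the CPT
qu = qd.copy()
for j in range(len(qd) - 2, -1, -1):
    qu[j] = min(qd[j], qu[j + 1] + (qd[j] - qu[j + 1]) * d / D)

print(qd)  # lags behind Aqp on the way down
print(qu)  # additionally pulled down above the weak bottom layers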
Example #51
0
 def find_setting(t):
     """Provide a moment of maximum altitude as `t`."""
     setting = optimize.brentq(f, t + 2 * step, t)
     return setting
Example #52
0
    def find_Binder_crossing(self, mmax, mmin):
        """
            find crossing of 1st Binder cumulant with Bbar
            under bootstrap restricted to interval mmax...mmin

            There is an inner and an outer bootstrap:
            - The outer bootstrap (layer L1)  determines the stat. error on
                the value of m^2 at the crossing of the reweighted Binder
                cumulant with Bbar.
            - The inner bootstrap (layer L0) runs for each iteration of the
                minimiser within one bootstrap iteration. In order to reweight
                to a given point of m^2 we compute the reweighted result from
                nearby simulation point under the inner bootstrap and then
                determine the value of the Binder cumulant at that point by
                means of a weighted average.
        """
        print(separator)
        print("Starting determination of crossing point of Binder cumulant"
              "with Bbar=%.2f" % (self.Bbar))

        # Fill list with trivial L1 bootstrap indices (i.e. 0,1,2,3,4,...)
        # for central value and use L0_bsindices for the inner bootstrap
        self.rng_nest.seed(int(189123 * self.L / 3 * self.g * self.N))
        L1bs = []

        for j in range(len(self.actualm0sqlist)):
            N = self.phi2[str(j)].shape[0]
            L1bs.append(np.arange(int(np.floor(N / self.Nbin_tauint[j]))))
        L0bs = self.refresh_L0_bsindices()

        # compute central value for crossing
        try:
            mcsq, r = opt.brentq(self.reweight_Binder,
                                 mmin,
                                 mmax,
                                 args=(L1bs, L0bs),
                                 full_output=True,
                                 xtol=1e-6)

        except ValueError:
            print("No crossing point found -- aborting")
            exit()

        print("Crossing point central value found at mc^2=%e" % (mcsq))

        # compute crossing under Bootstrap
        print("Now starting bootstrap with %d samples:" % self.Nboot)
        bres = []

        for i in range(self.Nboot):  # start bootstrap
            # assign the set of L0 bootstrap indices for the ith L1 bootstrap
            # sample
            L1bs = self.L1_bsindices[i]
            L0bs = self.refresh_L0_bsindices()
            try:
                mcsq_i, r = opt.brentq(self.reweight_Binder,
                                       mmin,
                                       mmax,
                                       args=(L1bs, L0bs),
                                       full_output=True,
                                       xtol=1e-6)

            except ValueError:
                print("No crossing point found -- aborting")
                exit()

            bres.append(mcsq_i)
            print("bs sample %4d: mc^2=%e" % (i, mcsq_i))

        # compute the BS error
        dmcsq = np.sqrt(np.real(np.sum((np.array(bres) - mcsq) ** 2, 0)))\
            / np.sqrt(self.Nboot)

        print("result %.6f +/- %.6f" % (mcsq, dmcsq))
        print(separator)

        self.msq_final = mcsq
        self.dmsq_final = dmcsq
        self.msq_bins_final = bres
Example #53
0
 def find_rising(t):
     """Provide a moment of maximum altitude as `t`."""
     rising = optimize.brentq(f, t - 2 * step, t)
     return rising
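find_rising and find_setting bracket the horizon crossings on either side of the culmination time t, so brentq sees exactly one sign change in each interval. Note that find_setting passes its endpoints in reversed order; brentq only checks that f differs in sign at the two endpoints, so this still works. A self-contained sketch with a toy altitude function (f, step and t are illustrative):

import numpy as np
from scipy import optimize

f = lambda t: np.cos(2 * np.pi * t)  # toy altitude above the horizon
step = 0.2                           # chosen so each crossing lies within 2 steps
t = 0.0                              # culmination: f is maximal here

rising = optimize.brentq(f, t - 2 * step, t)   # ~ -0.25
setting = optimize.brentq(f, t, t + 2 * step)  # ~ +0.25
print(rising, setting)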
Example #54
0
def main(args):
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.3)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
            print(len(pairs))  # 一共有6000对
            # 读入后如[['Abel_Pacheco','1','4']]

            # Get the paths for the corresponding images
            # 获取文件路径和是否匹配关系对
            paths, actual_issame = lfw.get_paths(
                os.path.expanduser(args.lfw_dir), pairs)
            # print(len(actual_issame))#len(actual_issame) = 6000
            # args.lfw_file_ext表示图像的格式,默认png格式
            # actual_issame为标志位,如果是一对actual_issame=true 否则等于false
            # Load the model
            facenet.load_model(args.model)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            # image_size = images_placeholder.get_shape()[1]  # For some reason this doesn't work for frozen graphs
            image_size = args.image_size
            embedding_size = embeddings.get_shape()[1]  # embedding dimension, 128
            print(embedding_size)
            # Run forward pass to calculate embeddings
            # 3. validate using the forward pass
            print('Running forward pass on LFW images')
            batch_size = args.lfw_batch_size  # default=100
            nrof_images = len(paths)  # 6000 face comparisons; len(paths) = 12000
            # print(paths)  # paths holds the image files to read
            # print(nrof_images)
            nrof_batches = int(
                math.ceil(1.0 * nrof_images /
                          batch_size))  # total number of batches; ceil rounds up, giving 120
            emb_array = np.zeros(
                (nrof_images, embedding_size))  # preallocate a 12000 x 128 matrix
            for i in range(nrof_batches):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False,
                                           image_size)  # load images with crop/flip transforms
                feed_dict = {
                    images_placeholder: images,
                    phase_train_placeholder: False
                }
                emb_array[start_index:end_index, :] = sess.run(
                    embeddings, feed_dict=feed_dict)  # store the embeddings
                print(('Done: %d' % (i + 1)))
            # 10-fold cross validation: split the data into 10 folds, train on 9
            # and test on 1 in turn; the mean of the 10 runs estimates accuracy
            tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
                emb_array, actual_issame,
                nrof_folds=args.lfw_nrof_folds)  # nrof_folds = 10

            print('Accuracy: %1.3f+-%1.3f' %
                  (np.mean(accuracy), np.std(accuracy)))
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' %
                  (val, val_std, far))

            auc = metrics.auc(fpr, tpr)
            print('Area Under Curve (AUC): %1.3f' % auc)
            eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x),
                         0., 1.)
            print('Equal Error Rate (EER): %1.3f' % eer)
Example #55
0
           bestblockhash))
    try:
        #https://gist.github.com/chris-belcher/647da261ce718fc8ca10
        import numpy as np
        from scipy.optimize import brentq
        deposit_times = np.array(deposit_times)
        now -= deposit_times[0]
        deposit_times -= deposit_times[0]
        deposits = np.array(deposits)

        def f(r, deposits, deposit_times, now, final_balance):
            return np.sum(
                np.exp((now - deposit_times) / 60.0 / 60 / 24 / 365)**r *
                deposits) - final_balance

        r = brentq(f, a=1, b=-1, args=(deposits, deposit_times, now, balance))
        print('continuously compounded equivalent annual interest rate = ' +
              str(r * 100) + ' %')
        print('(as if yield generator was a bank account)')
    except ImportError:
        print('numpy/scipy not installed, unable to calculate effective ' +
              'interest rate')

    total_wallet_balance = sum(wallet.get_balance_by_mixdepth().values())
    if balance != total_wallet_balance:
        print(('BUG ERROR: wallet balance (%s) does not match balance from ' +
               'history (%s)') %
              (sat_to_str(total_wallet_balance), sat_to_str(balance)))
    if utxo_count != len(wallet.unspent):
        print((
            'BUG ERROR: wallet utxo count (%d) does not match utxo count from '
            + 'history (%d)') % (len(wallet.unspent), utxo_count))
Example #56
0
# Preprocessing calculations
# Use thick lens theory to set up channel parameters for given focal length

quartic_correction = True
theta = 0.0
w00 = 1.0
f = 10 / mm
Rlens = 10 / mm
Lch = 2 / mm
f_num = 3.0

r00 = 0.5 * f / f_num  # spot size of radiation
c0 = 0.01
h0 = np.sqrt(1 - c0)
t = Lch / np.sqrt(1 - c0)
Omega = brentq(lambda q: q * np.tan(q) - t /
               (f - Lch / 2), 0.0, 0.999 * np.pi / 2) / t
c2 = Omega**2
c4 = 0.0
c6 = 0.0
eik_to_caustic = -2 / mm
if quartic_correction:
    c4 = -Omega**4 / 4
    x0 = 100 * r00
    c4 *= 1 + Lch**2 * (0.33 / x0**2 + 0.5 * Omega**2 / h0**2 + Omega**2)
    eik_to_caustic *= -1

rb = bundle_scale * r00
t00, band = helper.TransformLimitedBandwidth(w00, '100 fs', 4)
a00 = helper.InitialVectorPotential(w00, 1.0, f, f_num)
mess = mess + helper.ParaxialFocusMessage(w00, 1.0, f, f_num)
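The Omega line above works because q*tan(q) increases monotonically from 0 toward infinity on (0, pi/2), so any positive right-hand side t / (f - Lch/2) is bracketed by (0, 0.999*pi/2). A standalone check of that bracket with an illustrative right-hand side:

import numpy as np
from scipy.optimize import brentq

rhs = 2.5  # illustrative value of t / (f - Lch / 2)
g = lambda q: q * np.tan(q) - rhs
assert g(0.0) < 0 < g(0.999 * np.pi / 2)  # the sign change is guaranteed
q_root = brentq(g, 0.0, 0.999 * np.pi / 2)
print(q_root, q_root * np.tan(q_root))  # the second value recovers rhs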
Example #57
0
    def initialize_bulk(self):
        """
        Initialize calculation parameters to their bulk values at the
        grid locations.
        """
        log.debug("Initializing bulk parameters")
        # Initialize the gap.
        g = delta_bcs(self.t, self.tc, interp=True, approx=True)
        self.gap = np.full(self.z.shape, g)

        # Initialize the pairing angle at the Matsubara energies.
        wn = (2 * np.arange(0, self.nc + 1) + 1) * np.pi * k * self.t
        wn = wn[:, np.newaxis]
        self.mtheta = np.arcsin(self.gap / np.sqrt(self.gap**2 + wn**2))

        # Initialize the order parameter.
        self.update_order()

        # Initialize the pairing angle.
        self.theta = np.empty((self.e.size, self.z.size), dtype=complex)
        zero = (self.e == 0)
        self.theta[~zero, :] = np.arctan(1j * self.gap
                                         / self.e[~zero, np.newaxis])
        self.theta[zero, :] = np.pi / 2

        # Update estimates if alpha is nonzero
        if self.alpha != 0:
            # Initialize the while loop parameters.
            r = np.inf
            i = 0
            last_order = self.order[0] / self.delta0

            # Compute the order parameter by iterating until convergence.
            log.debug("Computing the order parameter.")
            while r > self.rtol and i < self.max_iterations:
                # Update the iteration counter.
                i += 1

                # Solve for the pairing angle first
                mtheta = usadel_pairing_angle(
                    1j * wn, self.order[0], self.alpha)
                self.mtheta[:] = mtheta

                # Update the order parameter using the new pairing angle.
                self.update_order()

                # Save iteration history and evaluate convergence criteria.
                new_order = self.order[0] / self.delta0
                r = np.max(np.abs(new_order - last_order) / (
                        np.abs(new_order) + 1))
                last_order = self.order[0] / self.delta0
                log.debug("Iteration: {:d} :: R: {:g}".format(i, r))

            # Compute the pairing angle with the new order parameter.
            log.debug("Computing the pairing angle.")
            theta = usadel_pairing_angle(self.e, self.order[0], self.alpha)
            self.theta[:] = theta[:, np.newaxis]

            # Compute the energy gap.
            log.debug("Computing the gap energy.")

            def find_gap(e):
                en = e * self.order[0]
                th = usadel_pairing_angle(en, self.order[0], self.alpha)
                return np.cos(th).real - self.threshold

            dos = np.cos(self.theta[:, 0]).real
            try:
                max_e = self.e[dos > self.threshold].min() / self.order[0]
                min_e = self.e[dos < self.threshold].max() / self.order[0]
                self.gap[:] = brentq(find_gap, min_e, max_e) * self.order[0]
            except ValueError:  # the bounds didn't give opposite signs
                max_e = np.max(self.e) / self.order[0]
                try:
                    self.gap[:] = brentq(find_gap, 0, max_e) * self.order[0]
                except ValueError:
                    self.gap[:] = 0.
        log.debug("Bulk parameters initialized.")
Example #58
0
 def rein(self, z, xtol=1e-6, xmin=1e-6, xmax=1.):
     bfunc = lambda theta: theta - self.alpha(theta, z)
     if bfunc(xmin) * bfunc(xmax) > 0:
         return 0.
     else:
         return brentq(bfunc, xmin, xmax, xtol=xtol)
Example #59
0
    def ulSigma(self, model, marginalize=False, toys=None, expected=False):
        """ upper limit obtained from the defined Data (using the signal
            prediction for each signal region/dataset), by using the q_mu test
            statistic from the CCGV paper (arXiv:1007.1727).

        :param marginalize: if true, marginalize nuisances, else profile them
        :param toys: specify number of toys; if None, use the default
        :param expected: compute the expected value, not the observed.
        :returns: upper limit on *production* xsec (efficiencies unfolded)
        """
        if model.zeroSignal():
            """ only zeroes in efficiencies? cannot give a limit! """
            return None
        if toys is None:
            toys = self.ntoys
        oldmodel = model
        if expected:
            model = copy.deepcopy(oldmodel)
            #model.observed = model.backgrounds
            for i, d in enumerate(model.backgrounds):
                model.observed[i] = int(NP.round(d))
        computer = LikelihoodComputer(model, toys)
        mu_hat = computer.findMuHat(model.signal_rel)
        theta_hat0, _ = computer.findThetaHat(0 * model.signal_rel)
        sigma_mu = computer.getSigmaMu(model.signal_rel)

        aModel = copy.deepcopy(model)
        aModel.observed = array(
            [NP.round(x + y) for x, y in zip(model.backgrounds, theta_hat0)])
        #print ( "aModeldata=", aModel.observed )
        #aModel.observed = array ( [ round(x) for x in model.backgrounds ] )
        aModel.name = aModel.name + "A"
        compA = LikelihoodComputer(aModel, toys)
        ## compute
        mu_hatA = compA.findMuHat(aModel.signal_rel)
        if mu_hat < 0.:
            mu_hat = 0.
        nll0 = computer.likelihood(model.signals(mu_hat),
                                   marginalize=marginalize,
                                   nll=True)
        if NP.isinf(nll0) and marginalize == False:
            logger.warning(
                "nll is infinite in profiling! we switch to marginalization, but only for this one!"
            )
            marginalize = True
            nll0 = computer.likelihood(model.signals(mu_hat),
                                       marginalize=True,
                                       nll=True)
            if NP.isinf(nll0):
                logger.warning(
                    "marginalization didn't help either. switching back.")
                marginalize = False
            else:
                logger.warning("marginalization worked.")
        nll0A = compA.likelihood(aModel.signals(mu_hatA),
                                 marginalize=marginalize,
                                 nll=True)

        def root_func(mu):
            ## the function to find the zero of (ie CLs - alpha)
            nsig = model.signals(mu)
            computer.ntot = model.backgrounds + nsig
            nll = computer.likelihood(nsig, marginalize=marginalize, nll=True)
            nllA = compA.likelihood(nsig, marginalize=marginalize, nll=True)
            qmu = 2 * (nll - nll0)
            if qmu < 0.: qmu = 0.
            sqmu = sqrt(qmu)
            qA = 2 * (nllA - nll0A)
            # print ( "mu: %s, qMu: %s, qA: %s nll0A: %s nllA: %s" % ( mu, qmu, qA, nll0A, nllA ) )
            if qA < 0.:
                qA = 0.
            sqA = sqrt(qA)
            CLsb = 1. - stats.multivariate_normal.cdf(sqmu)
            CLb = 0.
            if qA >= qmu:
                CLb = stats.multivariate_normal.cdf(sqA - sqmu)
            else:
                if qA == 0.:
                    CLsb = 1.
                    CLb = 1.
                else:
                    CLsb = 1. - stats.multivariate_normal.cdf(
                        (qmu + qA) / (2 * sqA))
                    CLb = 1. - stats.multivariate_normal.cdf(
                        (qmu - qA) / (2 * sqA))
            CLs = 0.
            if CLb > 0.:
                CLs = CLsb / CLb
            root = CLs - 1. + self.cl
            return root

        a, b = 1.5 * mu_hat, 2.5 * mu_hat + 2 * sigma_mu
        ctr = 0
        while True:
            while (NP.sign(root_func(a) * root_func(b)) > -.5):
                b = 1.4 * b  ## widen bracket FIXME make a linear extrapolation!
                a = a - (b - a) * .3  ## widen bracket
                if a < 0.: a = 0.
                ctr += 1
                if ctr > 20:  ## but stop after 20 trials
                    if toys > 2000:
                        logger.error(
                            "cannot find brent bracket after 20 trials. a,b=%s(%s),%s(%s)"
                            % (root_func(a), a, root_func(b), b))
                        return None
                    else:
                        logger.debug(
                            "cannot find brent bracket after 20 trials. but very low number of toys"
                        )
                        return self.ulSigma(model, marginalize, 4 * toys)
            try:
                mu_lim = optimize.brentq(root_func,
                                         a,
                                         b,
                                         rtol=1e-03,
                                         xtol=1e-06)
                return mu_lim
            except ValueError:  # it could still be that the signs aren't opposite
                # in that case, try again with the widened bracket
                pass
Example #60
-1
File: glm.py Project: NICTA/revrand
def _rootfinding(fn, likelihood, likelihood_hypers, likelihood_args,
                 percentile):

    # CDF minus percentile for quantile root finding
    predCDF = lambda q, fs, percent: \
        (likelihood.cdf(q, fs, *chain(likelihood_hypers,
                                      likelihood_args))).mean() - percent

    # Convert alpha into percentages and get (conservative) bounds for brentq
    lpercent = (1 - percentile) / 2
    upercent = 1 - lpercent
    Eyn = likelihood.Ey(fn, *chain(likelihood_hypers, likelihood_args)).mean()
    lb, ub = -1000 * max(Eyn, 1), 1000 * max(Eyn, 1)

    # Do the root finding optimisation for upper and lower quantiles
    try:
        qln = brentq(predCDF, a=lb, b=ub, args=(fn, lpercent))
    except ValueError:
        qln = np.nan

    try:
        qun = brentq(predCDF, a=lb, b=ub, args=(fn, upercent))
    except ValueError:
        qun = np.nan

    return qln, qun
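The same CDF-root recipe works for any predictive distribution whose CDF can be evaluated: the p-quantile of Y is the root of F(q) - p. A standalone sketch with a Gaussian predictive CDF standing in for the likelihood (purely illustrative, not revrand's API):

from scipy.optimize import brentq
from scipy.stats import norm

mu, sigma, percentile = 2.0, 0.5, 0.9
lpercent = (1 - percentile) / 2  # 0.05
upercent = 1 - lpercent          # 0.95

cdf_minus = lambda q, p: norm.cdf(q, loc=mu, scale=sigma) - p
qln = brentq(cdf_minus, a=-1000, b=1000, args=(lpercent,))
qun = brentq(cdf_minus, a=-1000, b=1000, args=(upercent,))
print(qln, qun)  # ~ mu -/+ 1.645 * sigma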