Example #1
import numpy as np
from random import random, gauss
from math import erfc

def raise_res(T, W, c, mu=0, sigma=1):
    """Increase the resolution of a Wiener series by a factor of c.

        Returns a more resolved Wiener series and its associated time series.

        T = the given time series.
        W = the associated Wiener series.
        c = scaling factor (integer greater than 1).
        mu = mean of W's underlying normal distribution.
        sigma = standard deviation of W's underlying normal distribution.
    """
    dT = T[1] - T[0]
    dt = float(T[1] - T[0]) / c
    t_series = []
    w_series = []
    for i in range(len(T) - 1):
        t = T[i]
        w_t = W[i]
        t_next = T[i + 1]
        w_next = W[i + 1]
        t_series.append(t)
        w_series.append(w_t)
        for j in range(c - 1):
            t += dt
            dW = w_next - w_t
            drawfrm_cum = np.sqrt(2) * np.sqrt(t_next - t) * sigma * erfc(random())
            if np.sqrt(2) * np.sqrt(t_next - t) * sigma * erfc(-2 * random()) < abs(dW):
                w_t += abs(gauss(0, np.sqrt(dt) * sigma)) * float(dW) / abs(dW)
            else:
                w_t += gauss(0, np.sqrt(dt) * sigma)
            t_series.append(t)
            w_series.append(w_t)
    t_series.append(T[-1])
    w_series.append(W[-1])
    return t_series, w_series
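A minimal usage sketch, assuming the imports above; the coarse time grid and path below are illustrative values, not from the original source.

from random import gauss

# Coarse Wiener path on t = 0, 1, ..., 10 built from unit-variance increments.
T = list(range(11))
W = [0.0]
for _ in range(10):
    W.append(W[-1] + gauss(0, 1))

# Refine each unit step into four 0.25 steps.
t_fine, w_fine = raise_res(T, W, 4)
print(len(t_fine), len(w_fine))  # 41 points each: 4 * 10 sub-steps plus the final endpoint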
Example #2
def truncatedgaussian_sample(xmin, xmax, mu, sigma, x):
    roottwo = 1.414213562373095
    temp = math.erfc((-xmin + mu) / (roottwo * sigma)) + x * (
        math.erfc((-xmax + mu) / (roottwo * sigma)) - math.erfc((-xmin + mu) / (roottwo * sigma))
    )
    y = mu + roottwo * sigma * coolmath.inverf(-1.0 + temp)
    return y
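Since coolmath.inverf is not a standard module, here is a hedged stdlib sketch of the same inverse-CDF sampling idea using statistics.NormalDist (Python 3.8+); the function name is mine, and equivalence to the original is an assumption based on the formula above.

from statistics import NormalDist

def truncated_gaussian_sample_stdlib(xmin, xmax, mu, sigma, u):
    # u is a uniform(0, 1) draw; interpolate the CDF between the truncation
    # bounds and invert, mirroring the erfc/inverf construction above.
    d = NormalDist(mu, sigma)
    lo, hi = d.cdf(xmin), d.cdf(xmax)
    return d.inv_cdf(lo + u * (hi - lo))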
Example #3
 def test_erfc(self):
     import math
     assert math.erfc(0.0) == 1.0
     assert math.erfc(-0.0) == 1.0
     assert math.erfc(float("inf")) == 0.0
     assert math.erfc(float("-inf")) == 2.0
     assert math.isnan(math.erfc(float("nan")))
     assert math.erfc(1e-308) == 1.0
Example #4
def test_erfc():
    table = [
        (0.0,  1.0000000),
        (0.05, 0.9436280),
        (0.1,  0.8875371),
        (0.15, 0.8320040),
        (0.2,  0.7772974),
        (0.25, 0.7236736),
        (0.3,  0.6713732),
        (0.35, 0.6206179),
        (0.4,  0.5716076),
        (0.45, 0.5245183),
        (0.5,  0.4795001),
        (0.55, 0.4366766),
        (0.6,  0.3961439),
        (0.65, 0.3579707),
        (0.7,  0.3221988),
        (0.75, 0.2888444),
        (0.8,  0.2578990),
        (0.85, 0.2293319),
        (0.9,  0.2030918),
        (0.95, 0.1791092),
        (1.0,  0.1572992),
        (1.1,  0.1197949),
        (1.2,  0.0896860),
        (1.3,  0.0659921),
        (1.4,  0.0477149),
        (1.5,  0.0338949),
        (1.6,  0.0236516),
        (1.7,  0.0162095),
        (1.8,  0.0109095),
        (1.9,  0.0072096),
        (2.0,  0.0046777),
        (2.1,  0.0029795),
        (2.2,  0.0018628),
        (2.3,  0.0011432),
        (2.4,  0.0006885),
        (2.5,  0.0004070),
        (2.6,  0.0002360),
        (2.7,  0.0001343),
        (2.8,  0.0000750),
        (2.9,  0.0000411),
        (3.0,  0.0000221),
        (3.1,  0.0000116),
        (3.2,  0.0000060),
        (3.3,  0.0000031),
        (3.4,  0.0000015),
        (3.5,  0.0000007),
        (4.0,  0.0000000),
    ]
    
    for x, y in table:
        # Check agreement to roughly 7 decimal places in both tails.
        assert abs(math.erfc(x) - y) < 1e-7
        assert abs(math.erfc(-x) - (2.0 - y)) < 1e-7
Example #5
 def RandIntVec(self,ListSize, ListSumValue, Distribution='Normal'):
     """
     Inputs:
     ListSize = the size of the list to return
     ListSumValue = The sum of list values
     Distribution = can be 'uniform' for a uniform distribution, 'normal' for a normal
         distribution ~ N(0,1) with +/- 3 sigma (default), or a list of size 'ListSize' or
         'ListSize - 1' giving an empirical (arbitrary) distribution, i.e. probabilities of
         each of the p different outcomes. These should sum to 1 (the last element is always
         assumed to account for the remaining probability, as long as sum(pvals[:-1]) <= 1).
     Output:
     A list of random integers of length 'ListSize' whose sum is 'ListSumValue'.
     """
     if type(Distribution) == list:
         DistributionSize = len(Distribution)
         if ListSize == DistributionSize or (ListSize-1) == DistributionSize:
             Values = multinomial(ListSumValue,Distribution,size=1)
             OutputValue = Values[0]
     elif Distribution.lower() == 'uniform': #I do not recommend this!!!! I see that it is not as random (at least on my computer) as I had hoped
         UniformDistro = [1.0/ListSize for i in range(ListSize)]  # 1.0 avoids integer division under Python 2
         Values = multinomial(ListSumValue,UniformDistro,size=1)
         OutputValue = Values[0]
     elif Distribution.lower() == 'normal':
         """
         Normal Distribution Construction....It's very flexible and hideous
         Assume a +-3 sigma range.  Warning, this may or may not be a suitable range for your implementation!
         If one wishes to explore a different range, then change the LowSigma and HighSigma values
         """
         LowSigma    = -3#-3 sigma
         HighSigma   = 3#+3 sigma
         if (float(ListSize) - 1) == 0:
             StepSize    = 0
         else:
             StepSize    = 1/(float(ListSize) - 1)
         ZValues     = [(LowSigma * (1-i*StepSize) +(i*StepSize)*HighSigma) for i in range(int(ListSize))]
         #Construction parameters for N(Mean,Variance) - Default is N(0,1)
         Mean        = 0
         Var         = 1
         #NormalDistro= [self.NormalDistributionFunction(Mean, Var, x) for x in ZValues]
         NormalDistro= list()
         for i in range(len(ZValues)):
             if i==0:
                 ERFCVAL = 0.5 * math.erfc(-ZValues[i]/math.sqrt(2))
                 NormalDistro.append(ERFCVAL)
             elif i ==  len(ZValues) - 1:
                 ERFCVAL = NormalDistro[0]
                 NormalDistro.append(ERFCVAL)
             else:
                 ERFCVAL1 = 0.5 * math.erfc(-ZValues[i]/math.sqrt(2))
                 ERFCVAL2 = 0.5 * math.erfc(-ZValues[i-1]/math.sqrt(2))
                 ERFCVAL = ERFCVAL1 - ERFCVAL2
                 NormalDistro.append(ERFCVAL)  
         #print "Normal Distribution sum = %f"%sum(NormalDistro)
         Values = multinomial(ListSumValue,NormalDistro,size=1)
         OutputValue = Values[0]
     else:
         raise ValueError ('Cannot create desired vector')
     return OutputValue
 def de_marsily_no_reaction(self,x,t):
   # Based on Equation 10.3.2 in Quantitative Hydrogeology, Ghislain de
   # Marsily, 1986.
   D_ = self.D
   U_over_porR = self.U/(self.porosity*self.R)
   two_sqrt_Dt_over_porR = 2.*math.sqrt(D_*t/(self.porosity*self.R))
   temp = 0.5* \
          (math.erfc((x-t*U_over_porR)/two_sqrt_Dt_over_porR) + \
           math.exp(self.U*x/D_) * \
           math.erfc((x+t*U_over_porR)/two_sqrt_Dt_over_porR))
   value = temp*(self.c1-self.c0) + self.c0
   return value
 def ogata_banks(self,x,t):
   # Based on "Solution of the Differential Equation of Longitudinal
   # Dispersion in Porous Media", USGS Professional Paper 411-A by
   # Akio Ogata and R.B. Banks, 1961.
   D_ = self.D/self.porosity
   v = self.U/self.porosity
   temp = 0.5* \
          (math.erfc((x-v/self.R*t)/(2.*math.sqrt(D_/self.R*t))) + \
           math.exp(v*x/D_) * \
           math.erfc((x+v/self.R*t)/(2.*math.sqrt(D_/self.R*t))))
   value = temp*(self.c1-self.c0) + self.c0
   return value
Example #8
def getnormcdf(x,lowertail=True):
  """
  Get the normal CDF value; used to calculate p-values.
  """
  # ax=math.fabs(x)
  #axv=math.erfc(x/(2**0.5))/2; # higher tail
  if lowertail==False:
    #return axv
    return math.erfc(x/(2**0.5))/2
  else:
    #return 1-axv
    return math.erfc(-x/(2**0.5))/2
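A quick sanity check of the two tails (the z values are illustrative, assuming the function above is in scope):

import math

assert abs(getnormcdf(0.0) - 0.5) < 1e-12                     # lower tail at the mean is exactly one half
assert abs(getnormcdf(1.96, lowertail=False) - 0.025) < 1e-3  # upper tail at z = 1.96 is about 2.5%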
Example #9
def blackscholes(stockprice, strike, riskfree, time, volatility):
	d1 = (log(stockprice/strike)+(riskfree+.5*volatility**2)*time)/(volatility*sqrt(time))
	d2 = d1 - volatility*sqrt(time)

	callprice = stockprice*.5*erfc(-d1/sqrt(2))-strike*exp(-riskfree*time)*.5*erfc(-d2/sqrt(2))

	putprice = strike*exp(-riskfree*time)*.5*erfc(d2/sqrt(2))-stockprice*.5*erfc(d1/sqrt(2))

	calldelta = .5*erfc(-d1/sqrt(2))

	putdelta = calldelta - 1

	return [callprice, putprice, calldelta, putdelta]
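An illustrative call, assuming the bare log/sqrt/exp/erfc names come from math; the market parameters below are assumptions, not from the source. Put-call parity gives a cheap correctness check.

from math import log, sqrt, exp, erfc

call, put, calldelta, putdelta = blackscholes(stockprice=100, strike=100, riskfree=0.05,
                                              time=1.0, volatility=0.2)
# Put-call parity: C - P should equal S - K * exp(-r * T).
assert abs((call - put) - (100 - 100 * exp(-0.05 * 1.0))) < 1e-9
print(round(call, 2), round(put, 2))  # roughly 10.45 and 5.57 for these inputs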
Example #10
def calculate_results(representedPalindromes, kmer_counts, 
	    	  totalCharCount, hypothesesCount):
    """
    INPUT:
	representedPalindromes - A collections.Counter() 
		object
	kmer_counts - A collections.Counter() object. 
	totalCharCount - An integer 
	hypothesesCount - An integer
    OUTPUT:
	tupleList - A list of tuples, each tuple
        contains the palindrome name, observed count,
        expected count, z-score and E-value. 

    Takes in a collections.Counter representedPalindromes,
    a collections.Counter kmer_counts,
    an int totalCharCount, and an int hypothesesCount.  For 
    each palindrome in representedPalindromes the expected
    value, z-Score, and e-value is calculated. These three
    values are coupled with the palindrome name and observed
    count, to create a tuple with five elements.  These 
    tuples are loaded into a list, which is returned after 
    all palindromes in representedPalindromes has been 
    iterated over. 
    """
    tupleList = []
    # The list of tuples. Each element in this list is a 
    # tuple, with five elements. The tuple elements
    # are palindrome name, observed count, expected count,
    # z-score and E-value. 
    for key, value in representedPalindromes.items():
        if (len(key)%2) == 1:
            keyList = list(key)
            # Throw out odd-length palindromes whose middle character is
            # A, T, C, or G; they are not desired.
            if keyList[len(key)//2] in "ATCG": continue
        expectedValue = expected_value(key, kmer_counts)
        standardDeviation = math.sqrt(
            expectedValue * (1 - (expectedValue / totalCharCount)))
        zScore = (kmer_counts[key] - expectedValue) / standardDeviation
        if zScore <= 0:
            e_Value = (math.erfc(-zScore / math.sqrt(2)) / 2) * 2 * hypothesesCount
        else:
            e_Value = (math.erfc(zScore / math.sqrt(2)) / 2) * 2 * hypothesesCount
        tupleList.append((key, str(value), str(expectedValue),
                          zScore, e_Value))
    return tupleList
Example #11
def compute_kullback_leibler_check_statistic(n=100, prngstate=None):
    """Compute the lowest of the survival function and the CDF of the exact KL
    divergence KL(N(mu1,s1)||N(mu2,s2)) w.r.t. the sample distribution of the
    KL divergence drawn by computing log(P(x|N(mu1,s1)))-log(P(x|N(mu2,s2)))
    over a sample x~N(mu1,s1). If we are computing the KL divergence
    accurately, the exact value should fall squarely in the sample, and the
    tail probabilities should be relatively large.

    """
    if prngstate is None:
        raise TypeError('Must explicitly specify numpy.random.RandomState')
    mu1 = mu2 = 0
    s1 = 1
    s2 = 2
    exact = gaussian_kl_divergence(mu1, s1, mu2, s2)
    sample = prngstate.normal(mu1, s1, n)
    lpdf1 = gaussian_log_pdf(mu1, s1)
    lpdf2 = gaussian_log_pdf(mu2, s2)
    estimate, std = kl.kullback_leibler(sample, lpdf1, lpdf2)
    # This computes the minimum of the left and right tail probabilities of the
    # exact KL divergence vs a gaussian fit to the sample estimate. There is a
    # distinct negative skew to the samples used to compute `estimate`, so this
    # statistic is not uniform. Nonetheless, we do not expect it to get too
    # small.
    return erfc(abs(exact - estimate) / std) / 2
Example #12
 def F8_f(self):
     self.F8 = 0.5 * math.erfc(-self.z_score/math.sqrt(2))
     if self.F8 == 0:
         self.F8 = 1e-16  # floor at 1e-16 so the result is never exactly zero
     return self.F8
Example #13
  def Ea(self,n_1,r):
      '''
      This function computes the easiness of a task from reaction time and accuracy.
      Arguments:
      n_1 - One of the numbers in the task
      r - Ratio of the numbers in the task

      To compute P_Error, an erfc formula involving the two numbers and the Weber fraction is used
      '''
      m = (self.m)
      intercept = self.i
      w = self.w

      dist = int(round(n_1 / r)) - n_1
      rt = (dist * m) + intercept # Computing Reaction_Time from the values obtained from the RT graph
      n_2 = int(round(n_1/r))
      numer = abs(n_1-n_2)
      denom = math.sqrt(2)*w*(((n_1**2)+(n_2**2))**0.5)
      P_Err = 0.5*math.erfc(numer/denom)
      P_A = 1 - P_Err
      #print(P_A)
      array_poss = np.random.choice([0,1],size=(10),p=[1-P_A, P_A]) # Generating bin values array (consisting of 0's and 1's)using P_Acc probability
      val = np.random.choice(array_poss) # Randomly choosing samples from array_poss
      rt = np.random.normal(rt,intercept) # Normal Distribution sampling of RT vals
      if rt<500:
          rt = 500 # re-evaluate RT's < 500
          rt = np.random.normal(rt,130)
      E = val - (rt/2000.) # Computing discrete Easiness value of a question

      return E, val, rt
Example #14
    def _calc_real_and_point(self):
        """
        Determines the self energy -(eta/pi)**(1/2) * sum_{i=1}^{N} q_i**2
        
        If cell is charged a compensating background is added (i.e. a G=0 term)
        """
        all_nn = self._s.get_all_neighbors(self._rmax, True)

        forcepf = 2.0 * self._sqrt_eta / sqrt(pi)
        coords = self._coords
        numsites = self._s.num_sites
        ereal = np.zeros((numsites, numsites))
        epoint = np.zeros((numsites))
        forces = np.zeros((numsites, 3))
        for i in xrange(numsites):
            nn = all_nn[i] #self._s.get_neighbors(site, self._rmax)
            qi = self._oxi_states[i]
            epoint[i] = qi * qi
            epoint[i] *= -1.0 * sqrt(self._eta / pi)
            epoint[i] += qi * pi / (2.0 * self._vol * self._eta)  #add jellium term
            for j in range(len(nn)):  #for (nsite, rij)  in nn:
                nsite = nn[j][0]
                rij = nn[j][1]
                qj = compute_average_oxidation_state(nsite)
                erfcval = erfc(self._sqrt_eta * rij)
                ereal[nn[j][2], i] += erfcval * qi * qj / rij
                fijpf = qj / pow(rij, 3) * (erfcval + forcepf * rij * exp(-self._eta * pow(rij, 2)))
                forces[i] += fijpf * (coords[i] - nsite.coords) * qi * EwaldSummation.CONV_FACT

        ereal = ereal * 0.5 * EwaldSummation.CONV_FACT
        epoint = epoint * EwaldSummation.CONV_FACT
        return (ereal, epoint, forces)
Example #15
def graph_FWHM_data_range(start_date=datetime.datetime(2015,3,6),
                          end_date=datetime.datetime(2015,4,15),tenmin=True,
                          path='/home/douglas/Dropbox (Thacher)/Observatory/Seeing/Data/',
                          write=True,outpath='./'):
    
    
    plot_params()
    fwhm = get_FWHM_data_range(start_date = start_date, end_date=end_date, path=path, tenmin=tenmin)

    # Basic stats
    med = np.median(fwhm)
    mean = np.mean(fwhm)
    fwhm_clip, low, high = sigmaclip(fwhm,low=3,high=3)
    meanclip = np.mean(fwhm_clip)

    # Get mode using kernel density estimation (KDE)
    vals = np.linspace(0,30,1000)
    fkde = gaussian_kde(fwhm)
    fpdf = fkde(vals)
    mode = vals[np.argmax(fpdf)]
    std = np.std(fwhm)


    plt.ion()
    plt.figure(99)
    plt.clf()
    plt.hist(fwhm, color='darkgoldenrod',bins=35)
    plt.xlabel('FWHM (arcsec)',fontsize=16)
    plt.ylabel('Frequency',fontsize=16)
    plt.annotate('mode $=$ %.2f" ' % mode, [0.87,0.85],horizontalalignment='right',
                 xycoords='figure fraction',fontsize='large')
    plt.annotate('median $=$ %.2f" ' % med, [0.87,0.8],horizontalalignment='right',
                 xycoords='figure fraction',fontsize='large')
    plt.annotate('mean $=$ %.2f" ' % mean, [0.87,0.75],horizontalalignment='right',
                 xycoords='figure fraction',fontsize='large')

    xvals = np.linspace(0,30,1000)
    kde = gaussian_kde(fwhm)
    pdf = kde(xvals)
    dist_c = np.cumsum(pdf)/np.sum(pdf)
    func = interp1d(dist_c,vals,kind='linear')
    lo = np.float(func(math.erfc(1./np.sqrt(2))))
    hi = np.float(func(math.erf(1./np.sqrt(2))))

    disthi = np.linspace(.684,.999,100)
    distlo = disthi-0.6827
    disthis = func(disthi)
    distlos = func(distlo)

    interval = np.min(disthis-distlos)

    plt.annotate('1 $\sigma$ int. $=$ %.2f" ' % interval, [0.87,0.70],horizontalalignment='right',
                 xycoords='figure fraction',fontsize='large')
    
    
    plt.rcdefaults()

    plt.savefig(outpath+'Seeing_Cumulative.png',dpi=300)

    return
 def bear(self,x,t):
   # Based on Equation 10.6.22 in "Dynamics of Fluids in Porous Media", 
   # Jacob Bear, 1988.
   D_ = self.D/self.porosity
   v = self.U/self.porosity
   beta = math.sqrt(v*v/(4.*D_*D_)+ self.lam/D_)
   temp = 0.5 * \
          math.exp(v*x/(2.*D_)) * \
          (math.exp(-1.*beta*x) * \
           math.erfc((x-math.sqrt(v*v+4.*self.lam*D_)*t) / \
                     (2.*math.sqrt(D_*t))) + \
           math.exp(beta*x) * \
           math.erfc((x+math.sqrt(v*v+4.*self.lam*D_)*t) / \
                     (2.*math.sqrt(D_*t))))
   value = temp*(self.c1-self.c0) + self.c0
   return value
Example #17
def monobit(bits):
	ceros=	bits.count("0")
	unos =  bits.count("1")
	frequencia = len(bits)
	SN = ceros-unos
	test = abs(SN)/sqrt(frequencia)
	return erfc(test/sqrt(2))
Example #18
def rspace_sum(rs, qs, basis, kappa, rlim):
    """
    Real space part of the sum
    Parameters:
       rs -- list of particle positions
       qs -- list of particle charges
       basis -- real space basis
       kappa -- splitting parameter
       rlim -- size of lattice (one side of a cube of points)
    """
    rspace_sum = 0.0
    for i in range(len(rs)):
        for j in range(len(rs)):
            q = qs[i]*qs[j]
            r = rs[j] - rs[i]
            for n1 in range(-rlim+1, rlim):
                for n2 in range(-rlim+1, rlim):
                    for n3 in range(-rlim+1, rlim):
                        if i == j and n1 == 0 and n2 == 0 and n3 == 0:
                            continue
                        lat = n1*basis[0] + n2*basis[1] + n3*basis[2]
                        d = r + lat
                        rd = math.sqrt(d.dot(d))
                        rspace_sum += q * math.erfc(kappa*rd)/rd
    return rspace_sum
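A toy invocation sketch (all values below are illustrative assumptions): two opposite unit charges in a unit cubic cell, with numpy vectors for the positions and basis so the dot products work.

import math
import numpy as np

basis = [np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]), np.array([0.0, 0.0, 1.0])]
rs = [np.array([0.0, 0.0, 0.0]), np.array([0.5, 0.5, 0.5])]   # illustrative particle positions
qs = [1.0, -1.0]                                              # a neutral pair of charges
print(rspace_sum(rs, qs, basis, kappa=5.0, rlim=2))           # real-space part of the Ewald energy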
Example #19
def distparams(dist):
    """
    Description:
    ------------
    Return robust statistics of a distribution of data values

    Example:
    --------
    med,mode,interval,lo,hi = distparams(dist)
    """

    from scipy.stats.kde import gaussian_kde
    from scipy.interpolate import interp1d
    vals = np.linspace(np.min(dist)*0.5,np.max(dist)*1.5,1000)
    kde = gaussian_kde(dist)
    pdf = kde(vals)
    dist_c = np.cumsum(pdf)/np.sum(pdf)
    func = interp1d(dist_c,vals,kind='linear')
    lo = np.float(func(math.erfc(1./np.sqrt(2))))
    hi = np.float(func(math.erf(1./np.sqrt(2))))
    med = np.float(func(0.5))
    mode = vals[np.argmax(pdf)]

    disthi = np.linspace(.684,.999,100)
    distlo = disthi-0.6827
    disthis = func(disthi)
    distlos = func(distlo)
    
    interval = np.min(disthis-distlos)

    return med,mode,interval,lo,hi
Example #20
def TraditionalEwald(L,D,rs,nMax):
    alpha = 7./(L)

    VShort = lambda r,a: math.erfc(a*r)/r
    fVLong = lambda k2,a: 4*pi*math.exp(-k2/(4*a*a))/(k2*(L**D))

    # Short-ranged r-space
    Vss = []
    for r in rs:
      Vs = VShort(r,alpha)
      Vss.append(Vs)

    # Long-ranged k-space
    ns = Genns(nMax,D)
    ks = []
    fVls = []
    for n in ns:
      magn2 = dot(n,n)
      k2 = 4.*pi*pi*magn2/(L*L)
      ks.append(math.sqrt(k2))
      if magn2 != 0:
        fVl = fVLong(k2,alpha)
        fVls.append(fVl)
      else:
        fVls.append(0.)

    ks, fVls = GetUnique(ks,fVls)
    for (k,fVl) in sorted(zip(ks,fVls)):
      print k, fVl

    return [Vss,fVls]
Example #21
def monobit (bits, p_value=0.01):
    """Monobit Test
    returns a tuple representing (metric, pass)"""
    S = bits.count(1) - bits.count(0) 
    S_obs = abs(S) / math.sqrt(len(bits))
    P = math.erfc(S_obs / math.sqrt(2))
    return P, P >= p_value
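An illustrative run (assuming math is imported where monobit is defined): a perfectly balanced bit stream gives the maximum p-value.

bits = [0, 1] * 500          # 1000 alternating bits, so zeros and ones are balanced
p, ok = monobit(bits)
print(round(p, 4), ok)       # p = 1.0 here, so the test passes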
Example #22
def phi(sigma):
    # sigma is your standard deviation
    # sigma = 3.0
    # 4.891639 sigma is (basically) 1 in a million
    sampleSize = 1. / math.erfc(sigma / math.sqrt(2))
    # "80 billion 'events' in a day at Intel..."
    msg = '1 in a {}, or {} for an IntelDay'.format(sampleSize, 80e9 / sampleSize)
    return print(msg)
def levyCDF(x, mu = 0, sigma = 0.5):
    
    F = []
    for num in range(0, len(x)):
        temp = math.sqrt(sigma / (2*(x[num] - mu)))
        F.append(math.erfc(temp))
        
    return F
Example #24
def runs_test2 (bits, p_value=0.01):
    """Custom Runs Test (pi = 0.5)
    returns a tuple representing (metric, pass)"""
    n = len(bits)
    pi = .5
    vobs = (bits[0:n-1] ^ bits[1:n]).count(1) + 1
    pval = math.erfc(abs(vobs-2*n*pi*(1-pi)) / (2 * math.sqrt(2*n) * pi * (1 - pi)))
    return pval, pval >= p_value
Example #25
def frequency_test(bsq):
  bsqc = [ 2*x -1 for x in bsq ]
  n = len(bsq)
  s_obs = abs(sum(bsqc)) / sqrt(float(n))
  p_value = erfc(s_obs/sqrt(2))
  reason = "There is a disproportion between the numbers of 0s and 1s:\n"
  reason += "#0: {} #1: {}".format(bsq.count(0), bsq.count(1))
  return p_value, reason
Example #26
 def lognorm_cdf(x, mean, var):
     """ cumulative distribution function of a log-normal distribution
     parameterized by its `mean` and variance `var` """
     mean2 = mean**2
     return 0.5 * math.erfc(
         np.log(mean2 / (x*np.sqrt(mean2 + var))) /
         (np.sqrt(2 * np.log(1 + var / mean2)))
     )
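A small consistency check (illustrative mean and variance, assuming the function above plus math and numpy are in scope): at the distribution's median the CDF should be one half.

import math
import numpy as np

mean, var = 2.0, 1.5                           # illustrative lognormal mean and variance
sigma2_log = math.log(1 + var / mean**2)       # variance of the underlying normal
mu_log = math.log(mean) - 0.5 * sigma2_log     # mean of the underlying normal
median = math.exp(mu_log)
assert abs(lognorm_cdf(median, mean, var) - 0.5) < 1e-9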
Example #27
def Q(nu, lambda_, x, epsilon=1e-5):
    '''
This function is translated from a Matlab function originally written by S.M. Kay, see [1].
Q computes the right-tail probability of a central or noncentral chi-squared PDF.

Input Parameters:

    nu      = Degrees of Freedom (DoF)
    lambda_ = Noncentrality parameter (0 for central chi-square)
    x       = Random variable
    epsilon = maximum allowable error (should be a small number such as 1e-5) due to the truncation of the infinite sum
    
[1] S. Kay, Fundamentals of Statistical Signal Processing, Vol. 2: Detection Theory, Prentice-Hall, Upper Saddle River, NJ, 1998.
    '''
    from math import erfc, exp, factorial, pi, sqrt
    normRT = lambda x: 0.5*erfc(x/sqrt(2))
    t = exp(lambda_ / 2.0) * (1 - epsilon)
    sum_ = 1
    M = 0
    while sum_ < t:
        M += 1
        sum_ += ((lambda_ / 2.0)**M) / factorial(M)
        
    if nu / 2 - nu // 2: # nu is odd
        P = 2 * normRT(sqrt(x))
        g = Q2p = sqrt(2*x/pi) * exp(-x/2)
        if nu > 1:
            for m in range(5, nu+1, 2):
                g *= x / (m-2)
                Q2p += g
            P += exp(-lambda_ / 2) * Q2p
            for k in range(1, M+1):
                m = nu + 2 * k
                g *= x / (m - 2)
                Q2p += g
                arg = (exp(-lambda_/2) * (lambda_/2)**k) / factorial(k)
                P += arg * Q2p
        else: # nu == 1
            P += exp(-lambda_ / 2) * (lambda_ / 2) * Q2p
            for k in range(2, M+1):
                m = nu + 2*k
                g *= x / (m-2)
                Q2p += g
                arg = (exp(-lambda_ / 2) * (lambda_/2)**k) / factorial(k)
                P += arg * Q2p
    else: # nu is even
        g = Q2 = exp(-x/2)
        for m in range(4, nu+1, 2):
            g *= x / (m-2)
            Q2 += g
        P = exp(-lambda_/2) * Q2
        for k in range(1, M+1):
            m = nu + 2*k
            g *= x / (m-2)
            Q2 += g
            arg = (exp(-lambda_/2) * (lambda_/2)**k) / factorial(k)
            P += arg * Q2
    return P
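A quick cross-check for the central case (assuming scipy is available; the degrees of freedom and threshold below are illustrative):

from scipy.stats import chi2

p_kay = Q(nu=4, lambda_=0.0, x=3.0)   # central chi-squared right-tail via the routine above
p_ref = chi2.sf(3.0, df=4)            # reference survival function
print(p_kay, p_ref)                   # both should be about 0.5578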
Example #28
def lognorm_cdf(x, mu, sigma):
    """Log-normal CDF with parameters mu and sigma (location and scale of the underlying normal)."""
    #print 'x:', x
    if x == 0:
        return 0

    top = -(math.log(x)-mu)
    down = sigma*math.sqrt(2)
    return math.erfc(top/down)/2
Example #29
def logncdf(x, mu, sigma):  # log-normal cumulative distribution function
    if x < 0:
        cdf = 0.0
    elif sp.isposinf(x):
        cdf = 1.0
    else:
        z = (sp.log(x) - mu) / float(sigma)
        cdf = 0.5 * (math.erfc(-z / sp.sqrt(2)))
    return cdf
def analytical_tr(T0, T1, kappa, t, z):
  z = np.array(z)  # to make sure it's an array

  T = np.zeros(z.size)

  for iz in range(z.size):
    T[iz] = math.erfc(z[iz] / (2.0 * (kappa * t)**0.5)) * (T0 - T1) + T1

  return T
def _CrosshairShotResults_getShotResult(base,
                                        cls,
                                        hitPoint,
                                        collision,
                                        direction,
                                        excludeTeam=0,
                                        piercingMultiplier=1):
    if config.get('sight/enabled', True) and battle.isBattleTypeSupported:
        global piercingActual, armorActual, shotResult, hitAngle, normHitAngle, piercingChance
        old_piercingActual = piercingActual
        old_armorActual = armorActual
        old_hitAngle = hitAngle
        piercingActual = None
        armorActual = None
        piercingChance = None
        hitAngle = None
        normHitAngle = None
        shotResult = PIERCING_CHANCE_KEY[_SHOT_RESULT.UNDEFINED]
        if collision is None:
            if (old_armorActual != armorActual) or (
                    old_piercingActual != piercingActual) or (old_hitAngle !=
                                                              hitAngle):
                as_event('ON_CALC_ARMOR')
            return _SHOT_RESULT.UNDEFINED
        entity = collision.entity
        if entity.__class__.__name__ not in ('Vehicle', 'DestructibleEntity'):
            if (old_armorActual != armorActual) or (
                    old_piercingActual != piercingActual) or (old_hitAngle !=
                                                              hitAngle):
                as_event('ON_CALC_ARMOR')
            return _SHOT_RESULT.UNDEFINED
        if entity.health <= 0 or entity.publicInfo['team'] == excludeTeam:
            if (old_armorActual != armorActual) or (
                    old_piercingActual != piercingActual) or (old_hitAngle !=
                                                              hitAngle):
                as_event('ON_CALC_ARMOR')
            return _SHOT_RESULT.UNDEFINED
        player = BigWorld.player()
        if player is None:
            if (old_armorActual != armorActual) or (
                    old_piercingActual != piercingActual) or (old_hitAngle !=
                                                              hitAngle):
                as_event('ON_CALC_ARMOR')
            return _SHOT_RESULT.UNDEFINED
        vDesc = player.getVehicleDescriptor()
        shell = vDesc.shot.shell
        caliber = shell.caliber
        shellKind = shell.kind
        ppDesc = vDesc.shot.piercingPower
        maxDist = vDesc.shot.maxDistance
        dist = (hitPoint - player.getOwnVehiclePosition()).length
        piercingPower = cls._computePiercingPowerAtDist(
            ppDesc, dist, maxDist, piercingMultiplier)
        fullPiercingPower = piercingPower
        minPP, maxPP = cls._computePiercingPowerRandomization(shell)
        result = _SHOT_RESULT.NOT_PIERCED
        isJet = False
        jetStartDist = None
        ignoredMaterials = set()
        collisionsDetails = cls._getAllCollisionDetails(
            hitPoint, direction, entity)
        if collisionsDetails is None:
            if (old_armorActual != armorActual) or (
                    old_piercingActual != piercingActual) or (old_hitAngle !=
                                                              hitAngle):
                as_event('ON_CALC_ARMOR')
            return _SHOT_RESULT.UNDEFINED
        for cDetails in collisionsDetails:
            if isJet:
                jetDist = cDetails.dist - jetStartDist
                if jetDist > 0.0:
                    piercingPower *= 1.0 - jetDist * cls._SHELL_EXTRA_DATA[
                        shellKind].jetLossPPByDist
            if cDetails.matInfo is None:
                result = cls._CRIT_ONLY_SHOT_RESULT
            else:
                matInfo = cDetails.matInfo
                if (cDetails.compName, matInfo.kind) in ignoredMaterials:
                    continue
                hitAngleCos = cDetails.hitAngleCos if matInfo.useHitAngle else 1.0
                hitAngle = hitAngleCos
                normHitAngle = hitAngle
                if not isJet and cls._shouldRicochet(shellKind, hitAngleCos,
                                                     matInfo, caliber):
                    normHitAngle = -1.0
                    break
                piercingPercent = 1000.0
                if piercingPower > 0.0:
                    penetrationArmor = cls._computePenetrationArmor(
                        shellKind, hitAngleCos, matInfo, caliber)
                    normHitAngle = matInfo.armor / penetrationArmor if penetrationArmor != 0.0 else hitAngle
                    piercingPercent = 100.0 + (penetrationArmor - piercingPower
                                               ) / fullPiercingPower * 100.0
                    piercingActual = int(piercingPower)
                    armorActual = int(penetrationArmor)
                    # piercingChance = max(0, min(1.0, (piercingPower / penetrationArmor - 0.75) * 2)) if penetrationArmor > 0.0 else 1.0
                    armorRatio = penetrationArmor / piercingPower - 1.0
                    # piercingChance = 1.0 if armorRatio < -0.25 else 0.5 * math.erfc(8.485281374238576 * armorRatio) if armorRatio <= 0.25 else 0.0
                    if armorRatio < -0.25:
                        piercingChance = 1.0
                    elif armorRatio > 0.25:
                        piercingChance = 0.0
                    else:
                        piercingChance = 0.5 * math.erfc(
                            8.485281374238576 * armorRatio)
                    piercingPower -= penetrationArmor
                    # log('penetrationArmor = %s     piercingPercent = %s    piercingPower = %s' % (penetrationArmor, piercingPercent, piercingPower))
                if matInfo.vehicleDamageFactor:
                    if minPP < piercingPercent < maxPP:
                        result = _SHOT_RESULT.LITTLE_PIERCED
                    elif piercingPercent <= minPP:
                        result = _SHOT_RESULT.GREAT_PIERCED
                    break
                elif matInfo.extra:
                    if piercingPercent <= maxPP:
                        result = cls._CRIT_ONLY_SHOT_RESULT
                if matInfo.collideOnceOnly:
                    ignoredMaterials.add((cDetails.compName, matInfo.kind))
            if piercingPower <= 0.0:
                break
            if cls._SHELL_EXTRA_DATA[shellKind].jetLossPPByDist > 0.0:
                isJet = True
                mInfo = cDetails.matInfo
                armor = mInfo.armor if mInfo is not None else 0.0
                jetStartDist = cDetails.dist + armor * 0.001
        shotResult = PIERCING_CHANCE_KEY[result]
        if (old_armorActual !=
                armorActual) or (old_piercingActual !=
                                 piercingActual) or (old_hitAngle != hitAngle):
            as_event('ON_CALC_ARMOR')
        return result
    else:
        return base(hitPoint, collision, direction, excludeTeam)
Example #32
import numpy as np
from astropy.io import fits
from scipy.stats.distributions import chi2
from astropy.cosmology import FlatLambdaCDM
import math

cosmo = FlatLambdaCDM(H0=70,Om0=.3)

filename = 'output_cats/mass_select.fits'

with fits.open(filename) as hdu:
    data = hdu[1].data

    mask = data['stellar_mass_50'] > 10
    data = data[mask]

    mask = data['redshift_50'] > 2
    data = data[mask]

    mask = (chi2.sf(data['chisq_phot'],(data['n_bands']-10))) > (0.5*math.erfc(5*(2**-0.5)))
    data = data[mask]

    age = cosmo.age(data['redshift_50']).value*(10**9)
    factor = 0.2/age
    mask = 10**data['sSFR_50'] < factor
    data = data[mask]

    print(data.shape)
    newhdu = fits.BinTableHDU(data=data)
    newhdu.writeto('output_cats/sample.fits')
Example #33
def kendalltau(x, y, use_ties=True, use_missing=True, method='auto'):
    assert x.size == y.size, "Both variables should have the same number of observations."
    n = x.size
    if n < 2:
        return np.nan, np.nan

    rx = rankdata(x, use_missing)
    ry = rankdata(y, use_missing)
    valid = (rx > 0) * (ry > 0)
    rx = rx[valid]
    ry = ry[valid]
    n = np.sum(valid)
    idx = rx.argsort()
    (rx, ry) = (rx[idx], ry[idx])

    C, D = 0, 0
    for i in range(len(ry)-1):
        C += ((ry[i+1:] > ry[i]) * (rx[i+1:] > rx[i])).sum()
        D += ((ry[i+1:] < ry[i]) * (rx[i+1:] > rx[i])).sum()

    xties, corr_x = count_tied_groups(x)
    yties, corr_y = count_tied_groups(y)
    if use_ties:
        denom = np.sqrt((n*(n-1)-corr_x)/2. * (n*(n-1)-corr_y)/2.)
    else:
        denom = n*(n-1)/2.

    if denom == 0.:
        return np.nan, np.nan

    tau = (C-D) / denom

    if method == 'exact' and (len(xties) > 0 or len(yties) > 0):
        raise ValueError("Ties found, exact method cannot be used.")

    if method == 'auto':
        if (len(xties) == 0 and len(yties) == 0) and (n <= 33 or min(C, n*(n-1)/2.0-C) <= 1):
            method = 'exact'
        else:
            method = 'asymptotic'

    if len(xties) == 0 and len(yties) == 0 and method == 'exact':
        # Exact p-value, see Maurice G. Kendall, "Rank Correlation Methods" (4th Edition), Charles Griffin & Co., 1970.
        c = int(min(C, (n*(n-1))/2-C))
        if n <= 0:
            raise ValueError
        elif c < 0 or 2*c > n*(n-1):
            raise ValueError
        elif n == 1:
            prob = 1.0
        elif n == 2:
            prob = 1.0
        elif c == 0:
            prob = 2.0/factorial(n)
        elif c == 1:
            prob = 2.0/factorial(n-1)
        else:
            old = [0.0]*(c+1)
            new = [0.0]*(c+1)
            new[0] = 1.0
            new[1] = 1.0
            for j in range(3,n+1):
                old = new[:]
                for k in range(1,min(j,c+1)):
                    new[k] += new[k-1]
                for k in range(j,c+1):
                    new[k] += new[k-1] - old[k-j]
            prob = 2.0*np.sum(np.asarray(new))/factorial(n)
    elif method == 'asymptotic':
        var_s = n*(n-1)*(2*n+5)
        if use_ties:
            v1x, v1y, v2x, v2y = 0, 0, 0, 0
            for k, v in xties.items():
                var_s -= v*k*(k-1)*(2*k+5)*1.
                v1x += v*k*(k-1)
                if n > 2:
                    v2x += v*k*(k-1)*(k-2)
            for k, v in yties.items():
                var_s -= v*k*(k-1)*(2*k+5)*1.
                v1y += v*k*(k-1)
                if n > 2:
                    v2y += v*k*(k-1)*(k-2)
            v1 = v1x * v1y
            v2 = v2x * v2y
            v1 /= 2.*n*(n-1)
            if n > 2:
                v2 /= 9.*n*(n-1)*(n-2)
            else:
                v2 = 0
        else:
            v1 = v2 = 0

        var_s /= 18.
        var_s += (v1 + v2)
        z = (C-D)/np.sqrt(var_s)
        prob = erfc(abs(z)/np.sqrt(2))
    return tau, prob
Example #34
 def normal_cdf(x):
   return .5 * math.erfc(-x / math.sqrt(2))
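The identity 0.5*erfc(-x/sqrt(2)) = Phi(x) can be spot-checked against the standard library (assuming the function above is defined at module level with math imported):

import math
from statistics import NormalDist

for x in (-2.0, -0.5, 0.0, 0.5, 2.0):
    assert abs(normal_cdf(x) - NormalDist().cdf(x)) < 1e-12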
Example #35
 def erfc(self):
     import math
     return AdvFloat(math.erfc(self))
Example #36
def pidistf(x):
    y = 0.9
    e = 4 + erfc(x / 5.3)
    s = y * e
    return s
Example #37
def ber_mqam(EbN0, M):
    k = math.log(M, 2)
    return 2 / k * (1 - 1 / math.sqrt(M)) * math.erfc(
        math.sqrt(3 * EbN0 * k / (2 * (M - 1))))
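An illustrative evaluation (the 10 dB operating point and the 16-QAM choice are assumptions; math is assumed to be imported where ber_mqam is defined):

ebn0_linear = 10 ** (10 / 10)        # Eb/N0 = 10 dB in linear units
print(ber_mqam(ebn0_linear, 16))     # theoretical 16-QAM BER, on the order of 1e-3 here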
def madelung(screening_length=inf, g_ewald=7.1, kmax=12, approx=False):
    """Madelung constant for 1x1x1 Yukawa lattice with neutralizing background.

    Keyword arguments:
    screening_length -- the screening length (default inf)
    g_ewald -- Ewald splitting parameter (default 7.1)
    kmax -- range of reciprocal space sum (default 12)
    approx -- whether to use closed-form approximation (default False)

    See eqn (2.19) of Salin and Caillol, J. Chem. Phys. 113, 10459 (2000).
    (alpha = 1/screening_length, beta = g_ewald)
    """

    if isinf(screening_length):
        return -2.837297479
    if screening_length == 0:
        return 0

    a = 1.0 / screening_length  # screening wavenumber
    a2 = a * a

    # closed-form approximation
    if approx:
        r = 0.911544
        r2 = r * r
        return (4 * pi / a2 *
                (a * r + 1 + a2 * r2 / 3) - 1 / r) * exp(-a * r) - 4 * pi / a2

    b = 2 * g_ewald
    b2 = b * b
    kmax2 = kmax * kmax
    pi2 = pi * pi

    s = 0  # running total

    # Sum over the (+,+,+) octant, then multiply the result by 8.
    for k in list(product(range(kmax + 1), repeat=3))[1:]:
        k2 = k[0] * k[0] + k[1] * k[1] + k[2] * k[2]
        if k2 <= kmax2:
            q2 = 4 * pi2 * k2  # squared wavenumber
            t = exp(-(q2 + a2) / b2) / (q2 + a2)
            for i in range(3):
                # Correct for overcounting when k is on an axial plane.
                if k[i] == 0:
                    t /= 2
            s += t
    # This is where we multiply by 8.
    # (We also need a 4*pi because we are using Gaussian units.)
    s *= 32 * pi

    # See last line of eqn (2.19) of Salin and Caillol.
    # (Note the sign error in the final term.)
    s -= b * exp(-a2 / b2) / sqrt(pi)
    s += a * erfc(a / b)
    if a > 0.001:
        s += 4 * pi * (exp(-a2 / b2) - 1) / a2
    else:  # If a is small, use expansion about a = 0.
        s += -4 * pi / b2 + 2 * pi * a2 / (b2 * b2) - 2 * pi * a2 * a2 / (
            3 * b2 * b2 * b2)

    return s
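A hedged sanity check (assuming the math names and itertools.product used above are in scope): the unscreened call returns the tabulated Coulomb constant, and a very long screening length should land near it.

from math import inf, pi, exp, sqrt, erfc, isinf
from itertools import product

print(madelung())                       # -2.837297479, the Coulomb-limit shortcut
print(madelung(screening_length=1e6))   # should be close to the same value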
Example #39
def main():
    a = 5.873
    b = 4
    c = -2.7

    # number-theoretic and representation functions
    # https://docs.python.org/2/library/math.html#number-theoretic-and-representation-functions
    print("\nNumber-theoretic and representation functions")

    y = math.ceil(a)
    print("math.ceil({}) = {}".format(a, y))

    y = math.floor(a)
    print("math.floor({}) = {}".format(a, y))

    y = math.copysign(a, c)
    print("math.copysign({}, {}) = {}".format(a, c, y))

    y = math.fabs(c)
    print("math.fabs({}) = {}".format(c, y))

    y = math.factorial(b)
    print("math.factorial({}) = {}".format(b, y))

    y = math.fmod(a, b)
    print("math.fmod({}, {}) = {}".format(a, b, y))

    (y, z) = math.frexp(c)
    print("math.frexp({}) = ({}, {})".format(c, y, z))

    y = math.fsum([.1,.2,.3,.4,.5,.6,.7,.8,.9])
    print("math.fsum([.1,.2,.3,.4,.5,.6,.7,.8,.9]) = {}".format(y))

    y = math.isfinite(a)
    print("math.isfinite({}) = {}".format(a, y))

    y = math.isinf(a)
    print("math.isinf({}) = {}".format(a, y))

    y = math.isnan(c)
    print("math.isnan({}) = {}".format(c, y))

    y = math.ldexp(c, b)
    print("math.ldexp({}, {}) = {}".format(c, b, y))

    y = math.modf(a)
    print("math.modf({}) = {}".format(a, y))

    y = math.trunc(a)
    print("math.trunc({}) = {}".format(a, y))

    # Power and logarithmic functions
    print("\nPower and logarithmic functions")

    y = math.exp(b)
    print("math.exp({}) = {}".format(b, y))

    y = math.expm1(b)
    print("math.expm1({}) = {}".format(b, y))

    y = math.log(a)
    print("math.log({}) = {}".format(a, y))

    y = math.log1p(a)
    print("math.log1p({}) = {}".format(a, y))

    y = math.log2(a)
    print("math.log2({}) = {}".format(a, y))

    y = math.log10(a)
    print("math.log10({}) = {}".format(a, y))

    y = math.pow(a, b)
    print("math.pow({}, {}) = {}".format(a, b, y))

    y = math.sqrt(b)
    print("math.sqrt({}) = {}".format(b, y))

    # Trigonometric functions
    print("\nTrigonometric functions")

    a = 0.24235
    b = 0.5953

    y = math.acos(a)
    print("math.acos({}) = {}".format(a, y))

    y = math.asin(a)
    print("math.asin({}) = {}".format(a, y))

    y = math.atan(a)
    print("math.atan({}) = {}".format(a, y))

    y = math.atan2(a,b)
    print("math.atan2({},{}) = {}".format(a, b, y))
    
    a = 90
    b = 15

    y = math.sin(a)
    print("math.sin({}) = {}".format(a, y))

    y = math.cos(a)
    print("math.cos({}) = {}".format(a, y))

    y = math.tan(a)
    print("math.tan({}) = {}".format(a, y))

    y = math.hypot(a, b)
    print("math.hypot({}, {}) = {}".format(a, b, y))

    # Angular conversion
    print("\nAngular conversion")

    a = 0.83

    y = math.degrees(a)
    print("math.degrees({}) = {}".format(a, y))

    y = math.radians(b)
    print("math.radians({}) = {}".format(b, y))

    # Hyperbolic functions
    print("\nHyperbolic functions")

    a = 90

    y = math.acosh(b)
    print("math.acosh({}) = {}".format(b, y))

    y = math.asinh(a)
    print("math.asinh({}) = {}".format(a, y))

    y = math.atanh(0.53)
    print("math.atanh({}) = {}".format(0.53, y))

    y = math.cosh(b)
    print("math.cosh({}) = {}".format(b, y))

    y = math.sinh(a)
    print("math.sinh({}) = {}".format(a, y))

    y = math.tanh(b)
    print("math.tanh({}) = {}".format(b, y))

    # Special functions
    print("\nSpecial functions")

    a = 34

    y = math.erf(a)
    print("math.erf({}) = {}".format(a, y))

    y = math.erfc(a)
    print("math.erfc({}) = {}".format(a, y))

    y = math.gamma(a)
    print("math.gamma({}) = {}".format(a, y))

    y = math.lgamma(a)
    print("math.lgamma({}) = {}".format(a, y))
Example #40
math.sin(x) 	# Return the sine of x radians.
math.tan(x) 	# Return the tangent of x radians.
math.degrees(x) 	# Converts angle x from radians to degrees.
math.radians(x) 	# Converts angle x from degrees to radians.

################# Hyperbolic functions #################
math.acosh(x) 	# Return the inverse hyperbolic cosine of x.
math.asinh(x) 	# Return the inverse hyperbolic sine of x.
math.atanh(x) 	# Return the inverse hyperbolic tangent of x.
math.cosh(x) 	# Return the hyperbolic cosine of x.
math.sinh(x) 	# Return the hyperbolic sine of x.
math.tanh(x) 	# Return the hyperbolic tangent of x.

################# Special functions #################
math.erf(x) 	# Return the error function at x.
math.erfc(x) 	# Return the complementary error function at x.
math.gamma(x) 	# Return the Gamma function at x.
math.lgamma(x) 	# Return the natural logarithm of the absolute value of the Gamma function at x.

################# Constants #################
math.pi 	# The mathematical constant π = 3.141592..., to available precision.
math.e 		# The mathematical constant e = 2.718281..., to available precision.




# Example

import math
import time
def main():

    with open(sys.argv[1], 'r') as f:
        sample = f.readline()

    table = {}
    bin_sample = hex2bin(sample)
    n = len(bin_sample)

    # Calculate best values for L and Q based on length_bin_sample

    # [n, L, Q = 2 * 10^L, expectedValue, variance]
    input_size = [
        [387840, 6, 640, 5.2177052, 2.954],
        [904960, 7, 1280, 6.1962507, 3.125],
        [2068480, 8, 2560, 7.1836656, 3.238],
        [4654080, 9, 5120, 8.1764248, 3.311],
        [10342400, 10, 10240, 9.1723243, 3.35],
        [22753280, 11, 20480, 10.170032, 3.384],
        [49643520, 12, 40960, 11.168765, 3.401],
        [107560960, 13, 81920, 12.168070, 3.410],
        [231669760, 14, 163840, 13.167693, 3.416],
        [496435200, 15, 327680, 14.167488, 3.419],
        [1059061760, 16, 655360, 15.167379, 3.421]
    ]

    L = input_size[0][1]
    Q = input_size[0][2]
    expectedValue = input_size[0][3]
    variance = input_size[0][4]
    for i in range(1, len(input_size) - 1):
        if (input_size[i][0] >= n) and (input_size[i + 1][0] < n):
            L = input_size[i][1]
            Q = input_size[i][2]
            expectedValue = input_size[i][3]
            variance = input_size[i][4]
            break
    if n >= input_size[len(input_size) - 1][0]:
        L = input_size[len(input_size) - 1][1]
        Q = input_size[len(input_size) - 1][2]
        expectedValue = input_size[len(input_size) - 1][3]
        variance = input_size[len(input_size) - 1][4]

    initialization_segment = bin_sample[0 : Q * L]
    K = n/L - Q
    test_segment = bin_sample[Q * L : (Q + K) * L]

    backtrack("", L, table)

    for i in range((Q - 1) * L, -1, -L):
        block_id = i/L + 1
        block = str(initialization_segment[i : i + L])
        if table[block] < block_id:
            table[block] = block_id

    sum = 0
    for i in range(0, K * L, L):
        block_id = Q + i/L + 1
        block = str(test_segment[i : i + L])
        diff = block_id - table[block]
        sum += math.log(diff, 2)
        table[block] = block_id

    fn = sum / K

    c = 0.7 - 0.8/L + (4 + 32.0/L) * math.pow(K, -3.0/L) / 15
    sigma = c * math.sqrt(variance/K)
    pvalue = math.erfc(math.fabs((fn - expectedValue) / (math.sqrt(2) * sigma)))

    print 'P-value: ' + str(pvalue)
    if (pvalue < 0.1):
        print 'Sequence is non-random'
    else:
        print 'Sequence is random'
Example #42
def FitEMG(x, a, b, c):
    y = (a / 2) * math.exp((a / 2) * (2 * b + a * c**2 - 2 * x)) * math.erfc(
        (b + a * c**2 - x) / (math.sqrt(2) * c))
    return y
Example #43
def math_erfc(A, B):
    i = cuda.grid(1)
    B[i] = math.erfc(A[i])
Example #44
def neon_math_erfc(self):
    x = self.stack.pop().value
    self.stack.append(Value(decimal.Decimal(math.erfc(float(x)))))
Example #45
def maurers_universal_test(bits, patternlen=None, initblocks=None):
    n = len(bits)

    # Step 1. Choose the block size
    if patternlen != None:
        L = patternlen
    else:
        ns = [904960, 2068480, 4654080, 10342400,
              22753280, 49643520, 107560960,
              231669760, 496435200, 1059061760]
        L = 6
        if n < 387840:
            print("Error. Need at least 387840 bits. Got %d." % n)
            # exit()
            return False, 0.0, None
        for threshold in ns:
            if n >= threshold:
                L += 1

    # Step 2 Split the data into Q and K blocks
    nblocks = int(math.floor(n / L))
    if initblocks != None:
        Q = initblocks
    else:
        Q = 10 * (2 ** L)
    K = nblocks - Q

    # Step 3 Construct Table
    nsymbols = (2 ** L)
    T = [0 for x in range(nsymbols)]  # zero out the table
    for i in range(Q):  # Mark final position of
        pattern = bits[i * L:(i + 1) * L]  # each pattern
        idx = pattern2int(pattern)
        T[idx] = i + 1  # +1 to number indexes 1..(2**L)+1
        # instead of 0..2**L
    # Step 4 Iterate
    sum = 0.0
    for i in range(Q, nblocks):
        pattern = bits[i * L:(i + 1) * L]
        j = pattern2int(pattern)
        dist = i + 1 - T[j]
        T[j] = i + 1
        sum = sum + math.log(dist, 2)
    print("  sum =", sum)

    # Step 5 Compute the test statistic
    fn = sum / K
    print("  fn =", fn)

    # Step 6 Compute the P Value
    # Tables from https://static.aminer.org/pdf/PDF/000/120/333/
    # a_universal_statistical_test_for_random_bit_generators.pdf
    ev_table = [0, 0.73264948, 1.5374383, 2.40160681, 3.31122472,
                4.25342659, 5.2177052, 6.1962507, 7.1836656,
                8.1764248, 9.1723243, 10.170032, 11.168765,
                12.168070, 13.167693, 14.167488, 15.167379]
    var_table = [0, 0.690, 1.338, 1.901, 2.358, 2.705, 2.954, 3.125,
                 3.238, 3.311, 3.356, 3.384, 3.401, 3.410, 3.416,
                 3.419, 3.421]

    # sigma = math.sqrt(var_table[L])
    mag = abs((fn - ev_table[L]) / (
                (0.7 - 0.8 / L + (4 + 32 / L) * (pow(K, -3 / L)) / 15) * (math.sqrt(var_table[L] / K)) * math.sqrt(2)))
    P = math.erfc(mag)

    success = (P >= 0.01)
    return (success, P, None)
Example #46
from math import erfc, sqrt

SQRT2 = sqrt(2.0)  # assumed definition of the SQRT2 constant used below

def normal_cdf(x, mu, sig):
    return erfc((mu - x) / sig / SQRT2) / 2.0
Example #47
alc_indices = range(2)

excls = {0: (), 1: ()}

atmtypes = [('DUM_HC', 'DUM_HC'), ('DUM_OW_spc', 'DUM_OW_spc')]

charges = {0: (0.1, 0.0), 1: (-0.1, 0.0)}

rtol = 1e-6
rc = 0.9

# determine ewald stuff
xvals = np.linspace(0, 5, 1000000)
vals = np.zeros_like(xvals)
for i, x in enumerate(xvals):
    vals[i] = erfc(x)

etol_ind = np.argmax(vals < rtol)
beta_smooth = xvals[etol_ind] / rc
print("smoothing width: {}".format(beta_smooth))

ke = 138.9354859  # conv factor to get kJ/mol

fudge = 1.0

ewald_self_this = 0.0
ewald_self_for = [0.0 for for_lmbda in for_lmbdas]

for alc_idx in alc_indices:
    charge_a, charge_b = charges[alc_idx]
    ewald_self_this += -ke * beta_smooth / np.sqrt(np.pi) * (
Example #48
def qfunc(x):
    y = 0.5 * math.erfc(np.sqrt(x / 2))
    return y
def test(input, n, patternlen=None, initblocks=None):

    # Step 1. Choose the block size
    if patternlen != None:
        L = patternlen  
    else: 
        ns = [904960,2068480,4654080,10342400,
              22753280,49643520,107560960,
              231669760,496435200,1059061760]
        L = 6
        if n < 387840:
            # Too little data. Inputs of length at least 387840 are recommended
            return [0] * 8
        for threshold in ns:
            if n >= threshold:
                L += 1 

    # Step 2 Split the data into Q and K blocks
    nblocks = int(math.floor(n/L))
    if initblocks != None:
        Q = initblocks
    else:
        Q = 10*(2**L)
    K = nblocks - Q
    
    # Step 3 Construct Table
    nsymbols = (2**L)
    T=[0 for x in range(nsymbols)] # zero out the table
    for i in range(Q):             # Mark final position of
        pattern = input[i*L:(i+1)*L] # each pattern
        idx = int(pattern, 2)
        T[idx]=i+1      # +1 to number indexes 1..(2**L)+1
                        # instead of 0..2**L
    # Step 4 Iterate
    sum = 0.0
    for i in range(Q,nblocks):
        pattern = input[i*L:(i+1)*L]
        j = int(pattern,2)
        dist = i+1-T[j]
        T[j] = i+1
        sum = sum + math.log(dist,2)
    
    # Step 5 Compute the test statistic
    fn = sum/K
       
    # Step 6 Compute the P Value
    # Tables from https://static.aminer.org/pdf/PDF/000/120/333/
    # a_universal_statistical_test_for_random_bit_generators.pdf
    ev_table =  [0,0.73264948,1.5374383,2.40160681,3.31122472,
                 4.25342659,5.2177052,6.1962507,7.1836656,
                 8.1764248,9.1723243,10.170032,11.168765,
                 12.168070,13.167693,14.167488,15.167379]
    var_table = [0,0.690,1.338,1.901,2.358,2.705,2.954,3.125,
                 3.238,3.311,3.356,3.384,3.401,3.410,3.416,
                 3.419,3.421]
                 
    # sigma = math.sqrt(var_table[L])
    sigma = abs((fn - ev_table[L])/((math.sqrt(var_table[L]))*math.sqrt(2)))
    P = math.erfc(sigma)

    success = (P >= 0.01)
    return [n, nblocks, L, K, Q, sigma, P, success]
Example #50
 

os.system('clear')
loop = 0

Pe = [0,0,0,0,0,0,0,0,0,0,0,0,0]
ber_sim = [0,0,0,0,0,0,0,0,0,0,0,0,0]
SNRdb = [-2,-1,0,1,2,3,4,5,6,7,8,9,10]
No_of_bits = 100000000 # 1e8



for iter in range(1,14,1):
#for SNRdb in range(-2,2,1):
    SNR = 10**(SNRdb[loop]/10.0)  # linear SNR
    Pe[loop] = 0.5*erfc(np.sqrt(SNR))
    #print(Pe[loop])
    No = 1/SNR
    #print(No)
    tx_sig = np.random.binomial(n=1, p=0.5, size=No_of_bits)
    
    tx_sig = 2*tx_sig-1
    #print(tx_sig)
    noise = np.sqrt(No/2)*np.random.randn(No_of_bits)
    #print(noise)
    rx_sig =  tx_sig + noise
    #rx_sig =  conv(tx_sig, multi_path_channel) + noise
    #print(rx_sig)
    decision = np.sign(rx_sig)
    #print(decision)
    err = decision - tx_sig
Example #51
def erfc(x):
    return math.erfc(x)
print

print '{:^3} {:^6} {:^6} {:^6}'.format('R', 'Arcsin', 'Arccos', 'Arctan')
print '{:-^3} {:-^6} {:-^6} {:-^6}'.format('', '', '', '')
for r in [ 0, 0.5, 1 ]:
    print '{:3.1f} {:6.4f} {:6.4f} {:6.4f}'.format( r, math.asin(r), math.acos(r), math.atan(r) )
print

## 5.4.10 Hyperbolic Functions
# Hyperbolic functions  appear in linear differential equations
print '{:^4} {:^6} {:^6} {:^6}'.format('X', 'sinh', 'cosh', 'tanh')
print '{:-^4} {:-^6} {:-^6} {:-^6}'.format('', '', '', '')
for x in xrange(0, 11, 2):
    x = x/10.0
    print '{:4.2f} {:6.4f} {:6.4f} {:6.4f}'.format( x, math.sinh(x), math.cosh(x), math.tanh(x) )
print

## 5.4.11 Special Functions
# Gauss Error function: erf(-x) == -erf(x)
print '{:^5} {:7}'.format('X', 'erf(x)')
print '{:-^5} {:-^7}'.format('', '')
for x in [-3, -2, -1, -0.5, -0.25, 0, 0.25, 0.5, 1, 2, 3]:
    print '{:5.2f} {:7.4f}'.format( x, math.erf(x) )
print

# And the complementary error function
print '{:^5} {:7}'.format('X', 'erfc(x)')
print '{:-^5} {:-^7}'.format('', '')
for x in [-3, -2, -1, -0.5, -0.25, 0, 0.25, 0.5, 1, 2, 3]:
    print '{:5.2f} {:7.4f}'.format( x, math.erfc(x) )
print
Example #53
def Q(x):

    return 0.5 * math.erfc(x / math.sqrt(2))
def normcdf(n):
    return 0.5 * math.erfc(-n * math.sqrt(0.5))
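The two helpers are complementary tails, which gives a cheap check (illustrative x values, assuming math is imported where they are defined):

for x in (-1.5, 0.0, 0.7, 2.3):
    assert abs(Q(x) + normcdf(x) - 1.0) < 1e-12   # upper tail plus lower tail is 1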
Example #55
def pvalue(x, sigma):

    return 0.5 * erfc(x / (sigma * np.sqrt(2)))
Example #56
import matplotlib.pyplot as plt
import numpy as np
import math as mt

B = np.linspace(0, 20, 200)
A = [2, 4, 8, 16, 32, 64]
C = np.ones([200]) * 0.02
print(C)

for i in range(6):
    M = A[i]
    y = [(mt.sqrt(M) - 1)/mt.sqrt(M)*mt.log(M,2) * mt.erfc(mt.sqrt(3*mt.log(M,2)*(10**(x/10))/(2*M-2))) + \
        (mt.sqrt(M) - 2) / mt.sqrt(M) * mt.log(M, 2) * mt.erfc(mt.sqrt(3 * mt.log(M, 2)*(10**(x/10)) / (2 * M - 2))) for x in B]

    plt.plot(B, y, '.')
    plt.plot(B, C, '-')
    plt.semilogy(B, y)

    plt.ylim(0.0, 100)

plt.show()
Example #57
def ber_qpsk(EbN0):
    return 0.5 * math.erfc(math.sqrt(EbN0))
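An illustrative evaluation (the 10 dB operating point is an assumption; math is assumed to be imported where ber_qpsk is defined):

ebn0_linear = 10 ** (10 / 10)     # Eb/N0 = 10 dB in linear units
print(ber_qpsk(ebn0_linear))      # about 3.9e-6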
Example #58
# Hyperbolic functions
#
if not math.isclose(math.sinh(1), (math.e - 1 / math.e) / 2):
    fail("math.isclose(math.sinh(1), (math.e - 1/math.e) / 2)")
if not math.isclose(math.cosh(1), (math.e + 1 / math.e) / 2):
    fail("math.isclose(math.cosh(1), (math.e + 1/math.e) / 2)")
if not math.isclose(math.tanh(1), math.sinh(1) / math.cosh(1)):
    fail("math.isclose(math.tanh(1), math.sinh(1)/math.cosh(1))")
if not math.isclose(math.asinh(math.sinh(1)), 1):
    fail("math.isclose(math.asinh(math.sinh(1)), 1)")
if not math.isclose(math.acosh(math.cosh(1)), 1):
    fail("math.isclose(math.acosh(math.cosh(1)), 1)")
if not math.isclose(math.atanh(math.tanh(1)), 1):
    fail("math.isclose(math.atanh(math.tanh(1)), 1)")
#
# Special functions
#
if not math.isclose(math.erf(1), 0.8427007929497149):
    fail("math.isclose(math.erf(1), 0.8427007929497149)")
if not math.isclose(math.erfc(1), 1 - math.erf(1)):
    fail("math.isclose(math.erfc(1), 1-math.erf(1))")
if not math.isclose(math.gamma(10), math.factorial(9)):
    fail("math.isclose(math.gamma(10), math.factorial(9))")
if not math.isclose(math.lgamma(10), math.log(math.factorial(9))):
    fail("math.isclose(math.lgamma(10), math.log(math.factorial(9)))")
#
# Constants
#
if not math.isclose(math.pi * 2, math.tau):
    fail("math.isclose(math.pi * 2, math.tau)")
Example #59
def _slow_ndtr_cpu(x):
    return 0.5 * math.erfc(-x / 2**0.5)
Example #60
def ber_mpsk(EbN0, M):
    k = math.log(M, 2)
    return 1 / k * math.erfc(math.sqrt(EbN0 * k) * math.sin(math.pi / M))