Example #1
    def _evaluatepot(self,R):
        """
        Pot= G*( M(r)/r + (Mtot/rc)*gamma(m(p-2),R^1/m)/gamma(m(p-3)   )
        ma M(r)= Mtot * (1- Gamma(m(3-p),r**(1/m))/Gamma(m(3-p))
        quindi

        Pot= (GMtot/rc)*((rc/r)*((1- Gamma(m(3-p),r/rc**(1/m))/Gamma(m(3-p)) + gamma(m(2-p),R^1/m)/gamma(m(3-p) )

        :param R:
        :return:
        """
        m=self.m
        p=self.p
        rc=self.rc
        x=m*(3-p)
        y=m*(2-p)

        R=np.asarray(R)

        a=(1/R)*(1-gammaincc(x,(R/rc)**(1/m)))

        b1=gammaincc(y,(R/rc)**(1/m))*gamma(y)
        b2=gamma(x)

        b=(1/rc)*b1/b2


        return self._potnorm*( a+b)
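Every example on this page leans on the same scipy convention, so a minimal reminder sketch (not taken from any of the projects above) may help: scipy.special.gammaincc is the regularized upper incomplete gamma Q(s, x) = Gamma(s, x) / Gamma(s), so the unregularized Gamma(s, x) written in the docstring above is gammaincc(s, x) * gamma(s), and 1 - gammaincc(s, x) equals gammainc(s, x).

import numpy as np
from scipy.special import gamma, gammainc, gammaincc

s, x = 2.5, 1.3
upper = gammaincc(s, x) * gamma(s)          # unregularized Gamma(s, x)
assert np.isclose(1.0 - gammaincc(s, x), gammainc(s, x))
print(upper)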
Example #2
def serialtest(binin, m=16):
    ''' The focus of this test is the frequency of each and every overlapping m-bit pattern across the entire sequence. The purpose of this test is to determine whether the number of occurrences of the 2^m m-bit overlapping patterns is approximately the same as would be expected for a random sequence. The patterns may overlap.'''
    n = len(binin)
    hbin = binin + binin[0:m - 1:]
    f1a = [hbin[xs:m + xs:] for xs in range(n)]
    oo = set(f1a)
    f1 = [f1a.count(xs) ** 2 for xs in oo]  # squared counts, as for f2 and f3 below
    f2a = [hbin[xs:m - 1 + xs:] for xs in range(n)]
    f2 = [f2a.count(xs) ** 2 for xs in set(f2a)]
    f3a = [hbin[xs:m - 2 + xs:] for xs in range(n)]
    f3 = [f3a.count(xs) ** 2 for xs in set(f3a)]
    psim1 = 0
    psim2 = 0
    psim3 = 0
    if m >= 0:
        suss = reduce(su, f1)
        psim1 = 1.0 * 2 ** m * suss / n - n
    if m >= 1:
        suss = reduce(su, f2)
        psim2 = 1.0 * 2 ** (m - 1) * suss / n - n
    if m >= 2:
        suss = reduce(su, f3)
        psim3 = 1.0 * 2 ** (m - 2) * suss / n - n
    d1 = psim1 - psim2
    d2 = psim1 - 2 * psim2 + psim3
    pval1 = spc.gammaincc(2 ** (m - 2), d1 / 2.0)
    pval2 = spc.gammaincc(2 ** (m - 3), d2 / 2.0)
    return [pval1, pval2]
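A hypothetical driver for serialtest() above, assuming su is a pairwise-sum helper, reduce comes from functools, and spc is scipy.special (none of these bindings are shown in the snippet itself):

from functools import reduce
import numpy as np
import scipy.special as spc

su = lambda a, b: a + b                      # assumed pairwise-sum helper

rng = np.random.default_rng(0)
bits = ''.join(rng.choice(['0', '1'], size=2048))
p1, p2 = serialtest(bits, m=4)               # for random input both p-values are usually >= 0.01
print(p1, p2)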
Example #3
def gamma_hazard_scipy(x, a, b, dt=1e-4):
    """
    Compute the hazard function for a gamma process with parameters a,b
    where a and b are the parameters of the gamma PDF:
    y(x) = x^(a-1) \exp(-x/b) / (\Gamma(a)*b^a)

    Inputs:
        x   - in units of seconds
        a   - dimensionless
        b   - in units of seconds

    See also:
        inh_gamma_generator
    """

    # This algorithm is presently not used by
    # inh_gamma_generator as it has numerical problems
    # Try: 
    # plot(stgen.gamma_hazard(arange(0,1000.0,0.1),10.0,1.0/50.0))
    # and look for the kinks.

    if check_dependency('scipy'):
        from scipy.special import gammaincc
    Hpre = -log(gammaincc(a,(x-dt)/b))
    Hpost = -log(gammaincc(a,(x+dt)/b))
    val =  0.5*(Hpost-Hpre)/dt

    if isinstance(val,numpy.ndarray):
        val[numpy.isnan(val)] = 1.0/b
        return val
    elif numpy.isnan(val):
        return 1.0/b
    else:
        return val
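As an illustrative cross-check (not part of the original module), the same hazard can be written directly as pdf / survival with scipy.stats.gamma, which avoids the finite-difference kinks mentioned in the comments; the (a, b) parameterization below, with b as the scale in seconds, is an assumption matching the docstring.

import numpy as np
from scipy.stats import gamma as gamma_dist

def gamma_hazard_direct(x, a, b):
    # h(x) = f(x) / S(x); for large x this tends to 1/b, the fallback used above for NaNs
    x = np.asarray(x, dtype=float)
    return gamma_dist.pdf(x, a, scale=b) / gamma_dist.sf(x, a, scale=b)

print(gamma_hazard_direct([0.5, 5.0, 50.0], a=10.0, b=1.0))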
Example #4
    def ggrb_int_cpl(a, Ec, Emin, Emax):

        # Gammaincc does not support quantities
        i1 = gammaincc(2 + a, Emin / Ec) * gamma(2 + a)
        i2 = gammaincc(2 + a, Emax / Ec) * gamma(2 + a)

        return -Ec * Ec * (i2 - i1)
Example #5
def custom_incomplete_gamma(a, x):
    """ Incomplete gamma function.

    For the case covered by scipy, a > 0, scipy is called. Otherwise the gamma function
    recurrence relations are called, extending the scipy behavior.

    Parameters
    -----------
    a : array_like

    x : array_like

    Returns
    --------
    gamma : array_like

    Examples
    --------
    >>> a, x = 1, np.linspace(1, 10, 100)
    >>> g = custom_incomplete_gamma(a, x)
    >>> a = 0
    >>> g = custom_incomplete_gamma(a, x)
    >>> a = -1
    >>> g = custom_incomplete_gamma(a, x)
    """

    if isinstance(a, np.ndarray):

        if not isinstance(x, np.ndarray):
            x = np.repeat(x, len(a))

        if len(a) != len(x):
            msg = ("The ``a`` and ``x`` arguments of the "
                   "``custom_incomplete_gamma`` function must have the same"
                   "length.\n")
            raise HalotoolsError(msg)

        result = np.zeros(len(a))

        mask = (a < 0)
        if np.any(mask):
            result[mask] = ((custom_incomplete_gamma(a[mask]+1, x[mask]) -
                x[mask]**a[mask] * np.exp(-x[mask])) / a[mask])
        mask = (a == 0)
        if np.any(mask):
            result[mask] = -expi(-x[mask])
        mask = a > 0
        if np.any(mask):
            result[mask] = gammaincc(a[mask], x[mask]) * gamma(a[mask])

        return result

    else:

        if a < 0:
            return (custom_incomplete_gamma(a+1, x) - x**a * np.exp(-x))/a
        elif a == 0:
            return -expi(-x)
        else:
            return gammaincc(a, x) * gamma(a)
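The a < 0 branch above relies on the recurrence Gamma(a, x) = (Gamma(a+1, x) - x**a * exp(-x)) / a; a minimal scipy-only sanity check of that identity, evaluated where both sides are available directly (a > 0):

import numpy as np
from scipy.special import gamma, gammaincc

a, x = 1.5, np.linspace(0.5, 5.0, 5)
direct = gammaincc(a, x) * gamma(a)
via_recurrence = (gammaincc(a + 1, x) * gamma(a + 1) - x**a * np.exp(-x)) / a
assert np.allclose(direct, via_recurrence)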
Example #6
def log_d_pois_like_trunc_5(d,s1,s2,a,p):
    """double poisson w max 5 goals"""
    #dp = np.sign(d)*np.power(np.abs(d),p)
    dp = 1.5*np.arctan(d)    #print(dp)
    return ( log(a)*(s1+s2)+dp*(s1-s2) - 2*a*cosh(dp)
         -gammaln(s1+1) - gammaln(s2+1) 
        -log(gammaincc(6,a*exp(-dp))*gammaincc(6,a*exp(dp)) ) ) 
Example #7
 def rhs(ins):
     """
     Right-hand side function for computing the bounding ellipsoid
     radius given a desired maximum error bound for the first
     derivative of the Riemann theta function.
     """
     return gamma((g+1)/2)*gammaincc((g+1)/2, ins) +               \
         np.sqrt(Pi)*normTinv*L * gamma(g/2)*gammaincc(g/2, ins) - \
         float(lhs)
Example #8
def amoroso_cdf(x, parameters):
    '''Cumulative distribution function of the Amoroso distribution at x, given parameters (a, theta, alpha, beta)'''
    a, theta, alpha, beta = parameters
    z =  (x - a) / theta
    # avoid NaN
    if theta < 0:
        if x >= a:
            return 1.0
        else:
            return gammaincc(alpha, z**beta)
    else:
        if x <= a:
            return 1.0
        else:
            return 1 - gammaincc(alpha, z**beta)
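A consistency sketch (assuming amoroso_cdf above is in scope): with a = 0, beta = 1 and theta > 0 the Amoroso distribution reduces to a gamma distribution with shape alpha and scale theta, so its CDF should match scipy.stats.gamma.cdf.

import numpy as np
from scipy.special import gammaincc
from scipy.stats import gamma as gamma_dist

alpha, theta, x = 2.0, 1.5, 3.0
assert np.isclose(amoroso_cdf(x, (0.0, theta, alpha, 1.0)),
                  gamma_dist.cdf(x, alpha, scale=theta))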
Example #9
    def integrate(self,lower,upper,method=None,**kwargs):
        """
        Analytically Compute Schechter integral using incomplete gamma
        functions. If `method` is not None, numerical integration will be used.
        The gamma functions break down for alpha<=-1, so numerical is used if
        that is the case.
        """
        if self.alpha<=-1:
            method = True #use default numerical method, because gamma functions fail for alpha<=-1
        if method is not None:
            return FunctionModel1D.integrate(self,lower,upper,method,**kwargs)
        
        
        from scipy.special import gamma,gammainc,gammaincc
            
        s = self.alpha+1
        u = upper/self.Lstar
        l = lower/self.Lstar
        
        if upper==np.inf and lower<=0:
            I = gamma(s)
        elif upper==np.inf:
            I = gammaincc(s,l)*gamma(s)
        elif lower==0:
            I = gammainc(s,u)*gamma(s)
        else:
            I = (gammainc(s,u) - gammainc(s,l))*gamma(s)

        return self.phistar*I
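An illustrative check of the identity behind the analytic branch: for s > 0, the integral from l to u of t**(s-1) * exp(-t) dt equals (gammainc(s, u) - gammainc(s, l)) * gamma(s), which is what the code evaluates after substituting t = L/Lstar with s = alpha + 1.

import numpy as np
from scipy.integrate import quad
from scipy.special import gamma, gammainc

s, l, u = 1.7, 0.2, 3.0
analytic = (gammainc(s, u) - gammainc(s, l)) * gamma(s)
numeric, _ = quad(lambda t: t**(s - 1) * np.exp(-t), l, u)
assert np.isclose(analytic, numeric)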
Example #10
 def mass(self, x):
     """
     (dimensionless) Mass function of the Einasto profile
     """
     alpha = self.alpha
     factor = (np.exp(2) * alpha**(3.-alpha) / 8.)**(1./alpha)
     return (1. - gammaincc(3./alpha, 2.*x**alpha/alpha)) * factor
Example #11
    def curve_fit(self, full_output=False, print_out=False, reval=False):
        """only handles one function model. Doesn't handle errors which are dependant on the parameters"""
        # Set-Up
        x = [fitwith.eqn.inputs for fitwith in self.fitwith]
        y = np.concatenate(self.measured.evaln)
        sigma_y = np.concatenate(self.measured.d_evaln)

        # print(len(self.ymodel(x, *self.Params.p0)))

        # Evaluating
        (p, C) = opt.curve_fit(self.ymodel, x, y, sigma=sigma_y, p0=self.Params.p0)

        # Updating
        self.params = p
        self.update_fitwith_params(*p, reval=reval)  # *** should be doing this? ***
        self.chisq = np.sum((y - self.ymodel(x, *p))**2 / sigma_y**2)
        self.cov = C

        if full_output + print_out > 0:  # either fulloutput or printout is True
            full = {}  # Full output dictionary
            full["params"] = p
            full["cov"] = C
            full["chisq"] = self.chisq
            full["dof"] = len(y) - len(p)
            full["Q"] = sf.gammaincc(0.5 * full["dof"], 0.5 * full["chisq"])

            if print_out is True:
                print("Best fit:")
                for i, val in enumerate(p):
                    print("a{} = {} +/- {}".format(i, val, np.sqrt(C[i, i])))
                print("chisq = {} \nndof = {} \ngoodness of fit = {}".format(full["chisq"], full["dof"], full["Q"]))
            if full_output is True:
                return p, C, full
        return p, C
Example #12
    def chi2prob(self, chi2=None):
        """Probability that chi2 per dof is greater than specified value.

        Parameters
        ----------
        chi2 : array_like, optional
            Chi square per degree of freedom.  If not specified, will use
            ``self.chi2``.  Note that this is `chi2` *per degree of freedom*.

        Returns
        -------
        prob : ndarray
            Probability that `chi2` of fit will be greater than given `chi2`.
        """
        # import scipy.special
        # forces import of pkg_resources (via _ellip_harm_2)
        # This import catalogues the entire python installation, which may
        # consist of thousands of packages (e.g.- full Macports or Anaconda
        # installation), and takes several seconds.  This is an unreasonable
        # penalty for simply importing lsqfit, since the only place the
        # special function is required is in this rarely used method.
        # Therefore, defer importing scipy.special to runtime here.
        from scipy.special import gammaincc  # noqa
        if chi2 is None:
            chi2 = self.chi2
        chi2 = asfarray(chi2)
        hndof = 0.5 * self.ndof
        return gammaincc(hndof, hndof*chi2)
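An equivalent formulation (illustrative only, not part of lsqfit): the regularized upper incomplete gamma used above is exactly the chi-square survival function, so the same probability can be obtained from scipy.stats.chi2 once the per-degree-of-freedom value is rescaled to a total chi-square.

import numpy as np
from scipy.special import gammaincc
from scipy.stats import chi2 as chi2_dist

ndof, chi2_per_dof = 12, 1.3
total = chi2_per_dof * ndof
assert np.isclose(gammaincc(0.5 * ndof, 0.5 * total), chi2_dist.sf(total, ndof))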
Example #13
def linearcomplexitytest(binin,m=500):
    ##
    ## Focus:
    ##  test the length of a generating feedback register.
    ##
    ## Purpose:
    ##  determine whether or not the sequence is complex enough to 
    ##  be considered random. Random sequences are characterized by a longer 
    ##  feedback register. A short feedback register implies non-randomness.
    ##
    ## Outcome:
    ##  If the computed P-value is < 0.01, then conclude that the sequence 
    ##  is non-random. Otherwise, conclude that the sequence is random.
    ##

    k = 6
    pi = [0.01047, 0.03125, 0.125, 0.5, 0.25, 0.0625, 0.020833]
    avg = 0.5*m + (1.0/36)*(9 + (-1)**(m + 1)) - (m/3.0 + 2.0/9)/2**m
    blocks = Util.stringpart(binin, m)
    bign = len(blocks)
    lc = ([lincomplex(chunk) for chunk in blocks])
    t = ([-1.0*(((-1)**m)*(chunk-avg)+2.0/9) for chunk in lc])
    vg=np.histogram(t,bins=[-9999999999,-2.5,-1.5,-0.5,0.5,1.5,2.5,9999999999])[0][::-1]
    im=([((vg[ii]-bign*pi[ii])**2)/(bign*pi[ii]) for ii in xrange(7)])
    chisqr=reduce(Util.su,im)
    pval=spc.gammaincc(k/2.0,chisqr/2.0)
    return pval
Example #14
def approximateentropytest(binin, m=10):
    ##
    ## Focus:
    ##  test the frequency of each and every overlapping m-bit pattern. 
    ##
    ## Purpose:
    ##  compare the frequency of overlapping blocks of two 
    ##  consecutive/adjacent lengths (m and m+1) against the expected 
    ##  result for a random sequence.
    ##
    ## Outcome:
    ##  If the computed P-value is < 0.01, then conclude that the sequence 
    ##  is non-random. Otherwise, conclude that the sequence is random.
    ##

    n = len(binin)
    f1a = [(binin + binin[0:m - 1:])[xs:m + xs:] for xs in xrange(n)]
    f1 = [[xs, f1a.count(xs)] for xs in sorted(set(f1a))]
    f2a = [(binin + binin[0:m:])[xs:m + 1 + xs:] for xs in xrange(n)]
    f2 = [[xs, f2a.count(xs)] for xs in sorted(set(f2a))]
    c1 = [1.0 * f1[xs][1] / n for xs in xrange(len(f1))]
    c2 = [1.0 * f2[xs][1] / n for xs in xrange(len(f2))]
    phi1 = reduce(Util.su, map(Util.logo, c1))
    phi2 = reduce(Util.su, map(Util.logo, c2))
    apen = phi1 - phi2
    chisqr = 2.0 * n * (np.log(2) - apen)
    pval = spc.gammaincc(2 ** (m - 1), chisqr / 2.0)
    return pval
Example #15
def nonoverlappingtemplatematchingtest(binin, mat="000000001", num=8):
    ##
    ## Focus:
    ##  test the number of occurrences of pre-defined target substrings. 
    ##
    ## Purpose: 
    ##  reject sequences that exhibit too many occurrences of a given 
    ##  non-periodic (aperiodic) pattern. For this test and for the 
    ##  Overlapping Template Matching test, an m-bit window is used to 
    ##  search for a specific m-bit pattern. If the pattern is not found, 
    ##  the window slides one bit position. For this test, when the pattern 
    ##  is found, the window is reset to the bit after the found pattern,
    ##   and the search resumes.
    ##
    ## Outcome:
    ##  If the computed P-value is < 0.01, then conclude that the sequence is 
    ##  non-random. Otherwise, conclude that the sequence is random.
    ##

    n = len(binin)
    m = len(mat)
    M = n/num
    blocks = [binin[xs*M:M+xs*M:] for xs in xrange(n/M)]
    counts = [xx.count(mat) for xx in blocks]
    avg = 1.0 * (M-m+1)/2 ** m
    var = M*(2**-m -(2*m-1)*2**(-2*m))
    chisqr = reduce(Util.su, [(xs - avg) ** 2 for xs in counts]) / var
    pval = spc.gammaincc(1.0 * len(blocks) / 2, chisqr / 2)
    return pval
Example #16
def custom_incomplete_gamma(a, x):
    """ Incomplete gamma function. 
    
    For the case covered by scipy, a > 0, scipy is called. Otherwise the gamma function
    recurrence relations are used, extending the scipy behavior. The only other difference from the
    scipy function is that `custom_incomplete_gamma` only supports the case where the input ``a`` is a scalar.
    
    Parameters
    -----------
    a : float 
    
    x : array_like 
    
    Returns 
    --------
    gamma : array_like 

    Examples 
    --------
    >>> a, x = 1, np.linspace(1, 10, 100)
    >>> g = custom_incomplete_gamma(a, x)
    >>> a = 0
    >>> g = custom_incomplete_gamma(a, x)
    >>> a = -1
    >>> g = custom_incomplete_gamma(a, x)
    """

    if a < 0:
        return (custom_incomplete_gamma(a + 1, x) - x ** a * np.exp(-x)) / a
    elif a == 0:
        return -expi(-x)
    else:
        return gammaincc(a, x) * gamma(a)
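A short sketch of the a == 0 special case used above: Gamma(0, x) is the exponential integral E1(x), which scipy also exposes as exp1 and which equals -expi(-x) for x > 0.

import numpy as np
from scipy.special import exp1, expi

x = np.linspace(0.1, 5.0, 5)
assert np.allclose(-expi(-x), exp1(x))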
Example #17
def blockfrequencytest(binin, nu=128):
    ''' The focus of the test is the proportion of zeroes and ones within M-bit blocks. The purpose of this test is to determine whether the frequency of ones in an M-bit block is approximately M/2.'''
    ss = [int(el) for el in binin]
    tt = [1.0 * sum(ss[xs * nu:nu + xs * nu:]) / nu for xs in range(len(ss) / nu)]
    uu = list(map(sus, tt))
    chisqr = 4 * nu * reduce(su, uu)
    pval = spc.gammaincc(len(tt) / 2.0, chisqr / 2.0)
    return pval
Example #18
 def insert_row(self, objectId, band, chisq, dof):
     chi2prob = gammaincc(dof/2., chisq/2.)
     query = """insert into Chisq set objectId=%(objectId)i,
                filterName='%(band)s', chisq=%(chisq)12.4e,
                dof=%(dof)i, chi2prob=%(chi2prob)12.4e
                on duplicate key update
                chisq=%(chisq)12.4e, dof=%(dof)i,
                chi2prob=%(chi2prob)12.4e""" % locals()
     self.apply(query)
Example #19
def serialtest(binin, m=16):
    ##
    ## Focus:
    ##  test the frequency of each and every overlapping m-bit pattern 
    ##  across the entire sequence.
    ##
    ## Purpose:
    ##  determine whether the number of occurrences of the 2^m m-bit
    ##  overlapping patterns is approximately the same as would be 
    ##  expected for a random sequence. The pattern can overlap.
    ##
    ## Outcome:
    ##  If the computed P-value is < 0.01, then conclude that the sequence 
    ##  is non-random. Otherwise, conclude that the sequence is random.
    ##

    n = len(binin)
    hbin=binin+binin[0:m-1:]
    f1a = [hbin[xs:m+xs:] for xs in xrange(n)]
    oo=set(f1a)
    f1 = [f1a.count(xs)**2 for xs in oo]  # squared counts, as for f2 and f3 below
    f2a = [hbin[xs:m-1+xs:] for xs in xrange(n)]
    f2 = [f2a.count(xs)**2 for xs in set(f2a)]
    f3a = [hbin[xs:m-2+xs:] for xs in xrange(n)]
    f3 = [f3a.count(xs)**2 for xs in set(f3a)]
    psim1 = 0
    psim2 = 0
    psim3 = 0
    if m >= 0:
        suss = reduce(Util.su,f1)
        psim1 = 1.0 * 2 ** m * suss / n - n
    if m >= 1:
        suss = reduce(Util.su,f2)
        psim2 = 1.0 * 2 ** (m - 1) * suss / n - n
    if m >= 2:
        suss = reduce(Util.su,f3)
        psim3 = 1.0 * 2 ** (m - 2) * suss / n - n
    d1 = psim1-psim2
    d2 = psim1-2 * psim2 + psim3
    pval1 = spc.gammaincc(2 ** (m - 2), d1 / 2.0)
    pval2 = spc.gammaincc(2 ** (m - 3), d2 / 2.0)
    return [pval1, pval2]
Example #20
def gammainc_fun( a, z ):
    if np.any(z < 0):
        print('ERROR: z must be >= 0')
        return
    if a == 0:
        return -expi(-z)
    elif a < 0:
        return ( gammainc_fun(a+1,z) - np.power(z,a) * np.exp(-z) ) / a
    else:
        return gammaincc(a,z) * gamma(a)
Example #21
    def _evaluatemass(self,R):
        """
        formula: M(r) = Mtot * (1 - Gamma(m*(3-p), (r/rc)^(1/m)) / Gamma(m*(3-p)))
        :param R:
        :return:
        """
        R=np.asarray(R)
        num=gammaincc(self.m*(3-self.p),(R/self.rc)**self.nu) # No need to divide by the complete Gamma: scipy's gammaincc is already normalized by gamma

        return self.Mmax*(1-num)
Example #22
 def lnPchisq(self, chisq, ndof):
     """Shortcut to log(gamma function) for chi-square fit probability
     
     If the chi-square is less than 1000, evaluates the probability of
     a good fit for N degrees of freedom using scipy.special.gammaincc().
     Otherwise, uses an asymptotic series to prevent underflow.
     """
     if chisq < 1000.0:
         return np.log(gammaincc(0.5*ndof, 0.5*chisq))
     else:
         return gammainccln(0.5*ndof, 0.5*chisq)
Example #23
def GammaInc( a, z ):
    if np.any(z < 0):
        print('ERROR: z must be >= 0')
        return
    if a == 0:
        return -expi(-z)
    
    elif a < 0:
        return ( GammaInc(a+1,z) - np.power(z,a) * np.exp(-z) ) / a
    else:
        return gammaincc(a,z) * gamma(a)
Example #24
def nonoverlappingtemplatematchingtest(binin, mat="000000001", num=8):
    ''' The focus of this test is the number of occurrences of pre-defined target substrings. The purpose of this test is to reject sequences that exhibit too many occurrences of a given non-periodic (aperiodic) pattern. For this test and for the Overlapping Template Matching test, an m-bit window is used to search for a specific m-bit pattern. If the pattern is not found, the window slides one bit position. For this test, when the pattern is found, the window is reset to the bit after the found pattern, and the search resumes.'''
    n = len(binin)
    m = len(mat)
    M = n / num
    blocks = [binin[xs * M:M + xs * M:] for xs in range(n / M)]
    counts = [xx.count(mat) for xx in blocks]
    avg = 1.0 * (M - m + 1) / 2 ** m
    var = M * (2 ** -m - (2 * m - 1) * 2 ** (-2 * m))
    chisqr = reduce(su, [(xs - avg) ** 2 for xs in counts]) / var
    pval = spc.gammaincc(1.0 * len(blocks) / 2, chisqr / 2)
    return pval
Example #25
def linearcomplexitytest(binin, m=500):
    ''' The focus of this test is the length of a generating feedback register. The purpose of this test is to determine whether or not the sequence is complex enough to be considered random. Random sequences are characterized by a longer feedback register. A short feedback register implies non-randomness.'''
    k = 6
    pi = [0.01047, 0.03125, 0.125, 0.5, 0.25, 0.0625, 0.020833]
    avg = 0.5 * m + (1.0 / 36) * (9 + (-1) ** (m + 1)) - (m / 3.0 + 2.0 / 9) / 2 ** m
    blocks = stringpart(binin, m)
    bign = len(blocks)
    lc = ([lincomplex(chunk) for chunk in blocks])
    t = ([-1.0 * (((-1) ** m) * (chunk - avg) + 2.0 / 9) for chunk in lc])
    vg = np.histogram(t, bins=[-9999999999, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 9999999999])[0][::-1]
    im = ([((vg[ii] - bign * pi[ii]) ** 2) / (bign * pi[ii]) for ii in range(7)])
    chisqr = reduce(su, im)
    pval = spc.gammaincc(k / 2.0, chisqr / 2.0)
    return pval
Example #26
def frequency_test_within_a_block(E, M=8):
    n = len(E)
    N = int(n / M)
    pi = []
    for i in range(N):
        pi.append(0)
        for j in range(M):
            pi[i] += int(E[(i * M) + j])
        pi[i] /= M
    X = 0
    for i in range(N):
        X += (pi[i] - 0.5)**2
    X *= 4 * M
    p = gammaincc(N / 2, X / 2)
    return p
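A hypothetical usage example for frequency_test_within_a_block(); the imports are assumptions, since the snippet above does not show its own.

import numpy as np
from scipy.special import gammaincc

rng = np.random.default_rng(1)
E = ''.join(rng.choice(['0', '1'], size=1024))
p = frequency_test_within_a_block(E, M=8)
print(p)   # typically >= 0.01 for a random sequence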
Example #27
    def testLargeXSmallA(self, dtype, rtol, atol):
        self.maybe_skip_test(dtype)
        rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
        # Test values near zero.
        x = np.random.uniform(low=100., high=200.,
                              size=[NUM_SAMPLES]).astype(dtype)
        a = np.random.uniform(low=0.3, high=1.,
                              size=[NUM_SAMPLES]).astype(dtype)

        expected_values = sps.gammaincc(a, x)
        with self.session() as sess:
            with self.test_scope():
                y = _igammac(a, x)
            actual = sess.run(y)
        self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
Example #28
def aproximateentropytest(binin, m=2):
    ''' The focus of this test is the frequency of each and every overlapping m-bit pattern. The purpose of the test is to compare the frequency of overlapping blocks of two consecutive/adjacent lengths (m and m+1) against the expected result for a random sequence.'''
    n = len(binin)
    f1a = [(binin + binin[0:m - 1:])[xs:m + xs:] for xs in xrange(n)]
    f1 = [[xs, f1a.count(xs)] for xs in sorted(set(f1a))]
    f2a = [(binin + binin[0:m:])[xs:m + 1 + xs:] for xs in xrange(n)]
    f2 = [[xs, f2a.count(xs)] for xs in sorted(set(f2a))]
    c1 = [1.0 * f1[xs][1] / n for xs in xrange(len(f1))]
    c2 = [1.0 * f2[xs][1] / n for xs in xrange(len(f2))]
    phi1 = reduce(su, map(logo, c1))
    phi2 = reduce(su, map(logo, c2))
    apen = phi1 - phi2
    chisqr = 2.0 * n * (np.log(2) - apen)
    pval = spc.gammaincc(2**(m - 1), chisqr / 2.0)
    return pval
Example #29
def psi_einasto(r,alpha,rho_2_1e14,r_2):
    """Einasto"""
    ### map between Einasto params and Miller et al. params ###
    rho_2 = rho_2_1e14*1e14

    n = 1/alpha
    rho_0 = rho_2 * np.exp(2.*n)
    h = r_2 / (2.*n)**n

    d_n = (3*n) - (1./3.) + (8./1215.*n) + (184./229635.*n**2.) + (1048./31000725.*n**3.) - (17557576./1242974068875. * n**4.) 
    gamma_3n = 2 * ((ss.gammainc(3*n , d_n) ) * ss.gamma(3*n))
    Mtot = 4. * np.pi * rho_0 * Msun * (h**3.) * n * gamma_3n #kg

    G_newton = astroc.G.to( u.Mpc *  u.km**2 / u.s**2 / u.kg).value #Mpc km2/s^2 kg

    ### phi orig ###
    s_orig = r/h

    part1_orig = 1. - ( ( ss.gammaincc(3.*n,s_orig**(1./n)) * ss.gamma(3.*n) ) / gamma_3n )
    part2_orig = (s_orig *  ss.gammaincc(2.*n,s_orig**(1./n)) * ss.gamma(2.*n) ) / gamma_3n

    phi_ein_orig =  -(G_newton* Mtot/(s_orig*h)) * (part1_orig+part2_orig)

    return phi_ein_orig
Example #30
def linearcomplexitytest(binin, m=500):
    ''' The focus of this test is the length of a generating feedback register. The purpose of this test is to determine whether or not the sequence is complex enough to be considered random. Random sequences are characterized by a longer feedback register. A short feedback register implies non-randomness.'''
    k = 6
    pi = [0.01047, 0.03125, 0.125, 0.5, 0.25, 0.0625, 0.020833]
    avg = 0.5*m + (1.0/36)*(9 + (-1)**(m + 1)) - (m/3.0 + 2.0/9)/2**m
    blocks = stringpart(binin, m)
    bign = len(blocks)
    lc = ([lincomplex(chunk) for chunk in blocks])
    t = ([-1.0*(((-1)**m)*(chunk-avg)+2.0/9) for chunk in lc])
    vg = np.histogram(t, bins=[-9999999999, -2.5, -
                               1.5, -0.5, 0.5, 1.5, 2.5, 9999999999])[0][::-1]
    im = ([((vg[ii]-bign*pi[ii])**2)/(bign*pi[ii]) for ii in range(7)])
    chisqr = reduce(su, im)
    pval = spc.gammaincc(k/2.0, chisqr/2.0)
    return pval
Example #31
def aproximateentropytest(binin, m=10):
    ''' The focus of this test is the frequency of each and every overlapping m-bit pattern. The purpose of the test is to compare the frequency of overlapping blocks of two consecutive/adjacent lengths (m and m+1) against the expected result for a random sequence.'''
    n = len(binin)
    f1a = [(binin + binin[0:m - 1:])[xs:m + xs:] for xs in range(n)]
    f1 = [[xs, f1a.count(xs)] for xs in sorted(set(f1a))]
    f2a = [(binin + binin[0:m:])[xs:m + 1 + xs:] for xs in range(n)]
    f2 = [[xs, f2a.count(xs)] for xs in sorted(set(f2a))]
    c1 = [1.0 * f1[xs][1] / n for xs in range(len(f1))]
    c2 = [1.0 * f2[xs][1] / n for xs in range(len(f2))]
    phi1 = reduce(su, list(map(logo, c1)))
    phi2 = reduce(su, list(map(logo, c2)))
    apen = phi1 - phi2
    chisqr = 2.0 * n * (np.log(2) - apen)
    pval = spc.gammaincc(2 ** (m - 1), chisqr / 2.0)
    return pval
Example #32
def random_excursions(bit_string):
    """
    http://qrng.b-phot.org/static/media/NistTestsLongDescription.pdf

    :param bit_string: string of bits
    :return p-values: list of 8 p-values; the sequence passes if all are >= 0.01
    """
    '''
    int_data = []
    for i in bit_string:
        a = -1.0 if (i == '0') else 1.0
        int_data.append(a)
    '''
    int_data = [(-1.0 if (i == '0') else 1.0) for i in bit_string]

    # calculate cumulative sum and append 0 to beginning and end
    cumulative_sum = np.cumsum(int_data)
    cumulative_sum = np.append(cumulative_sum, [0])
    cumulative_sum = np.append([0], cumulative_sum)

    states = np.array([-4, -3, -2, -1, 1, 2, 3, 4])

    # identify locations where cumulative sum visits 0
    position = np.where(cumulative_sum == 0)[0]

    cycles = []
    for pos in list(range(len(position) - 1)):
        cycles.append(cumulative_sum[position[pos]:position[pos + 1] + 1])

    num_cycles = len(cycles)

    state_count = []
    for cycle in cycles:
        state_count.append(
            ([len(np.where(cycle == state)[0]) for state in states]))
    state_count = np.transpose(np.clip(state_count, 0, 5))

    su = []
    for cycle in range(6):
        su.append([(sct == cycle).sum() for sct in state_count])
    su = np.transpose(su)

    piks = ([([get_pik_value(uu, state) for uu in range(6)])
             for state in states])
    inner_term = num_cycles * np.array(piks)
    chi = np.sum(1.0 * (np.array(su) - inner_term)**2 / inner_term, axis=1)
    p_values = ([gammaincc(2.5, cs / 2.0) for cs in chi])
    return p_values
Example #33
    def _ord_gennorm_partial_moment(n: int, z: int, beta: float) -> float:
        r"""
        Partial moment for ordinary generalized normal parameterization.

        Parameters
        ----------
        n : int
            Order of partial moment
        z : float
            Upper bound for partial moment integral
        beta : float
            Parameter of generalized normal

        Returns
        -------
        float
            Partial moment

        Notes
        -----
        The standard parameterization follows:

        .. math::

        f(x)=\frac{\beta}{2\Gamma(\beta^{-1})}\text{exp}\left(-|x|^{\beta}\right)
        """
        if n < 0:
            return nan

        w = 0.5 * beta / gamma((1 / beta))

        # integral over (-inf, min(z,0))
        lz = abs(min(z, 0)) ** beta
        lterm = (
            w
            * ((-1) ** n)
            * (1 / beta)
            * gamma((n + 1) / beta)
            * gammaincc((n + 1) / beta, lz)
        )

        # remaining integral
        rz = max(0, z) ** beta
        rterm = w * (1 / beta) * gamma((n + 1) / beta) * gammainc((n + 1) / beta, rz)

        moment = lterm + rterm

        return moment
Example #34
def frequency_within_a_block(M, n):
	epsilon = get_value('epsilon')
	p_value_dict = get_value('p_value_dict')
	summary = 0
	length_of_a_block = n / M
	for i in range(length_of_a_block):
		summary_of_a_block = 0
		for j in range(M):
			summary_of_a_block += epsilon[i * M + j]
		pi = float(summary_of_a_block) / M
		v = pi - 0.5
		summary += v ** 2 
	chi_squared = 4 * M * summary
	p_value = gammaincc(length_of_a_block / 2.0, chi_squared / 2.0)
	p_value_dict['frequency_within_a_block'].append(p_value)
	set_value('finished_tests_num', get_value('finished_tests_num') + 1)
Example #35
def frequency_within_block(bits, num_of_blocks):
    block_size = int(math.floor(len(bits) / num_of_blocks))
    n = int(block_size * num_of_blocks)

    proportions = list()
    for i in xrange(num_of_blocks):
        block = bits[i * (block_size):((i + 1) * (block_size))]
        zeroes, ones = count_ones_zeroes(block)
        proportions.append(Fraction(ones, block_size))

    chisq = 0.0
    for prop in proportions:
        chisq += 4.0 * block_size * ((prop - Fraction(1, 2))**2)

    p = gammaincc((num_of_blocks / 2.0), float(chisq) / 2.0)
    return p
Example #36
        def transcendental_eq_3d(savefig):

            f = lambda x: prm.rho * prm.L_m * x**3 - prm.q_0 / (
                16 * np.pi
            ) * np.exp(-(x**2) / prm.kappa_l) + prm.k_s * np.sqrt(
                prm.kappa_s
            ) * (prm.theta_m - prm.theta_i) * np.exp(-(x**2) / prm.kappa_s) / (
                (-2) * gamma(0.5) * gammaincc(0.5, x**2 / prm.kappa_s) + 2 * np
                .sqrt(prm.kappa_s) / x * np.exp(-x**2 / prm.kappa_s))

            lambda_ = fsolve(f, 0.00001, xtol=1e-10)

            if savefig:
                ax, _ = graph_transcendental_eq(lambda_, f, 3)
                ax.axhline(y=0, lw=1, color='k')
            return lambda_[0]
Example #37
def v_esc_theory_non_flat_lil(theta,z_c,alpha,rho_2_1e14,r_2,beta,lil_omega_M,lil_omega_lambda):
    """cosmology"""
    qHsquared = ( (0.5 * lil_omega_M * (1+z_c)**3.)- lil_omega_lambda) * 100.**2.

    """Einasto"""
    ### map between Einasto params and Miller et al. params ###
    rho_2 = rho_2_1e14*1e14

    n = 1/alpha
    rho_0 = rho_2 * np.exp(2.*n)
    h = r_2 / (2.*n)**n

    d_n = (3*n) - (1./3.) + (8./1215.*n) + (184./229635.*n**2.) + (1048./31000725.*n**3.) - (17557576./1242974068875. * n**4.) 
    gamma_3n = 2 * ((ss.gammainc(3*n , d_n) ) * ss.gamma(3*n))
    Mtot = 4. * np.pi * rho_0 * Msun * (h**3.) * n * gamma_3n #kg

    G_newton = astroc.G.to( u.Mpc *  u.km**2 / u.s**2 / u.kg).value #Mpc km2/s^2 kg

    r_eq_cubed = -(G_newton*Mtot) / qHsquared #Mpc ^3
    r_eq = (r_eq_cubed)**(1.0/3.0)# Mpc


    """if r_eq -> infinity then return vanilla Einasto"""
    if math.isnan(r_eq) == False:
        ### phi orig ###
        s_orig = r/h

        part1_orig = 1. - ( ( ss.gammaincc(3.*n,s_orig**(1./n)) * ss.gamma(3.*n) ) / gamma_3n )
        part2_orig = (s_orig *  ss.gammaincc(2.*n,s_orig**(1./n)) * ss.gamma(2.*n) ) / gamma_3n

        phi_ein_orig =  -(G_newton* Mtot/(s_orig*h)) * (part1_orig+part2_orig)

        ### phi r_eq ###
        s_req = r_eq/h

        part1_req = 1. - ( ( ss.gammaincc(3.*n,s_req**(1./n)) * ss.gamma(3.*n) ) / gamma_3n )
        part2_req = (s_req *  ss.gammaincc(2.*n,s_req**(1./n)) * ss.gamma(2.*n) ) / gamma_3n

        phi_ein_req =  -(G_newton* Mtot/(s_req*h)) * (part1_req+part2_req)

        v_esc = np.sqrt(-2.*( phi_ein_orig - phi_ein_req ) - qHsquared * (r**2 - r_eq**2) )

    elif math.isnan(r_eq) == True:
        ### phi orig ###
        s_orig = r/h

        part1_orig = 1. - ( ( ss.gammaincc(3.*n,s_orig**(1./n)) * ss.gamma(3.*n) ) / gamma_3n )
        part2_orig = (s_orig *  ss.gammaincc(2.*n,s_orig**(1./n)) * ss.gamma(2.*n) ) / gamma_3n

        phi_ein_orig =  -(G_newton* Mtot/(s_orig*h)) * (part1_orig+part2_orig)

        v_esc = np.sqrt(-2.* phi_ein_orig  ) 

    v_esc_projected = v_esc / np.sqrt(g_beta(beta))
    return v_esc_projected
Example #38
def v_esc_theory_flat_lambda(theta,z_c,alpha,rho_2_1e14,r_2,beta,little_h,omega_DE):
    """cosmology"""
    H_z = H_z_function(z_c, [omega_DE,little_h],'flat_lambda')
    q_z= q_z_function(z_c,omega_DE,'flat_lambda')

    """Einasto"""
    ### map between Einasto params and Miller et al. params ###
    rho_2 = rho_2_1e14*1e14

    n = 1/alpha
    rho_0 = rho_2 * np.exp(2.*n)
    h = r_2 / (2.*n)**n

    d_n = (3*n) - (1./3.) + (8./1215.*n) + (184./229635.*n**2.) + (1048./31000725.*n**3.) - (17557576./1242974068875. * n**4.) 
    gamma_3n = 2 * ((ss.gammainc(3*n , d_n) ) * ss.gamma(3*n))
    Mtot = 4. * np.pi * rho_0 * Msun * (h**3.) * n * gamma_3n #kg

    G_newton = astroc.G.to( u.Mpc *  u.km**2 / u.s**2 / u.kg).value #Mpc km2/s^2 kg

    r_eq_cubed = -((G_newton*Mtot) / (q_z * H_z**2.)) #Mpc ^3
    r_eq = (r_eq_cubed)**(1.0/3.0)# Mpc

    """if r_eq -> infinity then return vanilla Einasto"""
    if q_z < 0.:
        ### phi orig ###
        s_orig = r/h

        part1_orig = 1. - ( ( ss.gammaincc(3.*n,s_orig**(1./n)) * ss.gamma(3.*n) ) / gamma_3n )
        part2_orig = (s_orig *  ss.gammaincc(2.*n,s_orig**(1./n)) * ss.gamma(2.*n) ) / gamma_3n

        phi_ein_orig =  -(G_newton* Mtot/(s_orig*h)) * (part1_orig+part2_orig)

        ### phi r_eq ###
        s_req = r_eq/h

        part1_req = 1. - ( ( ss.gammaincc(3.*n,s_req**(1./n)) * ss.gamma(3.*n) ) / gamma_3n )
        part2_req = (s_req *  ss.gammaincc(2.*n,s_req**(1./n)) * ss.gamma(2.*n) ) / gamma_3n

        phi_ein_req =  -(G_newton* Mtot/(s_req*h)) * (part1_req+part2_req)

        v_esc = np.sqrt(-2.*( phi_ein_orig - phi_ein_req ) - q_z * (H_z**2.) * (r**2 - r_eq**2) )

    elif q_z >= 0.:
        ### phi orig ###
        s_orig = r/h

        part1_orig = 1. - ( ( ss.gammaincc(3.*n,s_orig**(1./n)) * ss.gamma(3.*n) ) / gamma_3n )
        part2_orig = (s_orig *  ss.gammaincc(2.*n,s_orig**(1./n)) * ss.gamma(2.*n) ) / gamma_3n

        phi_ein_orig =  -(G_newton* Mtot/(s_orig*h)) * (part1_orig+part2_orig)

        v_esc = np.sqrt(-2.*( phi_ein_orig  )) 

    v_esc_projected = v_esc / np.sqrt(g_beta(beta))
    return v_esc_projected
Example #39
def longestrunones8(binin):
    ''' The focus of the test is the longest run of ones within M-bit blocks. The purpose of this test is to determine whether the length of the longest run of ones within the tested sequence is consistent with the length of the longest run of ones that would be expected in a random sequence. Note that an irregularity in the expected length of the longest run of ones implies that there is also an irregularity in the expected length of the longest run of zeroes. Long runs of zeroes were not evaluated separately due to a concern about statistical independence among the tests.'''
    m = 8
    k = 3
    pik = [0.2148, 0.3672, 0.2305, 0.1875]
    blocks = [binin[xs*m:m+xs*m:] for xs in range(len(binin) / m)]
    n = len(blocks)
    counts1 = [xs+'01' for xs in blocks] # append the string 01 to guarantee the length of 1
    counts = [xs.replace('0',' ').split() for xs in counts1] # split into all parts
    counts2 = [list(map(len, xx)) for xx in counts]
    counts4 = [(4 if xx > 4 else xx) for xx in map(max,counts2)]
    freqs = [counts4.count(spi) for spi in [1, 2, 3, 4]]
    chisqr1 = [(freqs[xx]-n*pik[xx])**2/(n*pik[xx]) for xx in range(4)]
    chisqr = reduce(su, chisqr1)
    pval = spc.gammaincc(k / 2.0, chisqr / 2.0)
    return pval
Example #40
def longestrunones8(binin):
    ''' The focus of the test is the longest run of ones within M-bit blocks. The purpose of this test is to determine whether the length of the longest run of ones within the tested sequence is consistent with the length of the longest run of ones that would be expected in a random sequence. Note that an irregularity in the expected length of the longest run of ones implies that there is also an irregularity in the expected length of the longest run of zeroes. Long runs of zeroes were not evaluated separately due to a concern about statistical independence among the tests.'''
    m = 8
    k = 3
    pik = [0.2148, 0.3672, 0.2305, 0.1875]
    blocks = [binin[xs * m:m + xs * m:] for xs in range(len(binin) / m)]
    n = len(blocks)
    counts1 = [xs + '01' for xs in blocks]  # append the string 01 to guarantee the length of 1
    counts = [xs.replace('0', ' ').split() for xs in counts1]  # split into all parts
    counts2 = [list(map(len, xx)) for xx in counts]
    counts4 = [(4 if xx > 4 else xx) for xx in map(max, counts2)]
    freqs = [counts4.count(spi) for spi in [1, 2, 3, 4]]
    chisqr1 = [(freqs[xx] - n * pik[xx]) ** 2 / (n * pik[xx]) for xx in range(4)]
    chisqr = reduce(su, chisqr1)
    pval = spc.gammaincc(k / 2.0, chisqr / 2.0)
    return pval
Example #41
 def validate(self, bitstring: str) -> bool:
     validate_numeral(bitstring, ALPHABETS["BINARY"])
     n: int = len(bitstring)
     if n < 100:
         return False
     N: int = n // self.blocksize
     ki_square_obs: float = 0.0
     for i in range(N):
         block: str = bitstring[
             i * self.blocksize : (i + 1) * self.blocksize
         ]
         pi: float = block.count("1") / self.blocksize
         ki_square_obs += (pi - 0.5) ** 2
     ki_square_obs *= 4 * self.blocksize
     p_value: float = gammaincc(N / 2, ki_square_obs / 2)
     return p_value >= 0.01
Example #42
def liner_complexity_test(key, n, M=500, b_print=True):
    if n < 1000000:
        __print(
            b_print, '{:40} : Error. Need at least 1,000,000 bits. Got {}.'.format(
                'liner complexity test', n))
        return [0], False

    N = n // M

    K = 6

    key = ''.join(list(map(str, key)))
    split_key = list(split_list(key, M))

    if len(split_key[-1]) != len(split_key[0]):
        split_key = split_key[0:-1]

    L = list(map(bma, split_key))

    myu = M / 2 + (9 + (-1)**(M + 1)) / 36 - (M / 3 + 2 / 9) / 2**M

    T = list(map(lambda x: (-1)**M * (x - myu) + 2 / 9, L))

    v = [0] * 7

    for i in T:
        if i <= -2.5: v[0] += 1
        elif i <= -1.5: v[1] += 1
        elif i <= -0.5: v[2] += 1
        elif i <= 0.5: v[3] += 1
        elif i <= 1.5: v[4] += 1
        elif i <= 2.5: v[5] += 1
        else: v[6] += 1

    pi = [0.010417, 0.03125, 0.125, 0.5, 0.25, 0.0625, 0.020833]

    chi_squared_obs = sum(
        list(map(lambda x, y: (x - N * y)**2 / (N * y), v, pi)))

    p = sc.gammaincc(K / 2, chi_squared_obs / 2)

    b = (p >= 0.01)

    __print(b_print, '{:40} : {:.3f} -> {} '.format('liner complexity test', p,
                                                    b))

    return [p], b
Example #43
def test(input, n):

    M8 = [0.2148, 0.3672, 0.2305, 0.1875]

    # Length of blocks
    M = 8

    K = 3

    N = 16

    # Table of frequencies
    v = [0, 0, 0, 0, 0, 0, 0]

    for i in range(N):  # over each block
        #find the longest run
        block = input[i * M:((i + 1) * M)]  # Block i

        run = 0
        longest = 0
        for j in range(M):  # Count the bits.
            if block[j] == '1':
                run += 1
                if run > longest:
                    longest = run
            else:
                run = 0

        if longest <= 1: v[0] += 1
        elif longest == 2: v[1] += 1
        elif longest == 3: v[2] += 1
        else: v[3] += 1

    # Compute Chi-Sq
    chi_sq = 0.0
    for i in range(K + 1):
        p_i = M8[i]
        upper = (v[i] - N * p_i)**2
        lower = N * p_i
        chi_sq += upper / lower
    # p-value
    p = ss.gammaincc(K / 2.0, chi_sq / 2.0)

    if p >= 0.01:
        return print("04 = ", True)
    else:
        return print("04 = ", False)
Example #44
def test_schechter_lf_redshift():

    from skypy.galaxy.redshift import schechter_lf_redshift, redshifts_from_comoving_density
    from astropy.cosmology import FlatLambdaCDM
    from scipy.special import gamma, gammaincc

    # fix this cosmology
    cosmo = FlatLambdaCDM(H0=70, Om0=0.3)

    # parameters for the sampling
    z = np.linspace(1e-10, 2., 1000)
    M_star = -20
    phi_star = 1e-3
    alpha = -0.5
    m_lim = 30.
    fsky = 1/41253

    # sample redshifts
    z_gal = schechter_lf_redshift(z, M_star, phi_star, alpha, m_lim, fsky, cosmo, noise=False)

    # the absolute magnitude limit as function of redshift
    M_lim = m_lim - cosmo.distmod(z).value

    # lower limit of unscaled Schechter random variable
    x_min = 10.**(-0.4*(M_lim - M_star))

    # density with factor from upper incomplete gamma function
    density = phi_star*gamma(alpha+1)*gammaincc(alpha+1, x_min)

    # turn into galaxies/surface area
    density *= 4*np.pi*fsky*cosmo.differential_comoving_volume(z).to_value('Mpc3/sr')

    # integrate total number
    n_gal = np.trapz(density, z, axis=-1)

    # make sure noise-free sample has right size
    assert np.isclose(len(z_gal), n_gal, atol=1.0)

    # turn density into CDF
    cdf = density  # same memory
    np.cumsum((density[1:]+density[:-1])/2*np.diff(z), out=cdf[1:])
    cdf[0] = 0
    cdf /= cdf[-1]

    # check distribution of sample
    D, p = kstest(z_gal, lambda z_: np.interp(z_, z, cdf))
    assert p > 0.01, 'D = {}, p = {}'.format(D, p)
Example #45
def nonoverlappingtemplatematchingtest(binin, mat="000000001", num=8):
    ''' The focus of this test is the number of occurrences of pre-defined target substrings.
        The purpose of this test is to reject sequences that exhibit too many occurrences of a given non-periodic (aperiodic) pattern.
        For this test and for the Overlapping Template Matching test, an m-bit window is used to search for a specific m-bit pattern.
        If the pattern is not found, the window slides one bit position. For this test, when the pattern is found,
        the window is reset to the bit after the found pattern, and the search resumes.
    '''
    n = len(binin)
    m = len(mat)
    M = n / num
    blocks = [binin[xs * M:M + xs * M:] for xs in xrange(n / M)]
    counts = [xx.count(mat) for xx in blocks]
    avg = 1.0 * (M - m + 1) / 2**m
    var = M * (2**-m - (2 * m - 1) * 2**(-2 * m))
    chisqr = reduce(su, [(xs - avg)**2 for xs in counts]) / var
    pval = spc.gammaincc(1.0 * len(blocks) / 2, chisqr / 2)
    return pval > 0.1
Example #46
    def test_igamc(self):
        a_list = np.arange(0.01, 10, 0.05)
        x_list = np.arange(0, 10, 0.1)
        test_params = cartesian([a_list, x_list]).astype(np.float64)

        python_results = np.zeros(test_params.shape[0])
        for ind in range(test_params.shape[0]):
            python_results[ind] = gammaincc(test_params[ind, 0],
                                            test_params[ind, 1])

        opencl_results = igamc().evaluate(
            {
                'a': test_params[:, 0],
                'x': test_params[:, 1]
            }, test_params.shape[0])

        assert_allclose(opencl_results, python_results, atol=1e-7, rtol=1e-7)
Example #47
def gammap(gamma,
           x):
    '''
    Regularized lower incomplete gamma function P(gamma, x), evaluated via
    gammainc for x < gamma + 1 and via 1 - gammaincc otherwise.

    :param gamma: shape parameter
    :param x: evaluation point
    :return: P(gamma, x)
    '''

    if x < (gamma + 1.0):
    
        return gammainc(gamma, x)
    
    else:
    
        return 1.0 - gammaincc(gamma, x)
Example #48
  def testPoissonCDFNonIntegerValues(self):
    batch_size = 6
    lam = tf.constant([3.0] * batch_size)
    lam_v = 3.0
    x = np.array([2.2, 3.1, 4., 5.5, 6., 7.], dtype=np.float32)

    poisson = self._make_poisson(rate=lam)
    cdf = poisson.cdf(x)
    self.assertEqual(cdf.get_shape(), (6,))

    # The Poisson CDF should be valid on these non-integer values, and
    # equal to igammac(1 + x, rate).
    self.assertAllClose(self.evaluate(cdf), special.gammaincc(1. + x, lam_v))

    with self.assertRaisesOpError("cannot contain fractional components"):
      poisson_validate = self._make_poisson(rate=lam, validate_args=True)
      self.evaluate(poisson_validate.cdf(x))
Example #49
def longest_run_of_ones(n):
    epsilon = get_value('epsilon')
    p_value_dict = get_value('p_value_dict')
    if n < 128:
        p_value = 0.0
    else:
        if n < 6272:
            K = 3
            M = 8
            V = (1, 2, 3, 4)
            pi = (0.2148, 0.3672, 0.2305, 0.1875)
        elif n < 750000:
            K = 5
            M = 128
            V = (4, 5, 6, 7, 8, 9)
            pi = (0.1174, 0.2430, 0.2493, 0.1752, 0.1027, 0.1124)
        else:
            K = 6
            M = 10000
            V = (10, 11, 12, 13, 14, 15, 16)
            pi = (0.0882, 0.2092, 0.2483, 0.1933, 0.1208, 0.0675, 0.0727)
        length_of_a_block = n // M
        v = [0, 0, 0, 0, 0, 0, 0]
        for i in range(length_of_a_block):
            v_obs = 0
            run = 0
            for j in range(M):
                if epsilon[i * M + j] == 1:
                    run += 1
                    v_obs = run if run > v_obs else v_obs
                else:
                    run = 0
            if v_obs <= V[0]:
                v[0] += 1
            for j in range(1, K):
                if v_obs == V[j]:
                    v[j] += 1
            if v_obs >= V[K]:
                v[K] += 1
        chi_squared = 0
        for i in range(K + 1):
            chi_squared += ((v[i] - length_of_a_block * pi[i])**
                            2) / (length_of_a_block * pi[i])
        p_value = gammaincc(K / 2.0, chi_squared / 2.0)
    p_value_dict['longest_run_of_ones'].append(p_value)
    set_value('finished_tests_num', get_value('finished_tests_num') + 1)
Example #50
def random_excursions(bin_data):
    # Turn all the binary digits into +1 or -1
    int_data = numpy.zeros(len(bin_data))
    for i in range(len(bin_data)):
        if bin_data[i] == '0':
            int_data[i] = -1.0
        else:
            int_data[i] = 1.0

    # Calculate the cumulative sum
    cumulative_sum = numpy.cumsum(int_data)
    # Append a 0 to the end and beginning of the sum
    cumulative_sum = numpy.append(cumulative_sum, [0])
    cumulative_sum = numpy.append([0], cumulative_sum)

    # These are the states we are going to look at
    x_values = numpy.array([-4, -3, -2, -1, 1, 2, 3, 4])

    # Identify all the locations where the cumulative sum revisits 0
    position = numpy.where(cumulative_sum == 0)[0]
    # For this identify all the cycles
    cycles = []
    for pos in range(len(position) - 1):
        # Add this cycle to the list of cycles
        cycles.append(cumulative_sum[position[pos]:position[pos + 1] + 1])
    num_cycles = len(cycles)

    state_count = []
    for cycle in cycles:
        # Determine the number of times each cycle visits each state
        state_count.append(
            ([len(numpy.where(cycle == state)[0]) for state in x_values]))
    state_count = numpy.transpose(numpy.clip(state_count, 0, 5))

    su = []
    for cycle in range(6):
        su.append([(sct == cycle).sum() for sct in state_count])
    su = numpy.transpose(su)

    piks = ([([get_pik_value(uu, state) for uu in range(6)])
             for state in x_values])
    inner_term = num_cycles * numpy.array(piks)
    chi = numpy.sum(1.0 * (numpy.array(su) - inner_term)**2 / inner_term,
                    axis=1)
    p_values = ([spc.gammaincc(2.5, cs / 2.0) for cs in chi])
    return p_values
Example #51
 def non_overlapping_patterns(self,
                              bin_data: str,
                              pattern="000000001",
                              num_blocks=8):
     """
     Note that this description is taken from the NIST documentation [1]
     [1] http://csrc.nist.gov/publications/nistpubs/800-22-rev1a/SP800-22rev1a.pdf
     The focus of this test is the number of occurrences of pre-specified target strings. The purpose of this
     test is to detect generators that produce too many occurrences of a given non-periodic (aperiodic) pattern.
     For this test and for the Overlapping Template Matching test of Section 2.8, an m-bit window is used to
     search for a specific m-bit pattern. If the pattern is not found, the window slides one bit position. If the
     pattern is found, the window is reset to the bit after the found pattern, and the search resumes.
     :param bin_data: a binary string
     :param pattern: the pattern to match to
     :return: the p-value from the test
     """
     n = len(bin_data)
     pattern_size = len(pattern)
     block_size = math.floor(n / num_blocks)
     pattern_counts = np.zeros(num_blocks)
     # For each block in the data
     for i in range(num_blocks):
         block_start = i * block_size
         block_end = block_start + block_size
         block_data = bin_data[block_start:block_end]
         # Count the number of pattern hits
         j = 0
         while j < block_size:
             sub_block = block_data[j:j + pattern_size]
             if sub_block == pattern:
                 pattern_counts[i] += 1
                 j += pattern_size
             else:
                 j += 1
     # Calculate the theoretical mean and variance
     mean = (block_size - pattern_size + 1) / pow(2, pattern_size)
     var = block_size * ((1 / pow(2, pattern_size)) -
                         (((2 * pattern_size) - 1) /
                          (pow(2, pattern_size * 2))))
     # Calculate the Chi Squared statistic for these pattern matches
     chi_squared = 0
     for i in range(num_blocks):
         chi_squared += pow(pattern_counts[i] - mean, 2.0) / var
     # Calculate and return the p value statistic
     p_val = spc.gammaincc(num_blocks / 2, chi_squared / 2)
     return p_val
Example #52
def g_huber2(t, r, qH=0.8, cH=None, bH=None, aH=None):
    """
    Computes g(t) of the Huber distribution
    
    Possible input combinations:
        t, r
        t, r, qH
        t, r, cH, bH, aH . This option is provided to improve performance,
        because it avoids recalculating the constants cH, bH and aH in every
        loop iteration.
    
    Args:
        t : 1darray of size N, squared Mahalanobis distances
        r : int, dimension
        qH : float, tuning parameter, standard value 0.8, choose qH > 0.701
        cH : float, tuning parameter
        bH : float, tuning parameter
        aH : float, tuning parameter
        
    Returns:
        g: 1darray of size N, g(t) of Huber distribution
        
    Raises:
        ValueError: If incorrect combination of inputs
    """

    if sum([s is None for s in [cH, bH, aH]]) != 3 and sum(
        [s is None for s in [cH, bH, aH]]) != 0:
        raise ValueError("Incorrect combination of inputs")

    igamma = lambda a, b: gammaincc(a, b) * gamma(a)

    if sum([s is None for s in [cH, bH, aH]]) == 3:
        cH = np.sqrt(chi2.ppf(qH, r))
        bH = chi2.cdf(cH**2, r + 2) + cH**2 / r * (1 - chi2.cdf(cH**2, r))
        aH = gamma(r / 2) / np.pi**(r / 2) / (
            (2 * bH)**(r / 2) * (gamma(r / 2) - igamma(r / 2, cH**2 /
                                                       (2 * bH))) +
            (2 * bH * cH**r * np.exp(-cH**2 / (2 * bH))) / (cH**2 - bH * r))

    g = np.zeros(len(t))
    g[t <= cH**2] = aH * np.exp(-t[t <= cH**2] / (2 * bH))
    g[t > cH**2] = aH * (np.exp(1) * t[t > cH**2] / cH**2)**(-cH**2 / (2 * bH))

    return g
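A hypothetical call to g_huber2(); the imports mirror what the function body needs (numpy, scipy.special.gamma/gammaincc and scipy.stats.chi2) and are not shown in the snippet itself.

import numpy as np
from scipy.special import gamma, gammaincc
from scipy.stats import chi2

t = np.linspace(0.1, 20.0, 5)     # squared Mahalanobis distances
print(g_huber2(t, r=3, qH=0.8))   # density generator values of the Huber distribution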
Example #53
def overlappingtemplatematchingtest(binin,mat="111111111",num=1032,numi=9):
    ''' The focus of this test is the number of pre-defined target substrings. The purpose of this test is to reject sequences that show deviations from the expected number of runs of ones of a given length. Note that when there is a deviation from the expected number of ones of a given length, there is also a deviation in the runs of zeroes. Runs of zeroes were not evaluated separately due to a concern about statistical independence among the tests. For this test and for the Non-overlapping Template Matching test, an m-bit window is used to search for a specific m-bit pattern. If the pattern is not found, the window slides one bit position. For this test, when the pattern is found, the window again slides one bit, and the search is resumed.'''
    n = len(binin)
    bign = int(n / num)
    m = len(mat)
    lamda = 1.0 * (num - m + 1) / 2 ** m
    eta = 0.5 * lamda
    pi = [pr(i, eta) for i in range(numi)]
    pi.append(1 - reduce(su, pi))
    v = [0 for x in range(numi + 1)]
    blocks = stringpart(binin, num)
    blocklen = len(blocks[0])
    counts = [occurances(i,mat) for i in blocks]
    counts2 = [(numi if xx > numi else xx) for xx in counts]
    for i in counts2: v[i] = v[i] + 1
    chisqr = reduce(su, [(v[i]-bign*pi[i])** 2 / (bign*pi[i]) for i in range(numi + 1)])
    pval = spc.gammaincc(0.5*numi, 0.5*chisqr)
    return pval
Example #54
  def testPoissonCDFNonIntegerValues(self):
    with self.test_session():
      batch_size = 6
      lam = tf.constant([3.0] * batch_size)
      lam_v = 3.0
      x = np.array([2.2, 3.1, 4., 5.5, 6., 7.], dtype=np.float32)

      poisson = self._make_poisson(rate=lam)
      cdf = poisson.cdf(x)
      self.assertEqual(cdf.get_shape(), (6,))

      # The Poisson CDF should be valid on these non-integer values, and
      # equal to igammac(1 + x, rate).
      self.assertAllClose(cdf.eval(), special.gammaincc(1. + x, lam_v))

      with self.assertRaisesOpError("cannot contain fractional components"):
        poisson_validate = self._make_poisson(rate=lam, validate_args=True)
        poisson_validate.cdf(x).eval()
Example #55
def overlappingtemplatematchingtest(binin, mat="111111111", num=1032, numi=5):
    ''' The focus of this test is the number of pre-defined target substrings. The purpose of this test is to reject sequences that show deviations from the expected number of runs of ones of a given length. Note that when there is a deviation from the expected number of ones of a given length, there is also a deviation in the runs of zeroes. Runs of zeroes were not evaluated separately due to a concern about statistical independence among the tests. For this test and for the Non-overlapping Template Matching test, an m-bit window is used to search for a specific m-bit pattern. If the pattern is not found, the window slides one bit position. For this test, when the pattern is found, the window again slides one bit, and the search is resumed.'''
    n = len(binin)
    bign = int(n / num)
    m = len(mat)
    lamda = 1.0 * (num - m + 1) / 2 ** m
    eta = 0.5 * lamda
    pi = [pr(i, eta) for i in range(numi)]
    pi.append(1 - reduce(su, pi))
    v = [0 for x in range(numi + 1)]
    blocks = stringpart(binin, num)
    blocklen = len(blocks[0])
    counts = [occurances(i, mat) for i in blocks]
    counts2 = [(numi if xx > numi else xx) for xx in counts]
    for i in counts2: v[i] = v[i] + 1
    chisqr = reduce(su, [(v[i] - bign * pi[i]) ** 2 / (bign * pi[i]) for i in range(numi + 1)])
    pval = spc.gammaincc(0.5 * numi, 0.5 * chisqr)
    return pval
Example #56
def longestrunonestest(binin):
    ##
    ## Focus:
    ##  test the longest run of ones within M-bit blocks.
    ##
    ## Purpose:
    ##  determine whether the length of the longest run of ones
    ##  within the tested sequence is consistent with the length
    ##  of the longest run of ones that would be expected in a
    ##  random sequence. Note that an irregularity in the expected
    ##  length of the longest run of ones implies that there is also
    ##  an irregularity in the expected length of the longest run of
    ##  zeroes. Long runs of zeroes were not evaluated separately due
    ##  to a concern about statistical independence among the tests.
    ##
    ## Outcome:
    ##  If the computed P-value is < 0.01, then conclude that the
    ##  sequence is non-random. Otherwise, conclude that the sequence
    ##  is random.
    ##

    m = 8
    k = 3
    pik = [0.2148, 0.3672, 0.2305, 0.1875]
    blocks = [binin[xs * m:m + xs * m:] for xs in xrange(len(binin) / m)]
    n = len(blocks)

    ##
    ## append the string 01 to guarantee the length of 1
    ##
    counts1 = [xs + '01' for xs in blocks]

    ##
    ## split into all parts
    ##
    counts = [xs.replace('0', ' ').split() for xs in counts1]
    counts2 = [map(len, xx) for xx in counts]
    counts4 = [(4 if xx > 4 else xx) for xx in map(max, counts2)]
    freqs = [counts4.count(spi) for spi in [1, 2, 3, 4]]
    chisqr1 = [(freqs[xx] - n * pik[xx])**2 / (n * pik[xx])
               for xx in xrange(4)]
    chisqr = reduce(Util.su, chisqr1)
    pval = spc.gammaincc(k / 2.0, chisqr / 2.0)
    return pval
Example #57
def inc_gamma(s, x):
    r"""Calculate the (upper) incomplete gamma function.

    Given by: :math:`\Gamma(s,x) = \int_x^{\infty} t^{s-1}\,e^{-t}\,{\rm d}t`

    Parameters
    ----------
    s : :class:`float`
        exponent in the integral
    x : :class:`numpy.ndarray`
        input values
    """
    if np.isclose(s, 0):
        return sps.exp1(x)
    if np.isclose(s, np.around(s)) and s < -0.5:
        return x**(s - 1) * sps.expn(int(1 - np.around(s)), x)
    if s < 0:
        return (inc_gamma(s + 1, x) - x**s * np.exp(-x)) / s
    return sps.gamma(s) * sps.gammaincc(s, x)
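A closed-form cross-check (illustrative, assuming inc_gamma above is in scope): since Gamma(1/2, x) = sqrt(pi) * erfc(sqrt(x)), the s < 0 recurrence branch should give Gamma(-1/2, x) = 2*exp(-x)/sqrt(x) - 2*sqrt(pi)*erfc(sqrt(x)).

import numpy as np
import scipy.special as sps

x = np.linspace(0.5, 4.0, 5)
reference = 2.0 * np.exp(-x) / np.sqrt(x) - 2.0 * np.sqrt(np.pi) * sps.erfc(np.sqrt(x))
assert np.allclose(inc_gamma(-0.5, x), reference)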
Example #58
def BlockFrequency(datalist, blocksize=1000, alpha=0.01):
    def get01Sequence(data):
        return [0 if e < 0.5 else 1 for e in data]

    def PropotionOneInList(data):
        return 1.0 * sum(data) / len(data)

    passcnt = 0
    for data in datalist:
        sequence01 = get01Sequence(data)
        block_num = int(len(sequence01) / blocksize)
        # calc chisq for all blocks
        chisq_obs = 0.0
        for i in range(block_num):
            pi_i = PropotionOneInList(sequence01[(i * blocksize):(i + 1) *
                                                 blocksize])
            chisq_obs += 4.0 * blocksize * (pi_i - 0.5)**2
        pvalue = spc.gammaincc(block_num / 2.0, chisq_obs / 2.0)
    return chisq_obs, pvalue
Example #59
def longestrunones128(binin):  # not well tested yet
    if len(binin) > 128:
        m = 128
        k = 5
        n = len(binin)
        pik = [ 0.1174, 0.2430, 0.2493, 0.1752, 0.1027, 0.1124 ]
        blocks = [binin[xs * m:m + xs * m:] for xs in range(len(binin) / m)]
        n = len(blocks)
        counts = [xs.replace('0', ' ').split() for xs in blocks]
        counts2 = [list(map(len, xx)) for xx in counts]
        counts3 = [(1 if xx < 1 else xx) for xx in map(max, counts2)]
        counts4 = [(4 if xx > 4 else xx) for xx in counts3]
        chisqr1 = [(counts4[xx] - n * pik[xx]) ** 2 / (n * pik[xx]) for xx in range(len(counts4))]
        chisqr = reduce(su, chisqr1)
        pval = spc.gammaincc(k / 2.0, chisqr / 2.0)
    else:
        print('longestrunones128 failed, too few bits:', len(binin))
        pval = 0
    return pval
Example #60
    def plot_2MRS_sens_Mertsch(self, correct_lumi=1, **kwargs):
        sec_to_year = 3600*24*365.
        Nbgd = 30                                       # events
        mu = 2.9                                          # events
        zc=0.02                                         # w/o units
        dOmega = 4*np.pi                                # sr
        sigma = np.sqrt(2e-2)                           # sr
        c_H0 = 299792.458/(0.71 * 100)                  # Mpc
        dm1 = lambda L: 180 * np.sqrt(L/1e42)           # Mpc, give L in erg / s
        lambda_c = lambda L: (dm1(L) / (zc*c_H0))**2    # w/o units
        h = lambda L: dOmega * dm1(L)**3. * gamma(mu-3./2.) / (3.*gamma(mu))
        nominator = lambda L: gamma(mu) + lambda_c(L)**(3./2.)*gammaincc(mu-3./2., lambda_c(L))*gamma(mu-3./2)-gammaincc(mu, lambda_c(L))*gamma(mu)
        f = lambda L: dOmega * dm1(L)**3. * nominator(L)/(3*lambda_c(L)**(3./2.)*gamma(mu))
        sqrt_term = lambda L, TSp: np.sqrt(1 + (4*Nbgd*f(L)**2 * (4*np.pi*sigma)**2/(TSp * h(L)**2)))
        rho = lambda L, TSp: TSp* h(L) / (2*f(L)**2 * (4*np.pi*sigma)**2) * (1+sqrt_term(L, TSp))

        lumis = np.logspace(40,44, 101)
        handle, = plt.loglog(lumis*sec_to_year, rho(lumis, 0.45), color="k", ls=":")  #  Eq. 2.17, assuming TS(p)=0.45
        return handle, "arXiv:1612.07311\ncorrelation with 2MRS"