Example #1
 def getActuallyKnownRatio(self, ct=None):
     # Fraction of "actually known" samples inside the trailing
     # actuallyKnownTime window, plus an exact (Clopper-Pearson) interval.
     if ct is None:
         ct = time()
     tot = sum(1 for v, t in self.actuallyKnownHist if t > ct - actuallyKnownTime)
     actuallyKnown = sum(1 for v, t in self.actuallyKnownHist
                         if t > ct - actuallyKnownTime and v)
     if tot==0:
         return 0., 0., 1.
     else:
         r=float(actuallyKnown)/tot
         alpha=1-conf
         return (r,0. if actuallyKnown==0 else btdtri(actuallyKnown,tot-actuallyKnown+1,alpha/2),
                 1. if actuallyKnown==tot else btdtri(actuallyKnown+1,tot-actuallyKnown,1-alpha/2))
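The return values above are the observed ratio together with an exact (Clopper-Pearson) confidence interval built from beta quantiles. A minimal standalone sketch of the same formula, with hypothetical counts:

# Hypothetical standalone sketch of the Clopper-Pearson interval used above.
from scipy.special import btdtri

def clopper_pearson(k, n, conf=0.95):
    alpha = 1 - conf
    lo = 0.0 if k == 0 else btdtri(k, n - k + 1, alpha / 2)
    hi = 1.0 if k == n else btdtri(k + 1, n - k, 1 - alpha / 2)
    return k / n, lo, hi

print(clopper_pearson(7, 20))  # ratio 0.35 plus lower/upper bounds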
Example #2
    def ppf(self, x):
        """
        Computes the percent point function of the distribution at the point(s)
        x. It is defined as the inverse of the CDF: y = ppf(x) is the argument
        y for which cdf(y) is equal to x. In other words, the random variable
        value y is the place on the distribution at which the CDF evaluates
        to x.

        Parameters
        ----------
        x: array, dtype=float, shape=(m x n), bounds=(0,1)
            The value(s) at which the user would like the ppf evaluated.
            If an array is passed in, the ppf is evaluated at every point
            in the array and an array of the same size is returned.

        Returns
        -------
        ppf: array, dtype=float, shape=(m x n)
            The ppf at each point in x.
        """
        if (x <= 0).any() or (x >= 1).any():
            raise ValueError('all values in x must be between 0 and 1, '
                             'exclusive')
        ppf = btdtri(self.alpha, self.beta, x)

        return ppf
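As a quick usage check, btdtri is the inverse of the regularized incomplete beta function, so a ppf like the one above can be verified with a round trip through scipy.special.betainc. A small sketch with an assumed Beta(2, 5) shape:

# Round-trip check: btdtri inverts betainc (hypothetical alpha=2, beta=5).
import numpy as np
from scipy.special import btdtri, betainc

a, b = 2.0, 5.0
x = np.array([0.1, 0.5, 0.9])
q = btdtri(a, b, x)                      # ppf values
print(np.allclose(betainc(a, b, q), x))  # True: cdf(ppf(x)) == x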
Example #3
from scipy import special
from scipy.stats import beta, poisson


def probf_baharev(df1, df2, noncen, fcrit):
    # Poisson-weighted incomplete-beta series for a noncentral F probability,
    # accumulated backwards from the upper Poisson quantile qu down to ql.
    x = 1 - special.btdtri(df1, df2, fcrit)
    eps = 1.0e-7
    itr_cnt = 0
    f = None

    while itr_cnt <= 10:
        mu = noncen / 2.0
        ql = poisson.ppf(eps, mu)
        qu = poisson.ppf(1 - eps, mu)
        k = qu
        c = beta.cdf(x, df1 + k, df2)
        d = x * (1.0 - x) / (df1 + k - 1.0) * beta.pdf(x, df1 + k - 1, df2, 0)
        p = poisson.pmf(k, mu)
        f = p * c
        p = k / mu * p

        k = qu - 1
        while k >= ql:
            c = c + d
            d = (df1 + k) / (x * (df1 + k + df2 - 1)) * d
            f = f + p * c
            p = k / mu * p
            k = k - 1
        itr_cnt = itr_cnt + 1

    if (itr_cnt == 11):
        print("newton iteration failed")

    return f
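For reference, the textbook way to evaluate a noncentral F probability is as a Poisson mixture of central beta CDFs. The sketch below uses that standard identity with scipy.stats directly and cross-checks it against scipy.stats.ncf; note it is not necessarily in the same parameterization as probf_baharev above, which seems to expect fcrit on the probability scale.

# Standard identity: P(F <= f) = sum_k Poisson(k; nc/2) * I_x(df1/2 + k, df2/2)
# with x = df1*f / (df1*f + df2).  Cross-checked against scipy.stats.ncf.
import numpy as np
from scipy.stats import beta, poisson, ncf

def ncf_cdf_mixture(f, df1, df2, noncen, terms=200):
    x = df1 * f / (df1 * f + df2)
    k = np.arange(terms)
    return np.sum(poisson.pmf(k, noncen / 2.0) * beta.cdf(x, df1 / 2.0 + k, df2 / 2.0))

print(ncf_cdf_mixture(2.5, 3, 20, 1.5))   # mixture sum
print(ncf.cdf(2.5, 3, 20, 1.5))           # SciPy reference value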
Example #4
    def ppf(self, x):
        """
        Computes the percent point function of the distribution at the point(s)
        x. It is defined as the inverse of the CDF: y = ppf(x) is the argument
        y for which cdf(y) is equal to x. In other words, the random variable
        value y is the place on the distribution at which the CDF evaluates
        to x.

        Parameters
        ----------
        x: array, dtype=float, shape=(m x n), bounds=(0,1)
            The value(s) at which the user would like the ppf evaluated.
            If an array is passed in, the ppf is evaluated at every point
            in the array and an array of the same size is returned.

        Returns
        -------
        ppf: array, dtype=float, shape=(m x n)
            The ppf at each point in x.
        """
        if (x <= 0).any() or (x >= 1).any():
            raise ValueError('all values in x must be between 0 and 1, '
                             'exclusive')
        ppf = btdtri(self.alpha, self.beta, x)

        return ppf
Example #5
def main():
    parcellations = putils.get_cammoun_schaefer(data_dir=ROIDIR)

    # output dataframe
    cols = ['parcellation', 'scale', 'spintype', 'n_sig']
    data = pd.DataFrame(columns=cols)

    # let's run all our parcellations
    for parcellation, annotations in parcellations.items():
        print(f'PARCELLATION: {parcellation}')
        for scale in annotations:
            for spintype in SPINTYPES:
                data = pd.concat([data, run_null(parcellation, scale, spintype)],
                                 ignore_index=True)

            # now calculate parametric null
            nsdata = load_data(parcellation, scale, spintype)
            corrs = np.corrcoef(nsdata.T)
            # this calculates the correlation value for p < ALPHA cutoff
            ab = (len(nsdata) / 2) - 1
            cutoff = 1 - (special.btdtri(ab, ab, ALPHA / 2) * 2)
            # now add the parametric null to our giant summary dataframe
            data = pd.concat([data, pd.DataFrame({
                'parcellation': parcellation,
                'scale': scale,
                'spintype': 'naive-para',
                'n_sig': np.sum(np.triu(corrs > cutoff, k=1))
            }, index=[0])], ignore_index=True)

    data.to_csv(NSDIR / 'ns_summary.csv.gz', index=False)
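The cutoff in the loop above relies on the fact that, under the null, a Pearson correlation r from n samples satisfies (r + 1)/2 ~ Beta(n/2 - 1, n/2 - 1), so the two-sided critical value at level ALPHA is 1 - 2 * btdtri(ab, ab, ALPHA / 2) with ab = n/2 - 1. A quick sanity check against the equivalent t-based threshold, with hypothetical n and ALPHA:

# Sanity check for the parametric correlation cutoff (hypothetical n, ALPHA).
import numpy as np
from scipy import special, stats

n, ALPHA = 100, 0.05
ab = (n / 2) - 1
cutoff_beta = 1 - (special.btdtri(ab, ab, ALPHA / 2) * 2)

# Same threshold from the t-distribution of r under the null hypothesis.
t_crit = stats.t.ppf(1 - ALPHA / 2, n - 2)
cutoff_t = t_crit / np.sqrt(t_crit ** 2 + n - 2)

print(np.isclose(cutoff_beta, cutoff_t))  # True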
Example #6
 def _ppf(self, rho, a, b, p, q):  
     # subclass the _ppf method (returns the inverse cdf of the
     # beinf distribution). NOTE: This is not a true inverse
     # when p!=0 since the same SIC value can be returned
     # for a range of probabilities at the endpoints
     if np.all(p == 1):
         gamma_ = 0.0
     else:
         gamma_ = (rho - p * (1 - q)) / (1 - p)

     condlist = [np.logical_and(rho >= 0., rho <= p * (1 - q)),
                 np.logical_and(rho > p * (1 - q), rho < (1 - p * q)),
                 np.logical_and(rho >= 1 - p * q, rho <= 1.)]

     choicelist = [0.0, special.btdtri(a, b, gamma_), 1.0]

     return np.select(condlist, choicelist)
Example #7
 def _ppf(self, q, a, b):
     return special.btdtri(a, b, q)
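This looks like the standard _ppf hook of a scipy.stats rv_continuous subclass. btdtri(a, b, q) is the same quantity as scipy.stats.beta.ppf(q, a, b) and scipy.special.betaincinv(a, b, q); recent SciPy releases deprecate btdtri in favour of betaincinv, so check your installed version.

# btdtri, beta.ppf and betaincinv all give the same inverse incomplete beta.
from scipy import special, stats

a, b, q = 2.5, 4.0, 0.3
print(special.btdtri(a, b, q))
print(stats.beta.ppf(q, a, b))
print(special.betaincinv(a, b, q))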
Example #8
def efficiency_err(group, threshold, PUWP, ISOWP, upper=False):
    tot = group.shape[0]

    # Pick the isolation and pileup-rejection working-point columns; an
    # unrecognised ISOWP falls back to WP99 and an unrecognised PUWP to WP90.
    iso_wp = ISOWP if ISOWP in ('10', '15', '20', '90', '95') else '99'
    pu_wp = PUWP if PUWP in ('99', '95') else '90'
    sel = group[(group.cl3d_pt_c3 > threshold)
                & (group[f'cl3d_isobdt_passWP{iso_wp}'] == True)
                & (group[f'cl3d_pubdt_passWP{pu_wp}'] == True)].shape[0]

    # Clopper-Pearson errors: btdtri gives the boundary of the confidence
    # interval, so for plotting we subtract the central value float(sel)/float(tot).
    alpha = (1 - 0.9) / 2
    if upper:
        if sel == tot:
            return 0.
        else:
            return abs(
                btdtri(sel + 1, tot - sel, 1 - alpha) -
                float(sel) / float(tot))
    else:
        if sel == 0:
            return 0.
        else:
            return abs(
                float(sel) / float(tot) - btdtri(sel, tot - sel + 1, alpha))
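The Clopper-Pearson error bars above can also be exercised with bare counts instead of a dataframe; a hypothetical sketch with sel passing out of tot shows how the upper and lower offsets come out asymmetric:

# Hypothetical counts: 42 selected out of 50, 90% CL as in efficiency_err.
from scipy.special import btdtri

sel, tot = 42, 50
eff = float(sel) / float(tot)
alpha = (1 - 0.9) / 2
err_lo = 0. if sel == 0 else eff - btdtri(sel, tot - sel + 1, alpha)
err_hi = 0. if sel == tot else btdtri(sel + 1, tot - sel, 1 - alpha) - eff
print(eff, err_lo, err_hi)  # efficiency with asymmetric errors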
Example #9
 def _ppf(self, qloc, a, b):
     return special.btdtri(a, b, qloc)
Example #10
 def quantile(self, p):
     return btdtri(
         self.N[1], self.N[0],
         p)  # Bug: do not call btdtri with (0.5,0.5,0.5) in scipy < 0.9
Example #11
 def quantile(self, p):
     return btdtri(self.N[1], self.N[0], p) # Bug: do not call btdtri with (0.5,0.5,0.5) in scipy < 0.9
Example #12
 def _ppf(self, q):
     a, b = np.loadtxt(os.path.join(FILE_DIR, 'distr_par.txt'), delimiter=',')
     return special.btdtri(a, b, q)
Example #13
 def quantile(self, p):
     return btdtri(self.N[1], self.N[0], p)
Example #14
    def quantile(self, *q):
        # check array for numpy structure
        q = check_array(q, reduce_args=True, ensure_1d=True)

        return sc.btdtri(self.alpha, self.beta, q)
Example #15
 def quantile(self, p):
     return btdtri(self.params[1], self.params[0], p)
Example #16
def btdtri_comp(a, b, p):
    return btdtri(a, b, 1-p)
Example #17
def btdtri_comp(a, b, p):
    return btdtri(a, b, 1-p)
Example #18
    def quantile(self, p):
        """Return the p quantile of the Beta posterior (using :func:`scipy.stats.btdtri`).

        - Used only by :class:`BayesUCB` and :class:`AdBandits` so far.
        """
        return btdtri(self.N[1], self.N[0], p)
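In the bandit algorithms mentioned in the docstring, this posterior quantile serves as an optimistic index: BayesUCB, for instance, plays the arm with the largest quantile at a level like 1 - 1/t. A minimal sketch of that idea, assuming Beta(1, 1) priors and the same argument ordering as the snippet (index 1 for the success count, index 0 for the failure count):

# Minimal BayesUCB-style index, assuming Beta(1, 1) priors per arm.
from scipy.special import btdtri

# N[arm] = [failures + 1, successes + 1], mirroring the snippet's ordering.
N = {0: [3 + 1, 12 + 1], 1: [8 + 1, 9 + 1]}
t = 25  # current round

def index(arm):
    b_failures, a_successes = N[arm]
    return btdtri(a_successes, b_failures, 1 - 1.0 / t)

best = max(N, key=index)
print({arm: round(index(arm), 3) for arm in N}, 'play arm', best)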