def Basis1(self, P, gamma):
    """
    Purpose: compute the one-sided tolerance limit when both the mean and
    the standard deviation are unknown.
    """
    # (1 - P) quantile of the standard normal distribution
    up = norm.ppf(1 - P)
    # k-factor from the noncentral t-distribution (requires scipy.stats.norm,
    # scipy.stats.nct and numpy as np at module level)
    self.k = nct.ppf(1 - gamma, self.n - 1,
                     np.sqrt(self.n) * up) / np.sqrt(self.n)
    return self.mu - self.k * self.sigm
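For reference, the same kind of one-sided lower tolerance limit can be computed from a plain 1-D sample with the widely used k-factor formula (a minimal sketch; the helper name and default arguments below are illustrative, not part of the class above):

import numpy as np
from scipy.stats import norm, nct


def lower_tolerance_limit(x, p=0.90, conf=0.95):
    """Lower limit bounding fraction `p` of a normal population with
    confidence `conf` (minimal standalone sketch)."""
    x = np.asarray(x, dtype=float)
    n = x.size
    # one-sided k-factor: the `conf` quantile of the noncentral t-distribution
    # with n - 1 degrees of freedom and noncentrality sqrt(n) * norm.ppf(p)
    k = nct.ppf(conf, n - 1, np.sqrt(n) * norm.ppf(p)) / np.sqrt(n)
    return x.mean() - k * x.std(ddof=1)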
Example #2
def qt(p, df, ncp=0):
    """
    Calculates the quantile function of the t-distribution
    (central when ncp == 0, noncentral otherwise).
    """
    from scipy.stats import t, nct
    if ncp == 0:
        result = t.ppf(q=p, df=df, loc=0, scale=1)
    else:
        result = nct.ppf(q=p, df=df, nc=ncp, loc=0, scale=1)
    return result
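A quick sanity check of `qt` against well-known Student's t quantiles (the commented values are approximate):

# central t-distribution: 95th and 97.5th percentiles, 10 degrees of freedom
print(qt(0.95, 10))   # ≈ 1.812
print(qt(0.975, 10))  # ≈ 2.228
# a nonzero ncp switches to the noncentral branch
print(qt(0.95, 10, ncp=1.0))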
Example #3
def k_pc(p, c, n):
    # Compute the knockdown factor for a basis value,
    # assuming normally distributed data
    #
    # Usage
    #   k = k_pc(p, c, n)
    # Arguments
    #   p = Desired population fraction (B: 0.90, A: 0.99)
    #   c = Desired confidence level    (B: 0.95, A: 0.95)
    #   n = Number of samples
    # Returns
    #   k = Knockdown factor; B = \hat{X} - k * S
    from numpy import sqrt
    from scipy.stats import norm, nct

    return nct.ppf(c, n - 1, -norm.ppf(1 - p) * sqrt(n)) / sqrt(n)
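A quick check against commonly tabulated one-sided tolerance factors; the B-basis factor for n = 10 is usually quoted as roughly 2.355 (illustrative check, not from the original source):

# B-basis: bound 90% of the population with 95% confidence, 10 samples
print(k_pc(0.90, 0.95, 10))  # ≈ 2.355 per standard one-sided tolerance tables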
Example #4
def get_tolerance(xdata, ydata, SI, rmse):
    """Obtain the confidence interval and the tolerance interval for the
    scatter index (SI) and RMSE between observations and model data."""
    import numpy as np
    from scipy.stats import t, norm, nct

    # Two-sided inverse Student's t-distribution
    # p - probability, df - degrees of freedom
    tinv = lambda p, df: abs(t.ppf(p / 2, df))
    ts = tinv(0.05, len(xdata) - 2)  # 95% confidence

    # confidence intervals; Average t*Stdev*(1/sqrt(n))
    #plt.plot(axlims,[fitvalmin+ts*fitvalmin,fitvalmax+ts*fitvalmax],'k..', linewidth=1, label='95% confidence')
    print('[Information] The 95% confidence interval (scatter index) is = ' +
          str(SI * ts))
    print('[Information] The 95% confidence interval (RMSD) is = ' +
          str(ts * rmse))
    # prediction intervals; t*StDev*(sqrt(1+(1/n)))

    # Tolerance intervals; Average k*StDev
    # sample size
    n = len(xdata)
    # Percentile for the TI to estimate
    p = 0.99
    # confidence level
    g = 0.95
    # (100*p)th percentile of the standard normal distribution
    zp = norm.ppf(p)
    # gth quantile of a non-central t distribution
    # with n-1 degrees of freedom and non-centrality parameter np.sqrt(n)*zp
    tt = nct.ppf(g, df=n - 1., nc=np.sqrt(n) * zp)
    # k factor from Young et al paper
    k = tt / np.sqrt(n)

    print(
        '[Information] The 99% tolerance interval for a 95% confidence (scatter index) is = '
        + str(k * SI))
    print(
        '[Information] The 99% tolerance interval for a 95% confidence (RMSD) is = '
        + str(k * rmse))

    return SI * ts, ts * rmse, k * SI, k * rmse
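A minimal usage sketch for `get_tolerance` (the synthetic data and the scatter-index definition below are illustrative assumptions, not from the original project):

import numpy as np

# hypothetical observation / model pairs
obs = np.array([1.0, 1.2, 0.9, 1.5, 1.1, 1.3, 0.8, 1.4])
mod = obs + np.random.default_rng(0).normal(0.0, 0.1, obs.size)

rmse = float(np.sqrt(np.mean((mod - obs) ** 2)))
si = rmse / float(np.mean(obs))  # one common definition of the scatter index

ci_si, ci_rmse, ti_si, ti_rmse = get_tolerance(obs, mod, si, rmse)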
Example #5
def K_scipy(p, g, n):
    # `sqrt` and `x_scipy` come from the surrounding module; `x_scipy(p)` is
    # presumably the standard-normal p-quantile (i.e. norm.ppf(p)).
    from scipy.stats import nct
    return nct.ppf(1 - g, n - 1, sqrt(n) * x_scipy(p)) / sqrt(n)
Example #6
def fK_pc(p, c, n):
    # One-sided tolerance (knockdown) factor; `norm`, `nct` and `np` are
    # expected to be imported at module level (scipy.stats / numpy).
    return nct.ppf(c, n - 1, -norm.ppf(1 - p) * np.sqrt(n)) / np.sqrt(n)
Example #7
import numpy as np
from scipy.stats import nct
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)

# Calculate a few first moments:

df, nc = 14, 0.24
mean, var, skew, kurt = nct.stats(df, nc, moments='mvsk')

# Display the probability density function (``pdf``):

x = np.linspace(nct.ppf(0.01, df, nc), nct.ppf(0.99, df, nc), 100)
ax.plot(x, nct.pdf(x, df, nc), 'r-', lw=5, alpha=0.6, label='nct pdf')

# Alternatively, the distribution object can be called (as a function)
# to fix the shape, location and scale parameters. This returns a "frozen"
# RV object holding the given parameters fixed.

# Freeze the distribution and display the frozen ``pdf``:

rv = nct(df, nc)
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')

# Check accuracy of ``cdf`` and ``ppf``:

vals = nct.ppf([0.001, 0.5, 0.999], df, nc)
np.allclose([0.001, 0.5, 0.999], nct.cdf(vals, df, nc))
# True

# Generate random numbers:

r = nct.rvs(df, nc, size=1000)
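As a quick consistency check (not part of the original scipy example), an empirical quantile of the generated sample should land close to the corresponding `ppf` value:

# with 1000 draws the empirical 90th percentile typically agrees with the
# exact quantile to within a few percent
print(np.quantile(r, 0.9), nct.ppf(0.9, df, nc))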
Example #9
def fK_pc(p, c, n):
    """ Basis value knockdown factor for normally distributed random variables."""
    return nct.ppf(c, n - 1, -norm.ppf(1 - p) * np.sqrt(n)) / np.sqrt(n)
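As with `k_pc` above, the result can be compared against standard one-sided tolerance-factor tables; the A-basis factor for n = 10 is commonly tabulated as roughly 3.98 (illustrative check):

import numpy as np
from scipy.stats import norm, nct

# A-basis: bound 99% of the population with 95% confidence, 10 samples
print(fK_pc(0.99, 0.95, 10))  # ≈ 3.98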
Example #10
def ksingle(p, c, n):
    """
    Compute statistical k-factor for a single-sided tolerance limit.

    Parameters
    ----------
    p : scalar or array_like; real
        Portion of population to bound; 0 < p < 1
    c : scalar or array_like; real
        Probability level (confidence); 0 < c < 1
    n : scalar or array_like; integer
        Number of observations in sample; n > 1

    Returns
    -------
    k : scalar or ndarray; real
        The statistical k-factor for a single-sided tolerance
        limit.

    Notes
    -----
    The inputs `p`, `c`, and `n` must be broadcast-compatible.

    The k-factor allows the computation of a tolerance bound that has
    the probability `c` of bounding at least the proportion `p` of the
    population.  The statistics are based on having a sample of a
    normally distributed population; `n` is the number of observations
    in the sample.

    The tolerance bound is computed by::

        bound = m + k std  (or  bound = m - k std)

    where `m` is the sample mean and `std` is the sample standard
    deviation.

    .. note::
        The math behind this routine is covered in the pyYeti
        :ref:`tutorial`: :doc:`/tutorials/flight_data_statistics`.
        There is also a link to the source Jupyter notebook at the top
        of the tutorial.

    See also
    --------
    :func:`kdouble`

    Examples
    --------
    Assume we have 21 samples. Determine the k-factor to have a 90%
    probability of bounding 99% of the population. In other words, we
    need the 'P99/90' single-sided k-factor for N = 21.  (From table:
    N = 21, k = 3.028)

    >>> from pyyeti.stats import ksingle
    >>> ksingle(.99, .90, 21)                # doctest: +ELLIPSIS
    3.0282301090342...

    Make a table of single-sided k-factors using 50% confidence. The
    probabilities will be: 95%, 97.725%, 99% and 99.865%. Number of
    samples will be: 2-10, 1000000. Have `n` define the rows and `p`
    define the columns:

    >>> import numpy as np
    >>> from pandas import DataFrame
    >>> n = [[i] for i in range(2, 11)]  # create list of lists
    >>> n.append([1000000])
    >>> p = [.95, .97725, .99, .99865]
    >>> table = ksingle(p, .50, n)
    >>> DataFrame(table, index=[i[0] for i in n], columns=p)
              0.95000   0.97725   0.99000   0.99865
    2        2.338727  2.880624  3.375968  4.391208
    3        1.938416  2.369068  2.764477  3.579188
    4        1.829514  2.231482  2.600817  3.362580
    5        1.779283  2.168283  2.525770  3.263359
    6        1.750462  2.132099  2.482840  3.206631
    7        1.731792  2.108690  2.455081  3.169958
    8        1.718720  2.092314  2.435669  3.144318
    9        1.709060  2.080220  2.421337  3.125390
    10       1.701632  2.070925  2.410323  3.110845
    1000000  1.644854  2.000003  2.326348  2.999978
    """
    n = np.asarray(n)
    sn = np.sqrt(n)
    pnonc = sn * norm.ppf(p)
    return nct.ppf(c, n - 1, pnonc) / sn
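Beyond the doctests above, a short sketch of how such a k-factor is typically applied to a data sample (synthetic data, for illustration only):

import numpy as np
from pyyeti.stats import ksingle

rng = np.random.default_rng(1)
sample = rng.normal(loc=10.0, scale=2.0, size=21)

# P99/90 upper bound: 90% probability of covering 99% of the population
k = ksingle(0.99, 0.90, sample.size)
upper_bound = sample.mean() + k * sample.std(ddof=1)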
Example #11
def normal(x, p, g):
    r"""
    Compute a one-sided tolerance bound using the normal distribution.

    Computes the one-sided tolerance interval using the normal distribution.
    This follows the derivation in [1] to calculate the interval as a factor
    of sample standard deviations away from the sample mean. See also [2].

    Parameters
    ----------
    x : ndarray (1-D or 2-D)
        Numpy array of samples from which to compute the tolerance bound.
        Assumed data type is float. 2-D arrays are assumed to have shape
        (m, n): m sets of samples, each of size n.
    p : float
        Percentile for the TI to estimate.
    g : float
        Confidence level; 0 < g < 1.

    Returns
    -------
    ndarray (1-D)
        The normal distribution tolerance bound.

    References
    ----------
    [1] Young, D. S. (2010). tolerance: An R Package for Estimating
        Tolerance Intervals. Journal of Statistical Software, 36(5).
        Retrieved from http://dx.doi.org/10.18637/jss.v036.i05

    [2] Montgomery, D. C., & Runger, G. C. (2018). Chapter 8. Statistical
        Intervals for a Single Sample. In Applied Statistics and Probability
        for Engineers, 7th Edition.

    Examples
    --------
    Estimate the 10th percentile lower bound with 95% confidence of the
    following 100 random samples from a normal distribution.

    >>> import numpy as np
    >>> import toleranceinterval as ti
    >>> x = np.random.normal(size=100)
    >>> lb = ti.oneside.normal(x, 0.1, 0.95)

    Estimate the 90th percentile upper bound with 95% confidence of the
    following 100 random samples from a normal distribution.

    >>> ub = ti.oneside.normal(x, 0.9, 0.95)

    """
    x = numpy_array(x)  # check if numpy array, if not make numpy array
    x = assert_2d_sort(x)
    m, n = x.shape
    if p < 0.5:
        p = 1.0 - p
        minus = True
    else:
        minus = False
    zp = norm.ppf(p)
    t = nct.ppf(g, df=n - 1., nc=np.sqrt(n) * zp)
    k = t / np.sqrt(n)
    if minus:
        return x.mean(axis=1) - (k * x.std(axis=1, ddof=1))
    else:
        return x.mean(axis=1) + (k * x.std(axis=1, ddof=1))
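A small usage sketch with synthetic data (the import path follows the docstring example above; the numbers are illustrative):

import numpy as np
import toleranceinterval as ti

rng = np.random.default_rng(0)
x = rng.normal(loc=50.0, scale=5.0, size=100)

# 10th-percentile lower bound and 90th-percentile upper bound, 95% confidence
lb = ti.oneside.normal(x, 0.1, 0.95)
ub = ti.oneside.normal(x, 0.9, 0.95)
print(lb, ub)  # lb should fall below the sample mean, ub above it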