Example #1
    def ppf(self, x):
        """
        Compute the percent point function (PPF) of the distribution at the
        point(s) x. The PPF is the inverse of the CDF: y = ppf(x) is the
        argument y for which cdf(y) equals x. In other words, y is the point
        on the distribution at which the CDF evaluates to x.

        Parameters
        ----------
        x: array, dtype=float, shape=(m x n), bounds=(0,1)
            The value(s) at which the user would like the ppf evaluated.
            If an array is passed in, the ppf is evaluated at every point
            in the array and an array of the same size is returned.

        Returns
        -------
        ppf: array, dtype=float, shape=(m x n)
            The ppf at each point in x.
        """
        if (x <= 0).any() or (x >= 1).any():
            raise ValueError('all values in x must be between 0 and 1, '
                             'exclusive')
        ppf = stdtrit(self.nu, x)

        return ppf
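
A minimal round-trip check (not part of the example above; values are made up), assuming SciPy is available: stdtrit is the inverse of the Student t CDF stdtr, so cdf(ppf(x)) should recover x.

import numpy as np
from scipy.special import stdtr, stdtrit

nu = 5                                    # degrees of freedom
x = np.array([0.025, 0.5, 0.975])         # probabilities strictly inside (0, 1)
t_vals = stdtrit(nu, x)                   # percent point function (inverse CDF)
print(np.allclose(stdtr(nu, t_vals), x))  # True: the CDF recovers the input probabilities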
Example #2
def WriteFilteredGauss(self):
    '''Write the Gaussian-derived J/V data computed from the filtered input data,
        according to the cutoffs specified by the user.'''
    _fn = os.path.join(self.opts.out_dir,
                       self.opts.outfile + "_filteredGauss.txt")
    with open(_fn, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile, dialect='JV')
        writer.writerow([
            "Potential (V)", "Log|J|", "Standard Deviation",
            "Standard Error of the Mean",
            "%s%% confidence interval" % (100 * (1 - self.opts.alpha))
        ])

        for x in self.XY:
            _sem = self.XY[x]['filtered_hist']['std'] / np.sqrt(
                self.opts.degfree - 1 or 1)
            writer.writerow([
                '%f' % x,
                '%0.4f' % self.XY[x]['filtered_hist']['mean'],
                '%0.4f' % self.XY[x]['filtered_hist']['std'],
                '%0.4f' % _sem,
                '%0.4f' %
                (_sem *
                 stdtrit(self.opts.degfree - 1 or 1, 1 - self.opts.alpha))
            ])
Example #3
def k_factor(df=inf, p=95):
    """Return the a coverage factor for an uncertainty interval

    :arg df: the degrees-of-freedom (>1)
    :arg p: the coverage probability (%)
    :type df: float
    :type p: int or float

    Evaluates the coverage factor for an uncertainty interval
    with coverage probability ``p`` and degrees-of-freedom ``df``
    based on the Student t-distribution. 
    
    **Example**::
    
        >>> reporting.k_factor(3)
        3.182446305284263

    """
    if p <= 0.0 or p >= 100.0:
        raise RuntimeError("invalid p: {}".format(p))

    p = (1.0 + p / 100.0) / 2.0

    if df > inf_dof:
        # inverse cumulative Gaussian distribution
        return special.ndtri(p)
    elif df >= 1:
        # inverse cumulative Student-t distribution
        return special.stdtrit(df, p)
    else:
        raise RuntimeError("invalid df: {}".format(df))
Example #4
def main():
    nobs = 1. / (1. / nobs1 + 1. / nobs2)

    crit_upp = stats.t.isf(alpha / 2, df)
    print('crit_upp', crit_upp)
    crit_low_ppf = stats.t.ppf(alpha / 2, df)
    print('crit_low_ppf', crit_low_ppf)
    crit_low = special.stdtrit(df, alpha / 2)
    print('crit_low', crit_low)

    pow_upp = stats.nct._sf(crit_upp, df, effect_size * np.sqrt(nobs))
    print('pow_upp', pow_upp)
    pow_low = stats.nct._cdf(crit_low, df, effect_size * np.sqrt(nobs))
    print('pow_low', pow_low)

    pow = pow_upp + pow_low
    print('pow', pow)

    # Final
    analysis = TTestIndPower()
    observed_power = analysis.solve_power(effect_size,
                                          nobs1=nobs1,
                                          alpha=alpha,
                                          alternative='larger')
    print('observed_power', observed_power)
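
A small illustration of the two critical values computed above, with hypothetical alpha and df, assuming SciPy is available: by the symmetry of the t distribution, the lower critical value from stdtrit is the negative of the upper-tail value from isf.

from scipy import special, stats

alpha, df = 0.05, 10
print(special.stdtrit(df, alpha / 2))  # lower critical value, ~ -2.228
print(-stats.t.isf(alpha / 2, df))     # same value by the symmetry of the t distribution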
Example #5
    def ppf(self, x):
        """
        Compute the percent point function (PPF) of the distribution at the
        point(s) x. The PPF is the inverse of the CDF: y = ppf(x) is the
        argument y for which cdf(y) equals x. In other words, y is the point
        on the distribution at which the CDF evaluates to x.

        Parameters
        ----------
        x: array, dtype=float, shape=(m x n), bounds=(0,1)
            The value(s) at which the user would like the ppf evaluated.
            If an array is passed in, the ppf is evaluated at every point
            in the array and an array of the same size is returned.

        Returns
        -------
        ppf: array, dtype=float, shape=(m x n)
            The ppf at each point in x.
        """
        if (x <= 0).any() or (x >= 1).any():
            raise ValueError(
                "all values in x must be between 0 and 1, exclusive"
            )
        ppf = stdtrit(self.nu, x)

        return ppf
Example #6
 def compute_confidence_interval_global_mean(self, interval_size=0.95):
     sample_mean = self.compute_sample_mean()
     sample_variance = self.compute_sample_variance()
     radius = int(
         stdtrit(self.df.size - 1, 1 - (1 - interval_size) / 2) *
         np.sqrt(sample_variance / self.df.size))
     return np.asarray([sample_mean - radius, sample_mean + radius])
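
A hypothetical standalone cross-check (made-up data, not from the original class; assumes an unbiased sample variance and that SciPy is available): the same interval can be obtained from scipy.stats.t.interval with the standard error of the mean as the scale.

import numpy as np
from scipy import stats
from scipy.special import stdtrit

data = np.array([2.1, 1.9, 2.4, 2.0, 2.2])   # made-up sample
n, mean = data.size, data.mean()
sem = np.sqrt(data.var(ddof=1) / n)          # standard error of the mean
radius = stdtrit(n - 1, 1 - (1 - 0.95) / 2) * sem
print(mean - radius, mean + radius)
print(stats.t.interval(0.95, n - 1, loc=mean, scale=sem))  # the same interval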
Example #7
    def get_confidence_interval(self, alpha=0.05):
        # if len(self.result) >= 30:
        n = len(self.result)
        tvalue = stdtrit(n, 1 - alpha)
        mu = self.get_mean()
        s = sqrt(self.get_var())
        return (mu - tvalue * s / sqrt(n), mu + tvalue * s / sqrt(n))
		"""
def test_stdtr_stdtri_invalid():
    # a mix of large and inf df with t/p equal to nan
    df = [1e10, 1e12, 1e120, np.inf]
    x = np.nan
    res1 = stdtr(df, x)
    res2 = stdtrit(df, x)
    res_ex = 4 * [np.nan]
    assert_equal(res1, res_ex)
    assert_equal(res2, res_ex)
Example #9
def test_stdtrit_vs_R_large_df():
    df = [1e10, 1e12, 1e120, np.inf]
    p = 0.1
    res = stdtrit(df, p)
    # R Code:
    #   options(digits=20)
    #   qt(0.1, c(1e10, 1e12, 1e120, Inf))
    res_R = [
        -1.2815515656292593150, -1.2815515655454472466, -1.2815515655446008125,
        -1.2815515655446008125
    ]
    assert_allclose(res, res_R, rtol=1e-15)
    # last value should also agree with ndtri
    assert_equal(res[3], ndtri(0.1))
Example #10
 def _ppf(self, qloc, idx, df, cache):
     dim = self._rotation.index(idx)
     conditions = [
         self._get_cache(dim_, cache, get=1)
         for dim_ in self._rotation[:dim]
     ]
     assert not any([
         isinstance(condition, chaospy.Distribution)
         for condition in conditions
     ])
     qloc = numpy.vstack(conditions + [qloc])
     zloc = special.stdtrit(df, qloc)
     out = special.stdtr(df, self._inv_transform[idx, :len(qloc)].dot(zloc))
     return out
Example #11
def predict(x, node, interval=None):
    while True:
        try:
            node = pick_split(x, node)
        except AttributeError:
            if interval:
                a, b, mode = node.interval
                if mode == 0:
                    df, sd = a, b
                    z = sc.stdtrit(df, interval)
                    return (node.prediction, node.prediction - z * sd,
                            node.prediction + z * sd)
                if mode == 1:
                    lower, upper = a, b
                    return (node.prediction, lower, upper)
            return node.prediction
Example #12
 def PlotR(self, ax):
     if self.opts.logr:
         ax.set_title("Semilog Plot of |R|")
         ax.set_ylabel(r'log|R|')
     else:
         ax.set_title("Plot of |R|")
         ax.set_ylabel(r'|R|')
     ax.set_xlabel("Potenial (V)")
     Y, Yerr = [], []
     for x in self.XY:
         Y.append(self.XY[x]['R']["hist"]["mean"])
         _sem = float(self.XY[x]['R']["hist"]["std"]) / np.sqrt(
             self.opts.degfree - 1 or 1)
         _t_val = _sem * stdtrit(self.opts.degfree - 1 or 1,
                                 1 - self.opts.alpha)
         Yerr.append(_t_val)
     ax.errorbar(list(self.XY), Y, yerr=Yerr, marker='o', lw=0.0, color='k')
Example #13
 def PlotSegmentedGauss(self, ax):
     '''Plot segmented J/V data'''
     ax.set_title("Semilog Plots of |J| by Trace")
     ax.set_ylabel(r'Current Density $log_{10}|J(\mathrm{A cm}^{-2})|$')
     ax.set_xlabel(r'Potential (V)')
     X, Y = {}, {}
     Yerr = {}
     for segment in self.segments:
         for trace in self.segments[segment]:
             # TODO: Fix this hack
             if trace == 'combined':
                 continue
             if trace not in X:
                 X[trace] = []
                 Y[trace] = []
                 Yerr[trace] = []
             for x in self.segments[segment][trace]:
                 X[trace].append(x)
                 _hist = self.segments[segment][trace][x]
                 Y[trace].append(_hist['mean'])
                 _sem = float(_hist['std']) / np.sqrt(self.opts.degfree - 1
                                                      or 1)
                 _t_val = _sem * stdtrit(self.opts.degfree - 1 or 1,
                                         1 - self.opts.alpha)
                 Yerr[trace].append(_t_val)
     for trace in Y:
         ax.errorbar(X[trace],
                     Y[trace],
                     yerr=Yerr[trace],
                     marker='o',
                     lw=0,
                     elinewidth=0.25,
                     capsize=0.5,
                     label='Trace %s' % (trace + 1))
     handles, labels = ax.get_legend_handles_labels()
     handles = [
         h[0] if isinstance(h, container.ErrorbarContainer) else h
         for h in handles
     ]
     ax.legend(handles, labels)
Example #14
 def _lower(self, a):
     return special.stdtrit(a, 1e-16) * 10
Example #15
 def _ppf(self, q, a):
     return special.stdtrit(a, numpy.clip(q, 1e-16, 1 - 1e-16))
Example #16
import numpy as np
from numpy import random as nprd
from scipy import special as func


def sampling(mu, sigma2, N):
    x = nprd.normal(mu, np.sqrt(sigma2), N)
    return x


## true value
mu = 10
sigma2 = 30
N = 10  # sample size
sqrtn = np.sqrt(N)
## iteration times
M = 1000
## confidence level
alpha = 0.05
## results
included = np.zeros(M)
for i in range(M):
    x = sampling(mu, sigma2, 10)
    xmean = np.mean(x)
    xstd = np.std(x)
    lower = xmean - func.stdtrit(N - 1, 1 - alpha / 2) * xstd / sqrtn
    upper = xmean + func.stdtrit(N - 1, 1 - alpha / 2) * xstd / sqrtn
    # check whether the true mean is contained in the interval
    if mu >= lower and mu <= upper:
        included[i] = 1
print("The prob. of included=", np.mean(included))
Example #17
def WriteGauss(self):
    '''Write the Gaussian-derived data for J, R, and the differential conductance.'''
    _fn = os.path.join(self.opts.out_dir, self.opts.outfile + "_Gauss.txt")
    with open(_fn, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile, dialect='JV')
        writer.writerow([
            "Potential (V)", "Log|J|", "Standard Deviation",
            "Standard Error of the Mean",
            "%s%% confidence interval" % (100 * (1 - self.opts.alpha))
        ])

        for x in self.XY:
            _sem = self.XY[x]['hist']['std'] / np.sqrt(self.opts.degfree - 1
                                                       or 1)
            writer.writerow([
                '%0.4f' % x,
                '%0.4f' % self.XY[x]['hist']['mean'],
                '%0.4f' % self.XY[x]['hist']['std'],
                '%0.4f' % _sem,
                '%0.4f' %
                (_sem *
                 stdtrit(self.opts.degfree - 1 or 1, 1 - self.opts.alpha))
            ])

    _fn = os.path.join(self.opts.out_dir,
                       self.opts.outfile + "_Gauss_noFirstTraces.txt")
    with open(_fn, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile, dialect='JV')
        writer.writerow([
            "Potential (V)", "Log|J|", "Standard Deviation",
            "Standard Error of the Mean",
            "%s%% confidence interval" % (100 * (1 - self.opts.alpha))
        ])

        for x in self.XY:
            _sem = self.XY[x]['hist_nofirst']['std'] / np.sqrt(
                self.opts.degfree - 1 or 1)
            writer.writerow([
                '%0.4f' % x,
                '%0.4f' % self.XY[x]['hist_nofirst']['mean'],
                '%0.4f' % self.XY[x]['hist_nofirst']['std'],
                '%0.4f' % _sem,
                '%0.4f' %
                (_sem *
                 stdtrit(self.opts.degfree - 1 or 1, 1 - self.opts.alpha))
            ])

    _fn = os.path.join(self.opts.out_dir, self.opts.outfile + "_RGauss.txt")
    with open(_fn, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile, dialect='JV')
        if self.opts.logr:
            writer.writerow([
                "Potential (V)", "log |R|", "Standard Deviation",
                "%s%% confidence interval" % (100 * (1 - self.opts.alpha))
            ])
        else:
            writer.writerow([
                "Potential (V)", "|R|", "Standard Deviation",
                "%s%% confidence interval" % (100 * (1 - self.opts.alpha))
            ])
        for x in self.XY:
            _sem = float(self.XY[x]['R']['hist']['std']) / np.sqrt(
                self.opts.degfree - 1 or 1)
            _t_val = _sem * stdtrit(self.opts.degfree - 1 or 1,
                                    1 - self.opts.alpha)
            writer.writerow([
                '%0.4f' % x,
                '%0.4f' % self.XY[x]['R']['hist']['mean'],
                '%0.4f' % self.XY[x]['R']['hist']['std'],
                "%0.4f" % _t_val
            ])
    _fn = os.path.join(self.opts.out_dir,
                       self.opts.outfile + "_logdJdVGauss.txt")
    with open(_fn, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile, dialect='JV')
        writer.writerow([
            "Potential (V)", "Log|dJ/dV|", "Standard Deviation",
            "%s%% confidence interval" % (100 * (1 - self.opts.alpha))
        ])
        for x in self.GHists:
            _sem = float(self.GHists[x]['hist']['std']) / np.sqrt(
                self.opts.degfree - 1 or 1)
            _t_val = _sem * stdtrit(self.opts.degfree - 1 or 1,
                                    1 - self.opts.alpha)
            writer.writerow([
                '%0.4f' % x,
                '%0.4f' % self.GHists[x]['hist']['mean'],
                '%0.4f' % self.GHists[x]['hist']['std'],
                "%0.4f" % _t_val
            ])
    _fn = os.path.join(self.opts.out_dir, self.opts.outfile + "_NDCGauss.txt")
    with open(_fn, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile, dialect='JV')
        writer.writerow([
            "Potential (V)", "dJ/dV * V/J", "Standard Deviation",
            "%s%% confidence interval" % (100 * (1 - self.opts.alpha))
        ])
        for x in self.NDCHists:
            _sem = float(self.NDCHists[x]['hist']['std']) / np.sqrt(
                self.opts.degfree - 1 or 1)
            _t_val = _sem * stdtrit(self.opts.degfree - 1 or 1,
                                    1 - self.opts.alpha)
            writer.writerow([
                '%0.4f' % x,
                '%0.4f' % self.NDCHists[x]['hist']['mean'],
                '%0.4f' % self.NDCHists[x]['hist']['std'],
                "%0.4f" % _t_val
            ])
Example #18
def get_critical_value(df, p):
    return stdtrit(df, 1 - p)
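
A quick sanity check with hypothetical arguments, assuming SciPy is installed: this one-sided critical value matches the scipy.stats.t quantile at 1 - p.

from scipy.stats import t
print(get_critical_value(10, 0.05))  # ~1.8125
print(t.ppf(0.95, 10))               # the same value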
Example #19
def _invt(q, df):
    return special.stdtrit(df, q)  # pylint: disable=no-member
Example #20
 def _cdf(self, x, df, C, Ci):
     out = special.stdtr(df, numpy.dot(Ci, special.stdtrit(df, x)))
     return out
Example #21
def _invt(q, df):
    return special.stdtrit(df, q)
Example #22
def t_convert(i):
    tmp = []
    for t in alpha:
        tmp.append(stdtrit(i, 1 - t))

    data[i] = tmp
Example #23
 def _ppf(self, q, a, C, Ci, loc):
     z = special.stdtrit(a, q)
     out = (numpy.dot(C, z).T + loc.T).T
     return out
Example #24
 def _cdf(self, x, df, C, Ci):
     out = special.stdtr(df, numpy.dot(Ci, special.stdtrit(df, x)))
     return out
Example #25
 def _upper(self, a):
     return special.stdtrit(a, 1 - 1e-16) * 10
Example #26
def WriteSegmentedGauss(self, key=None):
    '''Write histograms of values of J broken out by segment to catch
    hysteretic behavior without smearing it out.'''

    if not self.segments:
        logger.warning("No segments found.")
        return

    if key == 'nofirst':
        _segments = self.segments_nofirst
        _label = 'Segment_NoFirst'
    else:
        _segments = self.segments
        _label = 'Segment'

    for segment in _segments:
        rows = {}
        _fn = os.path.join(self.opts.out_dir, self.opts.outfile+"_Gauss_%s_%s.txt" % (_label, str(segment+1)))
        with open(_fn, 'w', newline='') as csvfile:
            writer = csv.writer(csvfile, dialect='JV')
            headers = ["Potential (V)"]
            _maxtrace = 0
            for trace in _segments[segment]:
                # TODO: Fix this hack
                if not isinstance(trace, int):
                    continue
                _maxtrace += 1
                headers += ["Log|J|",
                            "Standard Deviation",
                            "Standard Error of the Mean",
                            "%s%% confidence interval" % (100*(1-self.opts.alpha))]
                for x in _segments[segment][trace]:
                    _hist = _segments[segment][trace][x]
                    if x not in rows:
                        rows[x] = []
                    rows[x].append("%0.4f" % _hist['mean'])
                    rows[x].append("%0.4f" % _hist['std'])
                    _sem = float(_hist['std'])/np.sqrt(self.opts.degfree - 1 or 1)
                    rows[x].append("%0.4f" % _sem)
                    _t_val = _sem * stdtrit(self.opts.degfree - 1 or 1, 1 - self.opts.alpha)
                    rows[x].append("%0.4f" % _t_val)

            writer.writerow(headers)
            _V = list(rows.keys())
            _V.sort()
            for x in _V:
                while len(rows[x]) < _maxtrace * 3:
                    rows[x] += ['-', '-', '-']
                    logger.warning('Filling columns for segment %i, V=%s to match %s traces.', segment, x, _maxtrace)
                writer.writerow(["%0.4f" % x]+rows[x])

    # TODO: Don't just repeat the whole code block
    for segment in _segments:
        rows = {}
        _fn = os.path.join(self.opts.out_dir, self.opts.outfile+"_Gauss_%s_Combined_%s.txt" % (_label, str(segment+1)))
        with open(_fn, 'w', newline='') as csvfile:
            writer = csv.writer(csvfile, dialect='JV')
            headers = ["Potential (V)",
                       "Log|J|",
                       "Standard Deviation",
                       "Standard Error of the Mean",
                       "%s%% confidence interval" % (100*(1-self.opts.alpha))]
            for x in _segments[segment]['combined']:
                _hist = _segments[segment]['combined'][x]
                if x not in rows:
                    rows[x] = []
                rows[x].append("%0.4f" % _hist['mean'])
                rows[x].append("%0.4f" % _hist['std'])
                _sem = float(_hist['std'])/np.sqrt(self.opts.degfree - 1 or 1)
                rows[x].append("%0.4f" % _sem)
                _t_val = _sem * stdtrit(self.opts.degfree - 1 or 1, 1 - self.opts.alpha)
                rows[x].append("%0.4f" % _t_val)

            writer.writerow(headers)
            _V = list(rows.keys())
            _V.sort()
            for x in _V:
                writer.writerow(["%0.4f" % x]+rows[x])
Example #27
    #print header
    #print timepoints
    #for o in output:
    #    print o
    return header, np.array(timepoints), np.array(output)

#print mcss_postprocess('-a -l -S 0,1,2,3 -t 1')

cidegree = 0.95
from math import sqrt
_, timepoints, run1 = mcss_postprocess('-l -S 1 -t 1')
run1 = run1[0]
run2 = mcss_postprocess('-l -S 1 -t 2')[2][0]
runs_std = mcss_postprocess('-l -S 1')[2][1]
runs_ppf = mcss_postprocess('-l -S 1')[2][2]
#print run1, run2, runs_std, runs_ppf
print(run1.shape, run2.shape, runs_std.shape, runs_ppf.shape)
#print stdtrit(runs)

num_runs = 2
#cifactor = gsl_cdf_tdist_Pinv(1.0 - (1.0 - cidegree) / 2.0, num_runs - 1) / sqrt(num_runs)
from infobiotics.mcss.results.statistics import InverseStudentT
cifactor_InverseStudentT = InverseStudentT(num_runs - 1, 1.0 - (1.0 - cidegree) / 2.0) / sqrt(num_runs)
from scipy.special import stdtrit
cifactor_stdrit = stdtrit(num_runs - 1, 1.0 - (1.0 - cidegree) / 2.0) / sqrt(num_runs)
#print cifactor_InverseStudentT - cifactor_stdrit 
ppf = (cifactor_InverseStudentT * runs_std)
#ppf = (cifactor_stdrit * runs_std)
print(ppf - runs_ppf)

Example #28
 def _ppf(self, q, a):
     return special.stdtrit(a, q)
Example #29
def _invt(q, df):
    return special.stdtrit(df, q)
Example #30
 def _ppf(self, q, a):
     return special.stdtrit(a, q)
Example #31
 def _ppf(self, q, df, C, Ci):
     out = special.stdtr(df, numpy.dot(C, special.stdtrit(df, q)))
     return out
Example #32
 def _ppf(self, q, df, C, Ci):
     out = special.stdtr(df, numpy.dot(C, special.stdtrit(df, q)))
     return out
Example #33
 def _ppf(self, q, a, C, Ci, loc):
     z = special.stdtrit(a, q)
     out = (numpy.dot(C, z).T + loc.T).T
     return out