Code Example #1
File: test_stats.py  Project: AndreI11/SatStressGui
 def test_var(self):
     """
     var(testcase) = 1.666666667 """
     #y = stats.var(self.shoes[0])
     #assert_approx_equal(y,6.009)
     y = stats.var(self.testcase)
     assert_approx_equal(y, 1.666666667)
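For reference, the old scipy.stats.var returned the unbiased sample variance (ddof=1). A minimal numpy equivalent of the same check, assuming a test case such as [1, 2, 3, 4], whose sample variance is 5/3 ≈ 1.666666667:

import numpy as np
from numpy.testing import assert_approx_equal

testcase = [1, 2, 3, 4]        # assumed data; its unbiased sample variance is 5/3
y = np.var(testcase, ddof=1)   # ddof=1 matches the old scipy.stats.var behaviour
assert_approx_equal(y, 1.666666667)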
Code Example #2
File: test_stats.py  Project: mullens/khk-lights
 def test_var(self):
     """
     var(testcase) = 1.666666667 """
     #y = stats.var(self.shoes[0])
     #assert_approx_equal(y,6.009)
     y = stats.var(self.testcase)
     assert_approx_equal(y,1.666666667)
Code Example #3
File: hw10_alarcon_2.py  Project: rmalarc/is602
    def regression(self):
        """
        Perform linear regression

        """
        stats = BasicStats()

        # Closed-form least-squares estimates from the normal equations
        n = len(self.data['x'])
        x2_sum = stats.sum_squares(self.data['x'])
        x_sum = sum(self.data['x'])
        x_sum2 = x_sum * x_sum
        y_sum = sum(self.data['y'])
        xy_sum = stats.sum_xy(self.data)
        self.alpha = (y_sum*x2_sum - x_sum*xy_sum)/(n*x2_sum - x_sum2)
        self.beta = (n*xy_sum - x_sum*y_sum)/(n*x2_sum - x_sum2)

        # Equivalent covariance/variance form; this overwrites the estimates computed above
        self.beta = stats.cov(self.data['x'], self.data['y'])/stats.var(self.data['x'])
        self.alpha = stats.mean(self.data['y']) - self.beta*stats.mean(self.data['x'])
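The two blocks above are algebraically equivalent: beta = cov(x, y)/var(x) and alpha = mean(y) - beta*mean(x) are just the normal-equation solution rewritten. A small cross-check sketch with numpy (np.polyfit stands in for the BasicStats helpers, which are not shown here):

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
y = np.array([2.1, 3.9, 6.2, 8.1, 9.8])

# covariance/variance form of the least-squares slope and intercept
beta = np.cov(x, y)[0, 1] / np.var(x, ddof=1)
alpha = np.mean(y) - beta * np.mean(x)

# the same coefficients from numpy's own least-squares fit
slope, intercept = np.polyfit(x, y, 1)
assert np.allclose([beta, alpha], [slope, intercept])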
Code Example #4
File: bosGUI.py  Project: zwghit/CFCFD-NG
def gausspeak(cmap, start):
    from scipy.stats import tvar as var
    from scipy import log, exp, zeros, r_, dot, transpose, array
    from scipy import linalg

    #(pseudo-)Gaussian peak fit: p(x,y) = exp(a + b*x + c*y + d*x*x + e*x*y + f*y*y)
    global QMAT
    rhs = zeros((9), dtype=float)
    ind = 0
    for j in r_[-1:2]:
        for i in r_[-1:2]:
            rhs[ind] = log(max(cmap[start[1] + j, start[0] + i], 1e-12))
            ind = ind + 1
    if var(rhs) == 0:
        # right-hand side was clipped everywhere: no good
        peak = 0
        shift = array([0, 0])
        ok = -1
        return peak, shift, ok

    # solve normal equations for the least-squares problem
    # (QQ is assumed to be the module-level matrix dot(transpose(QMAT), QMAT))
    qr = dot(transpose(QMAT), transpose(rhs))
    coeffs = linalg.solve(QQ, qr)

    #unpack solution vector; find peak position from zero derivative
    mmat = array([[2 * coeffs[3], coeffs[4]], [coeffs[4], 2 * coeffs[5]]])
    mrhs = array([[-coeffs[1]], [-coeffs[2]]])
    qvec = transpose(linalg.solve(mmat, mrhs))  #qvec = (mmat\mrhs)'
    if linalg.norm(qvec) > 1:
        # interpolated displacement is too large: no good
        peak = 0
        shift = array([0, 0])
        ok = -1
        return peak, shift, ok
    ok = 1
    shift = qvec + start
    qvec = transpose(qvec)
    peak = exp(coeffs[0] + coeffs[1] * qvec[0] + coeffs[2] * qvec[1] +
               coeffs[3] * qvec[0] * qvec[0] + coeffs[4] * qvec[0] * qvec[1] +
               coeffs[5] * qvec[1] * qvec[1])
    return peak, shift, ok
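The function relies on module-level QMAT and QQ that are not part of the snippet. A hedged sketch of how such matrices could be built for the 3x3 neighbourhood, fitting log p = a + b*x + c*y + d*x*x + e*x*y + f*y*y (the names QMAT and QQ are assumptions matching the globals referenced above):

import numpy as np

# one row per pixel of the 3x3 neighbourhood, one column per coefficient a..f,
# in the same (j outer, i inner) order used to fill rhs above
rows = []
for j in (-1, 0, 1):        # y offset
    for i in (-1, 0, 1):    # x offset
        rows.append([1.0, i, j, i * i, i * j, j * j])
QMAT = np.array(rows)          # 9 x 6 design matrix
QQ = QMAT.T.dot(QMAT)          # normal-equations matrix passed to linalg.solve above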
Code Example #5
File: statistical_tools.py  Project: deccs/PLearn
def autocorrelation(series, k=1, biased=True):
    """Returns autocorrelation of order 'k' and corresponding two-tailed pvalue.

    (Inspired by CLM pp.45-47)

    @param series: The series on which to compute autocorrelation
    @param k:      The order to which compute autocorrelation
    @param biased: If False, rho_k will be corrected according to Fuller (1976)

    @return: rho_k, pvalue
    """
    T = len(series)
    mu = mean(series)
    sigma = var(series)

    # Centered observations
    obs = series-mu    
    lagged = lag(obs, k) 
    truncated = obs[:-k]
    assert len(lagged) == len(truncated)

    # Multiplied by 'T' for numerical stability
    gamma_k = T*add.reduce(truncated*lagged)  # Numerator
    gamma_0 = T*add.reduce(obs*obs)           # Denominator
    rho_k   = (gamma_k / gamma_0)
    if rho_k > 1.0: rho_k = 1.0   # Correct for numerical errors

    # The standard normal random variable
    Z = sqrt(T)*rho_k
    
    # Bias correction?
    if not biased:
        rho_k += (1 - rho_k**2) * (T-k)/(T-1)**2
        Z = rho_k * T/sqrt(T-k)

    # The two-tailed p-value is twice the prob that value of a std normal r.v.
    # turns out to be greater than the (absolute) value of Z
    pvalue = 2*( 1 - norm.cdf(abs(Z)) )
    assert pvalue >= 0.0 and pvalue <= 1.0
    return rho_k, pvalue
Code Example #6
File: statistical_tools.py  Project: zbxzc35/PLearn
def autocorrelation(series, k=1, biased=True):
    """Returns autocorrelation of order 'k' and corresponding two-tailed pvalue.

    (Inspired by CLM pp.45-47)

    @param series: The series on which to compute autocorrelation
    @param k:      The order to which compute autocorrelation
    @param biased: If False, rho_k will be corrected according to Fuller (1976)

    @return: rho_k, pvalue
    """
    T = len(series)
    mu = mean(series)
    sigma = var(series)

    # Centered observations
    obs = series - mu
    lagged = lag(obs, k)
    truncated = obs[:-k]
    assert len(lagged) == len(truncated)

    # Multiplied by 'T' for numerical stability
    gamma_k = T * add.reduce(truncated * lagged)  # Numerator
    gamma_0 = T * add.reduce(obs * obs)  # Denominator
    rho_k = (gamma_k / gamma_0)
    if rho_k > 1.0: rho_k = 1.0  # Correct for numerical errors

    # The standard normal random variable
    Z = sqrt(T) * rho_k

    # Bias correction?
    if not biased:
        rho_k += (1 - rho_k**2) * (T - k) / (T - 1)**2
        Z = rho_k * T / sqrt(T - k)

    # The two-tailed p-value is twice the prob that value of a std normal r.v.
    # turns out to be greater than the (absolute) value of Z
    pvalue = 2 * (1 - norm.cdf(abs(Z)))
    assert pvalue >= 0.0 and pvalue <= 1.0
    return rho_k, pvalue
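The function depends on helpers imported elsewhere in statistical_tools.py (mean, var, lag, add, sqrt, norm). A minimal usage sketch with assumed drop-in equivalents, where lag(obs, k) is taken to return the series shifted by k (i.e. obs[k:]) so that each truncated observation is paired with the one k steps later:

import numpy as np
from numpy import mean, var, sqrt, add
from scipy.stats import norm

def lag(obs, k):
    # assumed behaviour: the series shifted forward by k samples
    return obs[k:]

# paste next to the autocorrelation() definition above, then:
series = np.sin(np.linspace(0, 20, 200)) + 0.1 * np.random.randn(200)
rho_1, pvalue = autocorrelation(series, k=1)
print("rho_1 = %g, pvalue = %g" % (rho_1, pvalue))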
Code Example #7
File: mcmc_sample.py  Project: hadfieldn/CS677
from __future__ import division
from mcmchammer import *
from scipy import stats

datafilename = "faculty.dat"
nsamples = 10000
burn = 0
mean_candsd = 0.2
var_candsd = 0.15

# Read in Data
data = [float(line) for line in open(datafilename)]

# Use point estimators from the data to come up with starting values.
estimated_mean = stats.mean(data)
estimated_var = stats.var(data)

# Create Nodes and Links in Network
meannode = NormalNode(estimated_mean, name="Mean", candsd=mean_candsd, mean=5, var=(1 / 3) ** 2)
varprior_mean = 1 / 4
varprior_stddev = 1 / 12
varprior_shape = MomentsInvGammaShape(varprior_mean, varprior_stddev ** 2)
varprior_scale = MomentsInvGammaScale(varprior_mean, varprior_stddev ** 2)
varnode = InvGammaNode(estimated_var, name="Variance", candsd=var_candsd, shape=varprior_shape, scale=varprior_scale)
for datum in data:
    NormalNode(datum, observed=True, mean=meannode, var=varnode)

# Perform simulations and plot results

currentnetwork.simulate(nsamples, burn)
meannode.plotmixing()
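Note that scipy.stats.mean and scipy.stats.var were removed in later SciPy releases; on a current stack the two point estimators would come from numpy instead (a sketch keeping the same variable names):

import numpy as np

data = [float(line) for line in open("faculty.dat")]
estimated_mean = np.mean(data)
estimated_var = np.var(data, ddof=1)   # ddof=1 reproduces the old scipy.stats.var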
Code Example #8
sims.plot_susceptibility(N)
sims.plot_absmag(N)
sims.plot_energies(N)
"""

###########################
# Plotting P(E) for L=20 and T = [1, 2.4]
###########################
sim = Simulation(20, 2.4, "/scratch/henriasv/FYS3150/IsingModel_once/N20/T2.4")
sim.set_thermalization(0.2)
sim.calculate_properties()
sim.plot_pe()
title("Probability density function for the energy at T=2.4 (not normalized)")
xlabel("E")
ylabel("Count")
variance = stats.var(sim.e_array)
print "variance = %f" % variance, sim.var_E
hold("on")
sim = Simulation(20, 1, "/scratch/henriasv/FYS3150/IsingModel_once/N20/T1")
sim.set_thermalization(0.2)
sim.calculate_properties()
sim.plot_pe()
title("Probability density function for the energy at T=1 (not normalized)")
xlabel("E")
ylabel("Count")
"""
###########################
# Plotting convergence with ordered or random initialization for T = [1, 2.4]
###########################
# Random, T=1
sim1 = Simulation(20, 1, "/scratch/henriasv/FYS3150/IsingModel_once_rand/N20/T1")
Code Example #9
File: Jost_Stats_Calc.py  Project: ngcrawford/SMOGD
	def approximate_hmean(dest_values):
		A = stats.mean(dest_values)
		varD = stats.var(dest_values)
		h_mean = 1/((1/A)+(varD)*pow((1/A),3))
		return h_mean
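The expression above is the usual second-order (delta-method) approximation of a harmonic mean from the arithmetic mean A and variance varD of the same values: h ≈ 1/(1/A + varD/A³). A small sketch comparing it with the exact harmonic mean, with numpy standing in for the old stats helpers:

import numpy as np

def approximate_hmean(dest_values):
    A = np.mean(dest_values)
    varD = np.var(dest_values, ddof=1)
    return 1.0 / ((1.0 / A) + varD * (1.0 / A) ** 3)

values = np.array([0.2, 0.25, 0.3, 0.35])
exact = len(values) / np.sum(1.0 / values)   # exact harmonic mean
# the approximation is close for low-variance data
print("approx = %g, exact = %g" % (approximate_hmean(values), exact))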
Code Example #10
def printParam(par, parString):
    mean = stats.mean(par)
    sd = sqrt(stats.var(par))
    print parString, ' = ', mean, ' +/- ', sd
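On Python 3, with the old scipy.stats.mean/var helpers gone, a hypothetical equivalent of this report could use numpy and an f-string (a sketch, not the project's code):

import numpy as np

def print_param(par, par_string):
    mean = np.mean(par)
    sd = np.sqrt(np.var(par, ddof=1))   # ddof=1 matches the old scipy.stats.var
    print(f"{par_string} = {mean} +/- {sd}")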
Code Example #11
File: analyseResults.py  Project: henriasv/FYS3150
sims.plot_susceptibility(N)
sims.plot_absmag(N)
sims.plot_energies(N)
"""

###########################
# Plotting P(E) for L=20 and T = [1, 2.4]
###########################
sim = Simulation(20, 2.4, "/scratch/henriasv/FYS3150/IsingModel_once/N20/T2.4")
sim.set_thermalization(0.2)
sim.calculate_properties()
sim.plot_pe()
title("Probability density function for the energy at T=2.4 (not normalized)")
xlabel("E")
ylabel("Count")
variance = stats.var(sim.e_array)
print "variance = %f" % variance, sim.var_E
hold("on")
sim = Simulation(20, 1, "/scratch/henriasv/FYS3150/IsingModel_once/N20/T1")
sim.set_thermalization(0.2)
sim.calculate_properties()
sim.plot_pe()
title("Probability density function for the energy at T=1 (not normalized)")
xlabel("E")
ylabel("Count")

"""
###########################
# Plotting convergence with ordered or random initialization for T = [1, 2.4]
###########################
# Random, T=1