Code example #1
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats

# Assumed values for the snippet's undefined globals: shape parameter c,
# label font size fs, and global figure size gfs.
c = 2.0
fs = 14
gfs = (8, 5)


def main():
    fig, ax = plt.subplots(figsize=gfs)

    xs = np.linspace(0., 10., 500)
    rvs = stats.lomax(c).pdf(xs)
    ax.plot(xs, rvs, label=r'$\lambda(x) = c/x$')

    rvs = stats.expon(scale=1 / c).pdf(xs)
    ax.plot(xs, rvs, label=r'$\lambda(x) = c$')

    rvs = stats.rayleigh(scale=np.sqrt(1 / c)).pdf(xs)
    ax.plot(xs, rvs, label=r'$\lambda(x) = cx$')

    ax.set_xlim(0, 10)
    ax.set_ylim(0, 2)
    ax.set_xlabel('$X$ (failure rv)', fontsize=fs)
    ax.set_ylabel('$p(x)$', fontsize=fs)
    ax.tick_params(labelsize=fs)
    ax.legend(fontsize=fs)

    plt.tight_layout()
    plt.savefig('../hf2pdf-examples.png', bbox_inches='tight')
    plt.close()

    # Same pdfs again, now on log-log axes
    fig, ax = plt.subplots(figsize=gfs)

    xs = np.linspace(0.1, 100., 1000)
    rvs = stats.lomax(c).pdf(xs)
    ax.plot(xs, rvs, label=r'$\lambda(x) = c/x$')

    rvs = stats.expon(scale=1 / c).pdf(xs)
    ax.plot(xs, rvs, label=r'$\lambda(x) = c$')

    rvs = stats.rayleigh(scale=np.sqrt(1 / c)).pdf(xs)
    ax.plot(xs, rvs, label=r'$\lambda(x) = cx$')

    ax.set_xlabel('$X$ (failure rv)', fontsize=fs)
    ax.set_ylabel('$p(x)$', fontsize=fs)
    ax.tick_params(labelsize=fs)
    ax.legend(fontsize=fs)
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.set_ylim(bottom=1e-9)

    plt.tight_layout()
    plt.savefig('../hf2pdf-examples-loglog.png', bbox_inches='tight')
    plt.close()


if __name__ == '__main__':
    main()
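
The three labels encode hazard rates: a constant hazard λ(x) = c gives the exponential, a linearly increasing hazard λ(x) = cx gives the Rayleigh, and the Lomax has the decreasing hazard c/(1 + x), which behaves like c/x for large x. A minimal sketch (with an assumed shape value c) that verifies this numerically via λ(x) = pdf(x)/sf(x):

import numpy as np
from scipy import stats

c = 2.0  # assumed shape value
xs = np.linspace(0.5, 10.0, 50)

expon_rv = stats.expon(scale=1 / c)
rayleigh_rv = stats.rayleigh(scale=np.sqrt(1 / c))
lomax_rv = stats.lomax(c)

print(np.allclose(expon_rv.pdf(xs) / expon_rv.sf(xs), c))              # constant hazard
print(np.allclose(rayleigh_rv.pdf(xs) / rayleigh_rv.sf(xs), c * xs))   # hazard grows linearly
print(np.allclose(lomax_rv.pdf(xs) / lomax_rv.sf(xs), c / (1 + xs)))   # hazard ~ c/x for large x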
Code example #2
    g1 = gamma(alpha_, loc=0, scale=1. / float(beta_))
    plt.figure(figsize=(8, 5))
    plt.hist(theta_samples, bins=55, density=True, alpha=.5)
    plt.plot(theta_range, g1.pdf(theta_range), 'b-')
    plt.xlim(np.min(theta_samples), np.max(theta_samples))
    plt.title('Theta posterior: samples versus pdf', fontsize=15)
    plt.xlabel('theta (-)', fontsize=15)

    # Compute predictive posterior directly from analytical formula: the Lomax distribution
    # See https://en.wikipedia.org/wiki/Lomax_distribution
    # and https://en.wikipedia.org/wiki/Conjugate_prior
    X = np.linspace(0, 100, 10000)  # fixed sample locations
    PredPost = lomax_manual(X, alpha_, beta_)
    PredPost = PredPost / np.sum(PredPost)

    ll = lomax(c=alpha_, scale=float(beta_))
    PredPost2 = ll.pdf(X)
    PredPost2 /= np.sum(PredPost2)
    print(np.allclose(PredPost2, PredPost))

    true_pdf = true_theta * np.exp(-true_theta * X)
    true_pdf /= np.sum(true_pdf)

    print('True mean of exponential: %1.3f' % (1. / true_theta))
    print('Estimated mean from pred.post.: %1.3f' %
          (np.dot(X, PredPost / np.sum(PredPost))))

    # Now let's replicate this posterior predictive from the (discrete) probability
    # mass function of the theta parameter, i.e. by constructing a mixture of
    # exponentials. Instead of the 'meta-pdf' structure outlined in "Think Bayes"
    # (essentially a nested dictionary that bypasses all classes, see its
    # MakeMixture() function), we use a plain nested for-loop.
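
As a self-contained sketch of what that loop computes (with assumed posterior hyperparameters alpha_ and beta_, and the inner loop over X vectorized), the mixture of exponentials weighted by a discretized gamma posterior reproduces the Lomax posterior predictive:

import numpy as np
from scipy.stats import gamma, lomax

alpha_, beta_ = 5.0, 2.0                    # assumed posterior hyperparameters
X = np.linspace(0, 100, 10000)              # fixed sample locations
theta_range = np.linspace(1e-3, 10., 2000)  # discretization grid for theta

weights = gamma(alpha_, scale=1. / beta_).pdf(theta_range)
weights /= np.sum(weights)                  # discrete pmf over theta

mix = np.zeros_like(X)
for w, th in zip(weights, theta_range):     # outer loop over theta values
    mix += w * th * np.exp(-th * X)         # exponential pdf at all X at once
mix /= np.sum(mix)

ref = lomax(c=alpha_, scale=beta_).pdf(X)   # analytical posterior predictive
ref /= np.sum(ref)
print(np.max(np.abs(mix - ref)))            # ~0 for a fine theta grid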
Code example #3
    def interval(self, alpha):
        return st.lomax(scale=self.alpha, c=self.beta).interval(alpha)
Code example #4
    def log_marginal_likelihood(self, data):
        return st.lomax(scale=self.alpha, c=self.beta).logpdf(data)
Code example #5
    print('max. observed delay: %i days' % (df.delay_obs.max()))
    print('\n')

    # True delay distribution
    True_delay = expon(loc=testgroups['A'].min_delay,
                       scale=testgroups['A'].lambda_)

    # Compute observed delay dist. via Bayesian conj. priors (see script <exponential_post_predictive.py>)
    alpha_, beta_ = Bayesian_conjugate_inference(observed_delays -
                                                 testgroups['A'].min_delay)
    # (marginal) posterior
    lambd_dist_uncorrected = gamma(alpha_, loc=0,
                                   scale=1. / float(beta_)).pdf(lambda_range)

    # posterior predictive
    ll = lomax(c=alpha_, loc=testgroups['A'].min_delay, scale=float(beta_))
    Observed_delay_dist_Bayesian = ll.pdf(x)

    # Compute corrected delay dist. by adjusting likelihood function (see script <decay_problem_MacKay.py>)
    today = df.event0.values[-1]
    df['days_ago'] = (today - df.event0) / pd.Timedelta(1, 'D')

    completed_events = df[['delay_obs', 'days_ago']].dropna().copy()
    #lambda_likelh = likelihood_over_all_observations(completed_events, min_delay=testgroups['A'].min_delay, lambda_range=lambda_range)

    lambda_likelh = pd.Series(data=np.ones((len(lambda_range), )))
    for _, obs in completed_events.iterrows():
        Z = expon_integral(
            lambda_range, 0, obs.days_ago
        )  # integration constants, dependent on observation window but independent of observation
        likelihood_of_lambda_for_this_observation = expon.pdf(
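
The snippet is cut off mid-call. What the loop computes, per the MacKay decay-problem comment, is a likelihood for lambda in which each observed delay is renormalized by the probability of having completed within that record's observation window. A self-contained sketch of that correction under assumed data, accumulating in log space for numerical stability:

import numpy as np
from scipy.stats import expon

lambda_range = np.linspace(0.01, 2.0, 500)
observations = [(3.0, 30.0), (1.5, 12.0), (7.0, 40.0)]  # assumed (delay, days_ago) pairs

log_likelihood = np.zeros_like(lambda_range)
for delay, window in observations:
    Z = 1.0 - np.exp(-lambda_range * window)            # truncation constant over the window
    pdf = expon.pdf(delay, scale=1.0 / lambda_range)    # lambda * exp(-lambda * delay)
    log_likelihood += np.log(pdf / Z)

posterior = np.exp(log_likelihood - log_likelihood.max())
posterior /= posterior.sum()                            # discrete posterior over lambda
print(lambda_range[np.argmax(posterior)])               # MAP estimate on the grid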
Code example #6
from scipy.stats import lomax
import matplotlib.pyplot as plt
import numpy as np

fig, ax = plt.subplots(1, 1)

# Calculate the first four moments:

c = 1.88
mean, var, skew, kurt = lomax.stats(c, moments='mvsk')

# Display the probability density function (``pdf``):

x = np.linspace(lomax.ppf(0.01, c), lomax.ppf(0.99, c), 100)
ax.plot(x, lomax.pdf(x, c), 'r-', lw=5, alpha=0.6, label='lomax pdf')

# Alternatively, the distribution object can be called (as a function)
# to fix the shape, location and scale parameters. This returns a "frozen"
# RV object holding the given parameters fixed.

# Freeze the distribution and display the frozen ``pdf``:

rv = lomax(c)
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')

# Check accuracy of ``cdf`` and ``ppf``:

vals = lomax.ppf([0.001, 0.5, 0.999], c)
np.allclose([0.001, 0.5, 0.999], lomax.cdf(vals, c))
# True

# Generate random numbers:

r = lomax.rvs(c, size=1000)

# And compare the histogram:

ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
ax.legend(loc='best', frameon=False)
plt.show()
Code example #7
File: conftest.py  Project: ashutoshvarma/dfit
def all_dists():
    # Distribution parameters were taken from the scipy.stats official
    # documentation examples.
    # Total: 89 distributions
    return {
        "alpha":
        stats.alpha(a=3.57, loc=0.0, scale=1.0),
        "anglit":
        stats.anglit(loc=0.0, scale=1.0),
        "arcsine":
        stats.arcsine(loc=0.0, scale=1.0),
        "beta":
        stats.beta(a=2.31, b=0.627, loc=0.0, scale=1.0),
        "betaprime":
        stats.betaprime(a=5, b=6, loc=0.0, scale=1.0),
        "bradford":
        stats.bradford(c=0.299, loc=0.0, scale=1.0),
        "burr":
        stats.burr(c=10.5, d=4.3, loc=0.0, scale=1.0),
        "cauchy":
        stats.cauchy(loc=0.0, scale=1.0),
        "chi":
        stats.chi(df=78, loc=0.0, scale=1.0),
        "chi2":
        stats.chi2(df=55, loc=0.0, scale=1.0),
        "cosine":
        stats.cosine(loc=0.0, scale=1.0),
        "dgamma":
        stats.dgamma(a=1.1, loc=0.0, scale=1.0),
        "dweibull":
        stats.dweibull(c=2.07, loc=0.0, scale=1.0),
        "erlang":
        stats.erlang(a=2, loc=0.0, scale=1.0),
        "expon":
        stats.expon(loc=0.0, scale=1.0),
        "exponnorm":
        stats.exponnorm(K=1.5, loc=0.0, scale=1.0),
        "exponweib":
        stats.exponweib(a=2.89, c=1.95, loc=0.0, scale=1.0),
        "exponpow":
        stats.exponpow(b=2.7, loc=0.0, scale=1.0),
        "f":
        stats.f(dfn=29, dfd=18, loc=0.0, scale=1.0),
        "fatiguelife":
        stats.fatiguelife(c=29, loc=0.0, scale=1.0),
        "fisk":
        stats.fisk(c=3.09, loc=0.0, scale=1.0),
        "foldcauchy":
        stats.foldcauchy(c=4.72, loc=0.0, scale=1.0),
        "foldnorm":
        stats.foldnorm(c=1.95, loc=0.0, scale=1.0),
        # "frechet_r": stats.frechet_r(c=1.89, loc=0.0, scale=1.0),
        # "frechet_l": stats.frechet_l(c=3.63, loc=0.0, scale=1.0),
        "genlogistic":
        stats.genlogistic(c=0.412, loc=0.0, scale=1.0),
        "genpareto":
        stats.genpareto(c=0.1, loc=0.0, scale=1.0),
        "gennorm":
        stats.gennorm(beta=1.3, loc=0.0, scale=1.0),
        "genexpon":
        stats.genexpon(a=9.13, b=16.2, c=3.28, loc=0.0, scale=1.0),
        "genextreme":
        stats.genextreme(c=-0.1, loc=0.0, scale=1.0),
        "gausshyper":
        stats.gausshyper(a=13.8, b=3.12, c=2.51, z=5.18, loc=0.0, scale=1.0),
        "gamma":
        stats.gamma(a=1.99, loc=0.0, scale=1.0),
        "gengamma":
        stats.gengamma(a=4.42, c=-3.12, loc=0.0, scale=1.0),
        "genhalflogistic":
        stats.genhalflogistic(c=0.773, loc=0.0, scale=1.0),
        "gilbrat":
        stats.gilbrat(loc=0.0, scale=1.0),
        "gompertz":
        stats.gompertz(c=0.947, loc=0.0, scale=1.0),
        "gumbel_r":
        stats.gumbel_r(loc=0.0, scale=1.0),
        "gumbel_l":
        stats.gumbel_l(loc=0.0, scale=1.0),
        "halfcauchy":
        stats.halfcauchy(loc=0.0, scale=1.0),
        "halflogistic":
        stats.halflogistic(loc=0.0, scale=1.0),
        "halfnorm":
        stats.halfnorm(loc=0.0, scale=1.0),
        "halfgennorm":
        stats.halfgennorm(beta=0.675, loc=0.0, scale=1.0),
        "hypsecant":
        stats.hypsecant(loc=0.0, scale=1.0),
        "invgamma":
        stats.invgamma(a=4.07, loc=0.0, scale=1.0),
        "invgauss":
        stats.invgauss(mu=0.145, loc=0.0, scale=1.0),
        "invweibull":
        stats.invweibull(c=10.6, loc=0.0, scale=1.0),
        "johnsonsb":
        stats.johnsonsb(a=4.32, b=3.18, loc=0.0, scale=1.0),
        "johnsonsu":
        stats.johnsonsu(a=2.55, b=2.25, loc=0.0, scale=1.0),
        "ksone":
        stats.ksone(n=1e03, loc=0.0, scale=1.0),
        "kstwobign":
        stats.kstwobign(loc=0.0, scale=1.0),
        "laplace":
        stats.laplace(loc=0.0, scale=1.0),
        "levy":
        stats.levy(loc=0.0, scale=1.0),
        "levy_l":
        stats.levy_l(loc=0.0, scale=1.0),
        "levy_stable":
        stats.levy_stable(alpha=0.357, beta=-0.675, loc=0.0, scale=1.0),
        "logistic":
        stats.logistic(loc=0.0, scale=1.0),
        "loggamma":
        stats.loggamma(c=0.414, loc=0.0, scale=1.0),
        "loglaplace":
        stats.loglaplace(c=3.25, loc=0.0, scale=1.0),
        "lognorm":
        stats.lognorm(s=0.954, loc=0.0, scale=1.0),
        "lomax":
        stats.lomax(c=1.88, loc=0.0, scale=1.0),
        "maxwell":
        stats.maxwell(loc=0.0, scale=1.0),
        "mielke":
        stats.mielke(k=10.4, s=3.6, loc=0.0, scale=1.0),
        "nakagami":
        stats.nakagami(nu=4.97, loc=0.0, scale=1.0),
        "ncx2":
        stats.ncx2(df=21, nc=1.06, loc=0.0, scale=1.0),
        "ncf":
        stats.ncf(dfn=27, dfd=27, nc=0.416, loc=0.0, scale=1.0),
        "nct":
        stats.nct(df=14, nc=0.24, loc=0.0, scale=1.0),
        "norm":
        stats.norm(loc=0.0, scale=1.0),
        "pareto":
        stats.pareto(b=2.62, loc=0.0, scale=1.0),
        "pearson3":
        stats.pearson3(skew=0.1, loc=0.0, scale=1.0),
        "powerlaw":
        stats.powerlaw(a=1.66, loc=0.0, scale=1.0),
        "powerlognorm":
        stats.powerlognorm(c=2.14, s=0.446, loc=0.0, scale=1.0),
        "powernorm":
        stats.powernorm(c=4.45, loc=0.0, scale=1.0),
        "rdist":
        stats.rdist(c=0.9, loc=0.0, scale=1.0),
        "reciprocal":
        stats.reciprocal(a=0.00623, b=1.01, loc=0.0, scale=1.0),
        "rayleigh":
        stats.rayleigh(loc=0.0, scale=1.0),
        "rice":
        stats.rice(b=0.775, loc=0.0, scale=1.0),
        "recipinvgauss":
        stats.recipinvgauss(mu=0.63, loc=0.0, scale=1.0),
        "semicircular":
        stats.semicircular(loc=0.0, scale=1.0),
        "t":
        stats.t(df=2.74, loc=0.0, scale=1.0),
        "triang":
        stats.triang(c=0.158, loc=0.0, scale=1.0),
        "truncexpon":
        stats.truncexpon(b=4.69, loc=0.0, scale=1.0),
        "truncnorm":
        stats.truncnorm(a=0.1, b=2, loc=0.0, scale=1.0),
        "tukeylambda":
        stats.tukeylambda(lam=3.13, loc=0.0, scale=1.0),
        "uniform":
        stats.uniform(loc=0.0, scale=1.0),
        "vonmises":
        stats.vonmises(kappa=3.99, loc=0.0, scale=1.0),
        "vonmises_line":
        stats.vonmises_line(kappa=3.99, loc=0.0, scale=1.0),
        "wald":
        stats.wald(loc=0.0, scale=1.0),
        "weibull_min":
        stats.weibull_min(c=1.79, loc=0.0, scale=1.0),
        "weibull_max":
        stats.weibull_max(c=2.87, loc=0.0, scale=1.0),
        "wrapcauchy":
        stats.wrapcauchy(c=0.0311, loc=0.0, scale=1.0),
    }
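
For context, a hypothetical test that could consume this conftest.py fixture (this assumes all_dists is registered with @pytest.fixture, which the excerpt does not show; dfit's actual tests may differ):

def test_cdf_inverts_ppf(all_dists):
    # cdf(ppf(0.5)) should recover 0.5 up to solver tolerance for every
    # frozen distribution in the fixture.
    for name, dist in all_dists.items():
        if name == "levy_stable":
            continue  # its numeric cdf/ppf is slow and only approximate
        assert abs(dist.cdf(dist.median()) - 0.5) < 1e-5, name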
Code example #8
    def _reset_distribution(self):
        self._distribution = lomax(c=self.alpha_prime, scale=self.beta_prime)
Code example #9
    def _reset_distribution(self):
        self._distribution: rv_continuous = lomax(c=self._alpha,
                                                  scale=self._lambda)
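
Both _reset_distribution variants point at the same conjugate structure: a gamma(alpha, beta) prior over an exponential rate, with the Lomax as its posterior predictive. A minimal sketch of that update cycle, using assumed class and attribute names modeled on the excerpts:

import numpy as np
from scipy.stats import lomax


class GammaExponentialModel:
    """Gamma(alpha, beta) prior on an exponential rate; Lomax predictive."""

    def __init__(self, alpha=1.0, beta=1.0):
        self._alpha = alpha      # shape (pseudo-count of observations)
        self._lambda = beta      # rate (pseudo-sum of observed durations)
        self._reset_distribution()

    def _reset_distribution(self):
        self._distribution = lomax(c=self._alpha, scale=self._lambda)

    def update(self, data):
        data = np.asarray(data, dtype=float)
        self._alpha += data.size     # one pseudo-count per observation
        self._lambda += data.sum()   # accumulate observed durations
        self._reset_distribution()

    def predictive_pdf(self, x):
        return self._distribution.pdf(x)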