import time

import numpy as np
from distributions.lomax import Lomax


def tst_lomax():
    # Draw a large sample from a Lomax with known shape 1.1 and time the fit.
    t = Lomax.samples_(1.1, 50, size=10000)
    start = time.time()
    params = Lomax.est_params(t)
    end = time.time()
    print("Estimating parameters of Lomax took: " + str(end - start))
    # The recovered shape parameter should land within 0.1 of the true value.
    return abs(params[0] - 1.1) < 1e-1
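# A hedged usage sketch (mine, not from the source): run the smoke test and
# flag a regression if the recovered shape drifts beyond tolerance. It relies
# on est_params returning the shape parameter first, which the
# abs(params[0] - 1.1) check above already assumes.
if not tst_lomax():
    print("WARNING: Lomax shape estimate drifted beyond the 0.1 tolerance.")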
def lomax_mix():
    # Two-component Lomax mixture with right-censoring at a fixed cutoff.
    k1 = 1.1
    lmb1 = 20
    k2 = 0.1
    lmb2 = 30
    n_samples = 10000
    u = 0.3          # mixing proportion of the second component
    censor = 8.0     # right-censoring threshold
    t_len = int(n_samples * (1 - u))
    s_len = int(n_samples * u)
    t_samples = Lomax.samples_(k1, lmb1, size=t_len)
    s_samples = Lomax.samples_(k2, lmb2, size=s_len)
    # Keep only the durations observed before the censoring threshold.
    t = t_samples[t_samples < censor]
    s = s_samples[s_samples < censor]
    # One indicator per sample from either component that ran past the cutoff.
    x_censored = np.ones(sum(t_samples > censor) + sum(s_samples > censor))
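# A hedged sketch (not from the source) of how the arrays built in lomax_mix
# would typically be pooled for a right-censored fit: observed durations from
# both components go into one array, and every censored observation is
# recorded at the censoring threshold itself.
def pool_censored(t, s, x_censored, censor=8.0):
    observed = np.concatenate([t, s])   # durations that finished before the cutoff
    censored = x_censored * censor      # right-censored durations, all at the cutoff
    return observed, censored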
def compare_loglogistic_fitting_approaches():
    """
    This experiment convinced me to abandon the Lomax- and Weibull-based
    LogLogistic estimation.
    """
    ti, xi = mixed_loglogistic_model()
    # Estimate Lomax and Weibull params and construct the feature vector.
    wbl = Weibull.est_params(ti)
    lmx = Lomax.est_params(ti)
    x_features = cnstrct_feature(ti)
    # Predict the LogLogistic parameters as linear combinations of the features.
    beta = sum(x_features * LogLogistic.lin_betas)
    alpha = sum(x_features * LogLogistic.lin_alphas)
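# A hedged comparison helper (mine, not from the source): SciPy's fisk is the
# log-logistic distribution, so a direct MLE fit gives a baseline against the
# feature-based alpha/beta above. Mapping c to the shape (beta) and scale to
# alpha is an assumption about the repo's parameterization.
from scipy.stats import fisk

def direct_loglogistic_mle(ti):
    c_mle, _, scale_mle = fisk.fit(ti, floc=0)  # pin loc at 0 for a 2-parameter fit
    print("Direct MLE -> shape: %.3f, scale: %.3f" % (c_mle, scale_mle))
    return c_mle, scale_mle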
        lambda theta, N: tensorflow_distributions.Poisson(
            rate=theta["lambda"] * N)
    },
    "lomax": {
        "parameters": {
            "log_concentration": {
                "support": [-10, 10],
                "activation function": identity
            },
            "log_scale": {
                "support": [-10, 10],
                "activation function": identity
            }
        },
        "class": lambda theta: Lomax(
            concentration=tf.exp(theta["log_concentration"]),
            scale=tf.exp(theta["log_scale"]))
    },
    "zero-inflated poisson": {
        "parameters": {
            "pi": {
                "support": [0, 1],
                "activation function": sigmoid
            },
            "log_lambda": {
                "support": [-10, 10],
                "activation function": identity
            }
        },
        "class": lambda theta: ZeroInflated(
            tensorflow_distributions.Poisson(
                rate=tf.exp(theta["log_lambda"])),
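# A hedged sketch (not from the source) of how one entry of this config dict
# is typically consumed: pass each unconstrained parameter through its
# "activation function", clip it into the stated "support", then build the
# distribution with the "class" lambda. Count entries such as the one above
# that also take an exposure N would need that extra argument.
def build_distribution(config, name, raw_theta):
    entry = config[name]
    theta = {}
    for p_name, p_spec in entry["parameters"].items():
        low, high = p_spec["support"]
        theta[p_name] = tf.clip_by_value(
            p_spec["activation function"](raw_theta[p_name]), low, high)
    return entry["class"](theta)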
import numpy as np
from distributions.lomax import Lomax
from distributions.weibull import Weibull
from scipy.stats import poisson
import matplotlib.pyplot as plt

## Over-dispersed: heavy-tailed Lomax inter-arrival times.
k = 1.2
lmb = 25
durtn = 1.0
mean = Lomax.mean_s(k, lmb)
# For each of 1000 trials, count how many Lomax arrivals land within durtn.
aa = np.array([
    sum(np.cumsum(Lomax.samples_(k, lmb, size=100)) < durtn)
    for _ in range(1000)
])
expctd_mean = durtn / mean
actual_mean = np.mean(aa)
print("Diff: " + str(actual_mean - expctd_mean))
var = np.var(aa)

## Under-dispersed
# k=0.4; lmb=5.0; durtn=20.0
def weibull_to_count(k=0.4, lmb=1.0, durtn=20.0):
    # Same construction with Weibull inter-arrival times.
    mean = Weibull.mean_s(k, lmb)
    aa1 = np.array([
        sum(np.cumsum(Weibull.samples_(k, lmb, size=100)) < durtn)
        for _ in range(1000)
    ])
    expctd_mean = durtn / mean
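# A hedged follow-up (mine): scipy.stats.poisson is imported above but never
# used, so a natural check is the dispersion index of the Lomax-driven counts.
# A Poisson count has variance equal to its mean, so an index above 1 confirms
# over-dispersion.
dispersion_index = var / actual_mean
print("Dispersion index (Lomax counts): " + str(dispersion_index))  # expect > 1
print("Matched-Poisson variance: " + str(poisson(actual_mean).var()))  # equals its mean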