def weibull_rvs(weibull_params, size):
    num_falseLoop = int(weibull_params[0] * size)
    num_trueLoop = int((1 - weibull_params[0]) * size)

    falseEntries = np.array([])
    trueEntries = np.array([])

    if not np.isnan(weibull_params[1][1]) and (weibull_params[1][1] != 0.0):
        falseEntries = truncweibull_rvs(weibull_params[1][0],
                                        weibull_params[1][1],
                                        weibull_params[1][2],
                                        size=num_falseLoop)

    if not np.isnan(weibull_params[2][0]) and (weibull_params[2][0] != 0.0):
        trueEntries = weibull_min.rvs(weibull_params[2][0],
                                      weibull_params[2][1],
                                      weibull_params[2][2],
                                      size=num_trueLoop)
        print(trueEntries.shape)

    if not np.isnan(weibull_params[1][1]) and not np.isnan(weibull_params[2][0]):
        entries = np.concatenate((falseEntries, -trueEntries), axis=0)
    elif not np.isnan(weibull_params[1][1]):
        entries = falseEntries
    elif not np.isnan(weibull_params[2][0]):
        entries = -trueEntries
    else:
        entries = np.array([1.0] * size)

    print(entries.shape[0])
    return entries
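A hypothetical call, with the nested parameter layout inferred from the indexing in the function body; all fractions and shape/scale values below are illustrative only:

import numpy as np

# Assumed layout, read off the function above:
#   params[0]  fraction of entries drawn from the truncated ("false") branch
#   params[1]  (prob, c, scale) forwarded to truncweibull_rvs
#   params[2]  (c, loc, scale) forwarded to weibull_min.rvs
params = (0.6, (0.1, 1.5, 0.4), (2.0, 0.0, 1.0))
entries = weibull_rvs(params, size=1000)
print(entries.shape)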
Example #2
File: helper.py Project: mirophan/reginr
def calculate_emd(y_sim, alpha, r0):
    """Calculates Earth Movers distance between two distributions
    
    Takes wait-time distribution and calculates the EMD against the theoretical weibull distribution 
    given parameters at the start of the simulation
    
    Args:
        y_sim (list): (n_sim, n_wait_times) list of wait-times for each simulation, 
            for given reaction channel (esc, epi). Obtained from Channels.wait_times attribute
    
    Returns:
        EMD between simulation and true weibull distribution
    """
    y_sim = [item for sublist in y_sim for item in sublist]  # flatten list
    y_sim = np.array(y_sim)
    n = 10000  # number of theoretical samples to draw

    # params for generating theoretical samples (ground truth)
    k = alpha + 1  # shape
    beta = (alpha + 1) * (r0 * gamma((alpha + 2) / (alpha + 1)))**(alpha + 1)
    lam = np.power((alpha + 1) / beta, 1 / (alpha + 1))  # scale
    # generate true samples
    y_true = weibull_min.rvs(k, loc=0, scale=lam, size=n)

    d = emd_samples(y_sim, y_true)

    return d
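A minimal usage sketch for the helper above. The imports and toy values are assumptions filled in for illustration: the excerpt itself elides them, and emd_samples is taken to come from the pyemd package (gamma from scipy.special):

import numpy as np
from scipy.special import gamma
from scipy.stats import weibull_min
from pyemd import emd_samples  # assumed source of emd_samples

# two hypothetical simulations' wait times for one channel
y_sim = [[0.8, 1.2, 0.5], [1.1, 0.9, 1.4]]
print(calculate_emd(y_sim, alpha=0.5, r0=1.0))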
Example #3
File: helper.py Project: mirophan/reginr
def calculate_emd_avg(y_sim, alpha, r0):
    """Calculates average Earth Movers distance between two distributions
    
    For each simulations, takes wait-time distribution and calculates the EMD against the theoretical weibull distribution 
    given parameters at the start of the simulation. Normalised by r0.
    
    Args:
        y_sim (list): (n_sim, n_wait_times) dimension list of wait-times for each simulation, 
            for given reaction channel (esc, epi). Obtained from Channels.wait_times attribute
    
    Returns:
        EMD averaged over all simulations normalised by r0
    """

    n_sim = len(y_sim)

    # params for generating theoretical samples (ground truth)
    k = alpha + 1  # shape
    beta = (alpha + 1) * (r0 * gamma((alpha + 2) / (alpha + 1)))**(alpha + 1)
    lam = np.power((alpha + 1) / beta, 1 / (alpha + 1))  # scale
    n_samples = 100000

    emd_list = []
    for i in range(n_sim):
        y_hat = np.array(y_sim[i])
        y_true = weibull_min.rvs(k, loc=0, scale=lam, size=n_samples)
        emd_list.append(emd_samples(y_hat, y_true))

    return np.mean(emd_list) * r0
Example #4
def weibull_dist(a, lam, N, bounds):
    # Draw N Weibull samples, then shift and rescale them onto [0, bounds[1]]
    data = weibull_min.rvs(a, loc=0, scale=lam, size=N)
    data_min = np.min(data)
    data = data - data_min
    data_max = np.max(data)
    data = data * bounds[1] / data_max
    return data
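An illustrative call; note that the function only uses bounds[1], so the lower bound is implicitly 0:

samples = weibull_dist(a=1.8, lam=2.0, N=1000, bounds=(0.0, 5.0))
print(samples.min(), samples.max())  # 0.0 and 5.0 by construction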
Example #5
def weibull_data():

    p1 = weibull_min.rvs(0.6, loc=0, scale=2, size=5000)
    p2 = np.arange(1, 2000)
    lower, upper, mu, sigma = 1800, 2000, 1950, 50

    model = truncnorm((lower - mu) / sigma, (upper - mu) / sigma,
                      loc=mu,
                      scale=sigma)
    p3 = model.rvs(5000)
    return np.concatenate((p1, p2, p3))
Example #6
def sim_weibull_min():
    # Renewal process with weibull_min(c) inter-arrival times: estimate the
    # expected number of arrivals in a unit-length window at a random offset,
    # which should match the rate 1/mean.
    c = 1.79
    mean, var, skew, kurt = weibull_min.stats(c, moments='mvsk')
    print(1 / mean)  # theoretical arrival rate
    catches = 0
    for _ in range(10000):
        j = np.random.uniform() * 1000
        t_i = 0
        while t_i < j + 500:
            t_i += weibull_min.rvs(c)
            if j < t_i and t_i < j + 1:
                catches += 1
    print(catches / 10000)
Example #7
File: net.py Project: Zouyiran/mininet
    def iperfFM(self, poisson_mean=100, r_int=0.5, r_tcp=0.8, last_time=1.7,
                t_threshold=8, size_1=4, size_2=1, base_port=5001):
        '''
        Use iperf to generate traffic according to the flow model.
        '''
        generate_flows = poisson.rvs(poisson_mean, size=30)
        for n in range(len(generate_flows)):
            flows_num = generate_flows[n]
            print(flows_num)
            for i in range(flows_num):
                # is_interior = True if bernoulli.rvs(r_int,size=1)[0]==1 else False
                client = random.choice(self.hosts[0:5])
                server = client
                # if not is_interior:
                while server == client:
                    server = random.choice(self.hosts)
                is_tcp = bernoulli.rvs(r_tcp, size=1)[0] == 1
                flow_t = pareto.rvs(b=last_time,scale=1,size=1)[0] # b: shape parameter
                if flow_t < t_threshold:
                    flow_s = weibull_min.rvs(c=size_1,scale=5,size=1)[0] # c: shape parameter
                else:
                    flow_s = weibull_min.rvs(c=size_2,scale=1,size=1)[0]
                if is_tcp:
                    # self._iperfSingleTCP(hosts=[client,server], )
                    if flow_t < t_threshold:
                        self._iperfSingleTCPN(hosts=[client,server],bytes=str(flow_s)+'K',port=base_port) # hosts=None, bytes='10K', port=5001
                    else:
                        self._iperfSingleTCPN(hosts=[client,server],bytes=str(flow_s)+'M',port=base_port)

                else:
                    # self._iperfSingleUDP(hosts=[client,server],)
                    if flow_t < t_threshold:
                        self._iperfSingleUDPN(hosts=[client,server],bytes=str(flow_s)+'K',port=base_port)
                    else:
                        self._iperfSingleUDPN(hosts=[client,server],bytes=str(flow_s)+'M',port=base_port)
                base_port = random.randint(base_port,base_port+500)
                sleep(0.1)
        print('iperfFM test has finished')
Example #8
def get_interval_times_weibull(t_begin, t_end, packet_num, alpha):
    mean_interval_time = (t_end - t_begin) / packet_num
    #interval_times_abs = mean_interval_time*numpy.random.weibull(a=alpha,size=packet_num)
    interval_times_abs = weibull_min.rvs(alpha,
                                         loc=0,
                                         scale=mean_interval_time,
                                         size=packet_num)
    interval_times_actual = []  # absolute sample times accumulated from the draws
    new_sample_time = t_begin
    for abs_time in interval_times_abs:
        new_sample_time = new_sample_time + abs_time
        if new_sample_time > t_end:
            break
        else:
            interval_times_actual.append(new_sample_time)
    return interval_times_actual
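An illustrative call with made-up numbers: 100 packets requested over a 10-second window, so the Weibull draws have a mean interval equal to the even spacing of 0.1 s:

times = get_interval_times_weibull(t_begin=0.0, t_end=10.0,
                                   packet_num=100, alpha=1.5)
print(len(times), times[:3])  # may hold fewer than 100 entries: the loop stops at t_end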
Example #9
def truncweibull_rvs(prob, c, scale, size):
    # Rejection-sample weibull_min(c, scale) truncated to (0, 1]; give up after
    # five consecutive rounds in which no draw is accepted.
    prob = max(1e-10, prob)
    falseEntries = np.zeros((0, ))
    failure_ctr = 5
    while falseEntries.shape[0] < size and failure_ctr > 0:
        s = weibull_min.rvs(c, scale=scale, loc=0.0, size=size)
        accepted = s[(s <= 1.0)]
        if len(accepted) <= 0:
            failure_ctr -= 1
        falseEntries = np.concatenate((falseEntries, accepted), axis=0)
        falseEntries = falseEntries[:size]
    if failure_ctr <= 0: falseEntries = np.zeros(size)
    # force a `prob` fraction of the entries to exactly 1.0
    if size > 0:
        indexes = np.random.choice(range(size),
                                   size=int(prob * size),
                                   replace=False)
        falseEntries[indexes] = 1.0
    return falseEntries
Example #10
def age_weibull_dist(exp_lifetime, mode, params):
    # dist = [
    #     [4, 14, 10.3],
    #     [4, 14, 10.3],
    #     [4, 14, 10.3],
    #     [4, 19, 7],
    #     [4, 24, 3.7],
    #     [4, 29, 3]
    # ]
    # if mode == 'init':
    #     [c, loc, scale] = dist[exp_lifetime]
    #
    # else:
    c = params['shape']
    loc = params['loc']
    scale = params['scale']

    # np.asscalar was removed in NumPy 1.23; .item() is the modern equivalent
    return weibull_min.rvs(c, loc, scale, 1).item()
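A hypothetical call; the parameter values echo the first row of the commented-out dist table above:

params = {'shape': 4, 'loc': 14, 'scale': 10.3}
age = age_weibull_dist(exp_lifetime=0, mode='other', params=params)
print(age)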
Example #11
def run_Parametric(story_id, data):
    print("[" + str(story_id) + "] Fitting Fisk")
    fisk_params = fisk.fit(data, floc=0)
    fisk_nll = fisk.nnlf(fisk_params, data)
    fisk_rvs = fisk.rvs(*fisk_params, size=data.shape[0])
    ks_fisk = ks_2samp(data, fisk_rvs)
    bic_fisk = compute_BIC(data, len(fisk_params), fisk_nll)

    print "[" + str(story_id) + "]Fitting IG"
    ig_params = invgauss.fit(data, floc=0)
    ig_nll = invgauss.nnlf(ig_params, data)
    ig_rvs = invgauss.rvs(*ig_params, size=data.shape[0])
    ks_ig = ks_2samp(data, ig_rvs)
    bic_ig = compute_BIC(data, len(ig_params), ig_nll)

    print "[" + str(story_id) + "]Fitting LN"
    ln_params = lognorm.fit(data, floc=0)
    ln_nll = lognorm.nnlf(ln_params, data)
    ln_rvs = lognorm.rvs(*ln_params, size=data.shape[0])
    ks_ln = ks_2samp(data, ln_rvs)
    bic_ln = compute_BIC(data, len(ln_params), ln_nll)

    print "[" + str(story_id) + "]Fitting Weibull"
    weib_params = weibull_min.fit(data, floc=0)
    weib_nll = weibull_min.nnlf(weib_params, data)
    weib_rvs = weibull_min.rvs(*weib_params, size=data.shape[0])
    ks_weib = ks_2samp(data, weib_rvs)
    bic_weib = compute_BIC(data, len(weib_params), weib_nll)

    print "[" + str(story_id) + "]Fitting Gamma"
    gamma_params = gamma.fit(data, floc=0)
    gamma_nll = gamma.nnlf(gamma_params, data)
    gamma_rvs = gamma.rvs(*gamma_params, size=data.shape[0])
    ks_gamma = ks_2samp(data, gamma_rvs)
    bic_gamma = compute_BIC(data, len(gamma_params), gamma_nll)

    return [
        fisk_nll, ig_nll, ln_nll, weib_nll, gamma_nll, ks_fisk, ks_ig, ks_ln,
        ks_weib, ks_gamma, bic_fisk, bic_ig, bic_ln, bic_weib, bic_gamma,
        fisk_params, ig_params, ln_params, weib_params, gamma_params
    ]
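The snippet calls a compute_BIC helper that it never defines. A minimal sketch consistent with the usual BIC formula (k ln n - 2 ln L, with nll = -ln L), offered as an assumption rather than the project's actual helper:

import numpy as np

def compute_BIC(data, num_params, nll):
    # BIC = k * ln(n) + 2 * NLL  (hypothetical reconstruction)
    return num_params * np.log(len(data)) + 2.0 * nll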
Example #12
def sim_weibull_min_v2():
    c = 1.79
    mean, var, skew, kurt = weibull_min.stats(c, moments='mvsk')
    catches = 0
    catches2 = 0
    total_t = 0
    for _ in range(20000):
        j = np.random.uniform() * 50
        t_i = 0
        tt = 0
        catches1 = -1
        while t_i < j + 100:
            t_i += weibull_min.rvs(c)
            if j < t_i and t_i < j + 30:
                tt = t_i
                catches += 1
                catches1 += 1
            total_t += max((tt - j), 0)
            catches2 += max(0, catches1)
    print(catches / 20000 / 30)
    print(catches2 / total_t)
Example #13
def testWeibull(hLen, totN, figSize, pltRange):
    """
    Brief test with data following a Weibull distribution.
    """
    # Create some imaginary data
    obs = weibull_min.rvs(c=1.96, scale=2.1, size=totN)  # c: shape
    model = obs + np.random.normal(0.0, 2.5, size=len(obs))

    # Start Testing
    # Create an instance of the object
    bs = Bayes(hLen, distType='weibull_min')

    # Use the first #hLen of them for training
    obsTrain = obs[:hLen]
    modelTrain = model[:hLen]

    # The rest to be used dynamically
    obsDyn = obs[hLen:]
    modelDyn = model[hLen:]
    fcst = np.zeros_like(obsDyn)

    # Perform an initial training of the model
    bs.trainMe(obsTrain, modelTrain)

    for ij in range(len(obsDyn)):
        # Provide a correction to the forecast
        fcst[ij] = bs.adjustForecast(modelDyn[ij])

        # Update system
        bs.trainMe([obsDyn[ij]], [modelDyn[ij]])

    # Show evidence!
    plt.figure(figsize=figSize)
    plt.plot(obsDyn[pltRange], label='obs')
    plt.plot(modelDyn[pltRange], '--', label='model')
    plt.plot(fcst[pltRange], '*', label='Bayes')
    plt.title('Weibull Dist Data')
    plt.legend()
    plt.show()
    return
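A hypothetical invocation, assuming the Bayes class used above is importable from the surrounding project; all argument values are illustrative:

# train on the first 200 points, then correct the remaining forecasts online
testWeibull(hLen=200, totN=1000, figSize=(8, 4), pltRange=slice(0, 100))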
Example #14
def exp_rej(a, b=math.inf, lam_type='default'):
    if lam_type == 'default':
        lam = a
    else:
        lam = (a + math.sqrt(a**2 + 4)) / 2

    # rho is continuous at lambda=a,
    # so don't need to change the expression of rho

    acc = 0

    while 1:
        # weibull_min with shape 1 is the exponential distribution, so this
        # draws x ~ a + Exp(lam); accept with probability exp(-(x - lam)^2 / 2)
        x = weibull_min.rvs(1, loc=0, scale=1 / lam) + a
        u = np.random.uniform()
        rho = math.exp(-(x - lam)**2 / 2)

        acc += 1

        if u <= rho and x < b:
            d = dict()
            d['x'] = x
            d['acc'] = acc
            return d
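An illustrative call: with a non-default lam_type this uses the tuned proposal rate (a + sqrt(a^2 + 4)) / 2, and the sampler draws from the upper tail of a standard normal conditioned on x >= a (b left at its +inf default). acc reports how many proposals were needed:

d = exp_rej(2.0, lam_type='optimal')  # any value other than 'default' selects the tuned rate
print(d['x'], d['acc'])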
Example #15
def next_ttr(final_downtime_model, CRN=None):
    dist = final_downtime_model[0]
    params = final_downtime_model[1]

    if dist == "uniform":
        return uniform.rvs(*params, random_state=CRN)
    elif dist == "expon":
        return expon.rvs(*params, random_state=CRN)
    elif dist == "rayleigh":
        return rayleigh.rvs(*params, random_state=CRN)
    elif dist == "weibull_min":
        return weibull_min.rvs(*params, random_state=CRN)
    elif dist == "gamma":
        return gamma.rvs(*params, random_state=CRN)
    elif dist == "gengamma":
        return gengamma.rvs(*params, random_state=CRN)
    elif dist == "invgamma":
        return invgamma.rvs(*params, random_state=CRN)
    elif dist == "gompertz":
        return gompertz.rvs(*params, random_state=CRN)
    elif dist == "lognorm":
        return lognorm.rvs(*params, random_state=CRN)
    elif dist == "exponweib":
        return exponweib.rvs(*params, random_state=CRN)
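A hypothetical call, assuming final_downtime_model pairs a scipy distribution name with its fitted parameter tuple (shape(s), loc, scale), as the *params unpacking above implies:

model = ('weibull_min', (1.8, 0.0, 4.2))  # illustrative fitted values
ttr = next_ttr(model, CRN=42)  # an int random_state makes the draw reproducible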
Example #16
import numpy as np
from scipy.stats import weibull_min
import pandas as pd
from lifelines import WeibullAFTFitter, CoxPHFitter

# This is an implementation of https://uwspace.uwaterloo.ca/bitstream/handle/10012/10265/Cook_Richard-10265.pdf

N = 50000
p = 0.5
bX = np.log(0.5)
bZ = np.log(4)

Z = np.random.binomial(1, p, size=N)
X = np.random.binomial(1, 0.5, size=N)
X_ = 20000 + 10 * np.random.randn(N)

W = weibull_min.rvs(1, scale=1, loc=0, size=N)  # shape 1: a standard exponential draw

Y = bX * X + bZ * Z + np.log(W)
T = np.exp(Y)

#######################################

df = pd.DataFrame({"T": T, "x": X, "x_": X_})

wf = WeibullAFTFitter().fit(df, "T")
wf.print_summary(4)

cph = CoxPHFitter().fit(df, "T", show_progress=True, step_size=1.0)
cph.print_summary(4)
Example #17
def web_charge(tempoDeSerie: int,
               numberOfCharges: int = 1,
               init_time: float = 0) -> None:
    """
    Geração carga web considerando a modelagem On-Off
    \nParâmetros:
    tempoDeSerie: Tempo de duracao da carga em segundos
    numberOfCharges: número de cargas a serem geradas
    """

    for k in range(numberOfCharges):
        sequanciaDeMensagens = list()
        tempoEntreMensagens = list()
        tempo = 0.0
        while tempo <= tempoDeSerie:
            numeroDeObjetosPrincipais = int(
                np.random.lognormal(0.473844, 0.688471))
            tamanhoObjetosPrincipais = numeroDeObjetosPrincipais * [0]
            j = 0
            for i in weibull_min.rvs(0.814944,
                                     scale=28242.8,
                                     size=numeroDeObjetosPrincipais):
                tamanhoObjetosPrincipais[j] = int(i)
                j += 1
            sequanciaDeMensagens += tamanhoObjetosPrincipais
            tempoEntreMensagens += numeroDeObjetosPrincipais * [0.0]
            # print(numeroDeObjetosPrincipais, tamanhoObjetosPrincipais)
            numeroObjetosSecundarios = int(np.random.exponential(31.92))
            tamanhoObjetosSecundarios = numeroObjetosSecundarios * [0]
            j = 0
            for i in np.random.lognormal(9.17979,
                                         1.24646,
                                         size=numeroObjetosSecundarios):
                tamanhoObjetosSecundarios[j] = int(i)
                j += 1
            sequanciaDeMensagens += tamanhoObjetosSecundarios
            tempoEntreObjetosSecundarios = gamma.rvs(
                0.16, scale=5.375, size=numeroObjetosSecundarios)
            for i in tempoEntreObjetosSecundarios:
                tempo += i
            tempoEntreMensagens += tempoEntreObjetosSecundarios.tolist()
            toff = np.random.lognormal(-0.495204, 2.7731)
            # if (tempo + toff) < tempoDeSerie:
            tempoEntreMensagens.append(toff)
            sequanciaDeMensagens.append(0)
            tempo += toff
            # print('numeroObjetosSecundarios', numeroObjetosSecundarios, '\ntamanhoObjetosSecundarios', tamanhoObjetosSecundarios, '\ntempoEntreObjetosSecundarios', tempoEntreObjetosSecundarios, '\ntoff', toff)

        # Trim the series to the requested load duration
        pseudoTime = 0.0
        for i in range(len(tempoEntreMensagens)):
            pseudoTime += tempoEntreMensagens[i]
            if pseudoTime >= tempoDeSerie:
                # Time to cut
                tempoEntreMensagens = tempoEntreMensagens[:i + 1]
                sequanciaDeMensagens = sequanciaDeMensagens[:i + 1]
                tempo = pseudoTime
                break

        app = {
            "init_time": init_time,
            "server_port": 80,
            "packet_size": sequanciaDeMensagens,
            "time_between_packets": tempoEntreMensagens,
            "duration": int(tempo)
        }
        file_path = "./Charges/web_charge{}.json".format(k)
        json.dump(app, codecs.open(file_path, 'w', encoding='utf-8'), indent=4)
Example #18
# The opening of this snippet was truncated; it is reconstructed here from the
# standard scipy.stats.weibull_min documentation example that it continues.
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import weibull_min

c = 1.79
fig, ax = plt.subplots(1, 1)
x = np.linspace(weibull_min.ppf(0.01, c), weibull_min.ppf(0.99, c), 100)
ax.plot(x,
        weibull_min.pdf(x, c),
        'r-',
        lw=5,
        alpha=0.6,
        label='weibull_min pdf')

# Alternatively, the distribution object can be called (as a function)
# to fix the shape, location and scale parameters. This returns a "frozen"
# RV object holding the given parameters fixed.

# Freeze the distribution and display the frozen ``pdf``:

rv = weibull_min(c)
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')

# Check accuracy of ``cdf`` and ``ppf``:

vals = weibull_min.ppf([0.001, 0.5, 0.999], c)
np.allclose([0.001, 0.5, 0.999], weibull_min.cdf(vals, c))
# True

# Generate random numbers:

r = weibull_min.rvs(c, size=1000)

# And compare the histogram:

ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
ax.legend(loc='best', frameon=False)
plt.show()
Example #19
def weibull(params):
    # params[0] = shape, params[1] = scale
    return math.ceil(
        weibull_min.rvs(params[0], loc=0, scale=params[1], size=1))
Example #20
def hesap(ax, bx):
    z, k, m, n, p, t = 0, 0, 0, 0, 0, 0
    studentGuvenAlt, aadmGuvenAlt, maadGuvenAlt, madmGuvenAlt, johnsonGuvenAlt, chenGuvenAlt = list(
    ), list(), list(), list(), list(), list()
    studentGuvenUst, aadmGuvenUst, maadGuvenUst, madmGuvenUst, johnsonGuvenUst, chenGuvenUst = list(
    ), list(), list(), list(), list(), list()

    workbook = xlwt.Workbook()
    sayfa = workbook.add_sheet("Sayfa1")
    sayfa.write(0, 1, "Student-t")
    sayfa.write(0, 3, "AADM-t")
    sayfa.write(0, 5, "MAAD-t")
    sayfa.write(0, 7, "MADM-t")
    sayfa.write(0, 9, "Johnson-t")
    sayfa.write(0, 11, "Chen-t")

    for item in range(0, 13):
        if item == 0:
            sayfa.write(1, 0, "n")
        elif item % 2 == 0:
            sayfa.write(1, item, "AW")

        else:
            sayfa.write(1, item, "CP")

    for i in range(5, 10):
        for j in range(1, 2501):
            x = weibull_min.rvs(ax / bx, size=i)
            mean, var, skew, kurt = weibull_min.stats(ax / bx, moments='mvsk')
            meanx = round(statistics.mean(x), 4)
            medianx = round(statistics.median(x), 4)
            stdevx = round(statistics.stdev(x), 4)
            aadmx = round((math.sqrt(math.pi / 2) / i) * sum(abs(x - medianx)),
                          4)
            maadx = round(statistics.median(abs(x - meanx)), 4)
            madmx = round(statistics.median(abs(x - medianx)), 4)

            toplam = 0
            for k in range(0, i):
                toplam = toplam + ((x[k] - meanx)**3)

            m3 = (i / ((i - 1) * (i - 2))) * toplam

            # `cell` is a table of critical values defined elsewhere in the module
            studentalt = round(meanx - cell[i - 5] * stdevx / math.sqrt(i), 4)
            studentust = round(meanx + cell[i - 5] * stdevx / math.sqrt(i), 4)
            aadmalt = round(meanx - cell[i - 5] * aadmx / math.sqrt(i), 4)
            aadmust = round(meanx + cell[i - 5] * aadmx / math.sqrt(i), 4)
            maadalt = round(meanx - cell[i - 5] * maadx / math.sqrt(i), 4)
            maadust = round(meanx + cell[i - 5] * maadx / math.sqrt(i), 4)
            madmalt = round(meanx - cell[i - 5] * madmx / math.sqrt(i), 4)
            madmust = round(meanx + cell[i - 5] * madmx / math.sqrt(i), 4)
            johnsonalt = round((meanx + (m3 / (6 * i * (stdevx**2)))) -
                               cell[i - 5] * math.sqrt(i) * stdevx, 4)
            johnsonust = round((meanx + (m3 / (6 * i * (stdevx**2)))) +
                               cell[i - 5] * math.sqrt(i) * stdevx, 4)
            chenalt = round(meanx - (cell[i - 5] + (
                ((m3 / (stdevx**3)) *
                 (1 + 2 * (cell[i - 5]**2))) / (6 * math.sqrt(i))) + (
                     (((m3 / (stdevx**3))**2) *
                      (cell[i - 5] + 2 *
                       (cell[i - 5])**2) / 9 * i)) + math.sqrt(i) * stdevx))
            chenust = round(meanx + (cell[i - 5] + (
                ((m3 / (stdevx**3)) *
                 (1 + 2 * (cell[i - 5]**2))) / (6 * math.sqrt(i))) + (
                     (((m3 / (stdevx**3))**2) *
                      (cell[i - 5] + 2 *
                       (cell[i - 5])**2) / 9 * i)) + math.sqrt(i) * stdevx))

            studentGuvenAlt.append(studentalt)
            studentGuvenUst.append(studentust)
            aadmGuvenAlt.append(aadmalt)
            aadmGuvenUst.append(aadmust)
            maadGuvenAlt.append(maadalt)
            maadGuvenUst.append(maadust)
            madmGuvenAlt.append(madmalt)
            madmGuvenUst.append(madmust)
            johnsonGuvenAlt.append(johnsonalt)
            johnsonGuvenUst.append(johnsonust)
            chenGuvenAlt.append(chenalt)
            chenGuvenUst.append(chenust)

            if studentalt <= mean <= studentust:
                z = z + 1

            if aadmalt <= mean <= aadmust:
                k = k + 1

            if madmalt <= mean <= madmust:
                m = m + 1

            if maadalt <= mean <= maadust:
                n = n + 1

            if johnsonalt <= mean <= johnsonust:
                p = p + 1

            if chenalt <= mean <= chenust:
                t = t + 1

        sayfa.write(i - 3, 0, f"{i}")
        sayfa.write(i - 3, 1, f"{round(z / 2500, 4)}")
        sayfa.write(
            i - 3, 2,
            f"{round(statistics.mean(studentGuvenUst) - statistics.mean(studentGuvenAlt), 4)}"
        )
        sayfa.write(i - 3, 3, f"{round(k / 2500, 4)}")
        sayfa.write(
            i - 3, 4,
            f"{round(statistics.mean(aadmGuvenUst) - statistics.mean(aadmGuvenAlt), 4)}"
        )
        sayfa.write(i - 3, 5, f"{round(n / 2500, 4)}")
        sayfa.write(
            i - 3, 6,
            f"{round(statistics.mean(maadGuvenUst) - statistics.mean(maadGuvenAlt), 4)}"
        )
        sayfa.write(i - 3, 7, f"{round(m / 2500, 4)}")
        sayfa.write(
            i - 3, 8,
            f"{round(statistics.mean(madmGuvenUst) - statistics.mean(madmGuvenAlt), 4)}"
        )
        sayfa.write(i - 3, 9, f"{round(p / 2500, 4)}")
        sayfa.write(
            i - 3, 10,
            f"{round(statistics.mean(johnsonGuvenUst) - statistics.mean(johnsonGuvenAlt), 4)}"
        )
        sayfa.write(i - 3, 11, f"{round(t / 2500, 4)}")
        sayfa.write(
            i - 3, 12,
            f"{round(statistics.mean(chenGuvenUst) - statistics.mean(chenGuvenAlt), 4)}"
        )

        workbook.save(f'W({ax} {bx}).xls')  # save the workbook

        z, k, m, n, p, t = 0, 0, 0, 0, 0, 0
        studentGuvenAlt, aadmGuvenAlt, maadGuvenAlt, madmGuvenAlt, johnsonGuvenAlt, chenGuvenAlt = list(
        ), list(), list(), list(), list(), list()
        studentGuvenUst, aadmGuvenUst, maadGuvenUst, madmGuvenUst, johnsonGuvenUst, chenGuvenUst = list(
        ), list(), list(), list(), list(), list()
Example #21
def generate_winds(self, p, length):
    prior_winds = weibull_min.rvs(c=p[0], scale=p[1], size=length)
    return prior_winds
Example #22
def clipped_sgd_const_stepsize_toy_expect(filename,
                                          x_init,
                                          L,
                                          gamma,
                                          distribution,
                                          lambd,
                                          clip_activation_iter=0,
                                          N=1000,
                                          max_t=np.inf,
                                          save_info_period=100):
    n = len(x_init)
    x = np.array(x_init)

    distrib_type = distribution[0]
    sigma = distribution[1]

    its = np.array([0])
    tim = np.array([0.0])
    data_passes = np.array([0.0])
    func_val = np.array([0.5 * L * (norm(x)**2)])
    sq_distances = np.array([norm(x)**2])

    t_start = time.time()
    num_of_data_passes = 0.0

    samples_counter = 0
    samples_number = min(N * n, 1000 * n)
    assert (distrib_type in ['normal', 'exp', 'weibull', 'burr'])

    if (distrib_type == 'normal'):
        samples = norm_d.rvs(loc=0, scale=sigma, size=samples_number)
    if (distrib_type == 'exp'):
        samples = expon.rvs(loc=-sigma, scale=sigma, size=samples_number)
    if (distrib_type == 'weibull'):
        c = distribution[2]
        # shift/scale so the noise has zero mean and standard deviation sigma
        # (weibull is assumed to alias scipy.stats.weibull_min at import time)
        scale = sigma * 1.0 / np.sqrt(
            scipy.special.gamma(1 + 2.0 / c) -
            ((scipy.special.gamma(1 + 1.0 / c))**2))
        shift = -scale * scipy.special.gamma(1 + 1.0 / c)
        samples = weibull.rvs(c=c, loc=shift, scale=scale, size=samples_number)
    if (distrib_type == 'burr'):
        c = distribution[2]
        d = distribution[3]
        unscaled_var = d * scipy.special.beta(
            (c * d - 2) * 1.0 / c,
            (c + 2) * 1.0 / c) - (d * scipy.special.beta((c * d - 1) * 1.0 / c,
                                                         (c + 1) * 1.0 / c)**2)
        scale = sigma * 1.0 / np.sqrt(unscaled_var)
        shift = -scale * d * scipy.special.beta((c * d - 1) * 1.0 / c,
                                                (c + 1) * 1.0 / c)
        samples = burr.rvs(c=c,
                           d=d,
                           loc=shift,
                           scale=scale,
                           size=samples_number)

    for it in range(N):
        if samples_counter == samples_number:
            samples_counter = 0
            if (distrib_type == 'normal'):
                samples = norm_d.rvs(loc=0, scale=sigma, size=samples_number)
            if (distrib_type == 'exp'):
                samples = expon.rvs(loc=-sigma,
                                    scale=sigma,
                                    size=samples_number)
            if (distrib_type == 'weibull'):
                c = distribution[2]
                scale = sigma * 1.0 / np.sqrt(
                    scipy.special.gamma(1 + 2.0 / c) -
                    ((scipy.special.gamma(1 + 1.0 / c))**2))
                shift = -scale * scipy.special.gamma(1 + 1.0 / c)
                samples = weibull.rvs(c=c,
                                      loc=shift,
                                      scale=scale,
                                      size=samples_number)
            if (distrib_type == 'burr'):
                c = distribution[2]
                d = distribution[3]
                unscaled_var = d * scipy.special.beta(
                    (c * d - 2) * 1.0 / c,
                    (c + 2) * 1.0 / c) - (d * scipy.special.beta(
                        (c * d - 1) * 1.0 / c, (c + 1) * 1.0 / c)**2)
                scale = sigma * 1.0 / np.sqrt(unscaled_var)
                shift = -scale * d * scipy.special.beta((c * d - 1) * 1.0 / c,
                                                        (c + 1) * 1.0 / c)
                samples = burr.rvs(c=c,
                                   d=d,
                                   loc=shift,
                                   scale=scale,
                                   size=samples_number)
        rand_vec = samples[samples_counter:(samples_counter + n)]
        samples_counter += n
        g = L * x + rand_vec
        norm_g = norm(g)
        if it >= clip_activation_iter:
            if norm_g > lambd:
                g = np.multiply(g, lambd * 1.0 / norm_g)
        x = x - gamma * g
        num_of_data_passes += 1.0 / 1000
        if ((it + 1) % save_info_period == 0):
            its = np.append(its, it + 1)
            tim = np.append(tim, time.time() - t_start)
            data_passes = np.append(data_passes, num_of_data_passes)
            func_val = np.append(func_val, 0.5 * L * (norm(x)**2))
            sq_distances = np.append(sq_distances, norm(x)**2)
        if tim[-1] > max_t:
            break

    if ((it + 1) % save_info_period != 0):
        its = np.append(its, it + 1)
        tim = np.append(tim, time.time() - t_start)
        data_passes = np.append(data_passes, num_of_data_passes)
        func_val = np.append(func_val, 0.5 * L * (norm(x)**2))
        sq_distances = np.append(sq_distances, norm(x)**2)

    res = {
        'last_iter': x,
        'func_vals': func_val,
        'iters': its,
        'time': tim,
        'data_passes': data_passes,
        'squared_distances': sq_distances
    }

    with open(
            "dump/" + filename +
            "_clipped_SGD_const_stepsize_toy_expect_gamma_" + str(gamma) +
            "_lambda_" + str(lambd) + "_num_of_iters_" + str(N) + "_distrib_" +
            distribution[0] + "_clip_activates_" + str(clip_activation_iter) +
            ".txt", 'wb') as file:
        pickle.dump(res, file)
    return res
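A minimal invocation sketch under stated assumptions: the module-level imports this excerpt relies on (norm from numpy.linalg, scipy.special, time, pickle) are in scope, a dump/ directory exists for the pickle, and the 'weibull' branch expects distribution = [name, sigma, c]. All numbers are illustrative:

import numpy as np

res = clipped_sgd_const_stepsize_toy_expect(
    filename='toy', x_init=np.ones(10), L=1.0, gamma=0.01,
    distribution=['weibull', 1.0, 0.9], lambd=1.0, N=1000)
print(res['func_vals'][-1])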
Example #24
#Flexible model: investigate optimal width of time intervals.
#True survival times are drawn from Weibull distribution.
#Described in paper.

sampleSize = 5000

np.random.seed(0)

beta1 = 1.0
lambdaT = 365. / np.log(2)
lambdaC = 2 * 365. / np.log(2)

lab = np.random.choice([0, 1], size=sampleSize)
from scipy.stats import weibull_min
trueTime = weibull_min.rvs(c=1.5,
                           scale=lambdaT * np.exp(-(beta1 * lab)),
                           size=sampleSize)
censoringTime = np.random.exponential(scale=lambdaC, size=sampleSize)
time = np.minimum(trueTime, censoringTime)
event = (time == trueTime) * 1.
x_train = lab

kmf = KaplanMeierFitter()
kmf.fit(time, event_observed=np.ones([sampleSize]))
print('Actual median survival, in days:')
print(kmf.median_)

halflife = 365.
breaks_list = [
    np.arange(0., 365. * 5.01, 365.),
    np.arange(0., 365. * 5.01, 365. / 4),
Example #25
from scipy.stats import weibull_min
import matplotlib.pyplot as plt
import numpy as np
from pylab import *
mpl.rcParams['font.sans-serif'] = ['SimHei']  # fix garbled Chinese characters in plot labels
mpl.rcParams['axes.unicode_minus'] = False  # fix minus-sign rendering on plot axes

# region 1. Generate a set of Weibull-distributed data (wbldata) from given scale and shape parameters
scl = 2.89  # scale
shp = 1.95  # shape
wbldata = weibull_min.rvs(shp, loc=0, scale=scl, size=1000)

# endregion

# region 2. Fit the two Weibull parameters to measured wind-speed data
windspeed = [
    1, 0.86, 2.2, 7.06, 9.75, 10.57, 11.23, 10.23, 6.9, 4.19, 3.11, 2.86, 3.01,
    2.89, 2.47, 2.48, 2.46, 1.96, 1.81, 1.92, 2.18, 2.52, 2.34, 3.25, 4.46,
    5.67, 7.17, 10.05, 10.62, 11.4, 11.45, 11, 9.39, 8.99, 8.9, 8.59, 8.7,
    8.15, 7.47, 6.78, 6.25, 6.25, 5.84, 5.58, 5.33, 5.74, 6.31, 6.55, 6.8,
    7.21, 7.88, 7.47, 8.16, 8.73, 9.91, 10.11, 8.51, 8.11, 7, 5.98, 5.07, 4.66,
    4.16, 3.69, 3.3, 2.54, 1.89, 1.89, 1.95, 2.34, 2.04, 2.66, 3.3, 3.58, 3.87,
    1.99, 2.1, 2.99, 3.46, 3.26, 2.43, 1.78, 1.39, 0.93, 0.84, 1.14, 1.7, 1.7,
    1.13, 1.34, 1.34, 1.49, 1.72, 1.79, 2.01, 1.34, 0.79, 0.35, 0.93, 4.76,
    5.47, 6.11, 6.31, 5.46, 4.7, 4.47, 4.19, 3.72, 3.26, 2.75, 2.28, 1.35,
    0.88, 0.43, 0.89, 1.14, 1.56, 2.01, 1.34, 1.14, 1.05, 2.21, 3.21, 5.9,
    6.99, 6.77, 5.81, 4.48, 3.15, 2.02, 1.64, 1.54, 2.1, 2.29, 2.48, 2.56,
    2.43, 2.43, 2.28, 2.52, 3.02, 3.2, 2.88, 4.01, 5.07, 7.1, 9.31, 7.16, 8.7,
    9.87, 10.48, 9.91, 8.12, 6.79, 6.01, 5.42, 4.87, 4.48, 4.87, 5.1, 4.93,
    4.61, 4.73, 4.32, 4.19, 3.96, 3.25, 4.36, 7.09, 10.11, 12.33, 12.1, 13.34,
    13.61, 12.52, 8.9, 6.2, 5.42, 4.85, 4.48, 3.55, 2.69, 2.34, 1.54, 1.5,
Example #26
def generate(self):
    return weibull_min.rvs(self._k, loc=0, scale=self._lambd)
Example #27
from rcs_stats import *
from data_processing import *
from scene import *
from scipy.stats import weibull_min
import numpy as np
import matplotlib.pyplot as plt

c = 10.79
fig, ax = plt.subplots(1, 1)
x = np.linspace(weibull_min.ppf(0.01, c), weibull_min.ppf(0.99, c), 100)
plt.plot(x,
         weibull_min.pdf(x, c),
         'r-',
         lw=5,
         alpha=0.6,
         label='weibull_min pdf')

if __name__ == '__main__':
    rv = weibull_min.rvs(c, size=10)
    print(rv)

freqs = range(int(1E9), int(2E9), int(1E8))
freq = [1E8, 2E8, 3E8]
theta = [-20, -10, 0, 10, 20]

scene3 = Scene(freqs, theta)
scene3.add_random_reflectors(5)

d = DataHandler.generate_table(15, 1, 6, freqs, theta)
DataHandler.write_file(d, 'testfile')
Example #28
File: dz1.py Project: filippticek/code
def u_exponential(x):
    # inverse-CDF (quantile) transforms of U(0,1) samples; O is the exponential
    # rate, X and Y the Weibull scale and shape (constants defined elsewhere in dz1.py)
    return -np.log(1 - x) / O


def u_weibull(x):
    return X * np.power(-np.log(1 - x), 1. / Y)


U = uniform.rvs(size=1000, loc=0, scale=1)
U1 = []
E1 = []
W1 = []
U2 = uniform.rvs(size=1000, loc=N, scale=M - N)
E2 = expon.rvs(size=1000, loc=0, scale=1. / O)
W2 = weibull_min.rvs(c=Y, size=1000, loc=0, scale=X)
for x in U:
    E1.append(u_exponential(x))
    U1.append(u_uniform(x))
    W1.append(u_weibull(x))

writeCsv(U1, E1, W1, U2, E2, W2)

n, bins, patches = plt.hist(U1,
                            bins=50,
                            density=1,
                            facecolor='red',
                            alpha=0.5,
                            label='User distribution')
n, bins, patches = plt.hist(U2,
                            bins=50,
Example #29
lmbd_hat, k_hat = fmin(fit_weibul_from_means, [2, 2])

def weib(x, k, lmbd):
    return (k / lmbd) * (x / lmbd)**(k - 1) * np.exp(-(x / lmbd)**k)

x_wind = np.linspace(0, 50, 200)
prior_dist = [weib(x, k_hat, lmbd_hat * np.sqrt(3 / 2)) for x in x_wind]

fig, ax = plt.subplots()
ax.plot(x_wind, prior_dist)
plt.show()

#now sample from distribution,
# turn into wind data and see how much it costs
prior_winds = weibull_min.rvs(c=k_hat, scale=(lmbd_hat * np.sqrt(3 / 2)), size=8760)

fig, ax = plt.subplots()
ax.plot(x_wind, prior_dist)
ax.hist(prior_winds, density=True, bins=100)  # 'normed' was removed in Matplotlib 3.x
plt.show()

#convert prior_winds to power output
#Data from Vestas power curve chart V90 - 3.0MW - approximately
wind_speed = np.array([4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
power_kw_v90 = np.array([85, 200, 350, 590, 900, 1300, 1720, 2150, 2560, 2840, 2980, 3000])

#Create instance of a wind turbine
v90_turbine = wind_turbine(curve_speeds=wind_speed, power_points=power_kw_v90)
power_output = np.sum(v90_turbine(prior_winds))
Example #30
def weibull_distribution(shape=(100, 100), k=2.4, lam=5.5):
    print(shape)
    n = shape[0] * shape[1]
    x = weibull_min.rvs(k, loc=0, scale=lam, size=n)
    return x.reshape(shape)
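A quick illustrative call with a small grid:

field = weibull_distribution(shape=(4, 3), k=2.4, lam=5.5)
print(field.shape)  # (4, 3)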
Example #31
solver = optimize.minimize(fun, x0=x0, bounds=bnds)
print(" {} iterations \n".format(solver.nit),
      "lambda = {} and alpha = {}".format(1 / solver.x[0], solver.x[1]))

## Parametric bootstrap (1-delta) confidence interval
lmda = 1 / solver.x[0]
alpha = solver.x[1]
# reference parameter
mu_star = (1 / lmda) * gamma(1 + 1 / alpha)
print(
    "On average, {} km before a reliability problem occurs.".format(mu_star))

m = 100  # number of iterations to aggregate
bootstrap_estimates = []
for i in range(m):
    T_bootstrap = weibull_min.rvs(c=alpha, scale=1 / lmda, size=100)
    bootstrap_estimates.append(np.mean(T_bootstrap))

delta = 0.1
# Upper bound : we estimate prob(estimate - mu_star < x) = 1-delta/2
x = 2000
count = 0
# grow x until at least (1 - delta/2) of the bootstrap estimates fall below it;
# the inequality avoids an infinite loop if the exact proportion is skipped
while count / m < 1 - delta / 2:
    count = 0
    for i in range(m):
        if bootstrap_estimates[i] - mu_star < x:
            count += 1
    print(count / m)
    x += 10
print(" Prob(mu_n - mu_estimate < {}) = {}".format(x, count / m))
Example #32
def rvs_fn5(n):
    return weibull_min.rvs(c=5, size=n)
Example #33
# (the snippet begins mid-loop; a plausible loop head, plotting one Weibull
# curve per posterior draw, is reconstructed below)
for i in range(len(sigma_post_dist)):
	wind_dist = [weib(x, sigma_post_dist[i], alpha_post_dist[i]) for x in x_wind]
	ax3.plot(x_wind, wind_dist, alpha=.1)
ax3.hist(w, density=True, bins=100)
ax3.set_xlim(0,30)
ax3.set_xlabel("Posterior weibul distribution of wind speeds")
fig.tight_layout()
fig.set_size_inches(6, 10)
fig.savefig("figures/posteriors.png")
plt.show()

post_yearly_power=[]
#generate
for i in range(100):
	sigma_post_sample = np.random.choice(sigma_post_dist, 8760)
	alpha_post_sample = np.random.choice(alpha_post_dist, 8760)
	posterior_wind_data = [float(weibull_min.rvs(c=alpha_post_sample[i],
		scale=sigma_post_sample[i], size=1)) for i in range(len(alpha_post_sample))]
	post_power_dist = v90_turbine(posterior_wind_data)
	post_yearly_power.append(np.sum(post_power_dist))

post_kwh_dist = post_yearly_power
post_dist_invest=[]
post_dist_pass=[]

for kwh in post_kwh_dist:
	post_dist_invest.append(loss_invest(kwh, I, c_oper, p_kwh))
	post_dist_pass.append(loss_pass(kwh, I , c_oper, p_kwh, d))

fig, (ax1, ax2) =plt.subplots(2, sharex=True)
ax1.hist(post_dist_invest, density=True)
ax1.set_xlabel("Posterior Expected Losses, Invest")
ax1.set_ylabel("Probability Distribution")
Example #34
def generate_Anom_data(anom_mode, num_noise_samples, params):
    anom_views = []

    if anom_mode == 1:
        for i in range(num_noise_samples):
            nViews = int(np.random.uniform(low=100, high=1000))
            falseEntries = np.ones((nViews, ))
            anom_views.append(falseEntries)

    elif anom_mode == 2:
        for i in range(num_noise_samples):
            nViews = int(np.random.uniform(low=100, high=1000))
            complete_prob = np.random.uniform(low=0.8, high=0.9)
            falseEntries = np.random.uniform(low=0.8, high=0.9, size=nViews)
            compl_indexes = np.random.choice(range(falseEntries.shape[0]),
                                             size=int(complete_prob * nViews))
            falseEntries[compl_indexes] = 1.0
            anom_views.append(falseEntries)

    elif anom_mode == 3:
        ## EXPON
        for i in range(num_noise_samples):
            nViews = int(np.random.uniform(low=100, high=10000))
            views = expon.rvs(loc=params[0], scale=params[1], size=nViews)
            views[views > 1.0] = 1.0
            anom_views.append(views)

    elif anom_mode == 4:
        ## Pareto
        for i in range(num_noise_samples):
            nViews = int(np.random.uniform(low=100, high=1000))
            views = pareto.rvs(params[0], loc=params[1], size=nViews) - 1.0
            views[views > 1.0] = 1.0
            anom_views.append(views)

    elif anom_mode == 5:
        ## LogNorm
        for i in range(num_noise_samples):
            nViews = int(np.random.uniform(low=100, high=1000))
            views = lognorm.rvs(s=params[0], scale=params[1], size=nViews)
            views[views > 1.0] = 1.0
            anom_views.append(views)

    elif anom_mode == 6:
        ## Weibull_Min
        for i in range(num_noise_samples):
            nViews = int(np.random.uniform(low=100, high=1000))
            views = weibull_min.rvs(params[0], scale=params[1], size=nViews)
            views[views > 1.0] = 1.0
            anom_views.append(views)

    elif anom_mode == 7:
        ## Uniform
        for i in range(num_noise_samples):
            nViews = int(np.random.uniform(low=100, high=10000))
            views = np.random.uniform(low=params[0],
                                      high=params[1],
                                      size=nViews)
            views[views > 1.0] = 1.0
            anom_views.append(views)

    elif anom_mode == 8:
        ## Uniform SHORT
        for i in range(num_noise_samples):
            nViews = int(np.random.uniform(low=100, high=10000))
            views = np.random.uniform(low=params[0],
                                      high=params[1],
                                      size=nViews)
            views[views > 1.0] = 1.0
            anom_views.append(views)

    elif anom_mode == 9:
        ## Gamma Short
        for i in range(num_noise_samples):
            nViews = int(np.random.uniform(low=100, high=10000))
            views = gamma.rvs(params[0], scale=params[1], loc=0.0, size=nViews)
            views[views > 1.0] = 1.0
            anom_views.append(views)

    elif anom_mode == 10:
        for i in range(num_noise_samples):
            nViews = int(np.random.uniform(low=100, high=10000))
            views = 1 - gamma.rvs(
                params[0], scale=params[1], loc=0.0, size=nViews)
            views[views > 1.0] = 1.0
            views[views < 0.0] = 0.0
            anom_views.append(views)

    anom_views = np.array(anom_views, dtype=object)  # rows have different lengths

    pbar = ProgressBar()
    synth_fisk_params = []
    for i in pbar(range(len(anom_views))):
        synth_single = fit_fisk(anom_views[i], 0)
        synth_fisk_params.append(
            [synth_single[0], synth_single[1][0], synth_single[1][2]])

    print(len(synth_fisk_params))
    synth_fisk_params = np.array(synth_fisk_params)
    return synth_fisk_params
Example #35
        # lambda function lambda arguments: return value
        plus_one = lambda x: x + 1
        print(plus_one(3))
        # This is not a good way to define a function. It should only be used when you don't need a named function.
    elif test == 3:
        a = NoInit()
        a.greeting()
    elif test == 4:
        # np.random.seed(seed=1)
        a = np.random.uniform()
        b = np.random.weibull(4)
        # Simulating wind speed via weibull distribution
        n = 1  # number of samples
        k = 2  # shape factor should be calculated from the wind data, not available now
        lam = 5  # scale,should be calculated from the wind data, not available now
        v_wind = weibull_min.rvs(k, loc=0, scale=lam, size=n)
    elif test == 5:
        # Show alternative colors
        from matplotlib.patches import Rectangle
        import matplotlib.pyplot as plt
        import matplotlib.colors as mcolors

        def plot_colortable(colors, title, sort_colors=True, emptycols=0):

            cell_width = 212
            cell_height = 22
            swatch_width = 48
            margin = 12
            topmargin = 40

            # Sort colors by hue, saturation, value and name.