def main():
    a = 0
    b = 5
    x = np.linspace(a - 1, b + 1, 100)
    dist = uniform(loc=a, scale=abs(a - b))
    figure, ax = plt.subplots(figsize=(9, 9))

    plt.subplot(221)
    plt.title("Distribution function")
    plt.plot(x, dist.cdf(x), color='r', label=r'F({0}, {1})'.format(a, b))
    plt.legend()

    plt.subplot(222)
    plt.title("Probability density function")
    plt.plot(x, dist.pdf(x), color='b', label=r'f({0}, {1})'.format(a, b))
    plt.legend()

    dist = erlang(a=2)

    plt.subplot(223)
    plt.title('Distribution function')
    cdf = [erlang_cdf(4, 4, i) for i in x]
    plt.plot(x, cdf, color='r', label=r'F({0}, {1})'.format(a, b))
    plt.legend()

    plt.subplot(224)
    plt.title('Probability density function')
    pdf = [erlang_pdf(4, 4, i) for i in x]
    plt.plot(x, pdf, color='b', label=r'f({0}, {1})'.format(a, b))
    plt.legend()

    plt.show()
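# The example above calls erlang_cdf() and erlang_pdf(), which are not defined
# in the snippet. Below is a minimal sketch of what such helpers might look
# like, assuming the first argument is the integer shape k and the second the
# rate lam; this is an illustration, not the original author's implementation.
from math import exp, factorial

def erlang_pdf(k, lam, x):
    """Erlang density f(x) = lam^k * x^(k-1) * exp(-lam*x) / (k-1)! for x >= 0."""
    if x < 0:
        return 0.0
    return lam**k * x**(k - 1) * exp(-lam * x) / factorial(k - 1)

def erlang_cdf(k, lam, x):
    """Erlang CDF F(x) = 1 - sum_{n=0}^{k-1} exp(-lam*x) * (lam*x)^n / n!."""
    if x < 0:
        return 0.0
    return 1.0 - sum(exp(-lam * x) * (lam * x)**n / factorial(n) for n in range(k))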
def _get_bg_erlang(d, ich=0, m=10, ph_sel=Ph_sel('all'), period=0):
    """Return a frozen (scipy) erlang distribution with rate equal to the bg rate."""
    bg_rate = d.bg_from(ph_sel=ph_sel)[ich][period]
    # bg_rate_kcps = bg_rate * 1e-3
    bg_dist = erlang(a=m, scale=1. / bg_rate)
    return bg_dist
def runLenFees(fullSize=975000):
    ds = generate_dataset()
    grouped, not_full = list(), list()
    r = False
    total_tx = list()
    for b in ds.interarrival:
        total_tx.extend([b['satoshi_fee'] // b['num_tx']] * b['num_tx'])
        if b['block_size'] <= fullSize:
            not_full.append(b)
            r = False
        elif b['block_size'] > fullSize:
            if not r:
                grouped.append([b])
                r = True
            else:
                grouped[-1].append(b)
    mean = sum(total_tx) / len(total_tx)
    print 'mean tx fee: {}'.format(mean)

    fig = plt.figure()
    fig.suptitle('Transaction Fee PDF/Histogram Fit')
    plt.hist(sorted(total_tx)[:-len(total_tx) // 100], 300, normed=True)
    plt.xlabel('Satoshi Tx Fee')
    plt.ylabel('Probability')
    plt.axvline(x=mean, color='red')
    rv = sp.erlang(7, loc=11500, scale=1200)
    x = np.linspace(0, 50000)
    #plt.plot(x, rv.pdf(x))
    #plt.show()

    l1 = [i / len(total_tx)
          for i in range(len(total_tx) - (len(total_tx) // 50) - 1)]
    l2 = [e for e in sorted(total_tx)[:-len(total_tx) // 50]]
    print len(l1), len(l2)
    fig.suptitle('Sorted Fees vs. Erlang CDF')
    plt.xlabel('Satoshi Tx Fee')
    plt.ylabel('Probability')
    plt.plot(x, rv.cdf(x), color='red')
    plt.plot(l2, l1, color='blue')
    plt.show()

    dic = defaultdict(list)
    for blocklist in grouped:
        dic[len(blocklist)].extend(blocklist)
    avgFeeDic = dict()
    for key in dic:
        avgFeeDic[key] = calcAvgFee(dic[key])
    avgFeeDic[0] = calcAvgFee(not_full)
    return avgFeeDic
def plotDist(control, trial, cont_pr, parameters, label):
    titles = ['G1', 'S-G2']
    plt.figure(figsize=(12, 6), dpi=200)
    for ind, x in enumerate(trial):
        plt.subplot(1, 2, (ind + 1))
        plt.hist(control[ind], alpha=0.4, density=True, label="control", color="slategrey")
        plt.hist(x, alpha=0.4, density=True, label=label, color="orchid")
        plt.title(titles[ind])
        rv = sp.erlang(parameters[ind][0], 0, parameters[ind][1])
        rv2 = sp.erlang(cont_pr[ind][0], 0, cont_pr[ind][1])
        xx = np.linspace(0, max(x))
        plt.plot(xx, rv2.pdf(xx), lw=2, color="slategrey")
        plt.plot(xx, rv.pdf(xx), lw=2, color="orchid")
        plt.text(50, 0.07, 'shape: %d \n scale: %.3f'
                 % (parameters[ind][0], parameters[ind][1]), fontsize=14)
        plt.text(50, 0.09, 'control shape: %d \n scale: %.3f'
                 % (cont_pr[ind][0], cont_pr[ind][1]), fontsize=14)
        plt.ylim([0.0, 0.12])
        plt.xlim([0.0, 90.0])
        plt.xlabel("phase durations [hrs]")
        plt.ylabel("probability")
        plt.legend()
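# Minimal usage sketch for plotDist() above. All data and (shape, scale) pairs
# below are synthetic and made up for illustration; in the original workflow
# they would come from Erlang fits of measured phase durations.
import numpy as np
import scipy.stats as sp
import matplotlib.pyplot as plt

np.random.seed(0)
control = [sp.erlang.rvs(8, scale=2.0, size=200) for _ in range(2)]   # two phases
trial = [sp.erlang.rvs(6, scale=3.0, size=200) for _ in range(2)]
cont_pr = [(8, 2.0), (8, 2.0)]      # control (shape, scale) per phase
parameters = [(6, 3.0), (6, 3.0)]   # trial (shape, scale) per phase
plotDist(control, trial, cont_pr, parameters, label="treatment")
plt.show()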
def __init__(self, start, end):
    assert end <= RANGE_MAX
    self.arguments = (start, end)
    self.bitwise = np.array(
        [1 if (start <= i <= end) else 0 for i in range(1, RANGE_MAX + 1)],
        dtype=int)
    self.numeric = np.nonzero(self.bitwise)[0] + 1
    erlang_rv = erlang(2, loc=0, scale=10)
    size = end - start + 1
    size_prob = erlang_rv.pdf(size)
    start_prob = 1. / (RANGE_MAX + 1 - size)
    self.probability = size_prob / float(5050)
def _get_bg_distrib_erlang(d, ich=0, m=10, ph_sel=Ph_sel('all'), bp=(0, -1)):
    """Return a frozen (scipy) erlang distribution with rate equal to the bg rate."""
    assert ph_sel in [Ph_sel('all'), Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem')]
    if np.size(bp) == 1:
        bp = (bp, bp)
    periods = slice(d.Lim[ich][bp[0]][0], d.Lim[ich][bp[1]][1] + 1)

    # Compute the BG distribution
    if ph_sel == Ph_sel('all'):
        bg_ph = d.bg_dd[ich] + d.bg_ad[ich]
    elif ph_sel == Ph_sel(Dex='Dem'):
        bg_ph = d.bg_dd[ich]
    elif ph_sel == Ph_sel(Dex='Aem'):
        bg_ph = d.bg_ad[ich]

    rate_ch_kcps = bg_ph[periods].mean() / 1e3  # bg rate in kcps
    bg_dist = erlang(a=m, scale=1. / rate_ch_kcps)
    return bg_dist
def _get_bg_distrib_erlang(d, ich=0, m=10, ph_sel=Ph_sel('all'),
                           period=(0, -1)):
    """Return a frozen (scipy) erlang distribution with rate equal to the bg rate."""
    assert ph_sel in [Ph_sel('all'), Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem')]

    # Compute the BG distribution
    if ph_sel == Ph_sel('all'):
        bg_ph = d.bg_dd[ich] + d.bg_ad[ich]
    elif ph_sel == Ph_sel(Dex='Dem'):
        bg_ph = d.bg_dd[ich]
    elif ph_sel == Ph_sel(Dex='Aem'):
        bg_ph = d.bg_ad[ich]

    rate_ch_kcps = bg_ph[period[0]:period[1] + 1].mean() / 1e3  # bg rate in kcps
    bg_dist = erlang(a=m, scale=1. / rate_ch_kcps)
    return bg_dist
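# Minimal usage sketch (an assumption, not part of the original module): build
# the same kind of frozen Erlang by hand for an assumed background rate and
# query it. The 2.5 kcps rate below is made up for illustration.
from scipy.stats import erlang

m = 10                      # number of background photons
rate_kcps = 2.5             # assumed background rate in kcps
bg_dist = erlang(a=m, scale=1. / rate_kcps)   # waiting time for m photons, in ms
print(bg_dist.mean())       # expected time to see m photons: m / rate = 4 ms
print(bg_dist.sf(8.0))      # probability that m photons take longer than 8 ms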
def test_erlang_dist(input_list, list_name):
    # erlang is a special case of gamma with integer shape alpha
    fit_alpha, fit_loc, fit_beta = ss.erlang.fit(input_list, floc=0)
    rv = ss.erlang(int(fit_alpha), fit_loc, fit_beta)
    print 'fit alpha is %.4f and fit beta is %.4f' % (int(fit_alpha), fit_beta)

    # choose the number of bins
    n = count_bin(input_list)
    print 'number of bins %i' % n

    name = 'test Erlang on ' + list_name
    draw_hist(input_list, n, rv, name, 'Chartreuse')

    # set the adjustment to dof (degrees of freedom) equal to the number of
    # parameters estimated
    dof = 1

    ## experiment--------------------------------------------------
    result = model(input_list, n, dof, rv)
    t_value, p_value = result[0], result[1]
    print "The chi_sq test value of erlang dist is %5.6f and the p-value is %5.10f" % (
        t_value, p_value)
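# The model() helper called above (and the chisquare.model() used in other
# examples in this collection) is not shown. Below is a plausible sketch of
# such a binned chi-square goodness-of-fit helper with the same call signature;
# the binning and ddof handling are assumptions, not the original implementation.
import numpy as np
from scipy import stats

def model(data, n_bins, dof, rv):
    """Chi-square test of `data` against the frozen distribution `rv` over n_bins bins."""
    data = np.asarray(data, dtype=float)
    counts, edges = np.histogram(data, bins=n_bins)
    # expected counts per bin under rv, rescaled so totals match the observed counts
    expected = len(data) * np.diff(rv.cdf(edges))
    expected *= counts.sum() / expected.sum()
    # in practice, bins with very small expected counts should be merged first
    return stats.chisquare(counts, expected, ddof=dof)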
dat = [x[0] for x in res]
print dat
plt.plot([CRIT] * len(dat), 'r--', label='KS CRITICAL VALUE')
plt.plot(dat, 'bH:', label='Erlang Shape Parameter D values')
plt.show()

ds = generate_dataset()
times = sorted([block['time'] for block in ds.interarrival])
#times = [x/max(times) for x in times]
#trimmed_times = times[::len(times)//100]
#test_erlang(trimmed_times)
plt.hist(times, 100)
plt.show()

tx_rate = sorted([block['num_tx'] / block['time'] for block in ds.interarrival
                  if block['time'] != 0 and block['num_tx'] != 0])
print 'mean: {}'.format(sum(tx_rate) / len(tx_rate))
print 'max: {}'.format(max(tx_rate))
print 'min: {}'.format(min(tx_rate))
trimmed_tx_rate = tx_rate[len(tx_rate) // 25::len(tx_rate) // 100]
test_erlang(trimmed_tx_rate)

rv = sp.erlang(3)
x = np.linspace(0, 1)
#plt.plot(x, rv.pdf(x))
#plt.show()
plt.hist(tx_rate[:-len(tx_rate) // 25], 100)
plt.show()
def Consumed_SKU(self):
    print erlang(200, scale=0.002778).rvs(200)
import numpy as np
import random
import scipy.stats as ss
import matplotlib.pyplot as plt
import chiSquare

f = file('AllServiceTimes2.txt', 'r+')
service_times = [float(x) for x in f.read().split(', ')]

fit_alpha, fit_beta = ss.expon.fit(service_times, floc=0)
rv1 = ss.expon(fit_alpha, fit_beta)
print 'Exponential parameters: loc, mu: ', fit_alpha, fit_beta

fit_alpha, fit_loc, fit_beta = ss.erlang.fit(service_times, floc=0)
fit_alpha = int(round(fit_alpha))
rv2 = ss.erlang(fit_alpha, fit_loc, fit_beta)
print 'Erlang parameters: f, loc, mu: ', fit_alpha, fit_loc, fit_beta

fit_alpha, fit_loc, fit_beta = ss.gamma.fit(service_times, floc=0)
rv3 = ss.gamma(fit_alpha, fit_loc, fit_beta)
print 'Gamma parameters: alpha, loc, beta: ', fit_alpha, fit_loc, fit_beta

fig = plt.figure()
myHist = plt.hist(service_times, 60, normed=True)
x = np.linspace(0.001, 500)
ex = plt.plot(x, rv1.pdf(x), lw=2, label="Exponential")
e = plt.plot(x, rv2.pdf(x), lw=2, label="Erlang")
g = plt.plot(x, rv3.pdf(x), lw=2, label="Gamma")
plt.legend(loc='upper right')
fig.suptitle('Service times')
# Simple Synapse
#
# The simple synapse does not simulate the axon, synaptic cleft, or dendrite.
# Instead, it simply takes a voltage from the presynaptic neuron and provides
# a means of activating the postsynaptic neuron.
# It shares an interface with ChemicalSynapse, and is thus interchangeable
# with it.

from molecule import Receptors
from collections import deque
from sys import maxint
from scipy.stats import erlang

er = erlang(2)

def erlang_generator():
    """ Creates an erlang generator. """
    prev = 0.0
    for x in xrange(1, maxint):
        curr = er.cdf(x)
        diff = curr - prev
        if diff < 0.001:
            break
        prev = curr
        yield diff

class SimpleSynapse:
    def __init__(self, postsynaptic_id=None, receptor=Receptors.AMPA,
                 spiking=True, delay=0, strength=1, environment=None,
# ERLANG DISTRIBUTION
elif optionSelected == 4:
    os.system("cls")
    nombreDistribucion = "Erlang distribution"
    print("\t::", nombreDistribucion, "::")

    # Parameters required for generation
    forma = float(input("-> Enter the shape: "))
    valorEsperado = float(input("-> Enter the expected value: "))
    numeroDatos = int(input("-> Enter the number of random variates to generate: "))
    # nivelDeSignificacia = float(input("-> Enter the significance level for the chi-square test: "))
    nivelDeSignificacia = 0.05

    tipoDistribucion = st.erlang(forma, 0, valorEsperado)
    nombreDistribucion = "Erlang distribution"
    objErlang = erlangDistribution.Erlang(numeroDatos, tipoDistribucion,
                                          nivelDeSignificacia, nombreDistribucion,
                                          forma, valorEsperado)

    legendHistogram = (r'$\kappa$' + "=" + str("{0:.2f}".format(objErlang.formGenerated))
                       + " ; " + r'$\theta$' + "=" + str("{0:.2f}".format(objErlang.expectedValueGenerated)))
    legendDensity = r'$\kappa$' + "=" + str(forma) + " ; " + r'$\theta$' + "=" + str(valorEsperado)
    axisX = np.linspace(0, objErlang.median + 6 * objErlang.iqr, numeroDatos)

    os.system('cls')
    objErlang.chiSquareTest()
    objErlang.graph(legendHistogram, legendDensity, axisX)

# CONTINUOUS UNIFORM DISTRIBUTION
elif optionSelected == 5:
def all_dists():
    # dist params were taken from the scipy.stats official
    # documentation examples
    # Total - 89
    return {
        "alpha": stats.alpha(a=3.57, loc=0.0, scale=1.0),
        "anglit": stats.anglit(loc=0.0, scale=1.0),
        "arcsine": stats.arcsine(loc=0.0, scale=1.0),
        "beta": stats.beta(a=2.31, b=0.627, loc=0.0, scale=1.0),
        "betaprime": stats.betaprime(a=5, b=6, loc=0.0, scale=1.0),
        "bradford": stats.bradford(c=0.299, loc=0.0, scale=1.0),
        "burr": stats.burr(c=10.5, d=4.3, loc=0.0, scale=1.0),
        "cauchy": stats.cauchy(loc=0.0, scale=1.0),
        "chi": stats.chi(df=78, loc=0.0, scale=1.0),
        "chi2": stats.chi2(df=55, loc=0.0, scale=1.0),
        "cosine": stats.cosine(loc=0.0, scale=1.0),
        "dgamma": stats.dgamma(a=1.1, loc=0.0, scale=1.0),
        "dweibull": stats.dweibull(c=2.07, loc=0.0, scale=1.0),
        "erlang": stats.erlang(a=2, loc=0.0, scale=1.0),
        "expon": stats.expon(loc=0.0, scale=1.0),
        "exponnorm": stats.exponnorm(K=1.5, loc=0.0, scale=1.0),
        "exponweib": stats.exponweib(a=2.89, c=1.95, loc=0.0, scale=1.0),
        "exponpow": stats.exponpow(b=2.7, loc=0.0, scale=1.0),
        "f": stats.f(dfn=29, dfd=18, loc=0.0, scale=1.0),
        "fatiguelife": stats.fatiguelife(c=29, loc=0.0, scale=1.0),
        "fisk": stats.fisk(c=3.09, loc=0.0, scale=1.0),
        "foldcauchy": stats.foldcauchy(c=4.72, loc=0.0, scale=1.0),
        "foldnorm": stats.foldnorm(c=1.95, loc=0.0, scale=1.0),
        # "frechet_r": stats.frechet_r(c=1.89, loc=0.0, scale=1.0),
        # "frechet_l": stats.frechet_l(c=3.63, loc=0.0, scale=1.0),
        "genlogistic": stats.genlogistic(c=0.412, loc=0.0, scale=1.0),
        "genpareto": stats.genpareto(c=0.1, loc=0.0, scale=1.0),
        "gennorm": stats.gennorm(beta=1.3, loc=0.0, scale=1.0),
        "genexpon": stats.genexpon(a=9.13, b=16.2, c=3.28, loc=0.0, scale=1.0),
        "genextreme": stats.genextreme(c=-0.1, loc=0.0, scale=1.0),
        "gausshyper": stats.gausshyper(a=13.8, b=3.12, c=2.51, z=5.18, loc=0.0, scale=1.0),
        "gamma": stats.gamma(a=1.99, loc=0.0, scale=1.0),
        "gengamma": stats.gengamma(a=4.42, c=-3.12, loc=0.0, scale=1.0),
        "genhalflogistic": stats.genhalflogistic(c=0.773, loc=0.0, scale=1.0),
        "gilbrat": stats.gilbrat(loc=0.0, scale=1.0),
        "gompertz": stats.gompertz(c=0.947, loc=0.0, scale=1.0),
        "gumbel_r": stats.gumbel_r(loc=0.0, scale=1.0),
        "gumbel_l": stats.gumbel_l(loc=0.0, scale=1.0),
        "halfcauchy": stats.halfcauchy(loc=0.0, scale=1.0),
        "halflogistic": stats.halflogistic(loc=0.0, scale=1.0),
        "halfnorm": stats.halfnorm(loc=0.0, scale=1.0),
        "halfgennorm": stats.halfgennorm(beta=0.675, loc=0.0, scale=1.0),
        "hypsecant": stats.hypsecant(loc=0.0, scale=1.0),
        "invgamma": stats.invgamma(a=4.07, loc=0.0, scale=1.0),
        "invgauss": stats.invgauss(mu=0.145, loc=0.0, scale=1.0),
        "invweibull": stats.invweibull(c=10.6, loc=0.0, scale=1.0),
        "johnsonsb": stats.johnsonsb(a=4.32, b=3.18, loc=0.0, scale=1.0),
        "johnsonsu": stats.johnsonsu(a=2.55, b=2.25, loc=0.0, scale=1.0),
        "ksone": stats.ksone(n=1e03, loc=0.0, scale=1.0),
        "kstwobign": stats.kstwobign(loc=0.0, scale=1.0),
        "laplace": stats.laplace(loc=0.0, scale=1.0),
        "levy": stats.levy(loc=0.0, scale=1.0),
        "levy_l": stats.levy_l(loc=0.0, scale=1.0),
        "levy_stable": stats.levy_stable(alpha=0.357, beta=-0.675, loc=0.0, scale=1.0),
        "logistic": stats.logistic(loc=0.0, scale=1.0),
        "loggamma": stats.loggamma(c=0.414, loc=0.0, scale=1.0),
        "loglaplace": stats.loglaplace(c=3.25, loc=0.0, scale=1.0),
        "lognorm": stats.lognorm(s=0.954, loc=0.0, scale=1.0),
        "lomax": stats.lomax(c=1.88, loc=0.0, scale=1.0),
        "maxwell": stats.maxwell(loc=0.0, scale=1.0),
        "mielke": stats.mielke(k=10.4, s=3.6, loc=0.0, scale=1.0),
        "nakagami": stats.nakagami(nu=4.97, loc=0.0, scale=1.0),
        "ncx2": stats.ncx2(df=21, nc=1.06, loc=0.0, scale=1.0),
        "ncf": stats.ncf(dfn=27, dfd=27, nc=0.416, loc=0.0, scale=1.0),
        "nct": stats.nct(df=14, nc=0.24, loc=0.0, scale=1.0),
        "norm": stats.norm(loc=0.0, scale=1.0),
        "pareto": stats.pareto(b=2.62, loc=0.0, scale=1.0),
        "pearson3": stats.pearson3(skew=0.1, loc=0.0, scale=1.0),
        "powerlaw": stats.powerlaw(a=1.66, loc=0.0, scale=1.0),
        "powerlognorm": stats.powerlognorm(c=2.14, s=0.446, loc=0.0, scale=1.0),
        "powernorm": stats.powernorm(c=4.45, loc=0.0, scale=1.0),
        "rdist": stats.rdist(c=0.9, loc=0.0, scale=1.0),
        "reciprocal": stats.reciprocal(a=0.00623, b=1.01, loc=0.0, scale=1.0),
        "rayleigh": stats.rayleigh(loc=0.0, scale=1.0),
        "rice": stats.rice(b=0.775, loc=0.0, scale=1.0),
        "recipinvgauss": stats.recipinvgauss(mu=0.63, loc=0.0, scale=1.0),
        "semicircular": stats.semicircular(loc=0.0, scale=1.0),
        "t": stats.t(df=2.74, loc=0.0, scale=1.0),
        "triang": stats.triang(c=0.158, loc=0.0, scale=1.0),
        "truncexpon": stats.truncexpon(b=4.69, loc=0.0, scale=1.0),
        "truncnorm": stats.truncnorm(a=0.1, b=2, loc=0.0, scale=1.0),
        "tukeylambda": stats.tukeylambda(lam=3.13, loc=0.0, scale=1.0),
        "uniform": stats.uniform(loc=0.0, scale=1.0),
        "vonmises": stats.vonmises(kappa=3.99, loc=0.0, scale=1.0),
        "vonmises_line": stats.vonmises_line(kappa=3.99, loc=0.0, scale=1.0),
        "wald": stats.wald(loc=0.0, scale=1.0),
        "weibull_min": stats.weibull_min(c=1.79, loc=0.0, scale=1.0),
        "weibull_max": stats.weibull_max(c=2.87, loc=0.0, scale=1.0),
        "wrapcauchy": stats.wrapcauchy(c=0.0311, loc=0.0, scale=1.0),
    }
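# Minimal usage sketch (an assumption, not from the original code): look up one
# of the frozen distributions returned by all_dists() and query its moments.
dists = all_dists()
erl = dists["erlang"]   # frozen Erlang with a=2, loc=0, scale=1
print("mean={0}, std={1}".format(erl.mean(), erl.std()))   # 2.0 and sqrt(2)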
""" percentiles = np.array([0.25, 0.5, 0.75]) time_values = np.array([iqr[0], median, iqr[1]]) mean = median x0 = [mean / n] * n return fit_chain_by_cdf(n, time_values, percentiles, lower, upper, x0=x0) if __name__ == "__main__": import matplotlib.pyplot as pl from scipy.stats import erlang n = 10 mean = 10 C = ExpChain([mean / n] * n) E = erlang(a=n, scale=mean / n) t, y = C.get_cdf() pl.plot(t, y) pl.plot(t, E.cdf(t)) print(C.get_median_and_iqr()) print(E.median(), E.interval(0.5)) pl.show() # ========= times = [0.3, 6., 9, 0.4] n = len(times) C = ExpChain(times) fit_C = fit_chain_by_median_and_iqr(3, *C.get_median_and_iqr())
ag, bg, thetaGamma = stats.gamma.fit(data)
pdf_gamma = stats.gamma.pdf(lnspc, ag, bg, thetaGamma)
plt.plot(lnspc, pdf_gamma, label="Gamma")
plt.legend(loc='upper right')
plt.savefig("fitting.png")

print "[*] Fitting Parameters"
print "[#] Exponential: ", aexpon, '???, mu = ', muExp, '\n',
print "[#] Erlang: ", ae, '???, k = ', be, ', mu = ', muErl
print "[#] Gamma: ", ag, '??? k = ', bg, ', Theta = ', thetaGamma, '\n'

print "[*] Computing Chi Square Test"
# Apply Chi-Square test to the three fittings
dof = 2
n = 30

rv = stats.expon(aexpon, muExp)
exponFit = chisquare.model(data, n, dof, rv)
print "[#] Exponential: The chi_sq test value is %10.6f and the p-value is %10.6f" % (
    exponFit[0], exponFit[1])

rv = stats.erlang(ae, be, muErl)
erlangFit = chisquare.model(data, n, dof, rv)
print "[#] Erlang: The chi_sq test value is %10.6f and the p-value is %10.6f" % (
    erlangFit[0], erlangFit[1])

rv = stats.gamma(ag, bg, thetaGamma)
gammaFit = chisquare.model(data, n, dof, rv)
print "[#] Gamma: The chi_sq test value is %10.6f and the p-value is %10.6f" % (
    gammaFit[0], gammaFit[1])
def __init__(self, a=1, loc=0, scale=1):
    self.g = erlang(a=a, loc=loc, scale=scale)
    self.init_stats(mean=self.g.mean(), var=self.g.var(), running=False)
    self.init_buffer()
# k = (T_l/sigma)^2, mu = sigma^2/T_l
# 1 - constant variance sigma_0^2 across different T_l's
# 2 - constant k (not 1, because with k=1 we have an exponential distribution),
#     sigma increases with T_l
erlang_type = 2
sigma_0 = 2   # will be used only in case erlang_type=1 (constant variance)
k = 10        # will be used only in case erlang_type=2 (constant k)
if erlang_type == 1:
    a = T_l / sigma_0
    mu = sigma_0 / a
    k = a * a
elif erlang_type == 2:
    mu = T_l / k
rv = erlang(k, scale=mu)

## give locations to all agents
Nruns = 10
peak_height = list()
peak_time = list()
start_time = list()
inf_per_day = list()
day_50 = list()
day_100 = list()
day_150 = list()
day_200 = list()

for irun in range(Nruns):
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--stages', type=int, required=False,
                        help='Number of stages of the distribution')
    parser.add_argument('-l', '--lambdap', type=float, required=True, nargs='+',
                        help='Lambda parameter of each distribution')
    parser.add_argument('-r', '--runs', type=int, required=True,
                        help='Runs to execute for each simulation')
    parser.add_argument('-o', '--output', type=str, required=False,
                        help='Output file for the plot')
    parser.add_argument('-d', '--dist', type=str, required=True,
                        choices=['erlang', 'expon', 'hyperexp'],
                        help='Distribution to use for the simulation')
    parser.add_argument('--no-graph', required=False,
                        help='Suppresses the graphical output',
                        dest='graph', action='store_false')
    parser.add_argument('--graph', required=False,
                        help='Enables the graphical output (use with [-o])',
                        dest='graph', action='store_true')
    parser.add_argument('-p', '--probability', required=False, type=float,
                        help='Probability for the hyperexponential distribution')
    parser.set_defaults(graph=True)
    args = parser.parse_args()

    # msg = 'Distribution {3} with {0} stages (lambda={1}) over {2} runs'
    # print msg.format(args.stages, args.lambdap, args.runs, args.dist)

    fig, ax = plt.subplots(1, 1)

    if args.dist in 'erlang':
        if args.stages <= 0:
            print 'Error: a valid number of stages is required'
            sys.exit(1)
        lambdap = args.lambdap[0]
        mean, var, skew, kurt = erlang.stats(args.stages, scale=lambdap,
                                             moments='mvsk')
        x = np.linspace(erlang.ppf(0.00001, args.stages, scale=lambdap),
                        erlang.ppf(0.99999, args.stages, scale=lambdap),
                        num=1000)
        rv = erlang(args.stages, scale=lambdap)
        ax.plot(x, rv.pdf(x), 'r-', lw=5, alpha=0.6, label='Erlang PDF')
        # Generate random numbers with this distribution
        r = erlang.rvs(args.stages, scale=lambdap, size=args.runs)
        ax.hist(r, bins=20, normed=True, histtype='stepfilled', alpha=0.4,
                label='Experimental values')
        meanexp = np.mean(r)
        varexp = np.var(r)
        print 'Experimental mean: {0} Analytical mean: {1}'.format(meanexp, mean)
        print 'Sigma2_exp: {0} Sigma2_a: {1}'.format(varexp, var)
        print 'CoV_exp: {0} CoV_a: {1}'.format(np.sqrt(varexp) / meanexp,
                                               np.sqrt(var) / mean)
    elif args.dist in 'expon':
        lambdap = args.lambdap[0]
        mean, var, skew, kurt = expon.stats(scale=lambdap, moments='mvsk')
        x = np.linspace(expon.ppf(0.00001, scale=lambdap),
                        expon.ppf(0.99999, scale=lambdap), num=1000)
        rv = expon(scale=lambdap)
        ax.plot(x, rv.pdf(x), 'r-', lw=5, alpha=0.6, label='Exponential PDF')
        # Generate random numbers with this distribution
        r = expon.rvs(scale=lambdap, size=args.runs)
        ax.hist(r, bins=20, normed=True, histtype='stepfilled', alpha=0.4,
                label='Experimental values')
        meanexp = np.mean(r)
        varexp = np.var(r)
        print 'Experimental mean: {0} Analytical mean: {1}'.format(meanexp, mean)
        print 'Sigma2_exp: {0} Sigma2_a: {1}'.format(varexp, var)
        print 'CoV_exp: {0} CoV_a: {1}'.format(np.sqrt(varexp) / meanexp,
                                               np.sqrt(var) / mean)
    elif args.dist in 'hyperexp':
        rv = hyperexp(args.probability, args.lambdap[0], args.lambdap[1])
        x = np.linspace(0.00000001, 10.99999, num=1000)
        ax.plot(x, rv.pdf(x), 'r-', lw=5, alpha=0.6, label='HyperExp PDF')
        # ax.plot(x, rv.cdf(x), 'b-', lw=2, alpha=0.6, label='HyperExp CDF')
        r = rv.rvs(size=args.runs)
        ax.hist(r, normed=True, bins=100, range=(0, 11), histtype='stepfilled',
                alpha=0.4, label='Experimental values')
        meanexp = np.mean(r)
        varexp = np.var(r)
        mean = rv.mean()
        var = rv.standard_dev()**2
        print 'Experimental mean: {0} Analytical mean: {1}'.format(meanexp, mean)
        print 'Sigma2_exp: {0} Sigma2_a: {1}'.format(varexp, var)
        print 'CoV_exp: {0} CoV_a: {1}'.format(np.sqrt(varexp) / meanexp, rv.CoV())

    if args.graph:
        ax.legend(loc='best', frameon=False)
        plt.show()
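# main() above relies on a hyperexp() helper that is not shown in this example.
# Below is a plausible minimal sketch of a two-phase hyperexponential with the
# same call signature and methods (mixing probability p, rates l1 and l2); it is
# an assumption for illustration, not the original class.
import numpy as np

class hyperexp(object):
    """Mixture of two exponentials: rate l1 with probability p, rate l2 otherwise."""
    def __init__(self, p, l1, l2):
        self.p, self.l1, self.l2 = p, l1, l2

    def pdf(self, x):
        x = np.asarray(x, dtype=float)
        return (self.p * self.l1 * np.exp(-self.l1 * x)
                + (1 - self.p) * self.l2 * np.exp(-self.l2 * x))

    def cdf(self, x):
        x = np.asarray(x, dtype=float)
        return 1 - (self.p * np.exp(-self.l1 * x)
                    + (1 - self.p) * np.exp(-self.l2 * x))

    def rvs(self, size=1):
        # pick the branch with probability p, then sample that exponential
        choose_first = np.random.random(size) < self.p
        return np.where(choose_first,
                        np.random.exponential(1.0 / self.l1, size),
                        np.random.exponential(1.0 / self.l2, size))

    def mean(self):
        return self.p / self.l1 + (1 - self.p) / self.l2

    def standard_dev(self):
        second_moment = 2 * self.p / self.l1**2 + 2 * (1 - self.p) / self.l2**2
        return np.sqrt(second_moment - self.mean()**2)

    def CoV(self):
        return self.standard_dev() / self.mean()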
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--stages", type=int, required=False,
                        help="Number of stages of the distribution")
    parser.add_argument("-l", "--lambdap", type=float, required=True, nargs="+",
                        help="Lambda parameter of each distribution")
    parser.add_argument("-r", "--runs", type=int, required=True,
                        help="Runs to execute for each simulation")
    parser.add_argument("-o", "--output", type=str, required=False,
                        help="Output file for the plot")
    parser.add_argument("-d", "--dist", type=str, required=True,
                        choices=["erlang", "expon", "hyperexp"],
                        help="Distribution to use for the simulation")
    parser.add_argument("--no-graph", required=False,
                        help="Suppresses the graphical output",
                        dest="graph", action="store_false")
    parser.add_argument("--graph", required=False,
                        help="Enables the graphical output (use with [-o])",
                        dest="graph", action="store_true")
    parser.set_defaults(graph=True)
    args = parser.parse_args()

    msg = "Distribution {3} with {0} stages (lambda={1}) over {2} runs"
    print msg.format(args.stages, args.lambdap, args.runs, args.dist)

    fig, ax = plt.subplots(1, 1)

    if args.dist in "erlang":
        if args.stages <= 0:
            print "Error: a valid number of stages is required"
            sys.exit(1)
        lambdap = args.lambdap[0]
        mean, var, skew, kurt = erlang.stats(args.stages, scale=lambdap, moments="mvsk")
        print "E[X]={0}, var(X)={1}".format(mean, var)
        x = np.linspace(erlang.ppf(0.00001, args.stages, scale=lambdap),
                        erlang.ppf(0.99999, args.stages, scale=lambdap), num=1000)
        rv = erlang(args.stages, scale=lambdap)
        ax.plot(x, rv.pdf(x), "r-", lw=5, alpha=0.6, label="Erlang PDF")
        # Generate random numbers with this distribution
        r = erlang.rvs(args.stages, scale=lambdap, size=args.runs)
        ax.hist(r, bins=20, normed=True, histtype="stepfilled", alpha=0.2)
        meanexp = np.mean(r)
        varexp = np.var(r)
        print "Experimental mean: {0} Analytical mean: {1}".format(meanexp, mean)
        print "Sigma2_exp: {0} Sigma2_a: {1}".format(varexp, var)
        print "CoV_exp: {0} CoV_a: {1}".format(np.sqrt(varexp) / meanexp, np.sqrt(var) / mean)
    elif args.dist in "expon":
        lambdap = args.lambdap[0]
        mean, var, skew, kurt = expon.stats(scale=lambdap, moments="mvsk")
        print "E[X]={0}, var(X)={1}".format(mean, var)
        x = np.linspace(expon.ppf(0.00001, scale=lambdap),
                        expon.ppf(0.99999, scale=lambdap), num=1000)
        rv = expon(scale=lambdap)
        ax.plot(x, rv.pdf(x), "r-", lw=5, alpha=0.6, label="Exponential PDF")
        # Generate random numbers with this distribution
        r = expon.rvs(scale=lambdap, size=args.runs)
        ax.hist(r, bins=20, normed=True, histtype="stepfilled", alpha=0.2)
        meanexp = np.mean(r)
        varexp = np.var(r)
        print "Experimental mean: {0} Analytical mean: {1}".format(meanexp, mean)
        print "Sigma2_exp: {0} Sigma2_a: {1}".format(varexp, var)
        print "CoV_exp: {0} CoV_a: {1}".format(np.sqrt(varexp) / meanexp, np.sqrt(var) / mean)
    elif args.dist in "hyperexp":
        print "HyperExponential RV"
        rv = hyperexp(0.1, args.lambdap[0], args.lambdap[1])
        x = np.linspace(0.00000001, 10.99999, num=1000)
        ax.plot(x, rv.pdf(x), "r-", lw=5, alpha=0.6, label="HyperExp PDF")
        # ax.plot(x, rv.cdf(x), 'b-', lw=2, alpha=0.6, label='HyperExp CDF')
        r = rv.rvs(size=args.runs)
        ax.hist(r, normed=True, bins=100, range=(0, 11), histtype="stepfilled", alpha=0.2)
        meanexp = np.mean(r)
        varexp = np.var(r)
        # analytical moments for this branch (the original referenced mean/var
        # before defining them here, which would raise a NameError)
        mean = rv.mean()
        var = rv.standard_dev()**2
        print "Experimental mean: {0} Analytical mean: {1}".format(meanexp, mean)
        print "Sigma2_exp: {0} Sigma2_a: {1}".format(varexp, var)
        print "CoV_exp: {0} CoV_a: {1}".format(np.sqrt(varexp) / meanexp, np.sqrt(var) / mean)

    if args.graph:
        plt.show()