def cdfModelWeibull(a, b, y0):
    """Weibull cumulative distribution function."""
    f = lambda x: y0 + (weibull_min.cdf(x, b, scale=a)
                        - weibull_min.cdf(0, b, scale=a)) * (1 - y0)
    return f
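# A minimal usage sketch for cdfModelWeibull; the parameter values and the imports are
# assumptions added for illustration (the snippet itself expects weibull_min at module level).
import numpy as np
from scipy.stats import weibull_min

model = cdfModelWeibull(a=5.0, b=1.5, y0=0.2)  # scale a, shape b, baseline y0
x = np.linspace(0.0, 20.0, 5)
print(model(x))  # equals y0 at x = 0 and approaches 1.0 as x grows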
def truncatedweibull_pdf(data, c, scale):
    epsilon = 1e-200
    term2 = (weibull_min.pdf(data, c, scale=scale, loc=0.0)
             / (weibull_min.cdf(1.0, c, scale=scale, loc=0.0)
                - weibull_min.cdf(0.0, c, scale=scale, loc=0.0))) * (data < 1.0)
    return term2 + epsilon
def truncatedweibull_pdf(data, prior, c, scale):
    epsilon = 1e-200
    term1 = prior * (data == 1.0)
    term2 = (1 - prior) * (weibull_min.pdf(data, c, scale=scale, loc=0.0)
                           / (weibull_min.cdf(1.0, c, scale=scale, loc=0.0)
                              - weibull_min.cdf(0.0, c, scale=scale, loc=0.0))) * (data < 1.0)
    return term1 + term2 + epsilon
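# Hedged numerical check of the point-mass / truncated-Weibull mixture above, assuming the
# data live in [0, 1]: the continuous part should integrate to roughly 1 - prior, with the
# remaining mass `prior` concentrated at exactly 1.0. Parameter values are illustrative only.
import numpy as np
from scipy.stats import weibull_min

grid = np.linspace(0.0, 0.999, 10_000)
pdf_vals = truncatedweibull_pdf(grid, prior=0.3, c=1.4, scale=0.5)
print(np.sum(pdf_vals) * (grid[1] - grid[0]))  # close to 0.7 (= 1 - prior)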
def fit_weibull(df_speed, x, weibull_params=None):
    from numpy import exp
    from scipy.stats import weibull_min
    if not weibull_params:
        k_shape, _, lamb_scale = weibull_params = weibull_min.fit(df_speed, loc=0)
    else:
        # unpack user-supplied (shape, loc, scale) so the analytic CDF below has its parameters
        k_shape, _, lamb_scale = weibull_params
    y_weibull = weibull_min.pdf(x, *weibull_params)
    density_expected_weibull = weibull_min.cdf(x[1:], *weibull_params) - weibull_min.cdf(x[:-1], *weibull_params)
    y_cdf_weibull = 1 - exp(-(x / lamb_scale)**k_shape)
    return weibull_params, y_weibull, density_expected_weibull, y_cdf_weibull
def survival_basefx(dfMF, dfAdult, dfJuv, month=1, surv_Juv=0.866,
                    shapeMF=3.3, scaleMF=10, shapeAdult=3.8, scaleAdult=8):
    # DataFrame arguments come first: parameters without defaults cannot follow keyword defaults
    '''base survival function

    Parameters
    ----------
    month : int
        time in months of the simulation
    surv_Juv : float
        survival prob of juvenile life stage
    shapeMF : float
        shape parameter for weibull distribution of MF
    scaleMF : int
        scale parameter for weibull distribution of MF
    shapeAdult : float
        shape parameter for weibull distribution of Adults
    scaleAdult : int
        scale parameter for weibull distribution of Adults

    Returns
    -------
    dfMF
    dfAdult
    dfJuv
    '''
    ## Juv is exponential with survival prob 0.866
    survjuv = np.random.random(len(dfJuv.age))      # array of random numbers
    killjuv = np.where(survjuv >= surv_Juv)[0]      # compare random numbers to survival
    dfJuv = dfJuv.drop(dfJuv.index[killjuv])        # remove entire row from df if it dies

    ## MF death prob is a weibull cdf of age
    survmf = np.random.random(len(dfMF.age))        # array of random numbers
    surv_MF = weibull_min.cdf(dfMF.age, shapeMF, loc=0, scale=scaleMF)  # age-dependent death prob
    killmf = np.where(survmf <= surv_MF)[0]         # compare random numbers
    dfMF = dfMF.drop(dfMF.index[killmf])            # remove rows of MF

    # adult worms are only evaluated once per year
    if month % 12 == 0:
        ## Adult death prob is a weibull cdf of age
        survadult = np.random.random(len(dfAdult.age))   # array of random numbers
        surv_Adult = weibull_min.cdf(dfAdult.age, shapeAdult, loc=0, scale=scaleAdult)
        killadult = np.where(survadult <= surv_Adult)[0]  # compare
        dfAdult = dfAdult.drop(dfAdult.index[killadult])  # remove row

    return dfMF, dfJuv, dfAdult
def simHawkesOneDay(
    mu: float,
    alpha: float,
    beta: float,
    R0: np.ndarray,
    nrTrainingDays: int,
    day: int,
    cases: np.ndarray,
    config: EMConfig,
    threshold: float = 1e-5,
) -> int:
    assert cases.shape[0] >= nrTrainingDays, \
        "The number of cases must cover the number of training days"
    timestamps = nrTrainingDays + day - np.array(range(nrTrainingDays + day))
    if config.incubationDistribution == "weibull":
        intensity = weibull_min.cdf(timestamps + 0.5, c=2.453, scale=6.258) \
            - weibull_min.cdf(timestamps - 0.5, c=2.453, scale=6.258)
        intensity[len(intensity) - 1] += weibull_min.cdf(0.5, c=2.453, scale=6.258)
    elif config.incubationDistribution == "gamma":
        intensity = gamma.cdf(timestamps + 0.5, a=5.807, scale=0.948) \
            - gamma.cdf(timestamps - 0.5, a=5.807, scale=0.948)
        intensity[len(intensity) - 1] += gamma.cdf(0.5, a=5.807, scale=0.948)
    elif config.incubationDistribution == "lognormal":
        # use distinct names so the baseline intensity ``mu`` is not overwritten
        sigma_ln = 0.5
        mu_ln = 1.63
        intensity = lognorm.cdf(timestamps + 0.5, s=sigma_ln, scale=np.exp(mu_ln)) \
            - lognorm.cdf(timestamps - 0.5, s=sigma_ln, scale=np.exp(mu_ln))
        intensity[len(intensity) - 1] += lognorm.cdf(0.5, scale=np.exp(mu_ln), s=sigma_ln)
    elif config.incubationDistribution == "normal":
        intensity = norm.cdf(timestamps + 0.5, scale=alpha, loc=beta) \
            - norm.cdf(timestamps - 0.5, scale=alpha, loc=beta)
        intensity[len(intensity) - 1] += norm.cdf(0.5, scale=alpha, loc=beta)
    else:
        raise NotImplementedError
    intensity = intensity[intensity > threshold].reshape(-1, 1)
    kernelRange = list(range(nrTrainingDays + day - intensity.shape[0], nrTrainingDays + day))
    intensityDay = intensity * np.array(R0[kernelRange].T * cases[kernelRange]).reshape(-1, 1)
    intensityDay = np.round(np.sum(intensityDay) + mu)
    # TODO: why a poisson draw here instead of just taking the expectation? perhaps for a confidence interval
    nrTriggeredCases = np.random.poisson(intensityDay)
    nrTriggeredCases = min(nrTriggeredCases, swissPopulation)
    return nrTriggeredCases
def precomputeKernelPDF(alpha: float, beta: float, nrTrainingDays: int,
                        config: EMConfig) -> np.ndarray:
    kernelPDF = np.zeros((nrTrainingDays, nrTrainingDays))
    # Select the incubation-time CDF once; each branch keeps its own parametrisation.
    if config.incubationDistribution == "weibull":
        cdf = lambda t: weibull_min.cdf(t, c=alpha, scale=beta)
    elif config.incubationDistribution == "gamma":
        cdf = lambda t: gamma.cdf(t, a=alpha, scale=beta)
    elif config.incubationDistribution == "lognormal":
        cdf = lambda t: lognorm.cdf(t, s=alpha, scale=beta)
    elif config.incubationDistribution == "normal":
        cdf = lambda t: norm.cdf(t, scale=alpha, loc=beta)
    else:
        raise NotImplementedError
    for i in range(nrTrainingDays):
        for j in range(i):
            if i - j == 1:
                # the first lag also absorbs the mass below 0.5 days
                kernelPDF[i, j] = cdf(i - j + 0.5) - cdf(i - j - 1)
            else:
                kernelPDF[i, j] = cdf(i - j + 0.5) - cdf(i - j - 0.5)
    return kernelPDF
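# A rough, self-contained sketch of calling precomputeKernelPDF on its own. EMConfig is
# defined elsewhere, so a SimpleNamespace stand-in with the single attribute the function
# reads is assumed here; nrTrainingDays=30 and the Weibull parameters (taken from the
# hard-coded incubation values in simHawkesOneDay above) are illustrative only.
from types import SimpleNamespace
import numpy as np
from scipy.stats import weibull_min, gamma, lognorm, norm

config = SimpleNamespace(incubationDistribution="weibull")
kernel = precomputeKernelPDF(alpha=2.453, beta=6.258, nrTrainingDays=30, config=config)
# Row i holds the discretised incubation mass assigned to each earlier day j < i; the row
# sum telescopes to the incubation CDF evaluated at i + 0.5 days (here 10.5).
print(kernel[10, :10].sum(), weibull_min.cdf(10.5, c=2.453, scale=6.258))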
def fitweibull(x):
    x1, x2, x3 = x[0], x[1], x[2]
    print(x)
    wei_cdf = weibull_min.cdf(surv_cdf.index, c=x1, loc=x2, scale=x3)
    sum_abs_err = sum(abs(surv_cdf[surv_cdf.columns].values.flatten() - wei_cdf))
    return sum_abs_err
def weib1(self):
    # before vintage year = 2005
    x = range(0, self.lt + UltimYr0 + 1)
    shape = 2.1
    loc = 1.0
    w = weibull_min.cdf(x, shape, loc, scale=self.lt + 2)
    # print("w2", w)
    return w
def fit_weibull(df_speed, x, weibull_params=None, floc=True):
    from scipy.stats import weibull_min
    if not weibull_params:
        if floc:
            # sometimes need to set as loc=0
            k_shape, _, lamb_scale = weibull_params = weibull_min.fit(df_speed, floc=0)
        else:
            k_shape, _, lamb_scale = weibull_params = weibull_min.fit(df_speed)
    else:
        k_shape, _, lamb_scale = weibull_params
    y_weibull = weibull_min.pdf(x, *weibull_params)
    density_expected_weibull = weibull_min.cdf(x[1:], *weibull_params) - weibull_min.cdf(x[:-1], *weibull_params)
    y_cdf_weibull = weibull_min.cdf(x, *weibull_params)
    return weibull_params, y_weibull, density_expected_weibull, y_cdf_weibull
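# A small usage sketch for fit_weibull with synthetic data; the "true" shape and scale
# below (2.0 and 8.0) and the evaluation grid are assumptions made only for this example.
import numpy as np
from scipy.stats import weibull_min

rng = np.random.default_rng(0)
df_speed = weibull_min.rvs(2.0, loc=0, scale=8.0, size=2000, random_state=rng)
x = np.linspace(0.0, 25.0, 100)
params, y_pdf, density_expected, y_cdf = fit_weibull(df_speed, x, floc=True)
print(params)  # fitted (shape, loc, scale); loc is pinned at 0 because floc=True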
def weib2(self):
    x = range(0, self.lt + UltimYr0 + 1)
    shape = 2.1
    loc = 1.0
    w = weibull_min.cdf(x, shape, loc, scale=self.lt + 2)
    # print("w2", w)
    return w
def loglikelihood(I, a, b, T):
    # calculates the inverse of the loglikelihood function for observing data I
    # when the distribution is truncated at time T, given shape a and scale b
    event1 = I[:, 0]  # first events
    event2 = I[:, 1]  # second events
    L = weibull_min.pdf(event2 - event1, b, 0, a) / weibull_min.cdf(T - event1, b, 0, scale=a)
    logL = sum(np.log(L)) ** (-1)  # calculate inverse loglikelihood
    return logL
def predict(group, params):
    current_size = group.shape[0]
    user = group.iloc[0].original_user_id
    # use bracket access: attribute access ``.shape`` returns the pandas shape tuple,
    # not the fitted 'shape' column
    shape = params.loc[user, 'shape']
    scale = params.loc[user, 'scale']
    predicted = current_size / weibull_min.cdf(
        group['time'].values.max(), shape, loc=0, scale=scale)
    return pd.Series({
        'predicted': predicted,
    })
def weibull_plot():
    x = np.linspace(0, 20, 100)
    a = 1.5
    c = 5
    x1 = weibull_min.cdf(x, 0.5, loc=0, scale=c)
    x2 = weibull_min.cdf(x, 1, loc=0, scale=c)
    x3 = weibull_min.cdf(x, 1.5, loc=0, scale=c)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(x, x1, 'b:', label=r'$\alpha = 0.5$')
    ax.plot(x, x2, 'k', label=r'$\alpha = 1.0$')
    ax.plot(x, x3, 'r--', label=r'$\alpha = 1.5$')
    plt.legend(loc='lower right')
    plt.xlabel(r'$t$', fontsize=12)
    plt.ylabel(r'$F(t)$', fontsize=11)
    fig_name = mydir + 'weibull.png'
    fig.savefig(fig_name, bbox_inches="tight", pad_inches=0.4, dpi=600)
    plt.close()
def weibull_survival_plot():
    x = np.linspace(0, 20, 100)
    a = 1.5
    c = 5
    x1 = weibull_min.cdf(x, 0.5, loc=0, scale=c)
    x2 = weibull_min.cdf(x, 1, loc=0, scale=c)
    x3 = weibull_min.cdf(x, 1.5, loc=0, scale=c)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(x, np.log(1 - x1), 'b:', label=r'$\alpha = 0.5$')
    ax.plot(x, np.log(1 - x2), 'k', label=r'$\alpha = 1.0$')
    ax.plot(x, np.log(1 - x3), 'r--', label=r'$\alpha = 1.5$')
    plt.legend(loc='upper right')
    plt.xlabel('Time (days)', fontsize=12)
    plt.ylabel('Proportion surviving, ' + r'$ln\,S(t)$', fontsize=11)
    fig_name = mydir + 'weibull_survival.png'
    fig.savefig(fig_name, bbox_inches="tight", pad_inches=0.4, dpi=600)
    plt.close()
def analytical(self):
    # analytical stress/strain relationship for an infinite number of filaments,
    # as the actual strain multiplied by the survival probability and integrated
    # over random slack
    y_analytical = []
    thetas = linspace(self.theta_loc, self.theta_scale, 1000)
    CDF = uniform.cdf(thetas, loc=self.theta_loc, scale=self.theta_scale)
    for eps in self.e:
        integ_term = (eps - thetas) * Heaviside(eps - thetas) * \
            (1 - weibull_min.cdf((eps - thetas), self.xi_shape, scale=self.xi_scale))
        start = time.perf_counter()  # time.clock() was removed in Python 3.8
        y_analytical.append(trapz(integ_term, CDF))
        print(time.perf_counter() - start)
    self.peaks[2] = max(y_analytical)
    return self.e, array(y_analytical) * self.E
def fit_distribution(data, fit_type, x_min, x_max, n_points=1000):
    # Initialization of the variables
    param, x, cdf, pdf = [-1, -1, -1, -1]
    if fit_type == 'exponweib':
        x = np.linspace(x_min, x_max, n_points)
        # Fit data to the theoretical distribution
        param = exponweib.fit(data, 1, 1, scale=2, loc=0)  # leading-zero literal 02 is invalid in Python 3
        # param = exponweib.fit(data, fa=1, floc=0)
        # param = exponweib.fit(data)
        cdf = exponweib.cdf(x, param[0], param[1], param[2], param[3])
        pdf = exponweib.pdf(x, param[0], param[1], param[2], param[3])
    elif fit_type == 'lognorm':
        x = np.linspace(x_min, x_max, n_points)
        # Fit data to the theoretical distribution
        param = lognorm.fit(data, loc=0)
        cdf = lognorm.cdf(x, param[0], param[1], param[2])
        pdf = lognorm.pdf(x, param[0], param[1], param[2])
    elif fit_type == 'norm':
        x = np.linspace(x_min, x_max, n_points)
        # Fit data to the theoretical distribution
        param = norm.fit(data, loc=0)
        cdf = norm.cdf(x, param[0], param[1])
        pdf = norm.pdf(x, param[0], param[1])
    elif fit_type == 'weibull_min':
        x = np.linspace(x_min, x_max, n_points)
        # Fit data to the theoretical distribution
        param = weibull_min.fit(data, floc=0)
        cdf = weibull_min.cdf(x, param[0], param[1], param[2])
        pdf = weibull_min.pdf(x, param[0], param[1], param[2])
    return param, x, cdf, pdf
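# Example call for fit_distribution, using synthetic Weibull data; the sample parameters
# and the plotting range below are assumptions made for illustration only.
import numpy as np
from scipy.stats import weibull_min

data = weibull_min.rvs(1.8, loc=0, scale=4.0, size=1000, random_state=0)
param, x, cdf, pdf = fit_distribution(data, 'weibull_min', x_min=0.0, x_max=15.0)
print(param)  # (shape, loc, scale), with loc fixed to 0 via floc=0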
def openmax(xdata, returnvalue):
    from scipy.stats import weibull_min
    hyparam = returnvalue[0]
    new_model = returnvalue[1]
    class_num = returnvalue[2]
    mean_vector = returnvalue[3]
    pred = new_model.predict(xdata)
    new_logits = []
    for idx in range(len(pred)):
        new_logit = []
        unknown = 0
        logit = pred[idx]
        for ind in range(class_num):
            # distance of this logit vector from the class mean activation vector
            distance_ = distance(logit, mean_vector[ind])
            # Weibull CDF of the distance gives the weight shifted to the "unknown" class
            weight = weibull_min.cdf(distance_, hyparam[ind][0], hyparam[ind][1], hyparam[ind][2])
            new_logit.append(logit[ind] * (1 - weight))
            unknown += logit[ind] * weight
        new_logit.append(unknown)
        new_logits.append(new_logit)
    output = []
    for i in new_logits:
        output.append(np.argmax(i))
    return output
def calculate_reliability(distribution, linspace):
    if distribution[0] == 'EXP':
        lambda_ = distribution[1]
        scale_ = 1 / lambda_
        return 1 - expon.cdf(linspace, scale=scale_)
    if distribution[0] == 'WEIBULL':
        scale = distribution[1]
        shape = distribution[2]
        return 1 - weibull_min.cdf(linspace, shape, loc=0, scale=scale)
    if distribution[0] == 'NORMAL':
        mu = distribution[1]
        sigma = distribution[2]
        return 1 - norm.cdf(linspace, loc=mu, scale=sigma)
    if distribution[0] == 'LOGNORM':
        mu = distribution[1]
        sigma = distribution[2]
        scale = math.exp(mu)
        return 1 - lognorm.cdf(linspace, sigma, loc=0, scale=scale)
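# Brief usage sketch for calculate_reliability. The tuples follow the ('NAME', param1, param2)
# convention the function reads; the numeric values here are arbitrary examples.
import numpy as np

linspace = np.linspace(0.0, 10.0, 50)
rel_weibull = calculate_reliability(('WEIBULL', 5.0, 1.5), linspace)  # scale=5, shape=1.5
rel_exp = calculate_reliability(('EXP', 0.2), linspace)               # rate lambda = 0.2
print(rel_weibull[0], rel_exp[0])  # both equal 1.0: everything survives at t = 0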
def compute_probability(self, proxel, timestamp):
    # print('Computing for ' + str(self.state))
    # If the state is False we use the reliability distribution, since we want the
    # transition from state True to state False.
    if proxel.state is False:
        distribution = self.reliability_distribution
    else:
        distribution = self.maintainability_distribution
    # print('Before Probability: ' + str(self.probability))
    if distribution[0] == 'EXP':
        lambda_ = distribution[1]
        scale_ = 1 / lambda_
        proxel.probability *= (expon.pdf(timestamp, scale=scale_)
                               / (1 - expon.cdf(timestamp, scale=scale_))) * self.delta_time
    if distribution[0] == 'WEIBULL':
        scale = distribution[1]
        shape = distribution[2]
        proxel.probability *= (weibull_min.pdf(timestamp, shape, loc=0, scale=scale)
                               / (1 - weibull_min.cdf(timestamp, shape, loc=0, scale=scale))) * self.delta_time
    if distribution[0] == 'NORMAL':
        mu = distribution[1]
        sigma = distribution[2]
        proxel.probability *= (norm.pdf(timestamp, loc=mu, scale=sigma)
                               / (1 - norm.cdf(timestamp, loc=mu, scale=sigma))) * self.delta_time
    if distribution[0] == 'LOGNORM':
        mu = distribution[1]
        sigma = distribution[2]
        scale = math.exp(mu)
        proxel.probability *= (lognorm.pdf(timestamp, sigma, loc=0, scale=scale)
                               / (1 - lognorm.cdf(timestamp, sigma, loc=0, scale=scale))) * self.delta_time
def weib(self):
    x = range(0, self.lt + UltimYr + 1)
    w = weibull_min.cdf(x, 3, loc=2.5, scale=self.lt) * self.OrigNum
    # print(w)
    return w
def _get_cs(self, sigma_c):
    Pf = weibull_min.cdf(sigma_c, self.m, scale=self.scale_sigma_c)
    if Pf == 0:
        Pf = 1e-15
    return self.cs_final * 1.0 / Pf
def weibull(x, c, loc, scale, amp):
    from scipy.stats import weibull_min
    return amp * weibull_min.cdf(x, c, loc, scale)
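# The amplitude-scaled CDF above matches the call signature scipy.optimize.curve_fit expects,
# so one plausible use (an assumption, not taken from the original source) is fitting it to
# saturating data; convergence depends on a sensible starting point p0.
import numpy as np
from scipy.optimize import curve_fit

xdata = np.linspace(0.0, 10.0, 200)
ydata = weibull(xdata, 2.0, 0.0, 3.0, 5.0) + np.random.default_rng(1).normal(0.0, 0.05, xdata.size)
popt, pcov = curve_fit(weibull, xdata, ydata, p0=[1.5, 0.0, 2.0, 4.0])
print(popt)  # recovered (c, loc, scale, amp), roughly [2, 0, 3, 5]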
def _get_cs(self, sigma_c):
    Pf = weibull_min.cdf(sigma_c, self.m, scale=self.scale_sigma_c)
    if Pf == 0:
        Pf = 1e-15
    return self.cs_final * 1.0 / Pf
ax.plot(x, weibull_min.pdf(x, c), 'r-', lw=5, alpha=0.6, label='weibull_min pdf')

# Alternatively, the distribution object can be called (as a function)
# to fix the shape, location and scale parameters. This returns a "frozen"
# RV object holding the given parameters fixed.

# Freeze the distribution and display the frozen ``pdf``:

rv = weibull_min(c)
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')

# Check accuracy of ``cdf`` and ``ppf``:

vals = weibull_min.ppf([0.001, 0.5, 0.999], c)
np.allclose([0.001, 0.5, 0.999], weibull_min.cdf(vals, c))
# True

# Generate random numbers:

r = weibull_min.rvs(c, size=1000)

# And compare the histogram:

ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
ax.legend(loc='best', frameon=False)
plt.show()
def cdf(self, x):
    """
    @brief Return the value of a cumulative probability function for x.
    """
    return weibull_min.cdf(x, self.shape, loc=self.loc, scale=self.scale)
def flujos(inicio, final, tau, param_dem):
    numero_de_anios = final - inicio + 1
    stocks_viviendas = viviendas(inicio, final)
    # Unpack the elements of the input variable param_dem
    c = param_dem[0]
    traslacion = param_dem[1]
    escala = param_dem[2]
    # x1 has as many elements as the number of study years chosen.
    x1 = np.linspace(0, numero_de_anios - 1, numero_de_anios)
    # x2 has tau extra years, to allow for the time shifts.
    x2 = np.linspace(0, numero_de_anios + tau - 1, numero_de_anios + tau)
    # x3 is used for the final plots.
    x3 = np.linspace(inicio, final, numero_de_anios)
    # Initialise the demolition and renovation matrices
    matriz_dem = np.zeros((numero_de_anios, numero_de_anios))
    matriz_ren = np.zeros((numero_de_anios, numero_de_anios))
    # Initialise the output arrays of the algorithm
    nuevas = np.zeros(numero_de_anios)
    demoliciones = np.zeros(numero_de_anios)
    renovaciones = np.zeros(numero_de_anios)
    dist_vida = np.ones(numero_de_anios + tau) - weibull_min.cdf(x2, c, traslacion, escala)
    dist_vida_desplazada = dist_vida[tau:]
    dist_demoliciones = np.array(weibull_min.pdf(x1, c, traslacion, escala))
    dist_renovaciones = renovaciones_ciclos(numero_de_anios, tau, dist_vida_desplazada, x1)
    # c and escala are the same as in dist_demoliciones, but the probability function is not
    # shifted because this distribution applies to the old housing stock
    dist_demoliciones_inic = np.array(weibull_min.pdf(x1, c, 0, escala))
    # Use a uniform probability because roughly the same share of the initial dwellings is
    # renovated each year. The value is 1/tau because a dwelling is renovated on average once
    # every tau years, so the probability of renovating in a given year is 1/tau
    dist_renovaciones_inic = np.full((1, numero_de_anios), 1 / tau)
    # Initial conditions
    matriz_dem[:, 0] = dist_demoliciones_inic * stocks_viviendas[0]
    matriz_ren[:, 0] = dist_demoliciones_inic * stocks_viviendas[0]
    nuevas[0] = matriz_dem[0, 0]
    demoliciones[0] = matriz_dem[0, 0]
    demoliciones[1] = matriz_dem[0, 1]
    renovaciones[0] = matriz_ren[0, 0]
    for i in range(1, numero_de_anios):
        aumento_stock = stocks_viviendas[i] - stocks_viviendas[i - 1]
        nuevas[i] = aumento_stock + demoliciones[i]
        renovaciones[i] = sum(matriz_ren[i, :i])
        if i + 1 < numero_de_anios:
            matriz_dem[i + 1:, i] = dist_demoliciones[:numero_de_anios - 1 - i] * nuevas[i]
            demoliciones[i + 1] = sum(matriz_dem[i + 1, :i + 1])
            matriz_ren[i + 1:, i] = dist_renovaciones[:numero_de_anios - 1 - i] * nuevas[i]
    plt.plot(x3, nuevas, 'r', label="No. new")
    plt.plot(x3, demoliciones, 'b', label="No. demolished")
    plt.plot(x3, renovaciones, 'g', label="No. renovated")
    plt.legend(loc="lower right", title="Dwellings", frameon=False)
    plt.show()
    return nuevas, demoliciones, renovaciones
def cdf(self, x):
    '''
    @brief Return the value of a cumulative probability function for x.
    '''
    return weibull_min.cdf(x, self.shape, loc=self.loc, scale=self.scale)
def plot_weibull(name, metric, distribution, times, theoretical_distribution=None):
    scale = distribution[1]
    shape = distribution[2]
    theoretical_scale = None
    theoretical_shape = None
    # Checks whether there is a theoretical distribution to compare against
    if theoretical_distribution is not None:
        theoretical_scale = theoretical_distribution[1]
        theoretical_shape = theoretical_distribution[2]
    fig, subplots = setup_fig_subplots(metric)
    fig.suptitle(name)
    linspace = np.linspace(weibull_min.ppf(0.001, shape, loc=0, scale=scale),
                           weibull_min.ppf(0.999, shape, loc=0, scale=scale), 1000)
    # First plot: PDF
    if theoretical_distribution is not None:
        subplots[0].plot(linspace, weibull_min.pdf(linspace, shape, loc=0, scale=scale),
                         'r-', lw=1, alpha=0.6, label='Reconstructed')
        subplots[0].plot(linspace, weibull_min.pdf(linspace, theoretical_shape, loc=0, scale=theoretical_scale),
                         'b-', lw=1, alpha=0.6, label='Theoretical')
        subplots[0].legend()
    else:
        subplots[0].plot(linspace, weibull_min.pdf(linspace, shape, loc=0, scale=scale),
                         'r-', lw=1, alpha=0.6)
    if times != EMPTY_LIST:
        # density=True replaces the normed argument removed from recent matplotlib
        if metric == 'Reliability':
            subplots[0].hist(times, bins=20, density=True, histtype='stepfilled',
                             alpha=0.2, label='Time to failures')
        if metric == 'Maintainability':
            subplots[0].hist(times, bins=20, density=True, histtype='stepfilled',
                             alpha=0.2, label='Time to repairs')
        subplots[0].legend()
    subplots[0].set_title('Weibull PDF')
    # Second plot: CDF and/or Maintainability
    if theoretical_distribution is not None:
        subplots[1].plot(linspace, weibull_min.cdf(linspace, shape, loc=0, scale=scale),
                         'r-', lw=1, alpha=0.6, label='Reconstructed')
        subplots[1].plot(linspace, weibull_min.cdf(linspace, theoretical_shape, loc=0, scale=theoretical_scale),
                         'b-', lw=1, alpha=0.6, label='Theoretical')
        subplots[1].legend()
    else:
        subplots[1].plot(linspace, weibull_min.cdf(linspace, shape, loc=0, scale=scale),
                         'r-', lw=1, alpha=0.6)
    if metric == 'Reliability':
        subplots[1].set_title('Weibull CDF')
    if metric == 'Maintainability':
        subplots[1].set_title('Weibull CDF (Maintainability)')
    # Third plot: Reliability
    if metric == 'Reliability':
        if theoretical_distribution is not None:
            subplots[2].plot(linspace, 1 - weibull_min.cdf(linspace, shape, loc=0, scale=scale),
                             'r-', lw=1, alpha=0.6, label='Reconstructed')
            subplots[2].plot(linspace, 1 - weibull_min.cdf(linspace, theoretical_shape, loc=0, scale=theoretical_scale),
                             'b-', lw=1, alpha=0.6, label='Theoretical')
            subplots[2].legend()
        else:
            subplots[2].plot(linspace, 1 - weibull_min.cdf(linspace, shape, loc=0, scale=scale),
                             'r-', lw=1, alpha=0.6)
        subplots[2].set_title(metric)
    # plt.show()
    plt.show(block=False)
def fitweibb(x, c, scale):  # x = wind
    return 1 / (1 - weibull_min.cdf(x, c, 0, scale))  # returns the return period
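# fitweibb returns 1 / (1 - F(x)), i.e. the expected number of observations between
# exceedances of x. The wind-speed and Weibull values below are assumptions for illustration.
from scipy.stats import weibull_min

print(fitweibb(25.0, 2.0, 10.0))  # roughly 518 observations per exceedance of wind speed 25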
def my_weibull(x, c=0.6, a=0.8, b=3):
    return np.real(1 - weibull_min.cdf(x**a / b, c))
def survival_mdafx(dfMF, dfAdult, dfJuv, month=1, macrocide=0.05, microcide=0.90,
                   juvcide=0.45, clear_count=1, surv_Juv=0.866, shapeMF=3.3,
                   scaleMF=12, shapeAdult=3.8, scaleAdult=8):
    # DataFrame arguments come first: parameters without defaults cannot follow keyword defaults
    '''survival function during mass drug administration (MDA)

    Parameters
    ----------
    month : int
        time in months of the simulation
    macrocide : float
        percent of adults killed by drug
    microcide : float
        percent of MF killed by drug
    juvcide : float
        percent of juveniles killed by drug; could be 0 or avg micro/macro
    clear_count : int
        time since MDA, drug was administered
    surv_Juv : float
        survival prob of juvenile life stage
    shapeMF : float
        shape parameter for weibull distribution of MF
    scaleMF : int
        scale parameter for weibull distribution of MF
    shapeAdult : float
        shape parameter for weibull distribution of Adults
    scaleAdult : int
        scale parameter for weibull distribution of Adults

    Returns
    -------
    dfMF
    dfAdult
    dfJuv
    '''
    if clear_count == 1:
        ## MF: each row dies with probability microcide (0.90)
        mfkill = np.random.random(len(dfMF.age))
        dfMF = dfMF.drop(dfMF.index[np.where(mfkill <= microcide)[0]])
        ## Juv: each row dies with probability juvcide (0.45)
        juvkill = np.random.random(len(dfJuv.age))
        dfJuv = dfJuv.drop(dfJuv.index[np.where(juvkill <= juvcide)[0]])
        ## Adult: each row dies with probability macrocide (0.05)
        adultkill = np.random.random(len(dfAdult.age))
        dfAdult = dfAdult.drop(dfAdult.index[np.where(adultkill <= macrocide)[0]])
    else:
        ## Juv is exponential with survival prob 0.866
        survjuv = np.random.random(len(dfJuv.age))      # array of random numbers
        killjuv = np.where(survjuv >= surv_Juv)[0]      # compare random numbers to survival
        dfJuv = dfJuv.drop(dfJuv.index[killjuv])        # remove entire row from df if it dies
        ## MF death prob is a weibull cdf of age
        survmf = np.random.random(len(dfMF.age))        # array of random numbers
        surv_MF = weibull_min.cdf(dfMF.age, shapeMF, loc=0, scale=scaleMF)
        killmf = np.where(survmf <= surv_MF)[0]         # compare random numbers
        dfMF = dfMF.drop(dfMF.index[killmf])            # remove rows of MF
        # adult worms are only evaluated once per year
        if month % 12 == 0:
            survadult = np.random.random(len(dfAdult.age))
            surv_Adult = weibull_min.cdf(dfAdult.age, shapeAdult, loc=0, scale=scaleAdult)
            killadult = np.where(survadult <= surv_Adult)[0]
            dfAdult = dfAdult.drop(dfAdult.index[killadult])
    return dfMF, dfJuv, dfAdult
def values(self, eps, bundle_length):
    out = self.weibull_params
    scale0 = out[0][0]
    shape = out[0][1]
    # rescale the reference-length scale parameter to the bundle length (size effect)
    scale = scale0 * (self.ref_length / bundle_length) ** (1. / shape) / self.Ef
    return eps * self.Ef * (1. - weibull_min.cdf(eps, shape, scale=scale))