def Burr(c, k, tag=None):
    """
    A Burr random variate

    Parameters
    ----------
    c : scalar
        The first shape parameter
    k : scalar
        The second shape parameter
    """
    assert c > 0 and k > 0, 'Burr "c" and "k" parameters must be greater than zero'
    return uv(ss.burr(c, k), tag=tag)
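A brief usage sketch (not part of the original listing), assuming this wrapper sits in an mcerp-style module where `uv` wraps a frozen scipy.stats distribution into an uncertain variate and `ss` is `scipy.stats`:

import scipy.stats as ss
from mcerp import uv   # assumption: the module that provides uv; adjust to the actual package

x = Burr(10.5, 4.3, tag='burr_example')   # valid shape parameters -> uncertain variate
# Burr(-1.0, 4.3) would trip the assert, since both shape parameters must be positive.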
def gen_f(data_E, update=False):
    # Load model weights.
    w0 = np.load('w0.npy')      # polynomial weights (stress/tiredness)
    w = np.load('w.npy')        # familiarity-exploration (f_e) weights
    w1 = np.load('w1.npy')      # familiarity (f) weights
    k = np.load('k.npy')        # risk-and-delay lookup table
    w2 = np.load('w2.npy')
    w_t = np.load('w_t.npy')    # time weights
    # Burr distribution parameters for each familiarity level.
    m1 = np.load('m1.npy')
    m2 = np.load('m2.npy')
    m3 = np.load('m3.npy')
    m4 = np.load('m4.npy')
    w_c = np.load('w_c.npy')
    w_cc = np.load('w_cc.npy')
    w_e = np.load('w_e.npy')
    w_far = np.load('w_far.npy')

    if update:
        # Back up the current parameters, then refit the model on the new data.
        np.save('w0_old.npy', w0)
        np.save('w_old.npy', w)
        np.save('w1_old.npy', w1)
        np.save('w2_old.npy', w2)
        np.save('k_old.npy', k)
        np.save('w_t_old.npy', w_t)
        np.save('m1_old.npy', m1)
        np.save('m2_old.npy', m2)
        np.save('m3_old.npy', m3)
        np.save('m4_old.npy', m4)
        model_update(data_E)

    stress = np.int8(data_E.iloc[:, 31])
    tired = np.int8(data_E.iloc[:, 32])
    st = np.int8(data_E.iloc[:, 31:33])
    choice3 = np.int8(data_E.iloc[:, 37])
    choice4 = np.int8(data_E.iloc[:, 38])
    tend = np.int8(choice3) + np.int8(choice4)

    # f1: degree-2 polynomial in stress (x1) and tiredness (x2).
    x1 = stress
    x2 = tired
    f1 = (w0[0] + w0[1] * np.int8(x1) + w0[2] * np.int8(x2)
          + w0[3] * np.int8(x1)**2 + w0[4] * np.int8(x1) * np.int8(x2)
          + w0[5] * np.int8(x2)**2)

    # f_e: expected exploration time per familiarity level, from the fitted Burr distributions.
    f = data_E.iloc[:, 21:31]
    f_e = np.zeros((data_E.shape[0], 10))
    rv1 = burr(m1[0], m1[1], m1[2], m1[3])
    rv2 = burr(m2[0], m2[1], m2[2], m2[3])
    rv3 = burr(m3[0], m3[1], m3[2], m3[3])
    rv4 = burr(m4[0], m4[1], m4[2], m4[3])
    x = np.arange(0, 4000)
    f_e[np.where(f == '0')] = 0
    f_e[np.where(f == '1')] = x.dot(rv1.pdf(x))
    f_e[np.where(f == '2')] = x.dot(rv2.pdf(x))
    f_e[np.where(f == '3')] = x.dot(rv3.pdf(x))
    f_e[np.where(f == '4')] = x.dot(rv4.pdf(x))

    f2 = np.sum(np.int8(f_e) * w, axis=1)
    f3 = np.sum(np.int8(f) * w1, axis=1)

    # Split the rows into the four risk/delay combinations.
    risk = data_E.iloc[:, 34]
    delay = data_E.iloc[:, 39]
    tmp1 = np.where((risk == '1') & (delay == '1'))[0]
    tmp2 = np.where((risk == '2') & (delay == '1'))[0]
    tmp3 = np.where((risk == '1') & (delay == '2'))[0]
    tmp4 = np.where((risk == '2') & (delay == '2'))[0]

    # f41/f42/f43: mean, median and mode of the tendency for each risk/delay group.
    f41 = np.zeros(data_E.shape[0])
    f41[tmp1] = k[0, 0]
    f41[tmp2] = k[1, 0]
    f41[tmp3] = k[2, 0]
    f41[tmp4] = k[3, 0]
    f42 = np.zeros(data_E.shape[0])
    f42[tmp1] = k[0, 1]
    f42[tmp2] = k[1, 1]
    f42[tmp3] = k[2, 1]
    f42[tmp4] = k[3, 1]
    f43 = np.zeros(data_E.shape[0])
    f43[tmp1] = k[0, 2]
    f43[tmp2] = k[1, 2]
    f43[tmp3] = k[2, 2]
    f43[tmp4] = k[3, 2]
    f4 = np.sum(np.vstack((f42, f41, f43)).T * w2, axis=1)

    # f5-f9: linear features on time, choices and familiarity.
    time = np.float32(data_E.iloc[:, 1:11])
    f5 = np.sum(time * w_t, axis=1)
    choice = np.int8(data_E.iloc[:, 11:21])
    f6 = choice.dot(w_c)
    f7 = np.vstack((choice3, choice4)).T.dot(w_cc)
    f8 = time.dot(w_e)
    familiar = np.float32(data_E.iloc[:, 21:31])
    f9 = familiar.dot(w_far)

    return f1, f2, f3, f4, f5, f6, f7, f8, f9
def model_update(data_E):
    # Load the current weights.
    w0 = np.load('w0.npy')      # polynomial weights (stress/tiredness)
    w = np.load('w.npy')        # familiarity-exploration (f_e) weights
    w1 = np.load('w1.npy')      # familiarity (f) weights
    k = np.load('k.npy')        # risk-and-delay lookup table
    w2 = np.load('w2.npy')
    w_t = np.load('w_t.npy')    # time weights
    w_c = np.load('w_c.npy')
    w_cc = np.load('w_cc.npy')
    w_e = np.load('w_e.npy')
    w_far = np.load('w_far.npy')
    # Burr distribution parameters for each familiarity level.
    m1 = np.load('m1.npy')
    m2 = np.load('m2.npy')
    m3 = np.load('m3.npy')
    m4 = np.load('m4.npy')

    stress = np.int8(data_E.iloc[:, 31])
    tired = np.int8(data_E.iloc[:, 32])
    st = np.int8(data_E.iloc[:, 31:33])
    choice3 = data_E.iloc[:, 37]
    choice4 = data_E.iloc[:, 38]
    tend = np.int8(choice3) + np.int8(choice4)

    # Refit the degree-2 polynomial of tendency on stress/tiredness and move the
    # stored weights 1% of the way towards the new fit.
    poly_reg = PolynomialFeatures(degree=2)
    X_poly = poly_reg.fit_transform(st)
    pol_reg = LinearRegression()
    pol_reg.fit(X_poly, tend)
    w0_1 = pol_reg.coef_
    np.save('w0.npy', w0 + (w0_1 - w0) / 100)

    # Reshape the wide table (one row per participant) into a long table with
    # one row per participant/option pair.
    data_p = np.zeros((data_E.shape[0] * 10, 13))
    data_clean = np.array(data_E)
    for i in range(0, data_E.shape[0]):
        data_p[i * 10:i * 10 + 10, 0] = data_clean[i, 0]        # ID
        data_p[i * 10:i * 10 + 10, 1] = data_clean[i, 1:11]     # time
        data_p[i * 10:i * 10 + 10, 2] = data_clean[i, 11:21]    # choice
        data_p[i * 10:i * 10 + 10, 3] = data_clean[i, 21:31]    # familiarity
        data_p[i * 10:i * 10 + 10, 4] = data_clean[i, 31]       # stress
        data_p[i * 10:i * 10 + 10, 5] = data_clean[i, 32]       # tired
        data_p[i * 10:i * 10 + 10, 6] = data_clean[i, 33]       # payoff
        data_p[i * 10:i * 10 + 10, 7] = data_clean[i, 34]       # risk
        data_p[i * 10:i * 10 + 10, 8] = data_clean[i, 35]       # bounce 4
        data_p[i * 10:i * 10 + 10, 9] = data_clean[i, 36]       # bounce 3
        data_p[i * 10:i * 10 + 10, 10] = data_clean[i, 37]      # choice 4 (includes 3?)
        data_p[i * 10:i * 10 + 10, 11] = data_clean[i, 38]      # choice 3
        data_p[i * 10:i * 10 + 10, 12] = data_clean[i, 39]      # delay

    # With enough data, refit the Burr parameters for each familiarity level.
    if data_E.shape[0] > 50:
        for i in range(1, 5):
            y = data_p[np.where(data_p[:, 3] == i), 1]
            m = best_fit_distribution(y, bins=200, ax=None)
            np.save('m' + str(i) + '.npy', np.array(m[1]))
    m1 = np.load('m1.npy')
    m2 = np.load('m2.npy')
    m3 = np.load('m3.npy')
    m4 = np.load('m4.npy')

    # Expected exploration time per familiarity level, from the Burr fits.
    f = data_E.iloc[:, 21:31]
    f_e = np.zeros((data_E.shape[0], 10))
    rv1 = burr(m1[0], m1[1], m1[2], m1[3])
    rv2 = burr(m2[0], m2[1], m2[2], m2[3])
    rv3 = burr(m3[0], m3[1], m3[2], m3[3])
    rv4 = burr(m4[0], m4[1], m4[2], m4[3])
    x = np.arange(0, 4000)
    f_e[np.where(f == '0')] = 0
    f_e[np.where(f == '1')] = x.dot(rv1.pdf(x))
    f_e[np.where(f == '2')] = x.dot(rv2.pdf(x))
    f_e[np.where(f == '3')] = x.dot(rv3.pdf(x))
    f_e[np.where(f == '4')] = x.dot(rv4.pdf(x))

    lin_reg = LinearRegression()
    lin_reg.fit(np.int8(f_e), tend)
    w_1 = lin_reg.coef_
    np.save('w.npy', w + (w_1 - w) / 100)

    lin_reg = LinearRegression()
    lin_reg.fit(np.int8(f), tend)
    w1_1 = lin_reg.coef_
    np.save('w1.npy', w1 + (w1_1 - w1) / 100)

    # Recompute mean/median/mode of the tendency for each risk/delay group,
    # keeping the old values when a group is empty.
    risk = data_E.iloc[:, 34]
    delay = data_E.iloc[:, 39]

    tmp1 = np.where((risk == '1') & (delay == '1'))[0]
    if tmp1.size:
        k11 = np.mean(tend[tmp1])
        k12 = np.median(tend[tmp1])
        k13 = np.argmax(np.bincount(tend[tmp1]))
    else:
        k11, k12, k13 = k[0, 0], k[0, 1], k[0, 2]

    tmp2 = np.where((risk == '2') & (delay == '1'))[0]
    if tmp2.size:
        k21 = np.mean(tend[tmp2])
        k22 = np.median(tend[tmp2])
        k23 = np.argmax(np.bincount(tend[tmp2]))
    else:
        k21, k22, k23 = k[1, 0], k[1, 1], k[1, 2]

    tmp3 = np.where((risk == '1') & (delay == '2'))[0]
    if tmp3.size:
        k31 = np.mean(tend[tmp3])
        k32 = np.median(tend[tmp3])
        k33 = np.argmax(np.bincount(tend[tmp3]))
    else:
        k31, k32, k33 = k[2, 0], k[2, 1], k[2, 2]

    tmp4 = np.where((risk == '2') & (delay == '2'))[0]
    if tmp4.size:
        k41 = np.mean(tend[tmp4])
        k42 = np.median(tend[tmp4])
        k43 = np.argmax(np.bincount(tend[tmp4]))
    else:
        k41, k42, k43 = k[3, 0], k[3, 1], k[3, 2]

    k_1 = np.array([[k11, k12, k13],
                    [k21, k22, k23],
                    [k31, k32, k33],
                    [k41, k42, k43]])
    np.save('k.npy', k + (k_1 - k) / 100)

    # Group-level features used to refit the risk/delay weights.
    f41 = np.zeros(data_E.shape[0])
    f41[tmp1] = k[0, 0]
    f41[tmp2] = k[1, 0]
    f41[tmp3] = k[2, 0]
    f41[tmp4] = k[3, 0]
    f42 = np.zeros(data_E.shape[0])
    f42[tmp1] = k[0, 1]
    f42[tmp2] = k[1, 1]
    f42[tmp3] = k[2, 1]
    f42[tmp4] = k[3, 1]
    f43 = np.zeros(data_E.shape[0])
    f43[tmp1] = k[0, 2]
    f43[tmp2] = k[1, 2]
    f43[tmp3] = k[2, 2]
    f43[tmp4] = k[3, 2]

    lin_reg = LinearRegression()
    lin_reg.fit(np.vstack((f42, f41, f43)).T, tend)
    w2_1 = lin_reg.coef_
    np.save('w2.npy', w2 + (w2_1 - w2) / 100)

    lin_reg = LinearRegression()
    time = np.float32(data_E.iloc[:, 1:11])
    lin_reg.fit(time, tend)
    w_t_1 = lin_reg.coef_
    np.save('w_t.npy', w_t + (w_t_1 - w_t) / 100)

    # Regressions of payoff on choices, time and familiarity.
    choice = np.int8(data_E.iloc[:, 11:21])
    payoff = np.float32(data_E.iloc[:, 33])

    lin_reg = LinearRegression()
    lin_reg.fit(choice, payoff)
    w_c_1 = lin_reg.coef_
    np.save('w_c.npy', w_c + (w_c_1 - w_c) / 100)

    lin_reg = LinearRegression()
    lin_reg.fit(np.vstack((choice3, choice4)).T, payoff)
    w_cc_1 = lin_reg.coef_
    np.save('w_cc.npy', w_cc + (w_cc_1 - w_cc) / 100)

    lin_reg = LinearRegression()
    lin_reg.fit(time, payoff)
    w_e_1 = lin_reg.coef_
    np.save('w_e.npy', w_e + (w_e_1 - w_e) / 100)

    familiar = np.float32(data_E.iloc[:, 21:31])
    lin_reg = LinearRegression()
    lin_reg.fit(familiar, payoff)
    w_far_1 = lin_reg.coef_
    np.save('w_far.npy', w_far + (w_far_1 - w_far) / 100)
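The four-element arrays saved as m1.npy ... m4.npy above are (c, d, loc, scale) parameter sets for scipy's Burr distribution. The project's own best_fit_distribution helper is not shown here; a minimal sketch of how one such parameter set could be produced directly with scipy, assuming y holds the exploration times for one familiarity level, is:

import numpy as np
from scipy.stats import burr

# Hypothetical data for one familiarity level; in the pipeline above this
# would be data_p[np.where(data_p[:, 3] == i), 1].
y = burr.rvs(10.5, 4.3, loc=0.0, scale=500.0, size=1000)

# Maximum-likelihood fit returns (c, d, loc, scale), the same four numbers
# that are stored in m1.npy ... m4.npy.
params = burr.fit(y)
np.save('m1_example.npy', np.array(params))  # example filename, not the real one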
# Burr continuous distribution (adapted from the scipy.stats.burr documentation)
from scipy.stats import burr
import matplotlib.pyplot as plt
import numpy as np

fig, ax = plt.subplots(1, 1)

# Calculate the first few moments:
c, d = 10.5, 4.3
mean, var, skew, kurt = burr.stats(c, d, moments='mvsk')

# Display the probability density function (pdf):
x = np.linspace(burr.ppf(0.01, c, d), burr.ppf(0.99, c, d), 100)
ax.plot(x, burr.pdf(x, c, d), 'r-', lw=5, alpha=0.6, label='burr pdf')

# Alternatively, the distribution object can be called (as a function) to fix
# the shape, location and scale parameters. This returns a "frozen" RV object
# holding the given parameters fixed.
# Freeze the distribution and display the frozen pdf:
rv = burr(c, d)
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')

# Check accuracy of cdf and ppf:
vals = burr.ppf([0.001, 0.5, 0.999], c, d)
np.allclose([0.001, 0.5, 0.999], burr.cdf(vals, c, d))  # True

# Generate random numbers and compare the histogram:
r = burr.rvs(c, d, size=1000)
ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
ax.legend(loc='best', frameon=False)
plt.show()
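The frozen form also accepts location and scale, which is how the four fitted parameters (c, d, loc, scale) are passed to burr(...) in the model code above. A brief sketch with illustrative (not fitted) values:

from scipy.stats import burr

c, d, loc, scale = 10.5, 4.3, 0.0, 500.0   # illustrative values, not fitted ones
rv = burr(c, d, loc, scale)                # frozen Burr with all four parameters
print(rv.mean(), rv.std())                 # moments of the shifted/scaled distribution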
def gen_v():
    data = pd.read_csv("data.csv")
    data[data == ' '] = np.nan
    data = data.dropna()

    # First 56 rows for estimation, the rest for testing.
    data_E = data.iloc[0:56, :]
    data_T = data.iloc[56:, :]

    stress = np.int8(data_E.iloc[:, 31])
    tired = np.int8(data_E.iloc[:, 32])
    st = np.int8(data_E.iloc[:, 31:33])
    choice3 = np.int8(data_E.iloc[:, 37])
    choice4 = np.int8(data_E.iloc[:, 38])
    tend = np.int8(choice3) + np.int8(choice4)
    risk = np.int8(data_E.iloc[:, 34])
    delay = np.int8(data_E.iloc[:, 39])

    # Joint frequency of stress and tiredness levels.
    fst = np.zeros((3, 3))
    for i in range(0, 3):
        for j in range(0, 3):
            fst[i, j] = np.sum((st[:, 0] == i) & (st[:, 1] == j))
    fst /= fst.sum()

    # Joint frequency of risk and delay conditions.
    frd = np.zeros((2, 2))
    for i in range(0, 2):
        for j in range(0, 2):
            frd[i, j] = np.sum((risk == i + 1) & (delay == j + 1))
    frd /= frd.sum()

    # Normal fits for the time spent on each of the 10 options.
    time = np.float32(data_E.iloc[:, 1:11])
    mu = np.zeros(10)
    std = np.zeros(10)
    for i in range(0, 10):
        mu[i], std[i] = norm.fit(time[:, i])

    # Normal fits for familiarity.
    familiar = np.float32(data_E.iloc[:, 21:31])
    mu_f = np.zeros(10)
    std_f = np.zeros(10)
    for i in range(0, 10):
        mu_f[i], std_f[i] = norm.fit(familiar[:, i])

    # Expected exploration time per familiarity level, from the fitted Burr distributions.
    m1 = np.load('m1.npy')
    m2 = np.load('m2.npy')
    m3 = np.load('m3.npy')
    m4 = np.load('m4.npy')
    f = data_E.iloc[:, 21:31]
    f_e = np.zeros((data_E.shape[0], 10))
    rv1 = burr(m1[0], m1[1], m1[2], m1[3])
    rv2 = burr(m2[0], m2[1], m2[2], m2[3])
    rv3 = burr(m3[0], m3[1], m3[2], m3[3])
    rv4 = burr(m4[0], m4[1], m4[2], m4[3])
    x = np.arange(0, 4000)
    f_e[np.where(f == '0')] = 0
    f_e[np.where(f == '1')] = x.dot(rv1.pdf(x))
    f_e[np.where(f == '2')] = x.dot(rv2.pdf(x))
    f_e[np.where(f == '3')] = x.dot(rv3.pdf(x))
    f_e[np.where(f == '4')] = x.dot(rv4.pdf(x))

    # Normal fits for the expected exploration times.
    mu_fe = np.zeros(10)
    std_fe = np.zeros(10)
    for i in range(0, 10):
        mu_fe[i], std_fe[i] = norm.fit(f_e[:, i])

    v = [fst, frd, mu, std, mu_fe, std_fe, mu_f, std_f]
    return v, tend, stress, tired, risk, delay, time, familiar, f_e
def all_dists():
    # Distribution parameters were taken from the scipy.stats official
    # documentation examples.
    # Total - 89
    return {
        "alpha": stats.alpha(a=3.57, loc=0.0, scale=1.0),
        "anglit": stats.anglit(loc=0.0, scale=1.0),
        "arcsine": stats.arcsine(loc=0.0, scale=1.0),
        "beta": stats.beta(a=2.31, b=0.627, loc=0.0, scale=1.0),
        "betaprime": stats.betaprime(a=5, b=6, loc=0.0, scale=1.0),
        "bradford": stats.bradford(c=0.299, loc=0.0, scale=1.0),
        "burr": stats.burr(c=10.5, d=4.3, loc=0.0, scale=1.0),
        "cauchy": stats.cauchy(loc=0.0, scale=1.0),
        "chi": stats.chi(df=78, loc=0.0, scale=1.0),
        "chi2": stats.chi2(df=55, loc=0.0, scale=1.0),
        "cosine": stats.cosine(loc=0.0, scale=1.0),
        "dgamma": stats.dgamma(a=1.1, loc=0.0, scale=1.0),
        "dweibull": stats.dweibull(c=2.07, loc=0.0, scale=1.0),
        "erlang": stats.erlang(a=2, loc=0.0, scale=1.0),
        "expon": stats.expon(loc=0.0, scale=1.0),
        "exponnorm": stats.exponnorm(K=1.5, loc=0.0, scale=1.0),
        "exponweib": stats.exponweib(a=2.89, c=1.95, loc=0.0, scale=1.0),
        "exponpow": stats.exponpow(b=2.7, loc=0.0, scale=1.0),
        "f": stats.f(dfn=29, dfd=18, loc=0.0, scale=1.0),
        "fatiguelife": stats.fatiguelife(c=29, loc=0.0, scale=1.0),
        "fisk": stats.fisk(c=3.09, loc=0.0, scale=1.0),
        "foldcauchy": stats.foldcauchy(c=4.72, loc=0.0, scale=1.0),
        "foldnorm": stats.foldnorm(c=1.95, loc=0.0, scale=1.0),
        # "frechet_r": stats.frechet_r(c=1.89, loc=0.0, scale=1.0),
        # "frechet_l": stats.frechet_l(c=3.63, loc=0.0, scale=1.0),
        "genlogistic": stats.genlogistic(c=0.412, loc=0.0, scale=1.0),
        "genpareto": stats.genpareto(c=0.1, loc=0.0, scale=1.0),
        "gennorm": stats.gennorm(beta=1.3, loc=0.0, scale=1.0),
        "genexpon": stats.genexpon(a=9.13, b=16.2, c=3.28, loc=0.0, scale=1.0),
        "genextreme": stats.genextreme(c=-0.1, loc=0.0, scale=1.0),
        "gausshyper": stats.gausshyper(a=13.8, b=3.12, c=2.51, z=5.18, loc=0.0, scale=1.0),
        "gamma": stats.gamma(a=1.99, loc=0.0, scale=1.0),
        "gengamma": stats.gengamma(a=4.42, c=-3.12, loc=0.0, scale=1.0),
        "genhalflogistic": stats.genhalflogistic(c=0.773, loc=0.0, scale=1.0),
        "gilbrat": stats.gilbrat(loc=0.0, scale=1.0),
        "gompertz": stats.gompertz(c=0.947, loc=0.0, scale=1.0),
        "gumbel_r": stats.gumbel_r(loc=0.0, scale=1.0),
        "gumbel_l": stats.gumbel_l(loc=0.0, scale=1.0),
        "halfcauchy": stats.halfcauchy(loc=0.0, scale=1.0),
        "halflogistic": stats.halflogistic(loc=0.0, scale=1.0),
        "halfnorm": stats.halfnorm(loc=0.0, scale=1.0),
        "halfgennorm": stats.halfgennorm(beta=0.675, loc=0.0, scale=1.0),
        "hypsecant": stats.hypsecant(loc=0.0, scale=1.0),
        "invgamma": stats.invgamma(a=4.07, loc=0.0, scale=1.0),
        "invgauss": stats.invgauss(mu=0.145, loc=0.0, scale=1.0),
        "invweibull": stats.invweibull(c=10.6, loc=0.0, scale=1.0),
        "johnsonsb": stats.johnsonsb(a=4.32, b=3.18, loc=0.0, scale=1.0),
        "johnsonsu": stats.johnsonsu(a=2.55, b=2.25, loc=0.0, scale=1.0),
        "ksone": stats.ksone(n=1e03, loc=0.0, scale=1.0),
        "kstwobign": stats.kstwobign(loc=0.0, scale=1.0),
        "laplace": stats.laplace(loc=0.0, scale=1.0),
        "levy": stats.levy(loc=0.0, scale=1.0),
        "levy_l": stats.levy_l(loc=0.0, scale=1.0),
        "levy_stable": stats.levy_stable(alpha=0.357, beta=-0.675, loc=0.0, scale=1.0),
        "logistic": stats.logistic(loc=0.0, scale=1.0),
        "loggamma": stats.loggamma(c=0.414, loc=0.0, scale=1.0),
        "loglaplace": stats.loglaplace(c=3.25, loc=0.0, scale=1.0),
        "lognorm": stats.lognorm(s=0.954, loc=0.0, scale=1.0),
        "lomax": stats.lomax(c=1.88, loc=0.0, scale=1.0),
        "maxwell": stats.maxwell(loc=0.0, scale=1.0),
        "mielke": stats.mielke(k=10.4, s=3.6, loc=0.0, scale=1.0),
        "nakagami": stats.nakagami(nu=4.97, loc=0.0, scale=1.0),
        "ncx2": stats.ncx2(df=21, nc=1.06, loc=0.0, scale=1.0),
        "ncf": stats.ncf(dfn=27, dfd=27, nc=0.416, loc=0.0, scale=1.0),
        "nct": stats.nct(df=14, nc=0.24, loc=0.0, scale=1.0),
        "norm": stats.norm(loc=0.0, scale=1.0),
        "pareto": stats.pareto(b=2.62, loc=0.0, scale=1.0),
        "pearson3": stats.pearson3(skew=0.1, loc=0.0, scale=1.0),
        "powerlaw": stats.powerlaw(a=1.66, loc=0.0, scale=1.0),
        "powerlognorm": stats.powerlognorm(c=2.14, s=0.446, loc=0.0, scale=1.0),
        "powernorm": stats.powernorm(c=4.45, loc=0.0, scale=1.0),
        "rdist": stats.rdist(c=0.9, loc=0.0, scale=1.0),
        "reciprocal": stats.reciprocal(a=0.00623, b=1.01, loc=0.0, scale=1.0),
        "rayleigh": stats.rayleigh(loc=0.0, scale=1.0),
        "rice": stats.rice(b=0.775, loc=0.0, scale=1.0),
        "recipinvgauss": stats.recipinvgauss(mu=0.63, loc=0.0, scale=1.0),
        "semicircular": stats.semicircular(loc=0.0, scale=1.0),
        "t": stats.t(df=2.74, loc=0.0, scale=1.0),
        "triang": stats.triang(c=0.158, loc=0.0, scale=1.0),
        "truncexpon": stats.truncexpon(b=4.69, loc=0.0, scale=1.0),
        "truncnorm": stats.truncnorm(a=0.1, b=2, loc=0.0, scale=1.0),
        "tukeylambda": stats.tukeylambda(lam=3.13, loc=0.0, scale=1.0),
        "uniform": stats.uniform(loc=0.0, scale=1.0),
        "vonmises": stats.vonmises(kappa=3.99, loc=0.0, scale=1.0),
        "vonmises_line": stats.vonmises_line(kappa=3.99, loc=0.0, scale=1.0),
        "wald": stats.wald(loc=0.0, scale=1.0),
        "weibull_min": stats.weibull_min(c=1.79, loc=0.0, scale=1.0),
        "weibull_max": stats.weibull_max(c=2.87, loc=0.0, scale=1.0),
        "wrapcauchy": stats.wrapcauchy(c=0.0311, loc=0.0, scale=1.0),
    }
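A brief sketch (not part of the original listing) of how a dictionary like this could be used to screen candidate distributions against a data sample with a Kolmogorov-Smirnov test; the sample below is synthetic and the screening can be slow for some distributions (e.g. levy_stable):

import numpy as np
from scipy import stats

data = stats.burr.rvs(10.5, 4.3, size=500)   # synthetic sample for illustration

results = {}
for name, dist in all_dists().items():
    # KS statistic between the sample and each frozen candidate distribution.
    ks_stat, p_value = stats.kstest(data, dist.cdf)
    results[name] = ks_stat

# Distributions whose fixed example parameters match the sample best.
best = sorted(results, key=results.get)[:5]
print(best)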
fig = plt.figure(dpi=1300)

# QQ plot against the fitted Mielke distribution.
ax1 = fig.add_subplot(1, 2, 1)
sm.qqplot(logeados,
          stats.mielke(parametros_mielke[0], parametros_mielke[1],
                       parametros_mielke[2], parametros_mielke[3]),
          line="45", ax=ax1)
ax1.set_title('mielke', size=11.0)
ax1.set_xlabel("")
ax1.set_ylabel("")

# QQ plot against the fitted Burr distribution.
ax2 = fig.add_subplot(1, 2, 2)
sm.qqplot(logeados,
          stats.burr(parametros_burr[0], parametros_burr[1],
                     parametros_burr[2], parametros_burr[3]),
          line="45", ax=ax2)
ax2.set_title('burr', size=11.0)
ax2.set_xlabel("")
ax2.set_ylabel("")

fig.tight_layout(pad=0.7)
fig.text(0.5, 0, 'Theoretical quantiles', ha='center', va='center')
fig.text(0., 0.5, 'Observed quantiles', ha='center', va='center', rotation='vertical')
# fig.suptitle('Quantile plot of the fitted distributions')
fig.subplots_adjust(top=0.86)
# plt.savefig('P1.QQDist.png', format='png', dpi=1300)
plt.show()
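A minimal sketch (names taken from the snippet above, data assumed) of how the parameter vectors passed to stats.mielke and stats.burr could be produced, assuming logeados is a 1-D array of log-transformed observations:

import numpy as np
from scipy import stats

# Placeholder for the log-transformed sample used in the QQ plots above.
logeados = np.log1p(stats.burr.rvs(10.5, 4.3, scale=100, size=300))

parametros_mielke = stats.mielke.fit(logeados)   # (k, s, loc, scale)
parametros_burr = stats.burr.fit(logeados)       # (c, d, loc, scale)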