Example no. 1
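The listing picks up after the variable declarations. A minimal sketch of the setup it assumes follows; the concrete values of s, s_0, mu and d and the progress_bar helper are illustrative, not taken from the original script.

import math
import numpy as np
import matplotlib.pyplot as plt

s = 50_000           # total number of Metropolis-Hastings draws (illustrative)
s_0 = 5_000          # burn-in length (illustrative)
mu, d = 0.0, 4.0     # mean and std. dev. of the independence proposal N(mu, d^2)
theta = np.zeros(s)  # chain of draws; theta[0] = 0 is the initial state
count = 0            # number of accepted candidates

def progress_bar(i, total):
    # Simple placeholder for the progress helper used throughout the listings.
    if i % max(total // 100, 1) == 0:
        print(f"\r{100 * i // total} %", end="", flush=True)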
for i in range(1, s):
    # a) Generate the candidate theta_ast
    theta_ast = np.random.normal() * d + mu
    # b) Compute the acceptance probability
    alpha = min(
        math.exp(-1 / 2 * abs(theta_ast) + 1 / 2 * abs(theta[i - 1]) - 1 /
                 (2 * d**2) * (theta[i - 1] - mu)**2 + 1 / (2 * d**2) *
                 (theta_ast - mu)**2), 1)
    # c) Decide whether to accept or reject the candidate
    if alpha > np.random.rand():
        theta[i] = theta_ast
        count += 1
    else:
        theta[i] = theta[i - 1]

    progress_bar(i, s)

# ===== 3. Presentation of results =====
theta = theta[s_0:]  # Discard the first s_0 samples (burn-in)

# Graphical illustration of convergence
fig, axs = plt.subplots(1, 2, tight_layout=True)

axs[0].plot(theta[::500])
axs[0].set_title("Every 500th estimate of theta")
axs[1].hist(theta, bins=50, facecolor="blue", alpha=0.5, edgecolor="black")
axs[1].set_title("Distribution of theta")

# Compute the moments
e_theta = np.mean(theta)
d_theta = np.var(theta)
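The acceptance ratio above uses the target kernel exp(-|theta|/2), which is the kernel of a Laplace(0, 2) density, so the sample moments can be compared with E[theta] = 0 and Var[theta] = 8. An optional cross-check along these lines (assuming scipy is available):

from scipy.stats import laplace

print(f"Acceptance rate: {count / (s - 1):.3f}")
print(f"Mean:     sample = {e_theta:.4f}, theoretical = {laplace(scale=2).mean():.1f}")
print(f"Variance: sample = {d_theta:.4f}, theoretical = {laplace(scale=2).var():.1f}")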
## 1 - Declaration of variables
S = 1_000_000  # number of simulations
r = 0  # counter of the a_win:(n - a_win) state
b_win = 0  # running sum of Bob's win probability at the a_win:(n - a_win) state

step = round(S / 100)
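# The simulation loop below also relies on names declared elsewhere in the
# script. A minimal sketch of what it appears to assume; the values of n and
# a_win are inferred from the "8x1" comment and the cube (1 - p)**3 below
# (i.e. the 5:3 score) and are not part of the original listing:
from numpy.random import uniform
from tabulate import tabulate

n = 8      # total number of rounds played so far
a_win = 5  # Alice's wins, so Bob needs the remaining n - a_win = 3 rounds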

## 2 - Simulation of the game
for i in range(S):
    p = uniform()  # initial division of the table, p ~ U(0,1)
    y = uniform(size=n)  # 8x1 vector of independent draws from U(0,1)
    if sum(y < p) == a_win:
        r += 1  # increment the a_win:(n - a_win) state counter by 1
        b_win += (1 - p)**3  # sum up Bob's win probabilities
        # (over the runs that reached the a_win:(n - a_win) state)
    progress_bar(i, S)

## 3 - Computation and presentation of results
b_win_expected = b_win / r  # Expected probability of Bob winning
# given the a_win:(n - a_win) state
a_win_expected = 1 - b_win_expected  # Expected probability of Alice winning
# given the a_win:(n - a_win) state

print(
    f"\nAverage win probability of each player at score {a_win}:{n-a_win}:")
print(
    tabulate([["Name", "Prob."], ["Alice", a_win_expected],
              ["Bob", b_win_expected]],
             headers="firstrow"))

print(f"\nNumber of {a_win}:{n-a_win} states: {r}")
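With a uniform prior on p, the posterior after a 5:3 score is Beta(6, 4), so Bob's win probability is E[(1 - p)^3 | data] = B(6, 7)/B(6, 4) = 1/11 ≈ 0.0909. A short analytic cross-check of the simulation, again assuming a_win = 5 and n = 8:

from scipy.special import beta as beta_fn

b_win_analytic = beta_fn(6, 7) / beta_fn(6, 4)  # = 1/11
print(f"Analytic Pr(Bob wins)   = {b_win_analytic:.4f}")      # approx. 0.0909
print(f"Analytic Pr(Alice wins) = {1 - b_win_analytic:.4f}")  # approx. 0.9091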
Example no. 3
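The excerpt below is the body of a Gibbs-sampler loop; the data (x, y), the prior moments (beta_0, v_0), the precision draws h, the accumulators sdd_nom and sde_nom, and the loop header are defined earlier in the script and are not shown. The short aliases it uses presumably come from imports along these lines (a sketch, not the original header):

import numpy as np
from numpy import sqrt, transpose as t
from numpy.linalg import inv
from scipy.stats import norm, multivariate_normal as mvn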
    # Part computing the numerator of the Savage-Dickey density ratio
    # d) beta_depart = 0 (second element of the beta vector)
    # p(b|h,y) ~ N(beta_1, v_1)
    v_1 = inv(inv(v_0) + h[i] * (t(x) @ x))  # (4.4) in Koop (2003)
    beta_1 = v_1 @ (inv(v_0) @ beta_0 + h[i] * (t(x) @ y))
    new_sdd_nom = norm.pdf(0, beta_1[1][0], sqrt(v_1[1][1]))
    sdd_nom.append(new_sdd_nom)

    # e) beta_reds = beta_trains
    # p(R*b|h,y) ~ N(R * beta_1, R * v_1 * R')
    r = np.array([[0, 0, 1, -1]])
    new_sde_nom = mvn.pdf(0, r @ beta_1, r @ v_1 @ t(r))
    sde_nom.append(new_sde_nom)

    progress_bar(i, s)

# ===== 3. Posterior analysis =====

# discard the first s_0 samples (burn-in)
beta = np.delete(beta, range(s_0), axis=1)
h = h[s_0:]
sdd_nom = sdd_nom[s_0:]
sde_nom = sde_nom[s_0:]
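# The averaged numerators can be turned into Savage-Dickey density ratios by
# dividing by the prior density evaluated at each restriction (Koop, 2003,
# ch. 4). A sketch of that step, assuming beta_0 and v_0 hold the prior mean
# and covariance used above; R*beta is scalar, so a univariate normal suffices.
r = np.array([[0, 0, 1, -1]])
bf_depart = np.mean(sdd_nom) / norm.pdf(0, np.ravel(beta_0)[1], sqrt(v_0[1, 1]))
bf_restr = np.mean(sde_nom) / norm.pdf(0, (r @ beta_0).item(),
                                       sqrt((r @ v_0 @ t(r)).item()))
print(f"Bayes factor for beta_depart = 0:         {bf_depart:.4f}")
print(f"Bayes factor for beta_reds = beta_trains: {bf_restr:.4f}")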

# Graphical illustration of convergence
k = 100  # step length (not to be confused with the step in the geweke function)
fig_1 = plt.figure()
axs = fig_1.subplots(nrows=3, ncols=2)

for i in range(beta.shape[0]):
Example no. 4
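A minimal sketch of the declarations the Gibbs listing assumes; the values of s, s_1 and rho are illustrative, and mu is stored as a 2x1 column to match the mu[0][0] indexing below:

import numpy as np
from numpy import sqrt
from numpy.random import randn
import matplotlib.pyplot as plt

s = 10_000                     # number of Gibbs iterations (illustrative)
s_1 = 1_000                    # index of the first draw kept for the scatter plot
rho = 0.9                      # correlation of the bivariate normal target
mu = np.array([[0.0], [0.0]])  # 2x1 mean vector, indexed as mu[0][0] and mu[1][0]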
theta = np.zeros((2, s + 1))
theta[:, 0] = np.array([-1000, 1000])

# ===== 2. Gibbs sampler =====

for i in range(1, s + 1):
    # draw theta_1 | theta_2 ~ N(mu_12, sigma_12)
    mu_12 = mu[0][0] + rho * (theta[1, i - 1] - mu[1][0])
    sigma_12 = 1 - rho**2
    theta[0, i] = randn() * sqrt(sigma_12) + mu_12

    # draw theta_2 | theta_1 ~ N(mu_21, sigma_21)
    mu_21 = mu[1][0] + rho * (theta[0, i] - mu[0][0])
    sigma_21 = 1 - rho**2
    theta[1, i] = randn() * sqrt(sigma_21) + mu_21
    progress_bar(i, s + 1)

# ===== 3. Plots =====

fig, axs = plt.subplots(2, 2)

k = 10
axs[0, 0].plot(theta[0, 0:k], theta[1, 0:k], linewidth=0.5, marker="x")
axs[0, 0].set_title(f"Convergence of the first {k} steps of the Gibbs sampler")
axs[0, 0].set_xlabel("theta_1")
axs[0, 0].set_ylabel("theta_2")

theta = theta[:, s_1:]
axs[0, 1].set_title(f"Joint density based on {s_1} samples")
axs[0, 1].scatter(theta[0], theta[1], marker=".", s=0.75)
axs[0, 1].set_xlabel("theta_1")
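# Because the target is a bivariate normal with unit variances and correlation
# rho, the retained draws can also be checked against those moments (a sketch):
print(f"Sample means:       {theta.mean(axis=1).round(3)}")
print(f"Sample variances:   {theta.var(axis=1).round(3)} (target: 1.0)")
print(f"Sample correlation: {np.corrcoef(theta)[0, 1]:.3f} (target: rho = {rho})")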
log_bf = res_gm_rest["log_ml"] - res_gm["log_ml"]
bf = exp(log_bf)  # Bayes factor (exponentiate the previous expression)

print("Bayes factor comparing the restricted and unrestricted models:")
print(f"BF = {round(bf, 4)}", "\n")

# ===== 3. Hypothesis that beta > 1 =====
mc = 100_000  # number of simulations
beta_sim = np.array([[], []])

print("Computing the probability that beta > 1 by simulation:")
for i in range(mc):
    h_sim = float(gamm_rnd_koop(res_gm["h_1"], res_gm["nu_1"], (1, 1)))
    new_column = norm_rnd(1 / h_sim * res_gm["v_1"]) + res_gm["beta_1"]
    beta_sim = np.append(beta_sim, new_column, axis=1)
    progress_bar(i, mc)
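# Note: growing beta_sim with np.append copies the whole array on every
# iteration, which is quadratic in mc. An equivalent but faster pattern
# (a sketch using the same helper names) preallocates and fills the columns:
# beta_sim = np.empty((2, mc))
# for i in range(mc):
#     h_sim = float(gamm_rnd_koop(res_gm["h_1"], res_gm["nu_1"], (1, 1)))
#     beta_sim[:, [i]] = norm_rnd(1 / h_sim * res_gm["v_1"]) + res_gm["beta_1"]
#     progress_bar(i, mc)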

# Compute the probability that beta > 1
pr_beta = sum(t(beta_sim > 1))[1] / mc
print("Probability that beta > 1:")
print(f"Pr. = {round(pr_beta, 4)}")

# Analytical computation of the probability
# a) standardize the scaled t-distribution (p(beta|y)) for the second element of the parameter vector beta
zscore = float((1 - res_gm["beta_1"][1]) / res_gm["b1_std"][1])

# b) evaluate the corresponding upper-tail probability of the standardized central t-distribution
pr_beta_analyticky = 1 - student.cdf(zscore, res_gm["nu_1"])
print(f"Pr. = {round(pr_beta_analyticky, 4)} (analytical)")