Example #1
import matplotlib.pyplot as plt
import sys
sys.path.append('Assignment 7')
from monte_carlo_coin_flips import probability
from monte_carlo_coin_flips import monte_carlo_probability

plt.style.use('bmh')
x = list(range(9))
plt.plot(x, [probability(i, 8) for i in x], linewidth=2.5)
# Overlay five independent Monte Carlo runs (1000 samples each).
for _ in range(5):
    plt.plot(x, [monte_carlo_probability(i, 8, 1000) for i in x],
             linewidth=0.75)
plt.legend(['True', 'MC 1', 'MC 2', 'MC 3', 'MC 4', 'MC 5'])
plt.xlabel('Number of Heads')
plt.ylabel('Probability')
plt.title('True Distribution vs Monte Carlo Simulations for 8 Coin Flips')
plt.savefig('plot.png')
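
For reference, here is a minimal sketch of what the imported monte_carlo_coin_flips module might contain; the module itself is not shown, so the implementations below (an exact binomial probability and a sampling-based estimate) are assumptions inferred from how the two functions are called above.

# Hypothetical sketch of monte_carlo_coin_flips.py (assumed, not the original).
import random
from math import comb


def probability(num_heads, num_flips):
    # Exact binomial probability of num_heads heads in num_flips fair flips
    # (assumed implementation; the original module is not shown).
    return comb(num_flips, num_heads) / 2 ** num_flips


def monte_carlo_probability(num_heads, num_flips, num_samples=1000):
    # Estimate the same probability from num_samples simulated trials
    # (assumed implementation; the default sample count is hypothetical).
    hits = 0
    for _ in range(num_samples):
        heads = sum(random.randint(0, 1) for _ in range(num_flips))
        if heads == num_heads:
            hits += 1
    return hits / num_samples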
Example #2
import sys
sys.path.append('Assignment 7')
from math import log

from monte_carlo_coin_flips import probability
from monte_carlo_coin_flips import monte_carlo_probability


def kl_divergence(p, q):
    # Discrete KL divergence; terms where either probability is zero are skipped.
    result = 0
    for n in range(len(p)):
        if p[n] != 0 and q[n] != 0:
            result += p[n] * log(p[n] / q[n])
    return round(result, 6)


p = [0.2, 0.5, 0, 0.3]
q = [0.1, 0.8, 0.1, 0]
print(kl_divergence(p, q))
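# Expected output: -0.096372 (the value asserted in Example #3). Note that a
# true KL divergence is non-negative; the negative value here arises because
# terms with a zero probability are skipped rather than treated as infinite.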

print('Computing KL Divergence for MC Simulations...')
print('100 samples --> KL Divergence =')
p = [monte_carlo_probability(n, 8, 100) for n in range(9)]
q = [probability(n, 8) for n in range(9)]
print(kl_divergence(p, q))

print('1000 samples --> KL Divergence =')
p = [monte_carlo_probability(n, 8, 1000) for n in range(9)]
q = [probability(n, 8) for n in range(9)]
print(kl_divergence(p, q))

print('10000 samples --> KL Divergence =')
p = [monte_carlo_probability(n, 8, 10000) for n in range(9)]
q = [probability(n, 8) for n in range(9)]
print(kl_divergence(p, q))

# As the number of samples increases, the KL divergence approaches 0: the
# Monte Carlo estimate p approaches the true distribution q, so each term
# ln(p_n / q_n) goes to ln(1) = 0 and D(p || q) goes to 0.
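# For reference, the quantity computed here is the discrete KL divergence,
#     D(p || q) = \sum_n p_n \ln(p_n / q_n),
# summed over outcomes where both probabilities are nonzero.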
Example #3
import sys
sys.path.append('Assignment 7')
from math import log
from monte_carlo_coin_flips import probability
from monte_carlo_coin_flips import monte_carlo_probability
import matplotlib.pyplot as plt

plt.style.use('bmh')
x = list(range(9))
plt.plot(x, [probability(i, 8) for i in x], linewidth=2.5)
# Five independent Monte Carlo runs; 1000 samples per run (matching Example #1).
for _ in range(5):
    plt.plot(x, [monte_carlo_probability(i, 8, 1000) for i in x],
             linewidth=0.75)
plt.legend(['True','MC 1','MC 2','MC 3','MC 4','MC 5'])
plt.xlabel('Number of Heads')
plt.ylabel('Probability')
plt.title('True Distribution vs Monte Carlo Simulations for 8 Coin Flips')
plt.savefig('plot.png')


def kl_divergence(p, q):
    # Discrete KL divergence, as in Example #2; zero-probability terms are skipped.
    answer = 0
    for num in range(len(p)):
        if p[num] != 0 and q[num] != 0:
            answer += p[num] * log(p[num] / q[num])
    return round(answer, 6)


p = [0.2, 0.5, 0, 0.3]
q = [0.1, 0.8, 0.1, 0]

if __name__ == "__main__":
    print("Testing KL Divergence...")
    assert kl_divergence(p, q) == -0.096372, 'failed'
    print('passed')

prob = [probability(num_heads, 8) for num_heads in range(9)]

monte100 = [
    monte_carlo_probability(num_heads, 8, 100) for num_heads in range(9)
]
monte1000 = [
    monte_carlo_probability(num_heads, 8, 1000) for num_heads in range(9)
]
monte10000 = [
    monte_carlo_probability(num_heads, 8, 10000) for num_heads in range(9)
]
if __name__ == "__main__":
    print('Computing KL Divergence for MC Simulations...')
    print('100 samples --> KL Divergence = {}'.format(
        kl_divergence(monte100, prob)))
    print('1000 samples --> KL Divergence = {}'.format(
        kl_divergence(monte1000, prob)))
    print('10000 samples --> KL Divergence = {}'.format(
        kl_divergence(monte10000, prob)))