Esempio n. 1
0
import matplotlib.pyplot as plt
import sys
sys.path.append('Assignment 7')
from monte_carlo_coin_flips import probability
from monte_carlo_coin_flips import monte_carlo_probability

# Compare the exact head-count distribution for 8 coin flips against five
# independent Monte Carlo estimates (1000 samples each) on one figure.
plt.style.use('bmh')

heads = list(range(9))  # possible head counts for 8 flips: 0..8

# True distribution, drawn thicker so it stands out against the estimates.
plt.plot(heads, [probability(h, 8) for h in heads], linewidth=2.5)

# Five independent simulations; a loop replaces five duplicated plot calls.
for _ in range(5):
    plt.plot(heads,
             [monte_carlo_probability(h, 8, 1000) for h in heads],
             linewidth=0.75)

plt.legend(['True', 'MC 1', 'MC 2', 'MC 3', 'MC 4', 'MC 5'])
plt.xlabel('Number of Heads')
plt.ylabel('Probability')
plt.title('True Distribution vs Monte Carlo Simulations for 8 Coin Flips')
plt.savefig('plot.png')
Esempio n. 2
0
import sys
from monte_carlo_coin_flips import probability
from monte_carlo_coin_flips import monte_carlo_probability
import matplotlib.pyplot as plt

# Plot the exact 8-flip head-count distribution against five Monte Carlo
# estimates (sample count left to monte_carlo_probability's default,
# as in the original — confirm that default against the module).
plt.style.use('bmh')

xs = list(range(0, 9))  # 0..8 heads are possible with 8 flips

plt.plot(xs, [probability(x, 8) for x in xs], linewidth=2.5)

# Five independent simulations; a loop replaces five duplicated plot calls.
for _ in range(5):
    plt.plot(xs, [monte_carlo_probability(x, 8) for x in xs], linewidth=0.75)

plt.legend(['True', 'MC 1', 'MC 2', 'MC 3', 'MC 4', 'MC 5'])
plt.xlabel('Number of Heads')
plt.ylabel('Probability')
# Bug fix: the title said "4 Coin Flips" but every call above uses 8 flips.
plt.title('True Distribution vs Monte Carlo Simulations for 8 Coin Flips')
plt.savefig('plot.png')
Esempio n. 3
0
def kl_divergence(p, q):
    """Return the KL divergence D(p || q) of two discrete distributions.

    p, q -- equal-length sequences of probabilities.  Terms where
    p[n] == 0 contribute nothing (0 * log 0 = 0 convention), and terms
    where q[n] == 0 are skipped as in the original, keeping the result
    finite.  The result is rounded to 6 decimal places.
    """
    import math  # bug fix: the original called an undefined `ln` (NameError)

    result = 0.0
    for pn, qn in zip(p, q):
        # `if p[n] and q[n] != 0` in the original meant exactly this for
        # numeric probabilities; written explicitly for clarity.
        if pn != 0 and qn != 0:
            result += pn * math.log(pn / qn)
    return round(result, 6)


# Sanity check of kl_divergence on hand-built distributions.
p = [0.2, 0.5, 0, 0.3]
q = [0.1, 0.8, 0.1, 0]
print(kl_divergence(p, q))

print('Computing KL Divergence for MC Simulations...')
# Bug fix: range(0, 8) dropped the "8 heads" outcome; 8 flips have 9
# possible head counts (0..8), so iterate range(9) as the other examples do.
for samples in (100, 1000, 10000):
    print('{} samples --> KL Divergence ='.format(samples))
    p = [monte_carlo_probability(n, 8, samples) for n in range(9)]
    q = [probability(n, 8) for n in range(9)]
    print(kl_divergence(p, q))

# As the number of samples increases, the KL divergence approaches 0. This is
# because as p approaches q, ln(p/q) goes to zero and D(p||q) = 0.
Esempio n. 4
0
# Bug fix: the original called sys.path.append without importing sys,
# which raises NameError at import time.
import sys

sys.path.append('Assignment 7')
from monte_carlo_coin_flips import probability
from monte_carlo_coin_flips import monte_carlo_probability


def kl_divergence(p, q):
    """Return the KL divergence D(p || q), rounded to 11 decimal places.

    p, q -- equal-length sequences of probabilities.  Terms where either
    probability is 0 are skipped (0 * log 0 = 0 convention on the p side;
    the q-side skip matches the original behaviour), so the result is
    always finite.
    """
    import math  # bug fix: `math` was never imported in this example

    total = 0.0  # renamed from `sum`, which shadowed the builtin
    for pi, qi in zip(p, q):
        if pi != 0 and qi != 0:
            total += pi * math.log(pi / qi)
    return round(total, 11)


# Hand-built distributions used to unit-test kl_divergence below.
p = [0.2, 0.5, 0, 0.3]
q = [0.1, 0.8, 0.1, 0]

print('Testing kl_divergence...')
# Exact float comparison is intentional: round(..., 11) reproduces this
# literal exactly for these inputs.
assert kl_divergence(p, q) == -0.09637237851
print('PASSED')

# True binomial distribution for 8 flips (head counts 0..8).
q = [probability(i, 8) for i in range(9)]

print('Computing KL Divergence for MC Simulations...')
# Monte Carlo estimates with increasing sample counts; the divergence from
# the true distribution should shrink as the sample count grows.
p = [monte_carlo_probability(i, 8, 100) for i in range(9)]
print('100 samples --> KL Divergence =', kl_divergence(p, q))
p = [monte_carlo_probability(i, 8, 1000) for i in range(9)]
print('1000 samples --> KL Divergence =', kl_divergence(p, q))
p = [monte_carlo_probability(i, 8, 10000) for i in range(9)]
print('10000 samples --> KL Divergence =', kl_divergence(p, q))

# As the number of samples increases, the KL divergence approaches 0 because the approximate probability tends to approach the true distribution the more samples there are.
    # NOTE(review): orphaned function body — the enclosing `def` line (and
    # the "Esempio n. 5" header) appear to have been lost when this file was
    # assembled; this cannot run as-is.  `ln` is also undefined here;
    # presumably math.log was intended — confirm against the original source.
    answer = 0
    for num in range(len(p)):
        if p[num] and q[num] != 0:
            answer += p[num] * ln(p[num] / q[num])
    return round(answer, 6)


# Hand-built distributions used to test the kl_divergence fragment above.
p = [0.2, 0.5, 0, 0.3]
q = [0.1, 0.8, 0.1, 0]

if __name__ == "__main__":
    print("Testing KL Divergence...")
    # NOTE(review): relies on the kl_divergence fragment above, whose `def`
    # line is missing and which calls an undefined `ln` — this assert
    # cannot currently execute as written.
    assert kl_divergence(p, q) == -0.096372, 'failed'
    print('passed')

# True distribution for 8 flips (head counts 0..8) and three Monte Carlo
# estimates with increasing sample counts.
prob = [probability(num_heads, 8) for num_heads in range(9)]

monte100 = [
    monte_carlo_probability(num_heads, 8, 100) for num_heads in range(9)
]
monte1000 = [
    monte_carlo_probability(num_heads, 8, 1000) for num_heads in range(9)
]
monte10000 = [
    monte_carlo_probability(num_heads, 8, 10000) for num_heads in range(9)
]
if __name__ == "__main__":
    print('Computing KL Divergence for MC Simulations...')
    print('100 samples --> KL Divergence = {}'.format(
        kl_divergence(monte100, prob)))
    # NOTE(review): the file is truncated here — the remainder of this call
    # and the 10000-sample print were lost in extraction.
    print('1000 samples --> KL Divergence = {}'.format(
Esempio n. 6
0
def simple_sort(num_list):
    """Return a new list with the elements of num_list in ascending order.

    Selection sort, O(n^2).  Works on a copy, so the caller's list is left
    intact — the original implementation emptied num_list as a side effect,
    destroying the caller's data.
    """
    remaining = list(num_list)  # copy: don't mutate the caller's list
    sorted_list = []
    while remaining:
        smallest = min(remaining)  # builtin min replaces the manual scan
        sorted_list.append(smallest)
        remaining.remove(smallest)  # removes one occurrence, so duplicates survive
    return sorted_list


# Exact head-count distribution for 4 flips (0..4 heads).  A comprehension
# replaces the original append loop (idiomatic list construction).
true_distribution = [probability(heads, 4) for heads in range(5)]


def count_heads(flip_sequence):
    """Return how many outcomes in flip_sequence are heads ('H')."""
    return sum(1 for outcome in flip_sequence if outcome == 'H')


def calculate_distribution(sample):
    flip_list = [0, 0, 0, 0, 0]
    flip_split = sample.split(' ')
    for sample in flip_split:
        for num in range(5):