Example #1
def main():
    p = [0.2, 0.7, 0.1]
    q = [0.1, 0.8, 0.1]
    divergence = kl_divergence(p, q)
    assert divergence == 0.045157461274823146, "kl_divergence test failed!"
    print("kl_divergence test PASSED!")

    print("Computing KL for MC Simulations...")

    # Compare Monte Carlo estimates against the true distribution for 5 flips
    true_probability = [probability(x, 5) for x in range(6)]
    for samples in (100, 1000, 10000):
        trials = monte_carlo_trials(5, samples)
        print(f"{samples} samples --> KL Divergence = ",
              kl_divergence(trials, true_probability))
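
# Hedged sketch: kl_divergence is called above but its implementation is not shown here.
# A minimal version, assuming the standard discrete KL formula with natural logarithms
# (the exact constant asserted above depends on the original implementation):
import math

def kl_divergence(p, q):
    # D_KL(p || q) = sum_i p_i * ln(p_i / q_i); terms with p_i == 0 contribute nothing
    return sum(p_i * math.log(p_i / q_i) for p_i, q_i in zip(p, q) if p_i > 0)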
Example #2
def main():
    flips = {
        'George':
        'THTH HTHT THTH HHTH THTH TTHT THTH TTTH THTH TTHT THHT HTTH THTH THHT THHT THTH THTH THHT THTH THTH',
        'David':
        'TTHH HHTT HHTT TTHH HTHT THTH THTH THTH HTHT HTHT THTH HTHT THHH THTH HHTT HHTT TTTH HHTH HTHH TTTH',
        'Elijah':
        'THTT HTHT HTHH HHHT TTHH THHH TTTT HHTT TTTH TTHH HTHT HTHT TTTT HTTT TTTH HTTT TTHH THTH THHH HTHH',
        'Colby':
        'HTTH HTHH THTT HHTH TTHT HTTT THHH TTHH HHTT THTH HHTT THTH THHH TTHH THTT HHTH HTTH HTHH TTHT HTTT',
        'Justin':
        'THTT HTHT TTTH THHT HTHH TTTH THTH HHTH TTTT HTTH HHTT THHH HHHH THTH HTTH TTHH HTHT HHHT THHT TTTH'
    }

    trial_flips = 4

    # Split simulations into trials
    trials = {
        name: simulation.split(" ")
        for name, simulation in flips.items()
    }

    # Get number of heads for each trial
    trial_heads = {
        name: [trial.count('H') for trial in coin_flips]
        for name, coin_flips in trials.items()
    }

    # Get probability of # of heads for each simulation
    probs = {
        name: [trial.count(x) / len(trial) for x in range(trial_flips + 1)]
        for name, trial in trial_heads.items()
    }

    # Calculate the true probability of getting heads
    true_probability = [
        probability(x, trial_flips) for x in range(trial_flips + 1)
    ]

    print("KL Divergence between coin flips")
    for name, person_probs in probs.items():
        div = kl_divergence(person_probs, true_probability)
        print(name, ":", div)

    print(
        "According to the KL divergence, Justin's coin flips were the best approximation of true randomness: his empirical distribution had the smallest divergence from the true probabilities."
    )
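
# Hedged sketch: probability(num_heads, num_flips) is used throughout these examples but not
# shown. A minimal version, assuming it returns the fair-coin binomial pmf P(X = k) = C(n, k) / 2^n:
from math import comb

def probability(num_heads, num_flips):
    # count the sequences with exactly num_heads heads, divide by all 2^n equally likely sequences
    return comb(num_flips, num_heads) / 2 ** num_flips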
    'HHH', 'THT', 'HHT', 'HHT', 'HTH', 'HHT', 'HHT', 'HHH', 'TTT', 'THH',
    'HHH', 'HHH', 'TTH', 'THH', 'THH', 'TTH', 'HTT', 'TTH', 'HTT', 'HHT',
    'TTH', 'HTH', 'THT', 'THT', 'HTH'
]

coins = [coin_1, coin_2, coin_3]


def analyze_coin_flips(num_heads, coin_data):
    good_outcomes = 0
    for flip in coin_data:
        counter = flip.count('H')
        if counter == num_heads:
            good_outcomes += 1
    return good_outcomes / len(coin_data)


x_coords = [i for i in range(4)]
real_results = [probability(x, 3) for x in x_coords]
plt.plot(x_coords, real_results, linewidth=2.5)
for coin in coins:
    plt.plot(x_coords, [analyze_coin_flips(x, coin) for x in x_coords],
             linewidth=0.75)
plt.legend(['True', 'Coin 1', 'Coin 2', 'Coin 3'])
plt.xlabel('Number of Heads')
plt.ylabel('Probability')
plt.title('True Distribution vs Coins')
plt.savefig('bias_plot.png')
plt.show()

# Coin 2 looks fair; coin 1 seems biased towards tails and coin 3 biased towards heads
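
# Hedged follow-up sketch: one way to back up the comment above is to compare each coin's
# average heads per 3-flip trial with the fair-coin expectation of 1.5 (uses the coins list above):
for label, coin in zip(['Coin 1', 'Coin 2', 'Coin 3'], coins):
    mean_heads = sum(flip.count('H') for flip in coin) / len(coin)
    print(label, 'average heads per trial:', mean_heads, '(fair coin: 1.5)')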
import math
from coin_flipping import probability

probabilities = [probability(i, 4) for i in range(0, 5)]

def expected_value(X, num_flips=4):
    return sum(x * X[x] for x in range(num_flips + 1))

def variance(X, num_flips=4):
    mean = expected_value(X, num_flips)
    return sum(((x - mean) ** 2) * X[x] for x in range(num_flips + 1))

def st_dev(X):
    return math.sqrt(variance(X))

def probability_likelihood(heads_count, tails_count, probability=None):
    if probability is not None:
        return round((probability ** heads_count) * (1 - probability) ** tails_count, 5)
    else:
        return 'L(p) = p^' + str(heads_count) + ' * (1-p)^' + str(tails_count)

print('\nQuestion A.1: 0 Heads:', probabilities[0], '1 Head:', probabilities[1], '2 Heads:', probabilities[2], '3 Heads:', probabilities[3], '4 Heads:', probabilities[4])  # P(k heads) = num_flips! / (num_heads! * (num_flips - num_heads)!) / 2 ** num_flips

print('Question A.2: You would expect X to be 2 heads out of 4 flips, since a fair coin lands heads half the time: E[X] = 4 * 0.5 = 2.')

print('Question A.3:', expected_value(probabilities))  # E x*p(x) = 2

print('Question A.4:', variance(probabilities))  # E[(X - E[X])^2] = n * p * (1 - p) = 1

print('Question A.5:', st_dev(probabilities), '\n')  # sqrt(1) = 1

biased_probabilities = [probability_likelihood(i, 4 - i) for i in range(0, 5)]
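
# Hedged sanity check: for a fair coin, X ~ Binomial(n=4, p=0.5), so the closed forms
# E[X] = n*p = 2 and Var(X) = n*p*(1-p) = 1 should match the values computed above
# (assuming probability() returns the fair-coin binomial pmf).
n, p = 4, 0.5
assert abs(expected_value(probabilities) - n * p) < 1e-9
assert abs(variance(probabilities) - n * p * (1 - p)) < 1e-9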
import sys
sys.path.append('src')
from coin_flipping import monte_carlo_probability, probability
import matplotlib.pyplot as plt
plt.style.use('bmh')

num_heads = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
true_result = [probability(x, 10) for x in num_heads]

# Run five independent Monte Carlo estimates of the 10-flip distribution
mc_runs = [[monte_carlo_probability(x, 10) for x in num_heads] for _ in range(5)]

plt.plot(num_heads, true_result, linewidth=2.5)
for mc_run in mc_runs:
    plt.plot(num_heads, mc_run, linewidth=0.75)

plt.legend(['True', 'MC 1', 'MC 2', 'MC 3', 'MC 4', 'MC 5'])
plt.xlabel('Number of Heads')
plt.ylabel('Probability')
plt.title('True Distribution vs Monte Carlo Simulations for 10 Coin Flips')
plt.savefig('coin_plot.png')
plt.show()
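
# Hedged sketch: monte_carlo_probability is imported from coin_flipping but not shown.
# A minimal version, assuming it estimates P(X = num_heads) over num_flips fair-coin flips
# by simulating a fixed number of random sequences (the trial count here is an assumption):
import random

def monte_carlo_probability(num_heads, num_flips, num_trials=1000):
    hits = 0
    for _ in range(num_trials):
        heads = sum(random.random() < 0.5 for _ in range(num_flips))  # one simulated sequence
        if heads == num_heads:
            hits += 1
    return hits / num_trials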
ordered_random_flip_sequence = []
ordered_true_distribution_flip_sequence = []

i = 0
for person, flip_sequence in flips.items():
    ordered_random_flip_sequence.append([])
    ordered_flip_sequence = order_flip_sequence(flip_sequence)
    for j in range(0, 5):
        ordered_random_flip_sequence[i].append(
            ordered_flip_sequence.count(j) / 20)

    i += 1

diverged_flip_seqences = [
    kl_divergence(ordered_random_flip_sequence[i],
                  [probability(j, 4) for j in range(0, 5)])
    for i in range(0, 5)
]

print('\nDivergence')
print("    George's samples divergence", diverged_flip_seqences[0])
print("    David's samples divergence", diverged_flip_seqences[1])
print("    Elijah's samples divergence", diverged_flip_seqences[2])
print("    Colby's samples divergence", diverged_flip_seqences[3])
print("    Justin's samples divergence", diverged_flip_seqences[4])
print('\n')

if diverged_flip_seqences.index(min(diverged_flip_seqences)) == 0:
    print(
        "George's samples were the closest to true distribution at a KL Divergence of:",
        min(diverged_flip_seqences))
    return divergence


def calculate_distribution(num_heads, tests):
    good_outcomes = 0
    for test in tests:
        counter = test.count('H')
        if counter == num_heads:
            good_outcomes += 1
    return good_outcomes / len(tests)


def total_distribution(num_flips, data):
    return [calculate_distribution(i, data) for i in range(num_flips + 1)]


real_results = [probability(x, 4) for x in range(5)]
for i in range(len(sorted_flip_sequences)):
    distribution = total_distribution(4, sorted_flip_sequences[i])
    round_distribution = [round(elem, 9) for elem in distribution]
    print('-------------------------')
    print('\nKL DIVERGENCE FOR', names[i], ':')
    print('\n', kl_divergence(round_distribution, real_results))
    print('\n-------------------------')

print('Justin got the lowest divergence, so his flips are the closest approximation of the true distribution')
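
# Hedged sketch: instead of reading the winner off the printout above, the lowest divergence
# can be selected programmatically (assumes names, sorted_flip_sequences, total_distribution,
# kl_divergence and real_results as defined above):
divergences = {
    names[i]: kl_divergence(
        [round(elem, 9) for elem in total_distribution(4, sorted_flip_sequences[i])],
        real_results)
    for i in range(len(sorted_flip_sequences))
}
best = min(divergences, key=divergences.get)
print(best, 'has the lowest KL divergence:', divergences[best])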
Example #8
def variance(x, distribution):
    expected_val = expected_value(x, distribution)  # find the expected value first
    variance = sum(
        [((x[i] - expected_val) ** 2) * distribution[i] for i in range(len(x))]
    )  # sum of (x_i - E[X])^2 weighted by the probability of getting x_i heads
    return variance
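
# Hedged sketch: expected_value is used above but not defined in this excerpt.
# A minimal version consistent with the variance function above:
def expected_value(x, distribution):
    # E[X] = sum_i x_i * P(x_i)
    return sum(x[i] * distribution[i] for i in range(len(x)))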


def st_dev(x, distribution):
    return math.sqrt(variance(x, distribution))


x = [0, 1, 2, 3, 4]

distribution = [probability(i, 4)
                for i in range(len(x))]  # using the pre-made probability function
print('\nA1----------')
print('\nProbability of getting x heads in 4 flips')
for i in range(len(distribution)):
    print(i, ':', distribution[i])
print('\nA2----------')
print('\nMost likely 2, because it has the highest probability of occurring')
print('\nA3----------')
print('\nExpected value of X is', expected_value(x, distribution))
print('\nA4----------')
print('\nVariance is ', variance(x, distribution))
print('\nA5----------')
print('\nStandard Deviation is ', st_dev(x, distribution))
print('\n')
print('\nB1----------')
Example #9
import sys
sys.path.append('src/python')
from coin_flipping import probability, monte_carlo_probability

print('probability', probability(5, 8))

print('monte_carlo_probability 1:', monte_carlo_probability(5, 8))
print('monte_carlo_probability 2:', monte_carlo_probability(5, 8))
print('monte_carlo_probability 3:', monte_carlo_probability(5, 8))
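
# For reference, the exact value is probability(5, 8) = C(8, 5) / 2**8 = 56 / 256 = 0.21875,
# which the three Monte Carlo estimates above should scatter around.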
Example #10
import sys
sys.path.append('src')
from coin_flipping import monte_carlo_probability, probability

print('--- Normal Probability ---')
print(probability(5, 8))
print('--- Monte Carlo Probabilities ---')
for i in range(3):
    print(monte_carlo_probability(5, 8))
import sys
sys.path.append('src/python')
from coin_flipping import probability, monte_carlo_probability
import matplotlib.pyplot as plt
plt.style.use('bmh')
x_values = list(range(0, 11))
plt.plot(x_values, [probability(i, 10) for i in x_values], linewidth=2.5)
# Overlay five independent Monte Carlo estimates of the same distribution
for _ in range(5):
    plt.plot(x_values, [monte_carlo_probability(i, 10) for i in x_values],
             linewidth=0.75)
plt.legend(['P(x)', 'MC #1', 'MC #2', 'MC #3', 'MC #4', 'MC #5'])
plt.xlabel('Number of Heads')
plt.ylabel('Probability')
plt.title('P(x) vs Monte Carlo Estimates (10 Flips)')
plt.savefig('plot.png')
plt.show()
def make_p_and_q(samples):
    return count_samples(samples, 5), [probability(i, 5) for i in range(0, 6)]
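
# Hedged sketch: count_samples is called by make_p_and_q but not shown. A minimal version,
# assuming samples is a list of per-trial head counts and the function returns the empirical
# pmf over 0..num_flips heads:
def count_samples(samples, num_flips):
    return [samples.count(k) / len(samples) for k in range(num_flips + 1)]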