def _class_sizes():
    # start with the actual distribution of class sizes from the book
    d = {
        7: 8,
        12: 8,
        17: 14,
        22: 4,
        27: 6,
        32: 12,
        37: 8,
        42: 3,
        47: 2,
    }

    # form the pmf
    pmf = _04_Pmf._make_pmf_from_dict(d, 'actual')
    print('mean', pmf._mean())
    print('var', pmf._var())

    # compute the biased pmf
    biased_pmf = _bias_pmf(pmf, 'observed')
    print('mean', biased_pmf._mean())
    print('var', biased_pmf._var())

    # unbias the biased pmf
    unbiased_pmf = _unbias_pmf(biased_pmf, 'unbiased')
    print('mean', unbiased_pmf._mean())
    print('var', unbiased_pmf._var())

    # plot the Pmfs
    _05_myplot._pmfs([pmf, biased_pmf])
    _05_myplot._show(xlabel='Class size', ylabel='PMF')
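
# A minimal, self-contained sketch of the biasing step used in _class_sizes()
# above.  This is NOT the repo's _bias_pmf/_unbias_pmf (those are defined
# elsewhere); it only illustrates the technique on a plain {value: probability}
# dict.  Sampling classes through students oversamples large classes in
# proportion to their size, so biasing multiplies each probability by its value
# and renormalizes; unbiasing divides by the value instead.
def _bias_dict_sketch(d, invert=False):
    weighted = {x: p * (1.0 / x if invert else x) for x, p in d.items()}
    total = sum(weighted.values())
    return {x: p / total for x, p in weighted.items()}

# Example: _bias_dict_sketch({10: 0.5, 50: 0.5}) gives roughly {10: 0.17, 50: 0.83},
# and applying the result again with invert=True recovers the original distribution.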
def main():
    print('pae', 0.3 / (0.3 + 3.0 / 13))

    # uniform prior over P(blink) for Door A, updated with the observed
    # evidence (presumably counts of blinks and non-blinks)
    doorA = _make_uniform_suite(0.0, 1.0, 101, name='Door A')
    evidence = 3, 2
    _update(doorA, evidence)

    # same construction for Door C
    doorC = _make_uniform_suite(0.0, 1.0, 101, name='Door C')
    evidence = 3, 10
    _update(doorC, evidence)

    print(_total_probability(doorA, doorC, _prob_winning))

    # plot the posterior distributions
    _05_myplot._pmfs([doorA, doorC])
    _05_myplot._save(root='blinky',
                     formats=['pdf', 'png'],
                     title='Probability of blinking',
                     xlabel='P(blink)',
                     ylabel='Posterior probability')
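
# A minimal, self-contained sketch of the Bayesian update that main() relies on.
# It is an assumption about what _make_uniform_suite/_update do (they are
# defined elsewhere in the repo): start from a uniform grid of hypotheses for
# P(blink), weight each hypothesis by the likelihood of the observed
# (blinks, non-blinks) counts, and renormalize.
def _posterior_sketch(evidence, steps=101):
    blinks, non_blinks = evidence
    hypotheses = [i / (steps - 1.0) for i in range(steps)]        # candidate P(blink) values
    likelihoods = [p ** blinks * (1.0 - p) ** non_blinks for p in hypotheses]
    total = sum(likelihoods)                                      # uniform prior cancels out
    return {p: like / total for p, like in zip(hypotheses, likelihoods)}

# Example: for evidence (3, 2) the posterior mean is close to the Laplace
# rule-of-succession value (3 + 1) / (3 + 2 + 2), about 0.57.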