Example #1
0
    def test_random_from_uniform_dist(self):
        # Probabilistic check (not ideal, but unavoidable): sample the PMF
        # many times and verify the empirical frequencies come out near
        # uniform.
        empirical = PMF()
        for _ in range(10000):
            outcome = self.pmf.random()
            empirical[outcome] = empirical.get(outcome, 0) + 1
        empirical.normalize()

        # Five equally likely outcomes, so each frequency should be ~0.2.
        for outcome in 'abcde':
            self.assertTrue(0.190 < empirical[outcome] < 0.210)
Example #2
0
    def test_random_from_uniform_dist(self):
        # This test is probabilistic by necessity: all we can do is draw a
        # large sample and check the observed frequencies stay close to the
        # uniform value of 1/5 per outcome.
        counts = PMF()
        sample_size = 10000
        for _ in range(sample_size):
            drawn = self.pmf.random()
            counts[drawn] = counts.get(drawn, 0) + 1
        counts.normalize()

        for key in "abcde":
            self.assertTrue(0.190 < counts[key] < 0.210)
Example #3
0
    def test_random_from_power_dist(self):
        # Test is probabilistic, which is not great, but is necessary.
        #
        # Fix: `xrange` is Python-2-only and was inconsistent with the
        # `range` call used in the sampling loop below; `range(1, 4)`
        # produces the same sequence 1, 2, 3 and also works on Python 3.
        self.pmf.power_law_dist(range(1, 4))
        simulation_pmf = PMF()
        for _ in range(10000):
            x = self.pmf.random()
            hit_count = simulation_pmf.get(x, 0)
            simulation_pmf[x] = hit_count + 1
        simulation_pmf.normalize()

        # Power-law over {1, 2, 3}: expected masses are roughly
        # 1/(1 + 1/2 + 1/3) ~ 0.545, ~0.273, ~0.182; bounds allow
        # sampling noise.
        self.assertTrue(0.540 < simulation_pmf[1] < 0.550)
        self.assertTrue(0.262 < simulation_pmf[2] < 0.283)
        self.assertTrue(0.166 < simulation_pmf[3] < 0.197)
Example #4
0
    def test_random_from_power_dist(self):
        # Test is probabilistic, which is not great, but is necessary.
        #
        # Fix: replaced Python-2-only `xrange` with `range`, matching the
        # `range` already used for the sampling loop in this method; the
        # generated sequence (1, 2, 3) is identical.
        self.pmf.power_law_dist(range(1, 4))
        simulation_pmf = PMF()
        for _ in range(10000):
            x = self.pmf.random()
            hit_count = simulation_pmf.get(x, 0)
            simulation_pmf[x] = hit_count + 1
        simulation_pmf.normalize()

        # Expected power-law masses over {1, 2, 3} are approximately
        # 0.545, 0.273, 0.182; the bounds leave room for sampling noise.
        self.assertTrue(0.540 < simulation_pmf[1] < 0.550)
        self.assertTrue(0.262 < simulation_pmf[2] < 0.283)
        self.assertTrue(0.166 < simulation_pmf[3] < 0.197)
Example #5
0
    def test_cookie_problem_with_arbitrary_factors(self):
        """
    test_cookie_problem_with_arbitrary_factors (irrealis_bayes.tests.FunctionalTestPMF)

    Likelihoods may be scaled by any convenient common factor, as long as
    every entry in the dictionary is scaled consistently; normalizing
    afterwards recovers a proper probability distribution.
    """
        # Uniform prior over the two bowls, expressed as raw counts.
        pmf = PMF(bowl_1=1, bowl_2=1)
        # Scale each hypothesis by its vanilla-cookie count (out of forty
        # cookies per bowl): thirty in bowl_1, twenty in bowl_2.
        vanilla_counts = {"bowl_1": 30, "bowl_2": 20}
        for bowl in vanilla_counts:
            pmf[bowl] = pmf[bowl] * vanilla_counts[bowl]
        # Normalizing turns the scaled counts into probabilities.
        pmf.normalize()
        # Posterior for bowl_1 should be 30 / (30 + 20) = 0.6.
        self.assertTrue(0.599 < pmf["bowl_1"] < 0.601)
Example #6
0
    def test_cookie_problem_with_arbitrary_factors(self):
        '''
    test_cookie_problem_with_arbitrary_factors (irrealis_bayes.tests.FunctionalTestPMF)

    The dictionary can be multiplied through by any convenient factor;
    a subsequent normalize() restores a probability distribution.
    '''
        # Equal prior weight on each bowl.
        pmf = PMF(bowl_1=1, bowl_2=1)
        # bowl_1 holds thirty vanilla cookies (of forty); bowl_2 holds
        # twenty (of forty).
        pmf['bowl_1'] = pmf['bowl_1'] * 30
        pmf['bowl_2'] = pmf['bowl_2'] * 20
        # Convert the weighted counts into a probability distribution.
        pmf.normalize()
        # P(bowl_1 | vanilla) = 30 / (30 + 20) = 0.6.
        self.assertTrue(0.599 < pmf['bowl_1'] < 0.601)
Example #7
0
    def test_basic_cookie_problem(self):
        """
    test_basic_cookie_problem (irrealis_bayes.tests.FunctionalTestPMF)

    From Think Bayes:

      Two bowls of cookies: the first holds 30 vanilla and ten chocolate
      cookies; the second holds twenty of each. A bowl is chosen at random
      and a cookie drawn from it at random turns out to be vanilla. What
      is the probability it came from the first bowl?

      The prior P(bowl_1) is 0.5, since either bowl was equally likely.

      The likelihood of vanilla given bowl_1 is P(vanilla | bowl_1) = 0.75
      (30 vanilla of forty cookies), while P(vanilla | bowl_2) = 0.5
      (twenty vanilla of forty cookies).

      The hypotheses are exclusive and exhaustive, so by the law of total
      probability:

        P(bowl_1 | vanilla)
        = (P(bowl_1)*P(vanilla | bowl_1)) / (P(bowl_1)*P(vanilla | bowl_1) + P(bowl_2)*P(vanilla | bowl_2))
        = (0.5*0.75)/(0.5*0.75 + 0.5*0.5)
        = (0.75)/(0.75 + 0.5)
        = 0.6
    """
        # Start from the uniform prior, then weight each hypothesis by the
        # likelihood of drawing a vanilla cookie from that bowl.
        pmf = PMF(bowl_1=0.5, bowl_2=0.5)
        likelihoods = {"bowl_1": 0.75, "bowl_2": 0.5}
        for hypothesis, likelihood in likelihoods.items():
            pmf[hypothesis] = pmf[hypothesis] * likelihood
        pmf.normalize()
        # Posterior must match the hand computation above: 0.6.
        self.assertTrue(0.599 < pmf["bowl_1"] < 0.601)
Example #8
0
    def test_basic_cookie_problem(self):
        '''
    test_basic_cookie_problem (irrealis_bayes.tests.FunctionalTestPMF)

    From Think Bayes:

      Two bowls of cookies. Bowl one contains 30 vanilla and ten chocolate
      cookies; bowl two contains twenty of each. A bowl is picked at
      random, and a cookie drawn from it at random is vanilla. What is the
      probability it came from the first bowl?

      Before drawing, P(bowl_1) = 0.5 — both bowls were equally likely.

      Given bowl_1, the likelihood of vanilla is P(vanilla | bowl_1) =
      0.75 (30 of forty cookies); given bowl_2 it is P(vanilla | bowl_2) =
      0.5 (twenty of forty cookies).

      Since the hypotheses are exclusive and exhaustive, the law of total
      probability gives:

        P(bowl_1 | vanilla)
        = (P(bowl_1)*P(vanilla | bowl_1)) / (P(bowl_1)*P(vanilla | bowl_1) + P(bowl_2)*P(vanilla | bowl_2))
        = (0.5*0.75)/(0.5*0.75 + 0.5*0.5)
        = (0.75)/(0.75 + 0.5)
        = 0.6
    '''
        # Uniform prior over the two bowls.
        pmf = PMF(bowl_1=0.5, bowl_2=0.5)
        # Multiply in the vanilla likelihood for each hypothesis.
        pmf['bowl_1'] = pmf['bowl_1'] * 0.75
        pmf['bowl_2'] = pmf['bowl_2'] * 0.5
        pmf.normalize()
        # The posterior worked out above is exactly 0.6.
        self.assertTrue(0.599 < pmf['bowl_1'] < 0.601)