Code example #1
 def setUp(self):
     self.mocks = [
         MockHypothesis(fitness=0.5),
         MockHypothesis(fitness=0.6),
         MockHypothesis(fitness=0.7)
     ]
     self.decisioner = Decisioner(self.mocks, training_window=0)
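
The setUp above relies on a MockHypothesis test double that is not shown in these listings. Judging from how the unit tests use it (a fitness keyword argument, the _fitness, _next_guess and _times_updated attributes, and the calls a Decisioner would make), a minimal sketch might look like the following; this is an assumption, not the project's actual mock.

class MockHypothesis(object):
    """Hypothetical test double inferred from the unit tests in this document."""

    def __init__(self, fitness=0.0):
        self._fitness = fitness        # read and overwritten by the tests
        self._next_guess = None        # the canned guess to return
        self._times_updated = 0        # incremented on every update()

    def fitness(self):
        # The Decisioner is assumed to rank hypotheses by this value.
        return self._fitness

    def get_guess(self, vector):
        # Ignore the input vector and return the canned guess.
        return self._next_guess

    def update(self, vector, action, outcome):
        # Only record that an update happened; the tests assert on this count.
        self._times_updated += 1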
Code example #2
    def test_get_guess_training_window(self):
        """Tests the 'get_guess' method of the decisioner with the
        training window in place"""

        # First we need to create a new decisioner with a nonzero
        # training window
        decisioner = Decisioner(self.mocks, training_window=100)

        # Then we check that we receive True while inside the window
        for i in range(100):
            self.assertTrue(decisioner.get_guess(self.FAKE_VECTOR))
            decisioner.update(self.FAKE_VECTOR, self.FAKE_ACTION, self.FAKE_OUTCOME)

        # Finally, now that we've exceeded the window, we check that
        # the guess of the chosen (highest-fitness) hypothesis is returned
        self.mocks[0]._fitness = 0.99
        self.mocks[0]._next_guess = 'spam'

        response = decisioner.get_guess(self.FAKE_VECTOR)

        self.assertEqual('spam', response)
Code example #3
    def test_get_guess_training_window(self):
        """Tests the 'get_guess' method of the decisioner with the
        training window in place"""

        # First we need to create a new decisioner with a nonzero
        # training window
        decisioner = Decisioner(self.mocks, training_window=100)

        # Then we check that we receive True while inside the window
        for i in range(100):
            self.assertTrue(decisioner.get_guess(self.FAKE_VECTOR))
            decisioner.update(self.FAKE_VECTOR, self.FAKE_ACTION,
                              self.FAKE_OUTCOME)

        # Finally, now that we've exceeded the window, we check that
        # the guess of the chosen (highest-fitness) hypothesis is returned
        self.mocks[0]._fitness = 0.99
        self.mocks[0]._next_guess = 'spam'

        response = decisioner.get_guess(self.FAKE_VECTOR)

        self.assertEqual('spam', response)
Code example #4
 def setUpDecisioner(self, *hypothesis, **kwargs):
     self.hypothesis = hypothesis
     self.decisioner = Decisioner(self.hypothesis, **kwargs)
Code example #5
class DecisionerIntegrationTest(unittest.TestCase):

    def setUpDecisioner(self, *hypothesis, **kwargs):
        self.hypothesis = hypothesis
        self.decisioner = Decisioner(self.hypothesis, **kwargs)

    def tearDown(self):
        self.hypothesis = None
        self.decisioner = None

    def test_AllAggressiveMonsters_WithWimpyAndBrave(self):
        brave = BraveHypothesis()
        wimpy = WimpyHypothesis()
        self.setUpDecisioner(brave, wimpy)

        # First we run through the basic training period,
        # which covers the first 101 iterations
        for i in range(101):
            monster = Monster(1, [randint(1, 100)], 'aggressive')

            # Get the guess from the decisioner for the first 100,
            # we expect every guess to be 1
            self.assertTrue(self.decisioner.get_guess(monster.color))
            # Then we will update from this guess
            self.decisioner.update(monster.color, 1, monster.action(True))

        for i in range(5000):
            monster = Monster(1, [randint(1, 100)], 'aggressive')

            # Get the guess from the decisioner for the next 5000; we
            # expect the wimpy hypothesis to win and thus never attack
            self.assertFalse(self.decisioner.get_guess(monster.color))

            # Then we will update from this guess
            self.decisioner.update(monster.color, 0, 0)

            # Finally we know that the wimpy hypothesis should always have
            # a greater fitness than the brave for each iteration
            self.assertGreater(wimpy.fitness(), brave.fitness())


    def test_AllPassiveMonsters_WithWimpyAndBrave(self):
        brave = BraveHypothesis()
        wimpy = WimpyHypothesis()
        self.setUpDecisioner(brave, wimpy)

        # First we run through the basic training period,
        # which covers the first 101 iterations
        for i in range(101):
            monster = Monster(0, [randint(1, 100)], 'passive')

            # Get the guess from the decisioner for the first 100,
            # we expect every guess to be 1
            self.assertTrue(self.decisioner.get_guess(monster.color))
            # Then we will update from this guess
            self.decisioner.update(monster.color, 1, monster.action(True))

        for i in range(5000):
            monster = Monster(0, [randint(1, 100)], 'passive')

            # Get the guess from the decisioner for the next 5000; we
            # expect the brave hypothesis to win and thus always attack
            self.assertTrue(self.decisioner.get_guess(monster.color))

            # Then we will update from this guess
            self.decisioner.update(monster.color, 1, 1)

            # Finally we know that the brave hypothesis should always have
            # a greater fitness than the wimpy for each iteration
            self.assertGreater(brave.fitness(), wimpy.fitness())

    def test_AllPassiveMonsters_WithRando(self):
        wimpy = WimpyHypothesis()
        rando = RandoHypothesis()
        self.setUpDecisioner(wimpy, rando)

        # First we run through the basic training period,
        # which covers the first 101 iterations
        for i in range(101):
            monster = Monster(0, [randint(1, 100)], 'passive')

            # Get the guess from the decisioner for the first 100,
            # we expect every guess to be 1
            self.assertTrue(self.decisioner.get_guess(monster.color))

            # Then we will update from this guess
            self.decisioner.update(monster.color, 1, monster.action(True))

        for i in range(5000):
            monster = Monster(0, [randint(1, 100)], 'passive')

            self.decisioner.get_guess(monster.color)

            # Then we will update from this guess
            self.decisioner.update(monster.color, 1, 1)

            # Flipping a coin is better than doing nothing
            self.assertGreater(rando.fitness(), wimpy.fitness())

        # Randomness should be near-even in distribution
        self.assertGreater(rando.fitness(), 0.4)
        self.assertLess(rando.fitness(), 0.6)

    def test_simpleResponse_WithDrPerceptron(self):
        brave = BraveHypothesis()
        wimpy = WimpyHypothesis()
        drP = DrPerceptron()
        self.setUpDecisioner(brave, wimpy, drP)

        # First we run through the basic training period,
        # which covers the first 101 iterations
        for i in range(101):
            nonAggro = 0 # this means not aggro
            color = [randint(1, 100)]
            monster = Monster(nonAggro, color, 'passive')

            # Get the guess from the decisioner for the first 100,
            # we expect every guess to be 1
            self.assertTrue(self.decisioner.get_guess(monster.color))
            # Then we will update from this guess
            self.decisioner.update(monster.color, 1, monster.action(True))

        # DrPerceptron should converge pretty quickly and be better than wimpy but not as good as brave
        for i in range(100):
            aggro = 1 # this means aggro
            color = [randint(1, 100)]
            monster = Monster(aggro, color, 'aggressive')

            # Then we update the decisioner directly with the known outcome
            self.decisioner.update(monster.color, 1, 1)

            self.assertGreater(drP.fitness(), wimpy.fitness())
            self.assertGreater(brave.fitness(), drP.fitness())

    def test_frequencyResponse_WithDrPerceptron(self):
        brave = BraveHypothesis()
        wimpy = WimpyHypothesis()
        drP = DrPerceptron()
        self.setUpDecisioner(brave, wimpy, drP)

        # First we run through the basic training period,
        # which covers the first 101 iterations
        for i in range(101):
            nonAggro = 0 # this means not aggro
            color = [randint(1, 100)]
            monster = Monster(nonAggro, color, 'passive')

            # Get the guess from the decisioner for the first 100,
            # we expect every guess to be 1
            self.assertTrue(self.decisioner.get_guess(monster.color))
            # Then we will update from this guess
            self.decisioner.update(monster.color, 1, monster.action(True))

        # Dr. Perceptron should do better than brave and wimpy
        # when encountering monsters that repeat with a frequency
        # that is trainable given Dr. Perceptron's window size.
        # In other words, Dr. Perceptron trains on a frequency of
        # monsters, but training on a pattern is limited to the
        # input size of Dr. Perceptron (which as of this check-in
        # is 5)
        #
        # We test a staggered input
        for i in range(10):
            aggro = 1 # this means aggro
            passive = 0 # this means passive
            evenMonstersPassive = i%2
            color = [randint(1, 100)]
            monster = Monster(evenMonstersPassive, color, 'aggressiveish')

            # Then we update the decisioner directly with the known outcome
            self.decisioner.update(monster.color, evenMonstersPassive, evenMonstersPassive)
            # drP should always be better than wimpy
            self.assertGreater(drP.fitness(), wimpy.fitness())

        # after 2 sets of 5 inputs drP should be better than brave
        self.assertGreater(drP.fitness(), brave.fitness())

    def test_frequencyResponse_WithOptimusPerceptron(self):
        brave = BraveHypothesis()
        wimpy = WimpyHypothesis()
        oPP = OptimusPerceptron(4)
        self.setUpDecisioner(brave, wimpy, oPP)

        # First we run through the basic training period,
        # which covers the first 101 iterations
        for i in range(101):
            nonAggro = 0 # this means not aggro
            color = [randint(1, 100)]
            monster = Monster(nonAggro, color, 'passive')

            # Get the guess from the decisioner for the first 100,
            # we expect every guess to be 1
            self.assertTrue(self.decisioner.get_guess(monster.color))
            # Then we will update from this guess
            self.decisioner.update(monster.color, 1, monster.action(True))

        # Like Dr. Perceptron, Optimus Perceptron should do better than
        # brave and wimpy when encountering monsters that repeat with a
        # frequency that is trainable given its input window size
        # (constructed with 4 here). Training on a pattern is limited
        # to that input size.
        #
        # We test a staggered input
        for i in range(100):
            aggro = 1
            passive = 0
            evenMonstersPassive = i%2
            color = [randint(1, 100)]
            monster = Monster(evenMonstersPassive, color, 'aggressiveish')

            # Then we update the decisioner directly with the known outcome
            self.decisioner.update(monster.color, evenMonstersPassive, evenMonstersPassive)
            #print ('i:'+str(i)+' '+str(evenMonstersPassive)+' '+str(oPP.fitness()))

        self.assertGreater(oPP.fitness(), wimpy.fitness())
        self.assertGreater(oPP.fitness(), brave.fitness())

    def test_frequencyResponse_forHarmonic_WithDrPerceptron(self):
        brave = BraveHypothesis()
        wimpy = WimpyHypothesis()
        drP = DrPerceptron()
        self.setUpDecisioner(brave, wimpy, drP)

        # First we run through the basic training period,
        # which covers the first 101 iterations
        for i in range(101):
            nonAggro = 0 # this means not aggro
            color = [randint(1, 100)]
            monster = Monster(nonAggro, color, 'passive')

            # Get the guess from the decisioner for the first 100,
            # we expect every guess to be 1
            self.assertTrue(self.decisioner.get_guess(monster.color))
            # Then we will update from this guess
            self.decisioner.update(monster.color, 1, monster.action(True))

        # Dr. Perceptron should do better than brave and wimpy
        # when encountering monsters that repeat with a frequency
        # that is trainable given Dr. Perceptron's window size.
        # In other words, Dr. Perceptron trains on a frequency of
        # monsters, but training on a pattern is limited to the
        # input size of Dr. Perceptron (which as of this check-in
        # is 5)
        #
        # We test a staggered input with a repeating pattern
        for i in range(1000):
            aggroPattern = [0, 1, 1, 0, 1]
            aggroIdx = i%len(aggroPattern)
            color = [randint(1, 100)]
            monster = Monster(aggroPattern[aggroIdx], color, 'aggressiveish')

            # Then we update the decisioner directly with the known outcome
            self.decisioner.update(monster.color, aggroPattern[aggroIdx], aggroPattern[aggroIdx])
            # drP should always be better than wimpy
            self.assertGreater(drP.fitness(), wimpy.fitness())

        # After a lot of training on a pattern that is harmonic within
        # the input size, Dr.P should beat out brave
        self.assertGreater(drP.fitness(), brave.fitness())
        # Since the pattern is highly regular within the input size, the
        # fitness should be very close to 1: a harmonic pattern is highly
        # trainable, and high trainability means high fitness.
        self.assertGreater(drP.fitness(), 0.99)

    def test_frequencyResponse_forHarmonic_WithOptimusPerceptron(self):
        brave = BraveHypothesis()
        wimpy = WimpyHypothesis()
        oPP = OptimusPerceptron()
        self.setUpDecisioner(brave, wimpy, oPP)

        # First we run through the basic training period,
        # which covers the first 101 iterations
        for i in range(101):
            nonAggro = 0 # this means not aggro
            color = [randint(1, 100)]
            monster = Monster(nonAggro, color, 'passive')

            # Get the guess from the decisioner for the first 100,
            # we expect every guess to be 1
            self.assertTrue(self.decisioner.get_guess(monster.color))
            # Then we will update from this guess
            self.decisioner.update(monster.color, 1, monster.action(True))

        # Like Dr. Perceptron, Optimus Perceptron should do better than
        # brave and wimpy when encountering monsters that repeat with a
        # frequency that is trainable given its input window size.
        # Training on a pattern is limited to that input size.
        #
        # We test a staggered input with a repeating pattern
        for i in range(1000):
            aggroPattern = [0, 1, 1, 0, 1]
            aggroIdx = i%len(aggroPattern)
            color = [randint(1, 100)]
            monster = Monster(aggroPattern[aggroIdx], color, 'aggressiveish')

            # Then we update the decisioner directly with the known outcome
            self.decisioner.update(monster.color, aggroPattern[aggroIdx], aggroPattern[aggroIdx])
            # oPP should always be better than wimpy
            self.assertGreater(oPP.fitness(), wimpy.fitness())

        # After a lot of training on a pattern that is harmonic within
        # the input size, oPP should beat out brave
        self.assertGreater(oPP.fitness(), brave.fitness())
        # Since the pattern is highly regular within the input size, the
        # fitness should be very close to 1: a harmonic pattern is highly
        # trainable, and high trainability means high fitness.
        self.assertGreater(oPP.fitness(), 0.99)

    def test_GroupedAggroByColor_WithWimpyBraveAndKNN(self):
        brave = BraveHypothesis()
        wimpy = WimpyHypothesis()
        knn = KNearestNeighbors(3)

        self.setUpDecisioner(brave, wimpy, knn)

        def create_monster():
            color = randint(1, 100)

            # Every monster below 50 is aggressive
            if color < 50:
                return Monster(1, [color], 'aggressive')
            # Otherwise, monsters with a color of 50 or above
            # are passive
            else:
                return Monster(0, [color], 'passive')

        # Next we load up the training data
        for i in range(100):
            # Create the monster and run a training step; during the
            # training period we expect every guess to be truthy,
            # i.e. attack
            monster = create_monster()
            self.assertTrue(self.decisioner.get_guess(monster.color))
            self.decisioner.update(monster.color, 1, monster.action(True))

        # After training, KNN should have the best fitness of the
        # hypotheses on this data set
        self.assertGreater(knn.fitness(), brave.fitness())
        self.assertGreater(knn.fitness(), wimpy.fitness())

        # Then we run over the full data set, tracking the actual
        # score and the maximum possible score for comparison
        maximum_value = 5000.0
        actual_value = 0.0

        for i in range(5000):
            # Create the monster, guess on it and grab the outcome
            # which is all required information for the loop
            monster = create_monster()
            guess = self.decisioner.get_guess(monster.color)
            outcome = monster.action(guess)

            # update values for comparison after loop
            maximum_value -= monster._aggressive
            actual_value += outcome

            # Feed the guess and its outcome back into the decisioner
            self.decisioner.update(monster.color, guess, outcome)

            # We need to know that KNN is still our best fit for this
            # data set
            self.assertGreater(knn.fitness(), brave.fitness())
            self.assertGreater(knn.fitness(), wimpy.fitness())

        # Finally we expect the KNN-backed decisioner to score within
        # 10 percent of the best possible outcome
        self.assertGreater(actual_value / maximum_value, 0.9)

    def test_with_all_hypothesis(self):
        brave = BraveHypothesis()
        wimpy = WimpyHypothesis()

        knn3 = KNearestNeighbors(3)
        knn5 = KNearestNeighbors(5)
        knn7 = KNearestNeighbors(7)
        knn11 = KNearestNeighbors(11)

        rando = RandoHypothesis()
        prob = SimpleProbabilityHypothesis()

#        drP3 = OptimusPerceptron(11)
#        drP5 = DrPerceptron(5)
#        drP7 = DrPerceptron(7)
#        drP11 = DrPerceptron(11)
#
        self.setUpDecisioner(brave,
                             wimpy,
                             knn3,
                             knn5,
                             knn7,
                             knn11,
                             rando,
                             prob,
#                             drP3,
#                             drP5,
#                             drP7,
#                             drP11,
                            )

        def create_monster():
            color = randint(1, 100)

            if color < 70:
                if random() >= 0.3:
                    return Monster(0, [color], 'passive')
                return Monster(1, [color], 'aggressive')
            else:
                if random() >= 0.7:
                    return Monster(0, [color], 'passive')
                return Monster(1, [color], 'aggressive')

        for i in range(100):
            monster = create_monster()
            self.decisioner.update(monster.color, 1, monster.action(True))

        maximum_value = 1000.0
        actual_value = 0.0

        for i in range(1000):
            monster = create_monster()

            guess = self.decisioner.get_guess(monster.color)
            outcome = monster.action(guess)
            self.decisioner.update(monster.color, guess, outcome)

            maximum_value -= monster._aggressive
            actual_value += outcome
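
The integration tests above construct Monster(aggressive, color, label) objects and read monster.color, monster._aggressive and monster.action(guess), but the class itself is not shown. A rough sketch consistent with that usage might look like this; the reward model in action() is an assumption (a payoff of 1 only for attacking a passive monster) and the real game rules may differ.

class Monster(object):
    """Hypothetical sketch of the Monster used by the integration tests."""

    def __init__(self, aggressive, color, label):
        self._aggressive = aggressive  # 1 for aggressive, 0 for passive; read by the tests
        self.color = color             # feature vector handed to get_guess
        self.label = label             # descriptive name only

    def action(self, attack):
        # Assumed reward model: attacking a passive monster pays off with 1,
        # every other combination scores 0. The real implementation may
        # instead penalise attacking an aggressive monster.
        if attack and not self._aggressive:
            return 1
        return 0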
Code example #6
class DecisionerTest(unittest.TestCase):
    FAKE_VECTOR = [0, 0]
    FAKE_ACTION = 1
    FAKE_OUTCOME = 1

    def setUp(self):
        self.mocks = [
            MockHypothesis(fitness=0.5),
            MockHypothesis(fitness=0.6),
            MockHypothesis(fitness=0.7)
        ]
        self.decisioner = Decisioner(self.mocks, training_window=0)

    def tearDown(self):
        self.mocks = None
        self.decisioner = None

    def test_get_guess(self):
        """Tests the 'get_guess' method of the Decisioner"""

        # Set up the first mock to be selected for the guess
        self.mocks[0]._fitness = 0.71
        self.mocks[0]._next_guess = 'first'

        response = self.decisioner.get_guess(self.FAKE_VECTOR)
        self.assertEqual('first', response)

        # Set up the second mock to be selected for the guess
        self.mocks[1]._fitness = 0.72
        self.mocks[1]._next_guess = 'second'

        response = self.decisioner.get_guess(self.FAKE_VECTOR)
        self.assertEqual('second', response)

        # Set up the third mock to be selected for the guess
        self.mocks[2]._fitness = 0.73
        self.mocks[2]._next_guess = 'third'

        response = self.decisioner.get_guess(self.FAKE_VECTOR)
        self.assertEqual('third', response)

    def test_get_guess_training_window(self):
        """Tests the 'get_guess' method of the decisioner with the
        training window in place"""

        # First we need to create a new decisioner with a nonzero
        # training window
        decisioner = Decisioner(self.mocks, training_window=100)

        # Then we check that we receive True while inside the window
        for i in range(100):
            self.assertTrue(decisioner.get_guess(self.FAKE_VECTOR))
            decisioner.update(self.FAKE_VECTOR, self.FAKE_ACTION,
                              self.FAKE_OUTCOME)

        # Finally, now that we've exceeded the window, we check that
        # the guess of the chosen (highest-fitness) hypothesis is returned
        self.mocks[0]._fitness = 0.99
        self.mocks[0]._next_guess = 'spam'

        response = decisioner.get_guess(self.FAKE_VECTOR)

        self.assertEqual('spam', response)

    def test_update(self):
        """Tests the 'update' method of the Decisioner"""

        # We simply need to update multiple times, so we do it
        # 10 times in total here
        for i in range(10):
            # Call the update method
            self.decisioner.update(self.FAKE_VECTOR, self.FAKE_ACTION,
                                   self.FAKE_OUTCOME)

            # Check that all of the hypotheses have been updated
            for hypothesis in self.mocks:
                self.assertEqual(i + 1, hypothesis._times_updated)
Code example #7
class DecisionerTest(unittest.TestCase):
    FAKE_VECTOR = [0, 0]
    FAKE_ACTION = 1
    FAKE_OUTCOME = 1

    def setUp(self):
        self.mocks = [MockHypothesis(fitness=0.5),
                      MockHypothesis(fitness=0.6),
                      MockHypothesis(fitness=0.7)]
        self.decisioner = Decisioner(self.mocks, training_window=0)

    def tearDown(self):
        self.mocks = None
        self.decisioner = None

    def test_get_guess(self):
        """Tests the 'get_guess' method of the Decisioner"""

        # Set up the first mock to be selected for the guess
        self.mocks[0]._fitness = 0.71
        self.mocks[0]._next_guess = 'first'

        response = self.decisioner.get_guess(self.FAKE_VECTOR)
        self.assertEqual('first', response)

        # Set up the second mock to be selected for the guess
        self.mocks[1]._fitness = 0.72
        self.mocks[1]._next_guess = 'second'

        response = self.decisioner.get_guess(self.FAKE_VECTOR)
        self.assertEqual('second', response)

        # Set up the third mock to be selected for the guess
        self.mocks[2]._fitness = 0.73
        self.mocks[2]._next_guess = 'third'

        response = self.decisioner.get_guess(self.FAKE_VECTOR)
        self.assertEqual('third', response)

    def test_get_guess_training_window(self):
        """Tests the 'get_guess' method of the decisioner with the
        training window in place"""

        # First we need to create a new decisioner with a nonzero
        # training window
        decisioner = Decisioner(self.mocks, training_window=100)

        # Then we check that we receive True while inside the window
        for i in range(100):
            self.assertTrue(decisioner.get_guess(self.FAKE_VECTOR))
            decisioner.update(self.FAKE_VECTOR, self.FAKE_ACTION, self.FAKE_OUTCOME)

        # Finally, now that we've exceeded the window, we check that
        # the guess of the chosen (highest-fitness) hypothesis is returned
        self.mocks[0]._fitness = 0.99
        self.mocks[0]._next_guess = 'spam'

        response = decisioner.get_guess(self.FAKE_VECTOR)

        self.assertEqual('spam', response)

    def test_update(self):
        """Tests the 'update' method of the Decisioner"""

        # We simply need to update multiple times, so we do it
        # 10 times in total here
        for i in range(10):
            # Call the update method
            self.decisioner.update(self.FAKE_VECTOR, self.FAKE_ACTION, self.FAKE_OUTCOME)

            # Check that all of the hypotheses have been updated
            for hypothesis in self.mocks:
                self.assertEqual(i + 1, hypothesis._times_updated)
Code example #8
 def setUp(self):
     self.mocks = [MockHypothesis(fitness=0.5),
                   MockHypothesis(fitness=0.6),
                   MockHypothesis(fitness=0.7)]
     self.decisioner = Decisioner(self.mocks, training_window=0)
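
All of the tests above exercise the same small Decisioner surface: a constructor taking the hypotheses and a training_window, a get_guess(vector) that keeps answering "attack" while inside the training window and afterwards delegates to the highest-fitness hypothesis, and an update(vector, action, outcome) that is forwarded to every hypothesis. The sketch below is only that inferred interface (the default window of 100 is an assumption), not the project's actual implementation.

class Decisioner(object):
    """Hypothetical sketch of the interface the tests above rely on."""

    def __init__(self, hypotheses, training_window=100):  # default window assumed
        self._hypotheses = list(hypotheses)
        self._training_window = training_window
        self._updates_seen = 0

    def get_guess(self, vector):
        # While still inside the training window, always attack so the
        # hypotheses receive labelled training data (the tests assertTrue here).
        if self._updates_seen < self._training_window:
            return 1
        # Afterwards, delegate to the hypothesis with the best fitness.
        best = max(self._hypotheses, key=lambda h: h.fitness())
        return best.get_guess(vector)

    def update(self, vector, action, outcome):
        # Every hypothesis sees every observation.
        self._updates_seen += 1
        for hypothesis in self._hypotheses:
            hypothesis.update(vector, action, outcome)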