def test_AllPassiveMonsters_WithWimpyAndBrave(self):
    """All-passive monsters: brave (always attack) must beat wimpy.

    Every monster encountered is safe to attack, so the always-attack
    hypothesis accumulates correct guesses while the never-attack
    hypothesis accumulates wrong ones.
    """
    brave = BraveHypothesis()
    wimpy = WimpyHypothesis()
    self.setUpDecisioner(brave, wimpy)
    # First we will start with the basic training case
    # which is the first 100 in the range
    for i in range(101):
        monster = Monster(0, [randint(1, 100)], 'passive')
        # Get the guess from the decisioner for the first 100,
        # we expect every guess to be 1
        self.assertTrue(self.decisioner.get_guess(monster.color))
        # Then we will update from this guess
        self.decisioner.update(monster.color, 1, monster.action(True))
    for i in range(5000):
        # BUG FIX: the aggro flag is 0 (non-aggressive) and the test is
        # named "AllPassiveMonsters", so the type label must be
        # 'passive' (the original said 'aggressive' — copy-paste slip).
        monster = Monster(0, [randint(1, 100)], 'passive')
        # The monsters remain passive, so the decisioner should keep
        # guessing 1 (attack) for the next 5000 encounters.
        self.assertTrue(self.decisioner.get_guess(monster.color))
        # Then we will update from this guess
        self.decisioner.update(monster.color, 1, 1)
    # Brave always attacks and attacking was always correct here, so
    # brave's fitness must exceed wimpy's after every iteration.
    self.assertGreater(brave.fitness(), wimpy.fitness())
def test_AllPassiveMonsters_WithRando(self):
    """All-passive monsters: random guessing must beat never-attacking.

    Attacking is always the right move here, so rando (~50% correct)
    out-scores wimpy (never attacks, ~0% correct), and rando's fitness
    should sit near 0.5.
    """
    wimpy = WimpyHypothesis()
    rando = RandoHypothesis()
    self.setUpDecisioner(wimpy, rando)
    # First we will start with the basic training case
    # which is the first 100 in the range
    for i in range(101):
        monster = Monster(0, [randint(1, 100)], 'passive')
        # Get the guess from the decisioner for the first 100,
        # we expect every guess to be 1
        self.assertTrue(self.decisioner.get_guess(monster.color))
        # Then we will update from this guess
        self.decisioner.update(monster.color, 1, monster.action(True))
    for i in range(5000):
        # BUG FIX: the aggro flag is 0 (non-aggressive) and the test is
        # named "AllPassiveMonsters", so the type label must be
        # 'passive' (the original said 'aggressive' — copy-paste slip).
        monster = Monster(0, [randint(1, 100)], 'passive')
        self.decisioner.get_guess(monster.color)
        # Then we will update from this guess
        self.decisioner.update(monster.color, 1, 1)
    # Flipping a coin is better than doing nothing
    self.assertGreater(rando.fitness(), wimpy.fitness())
    # Randomness should be near-even in distribution
    self.assertGreater(rando.fitness(), 0.4)
    self.assertGreater(0.6, rando.fitness())
def test_simpleResponse_WithDrPerceptron(self):
    """DrPerceptron should land between wimpy and brave on this mix."""
    brave = BraveHypothesis()
    wimpy = WimpyHypothesis()
    drP = DrPerceptron()
    self.setUpDecisioner(brave, wimpy, drP)
    # Basic training phase: the first 101 monsters are all passive and
    # every guess from the decisioner is expected to be 1.
    for _ in range(101):
        nonAggro = 0  # non-aggressive flag
        color = [randint(1, 100)]
        monster = Monster(nonAggro, color, 'passive')
        self.assertTrue(self.decisioner.get_guess(monster.color))
        # Feed the observed outcome back into the decisioner.
        self.decisioner.update(monster.color, 1, monster.action(True))
    # DrPerceptron should converge pretty quickly and end up better
    # than wimpy but not as good as brave.
    for _ in range(100):
        aggro = 1  # aggressive flag
        color = [randint(1, 100)]
        monster = Monster(aggro, color, 'aggressive')
        # Update the decisioner with the encounter outcome.
        self.decisioner.update(monster.color, 1, 1)
    self.assertGreater(drP.fitness(), wimpy.fitness())
    self.assertGreater(brave.fitness(), drP.fitness())
def generate_monster():
    """Create a Monster whose disposition is decided by a random color.

    Colors strictly below 50 produce passive monsters; colors 50 and
    above produce aggressive ones.
    """
    color = randint(1, 100)
    aggro, kind = (0, 'passive') if color < 50 else (1, 'aggressive')
    return Monster(aggro, [color], kind)
def create_monster():
    """Create a Monster: colors below 50 are aggressive, the rest passive.

    Note that color == 50 falls into the passive branch.
    """
    color = randint(1, 100)
    # Guard clause: handle the passive half first, then fall through.
    if color >= 50:
        return Monster(0, [color], 'passive')
    return Monster(1, [color], 'aggressive')
def create_monster():
    """Create a Monster with a color-dependent chance of aggression.

    Colors below 70 are aggressive 30% of the time; colors 70 and above
    are aggressive 70% of the time.
    """
    color = randint(1, 100)
    # Both branches of the original drew random() exactly once, so the
    # two cases collapse into a single threshold selection.
    aggro_chance = 0.3 if color < 70 else 0.7
    if random() < aggro_chance:
        return Monster(1, [color], 'aggressive')
    return Monster(0, [color], 'passive')
def test_frequencyResponse_forHarmonic_WithOptimusPerceptron(self):
    """OptimusPerceptron should near-perfectly learn a harmonic pattern."""
    brave = BraveHypothesis()
    wimpy = WimpyHypothesis()
    oPP = OptimusPerceptron()
    self.setUpDecisioner(brave, wimpy, oPP)
    # First we will start with the basic training case
    # which is the first 100 in the range
    for i in range(101):
        nonAggro = 0  # this means not aggro
        color = [randint(1, 100)]
        monster = Monster(nonAggro, color, 'passive')
        # Get the guess from the decisioner for the first 100,
        # we expect every guess to be 1
        self.assertTrue(self.decisioner.get_guess(monster.color))
        # Then we will update from this guess
        self.decisioner.update(monster.color, 1, monster.action(True))
    # OptimusPerceptron should do better than brave and wimpy when
    # encountering monsters that repeat with a frequency that is
    # trainable given the perceptron's window size. In other words,
    # it trains on a frequency of monsters, but training on a pattern
    # is limited to its input size (which as of this check-in is 5).
    #
    # We test a staggered input with a repeating pattern.
    # Hoisted out of the loop: the pattern is loop-invariant, so there
    # is no need to rebuild the list on every iteration.
    aggroPattern = [0, 1, 1, 0, 1]
    for i in range(1000):
        aggroIdx = i % len(aggroPattern)
        color = [randint(1, 100)]
        monster = Monster(aggroPattern[aggroIdx], color, 'aggressiveish')
        # Then we will update from this guess
        self.decisioner.update(monster.color, aggroPattern[aggroIdx], aggroPattern[aggroIdx])
    # oPP should always be better than wimpy
    self.assertGreater(oPP.fitness(), wimpy.fitness())
    # After a lot of training on a pattern that is harmonic within the
    # input size, oPP should beat out brave.
    self.assertGreater(oPP.fitness(), brave.fitness())
    # Since the pattern is highly regular within the input size, the
    # fitness should be really close to 1. With great harmony comes
    # great trainability and therefore great fitness.
    self.assertGreater(oPP.fitness(), 0.99)
def test_frequencyResponse_WithDrPerceptron(self):
    """DrPerceptron should learn an alternating aggro/passive pattern."""
    brave = BraveHypothesis()
    wimpy = WimpyHypothesis()
    drP = DrPerceptron()
    self.setUpDecisioner(brave, wimpy, drP)
    # First we will start with the basic training case
    # which is the first 100 in the range
    for i in range(101):
        nonAggro = 0  # this means not aggro
        color = [randint(1, 100)]
        monster = Monster(nonAggro, color, 'passive')
        # Get the guess from the decisioner for the first 100,
        # we expect every guess to be 1
        self.assertTrue(self.decisioner.get_guess(monster.color))
        # Then we will update from this guess
        self.decisioner.update(monster.color, 1, monster.action(True))
    # Dr. Perceptron should do better than brave and wimpy when
    # encountering monsters that repeat with a frequency that is
    # trainable given Dr. Perceptron's window size. In other words,
    # Dr. Perceptron trains on a frequency of monsters, but training
    # on a pattern is limited to the input size of Dr. Perceptron
    # (which as of this check-in is 5).
    #
    # We test a staggered input.
    # CLEANUP: removed the unused `aggro`/`passive` locals (the second
    # of which even carried the wrong comment "this means aggro").
    for i in range(10):
        evenMonstersPassive = i % 2  # even iterations passive, odd aggro
        color = [randint(1, 100)]
        monster = Monster(evenMonstersPassive, color, 'aggressiveish')
        # Then we will update from this guess
        self.decisioner.update(monster.color, evenMonstersPassive, evenMonstersPassive)
    # drP should always be better than wimpy
    self.assertGreater(drP.fitness(), wimpy.fitness())
    # after 2 sets of 5 inputs drP should be better than brave
    self.assertGreater(drP.fitness(), brave.fitness())
def create_monster(p):
    """Return a passive Monster with probability *p*, else aggressive."""
    is_passive = random() <= p
    aggro, kind = (0, 'passive') if is_passive else (1, 'aggressive')
    return Monster(aggro, [randint(1, 100)], kind)