def run_ga(data: tuple):
    """Run one complete genetic-algorithm experiment. Thread-safe.

    Args:
        data (tuple): Packed arguments, in order: the number of the
            iteration executing the function, the number of repetitions,
            the equation to use for the genetic algorithm, the crossover
            rate, the mutation rate, the population size, the number of
            generations, whether to display graphs, and whether the
            output is verbose.
            (i.e. (1, 10, Equation, 0.9, 0.001, 100, 1000, True, False))

    Returns:
        tuple: ``(elapsed_seconds, fittest, generation_ff)`` where
        ``elapsed_seconds`` is the wall-clock duration of the run,
        ``fittest`` is the fittest gene of the final population, and
        ``generation_ff`` lists the best fitness after each generation
        (index 0 is the initial population).
    """
    (iteration_number, repetitions, equation, crossover_rate, mutation_rate,
     population_size, generations, show_display, is_verbose) = data
    # Verbose-output column layout: label left-padded to 20 chars, then value.
    # (Was `format('{0:<20} {1}')`, a no-op call returning the same string.)
    f = '{0:<20} {1}'
    print('GA %s of %s' % (iteration_number, repetitions))
    # Start GA
    tic = timeit.default_timer()
    ga = GA(crossover_rate, mutation_rate)
    population = Population(population_size, equation)
    population.initialize()
    if is_verbose:
        print(
            f.format('(GA %s) Generation 0: ' % iteration_number,
                     population.fittest.fitness))
    generation_ff = [population.fittest.fitness]
    # Evolve population, recording the best fitness of every generation.
    for g in range(generations):
        population = ga.evolve(population)
        if is_verbose:
            print(
                f.format('(GA %s) Generation %s: ' % (iteration_number, g + 1),
                         population.fittest.fitness))
        generation_ff.append(population.fittest.fitness)
    toc = timeit.default_timer()
    # End GA algorithm
    if is_verbose:
        print(
            f.format('(GA %s) Fittest' % iteration_number, population.fittest))
        print(
            f.format('(GA %s) Fittest Fitness.' % iteration_number,
                     population.fittest.fitness))
        print(
            f.format('(GA %s) Time (seconds)' % iteration_number, toc - tic) +
            '\n')
    return toc - tic, population.fittest, generation_ff
def execute(self, definitions): training, initialized, pop_size = definitions self.pop = Population(save_size = initialized, population_size=pop_size) self.setupInitialState() if training: self.runTraining() else: self.runTest() self.results()
    # Accumulate the knapsack's total weight and value; each gene is
    # presumably a (weight, value) pair — confirm against gene_f.
    for i in range(len(x.genes)):
        weight += x.genes[i][0]
        value += x.genes[i][1]
    # Capacity appears to be 15 (matches generate_individuals(gene_f, 15)
    # below); an overweight knapsack scores zero.
    if weight <= 15:
        return value
    return 0


def print_func(x):
    """Render an individual as 'countxgene, ...' using a Counter of its genes."""
    result = ""
    count = collections.Counter(x)
    for repetitions in count.keys():
        # Skip genes with zero count and genes whose first element is 0.
        if count.get(repetitions) != 0 and repetitions[0] != 0:
            result += str(count.get(repetitions)) + \
                "x" + str(repetitions) + ", "
    return result


if __name__ == "__main__":
    pop = Population(1000, 0.1, fitness_f, print_func)
    pop.generate_individuals(gene_f, 15)
    # NOTE(review): legend below suggests evolve() returns per-generation
    # minimum/average/maximum fitness traces — verify against Population.
    y1, y2, y3 = pop.evolve(iterations=ITERATIONS)
    plt.plot(range(pop.generation - 1), y1)
    plt.plot(range(pop.generation - 1), y2)
    plt.plot(range(pop.generation - 1), y3)
    plt.legend(["minimum fitness", "average fitness", "maximum fitness"],
               loc=1)
    plt.ylabel("fitness")
    plt.xlabel("generation")
    plt.show()
ITERATIONS = 50

# The target binary string the GA searches for.
word = list('0010101011010001001011')


def gene_f():
    """Return a random allele: the character '0' or '1'."""
    return random.choice(['0', '1'])


def fitness_f(x):
    """Score an individual by how many of its genes match the target word."""
    return sum(1 for i, gene in enumerate(x.genes) if word[i] == gene)


def print_func(x):
    """Render an individual's genes as a single string followed by ', '."""
    return ''.join(x) + ", "


if __name__ == "__main__":
    pop = Population(100, 0.01, fitness_f, print_func)
    pop.generate_individuals(gene_f, len(word))
    pop.evolve(iterations=ITERATIONS, fitness_limit=len(word))
class JM17290Bot(Bot):
    """ This bot (jm17290_ea.JM17290Bot) is used during optimization and
    hence contains code to facilitate this.  This bot should NOT be run
    during a competition; use jm17290.JM17290(Bot) instead, which has
    hard-coded weights for expert rules set through optimization. """

    def select(self, players, count):
        """ Select players on an individual basis using weighted expert rule
        phenotypes 'res_select' and 'spy_selection' for Resistance members
        and spies respectively.  If weighted-expert selection rules cannot
        choose enough suitable players to go on the mission then the
        remaining slots are populated randomly."""
        maybe = []
        selection = [self]  # Resistance and Spy selecting self by default.
        for p in self.others():
            # Phenotype rules read self.to_select to evaluate this candidate.
            self.to_select = p
            if len(selection) < count:
                if not self.spy:
                    if self.res_select.run():
                        selection.append(p)
                    else:
                        maybe.append(p)
                else:
                    if self.spy_selection.run():
                        selection.append(p)
                    else:
                        maybe.append(p)
        # Fill any remaining slots randomly from the rejected candidates.
        while len(selection) < count:
            p = random.choice(maybe)
            selection.append(p)
            maybe.remove(p)
        return selection

    def sabotage(self):
        """ Resistance members cannot sabotage a mission.  Spy sabotage is
        determined by the 'spy_sabotage' weighted expert rules phenotype."""
        if not self.spy:
            return False
        else:
            return self.spy_sabotage.run()

    def vote(self, team):
        """ Vote for Resistance and Spies is determined by 'res_vote' and
        'spy_vote' weighted expert rule phenotypes respectively. """
        if not self.spy:
            return self.res_vote.run()
        else:
            return self.spy_vote.run()

    def onGameRevealed(self, players, spies):
        """ Create phenotypes that will be used for this games by acquiring
        them randomly from persistent storage.  Resistance play is made up of
        two phenotypes: Select and Vote.  Spy play is made up of three
        phenotypes: Select, Vote and Sabotage.  Phenotypes are executed and
        return a boolean decision based on the weights of the active expert
        rules at the current point in the game.
        """
        self.root = 'jm17290-src/'
        self.population = Population()
        self.gen = self.population.gen(self.root)
        self.population_size = self.population.size(self.root)
        # Bookkeeping sets driving the expert rules; updated by the
        # onTeamSelected/onMissionComplete callbacks below.
        self.perfect_record = set(self.others())
        self.unselected = set(self.others())
        self.failed_one_mission = set()
        self.failed_two_missions = set()
        if not self.spy:
            # RESISTANCE.
            # Draw a random genotype (rule weights) from the current generation.
            self.res_genotype = self.population.genotype(
                random.randint(0, self.population_size - 1), self.gen,
                ResGenotype.Type, self.root)
            # SELECT - Phenotype
            self.res_select = Phenotype(
                # Yes.
                # Select player with perfect record.
                # Select previously unselected player.
                # Forgive one mission fail.
                # Default up vote to avoid deadlock.
                [(lambda: self.to_select in self.perfect_record,
                  self.res_genotype[ResGenotype.PickYesPerfectRecord]),
                 (lambda: self.to_select in self.unselected,
                  self.res_genotype[ResGenotype.PickYesUntested]),
                 (lambda: self.to_select in self.failed_one_mission,
                  self.res_genotype[ResGenotype.PickYesFailed1Mission]),
                 (lambda: True, self.res_genotype[ResGenotype.PickYesDefault])
                 ],
                # No.
                # Player failed one mission.
                # Player failed two missions.
                # Default up vote to avoid deadlock.
                [(lambda: self.to_select in self.failed_one_mission,
                  self.res_genotype[ResGenotype.PickNoFailed1Mission]),
                 (lambda: self.to_select in self.failed_two_missions,
                  self.res_genotype[ResGenotype.PickNoFailed2Missions]),
                 (lambda: True, self.res_genotype[ResGenotype.PickNoDefault])])
            # VOTE - Phenotype.
            self.res_vote = Phenotype(
                # Yes.
                # Vote last attempt to save the mission.
                # Vote for the first mission.
                # Approve own mission.
                # Default vote up to avoid deadlock.
                [(lambda: self.game.tries == 5,
                  self.res_genotype[ResGenotype.VoteYesAttempt5]),
                 (lambda: self.game.turn == 1,
                  self.res_genotype[ResGenotype.VoteYesMission1]),
                 (lambda: self.game.leader == self,
                  self.res_genotype[ResGenotype.VoteYesLeader]),
                 (lambda: True, self.res_genotype[ResGenotype.VoteYesDefault])
                 ],
                # No.
                # Reject if left out of team of three.
                # Reject if team member failed one mission.
                # Reject if team member failed two missions.
                # Default vote up to avoid deadlock.
                [(lambda: len(self.game.team) == 3 and not bool(
                    {self}.intersection(set(self.game.team))),
                  self.res_genotype[ResGenotype.VoteNoNotIncludedTeam3]),
                 (lambda: bool(
                     set(self.game.team).intersection(self.failed_one_mission)
                 ), self.res_genotype[ResGenotype.VoteNoFailed1Mission]),
                 (lambda: bool(
                     set(self.game.team).intersection(self.failed_two_missions)
                 ), self.res_genotype[ResGenotype.VoteNoFailed2Missions]),
                 (lambda: True, self.res_genotype[ResGenotype.VoteNoDefault])])
        else:
            # SPY
            self.spies = spies
            self.spy_genotype = self.population.genotype(
                random.randint(0, self.population_size - 1), self.gen,
                SpyGenotype.Type, self.root)
            # SELECT - Phenotype
            self.spy_selection = Phenotype(
                # Yes.
                # Select player with a perfect record.
                # Select previously unselected player.
                # Default vote up to avoid deadlock.
                [(lambda: self.to_select in self.perfect_record,
                  self.spy_genotype[SpyGenotype.PickYesPerfectRecord]),
                 (lambda: self.to_select in self.unselected,
                  self.spy_genotype[SpyGenotype.PickYesUntested]),
                 (lambda: True, self.spy_genotype[SpyGenotype.PickYesDefault])
                 ],
                # No.
                # Picked self, don't pick another spy.
                # Don't pick VERY suspicious player.
                # Default vote up to avoid deadlock.
                [(lambda: self.to_select in self.spies,
                  self.spy_genotype[SpyGenotype.PickNoAreSpy]),
                 (lambda: self.to_select in self.failed_two_missions,
                  self.spy_genotype[SpyGenotype.PickNoFailed2Missions]),
                 (lambda: True, self.spy_genotype[SpyGenotype.PickNoDefault])])
            # VOTE - Phenotype.
            self.spy_vote = Phenotype(
                # Yes.
                # Vote for final try to avoid detection.
                # Vote for first mission, pretend nothing known.
                # Vote for own mission.
                # Support spy on mission.
                # Support the win.
                # Vote up to avoid deadlock.
                [(lambda: self.game.tries == 5,
                  self.spy_genotype[SpyGenotype.VoteYesAttempt5]),
                 (lambda: self.game.turn == 1,
                  self.spy_genotype[SpyGenotype.VoteYesMission1]),
                 (lambda: self.game.leader == self,
                  self.spy_genotype[SpyGenotype.VoteYesLeader]),
                 (lambda: bool(
                     set(self.game.team).intersection(set(self.spies))),
                  self.spy_genotype[SpyGenotype.VoteYesAtLeastOneSpy]),
                 (lambda: bool(
                     set(self.game.team).intersection(set(self.spies))
                 ) and self.game.losses == 2,
                  self.spy_genotype[SpyGenotype.VoteYesAtLeastOneSpyOneToWin]),
                 (lambda: True, self.spy_genotype[SpyGenotype.VoteYesDefault])
                 ],
                # No.
                # Not included in team of three.
                # Vote against team with suspicious member to avoid detection.
                # Vote against team with VERY suspicious member to avoid
                # detection.
                # Vote against a team with no spies.
                # Vote against a team of all spies.
                # Default vote against to avoid deadlock.
                [(lambda: len(self.game.team) == 3 and not bool(
                    {self}.intersection(set(self.game.team))),
                  self.spy_genotype[SpyGenotype.VoteNoNotIncludedTeam3]),
                 (lambda: bool(
                     set(self.game.team).intersection(self.failed_one_mission)
                 ), self.spy_genotype[SpyGenotype.VoteNoFailed1Mission]),
                 (lambda: bool(
                     set(self.game.team).intersection(self.failed_two_missions)
                 ), self.spy_genotype[SpyGenotype.VoteNoFailed2Missions]),
                 (lambda: not bool(
                     set(self.game.team).intersection(set(self.spies))),
                  self.spy_genotype[SpyGenotype.VoteNoNoSpies]),
                 (lambda: set(self.game.team) == set(self.spies),
                  self.spy_genotype[SpyGenotype.VoteNoAllSpies]),
                 (lambda: True, self.spy_genotype[SpyGenotype.VoteNoDefault])])
            # SABOTAGE - Phenotype.
            self.spy_sabotage = Phenotype(
                # Yes.
                # Sabotage to win the game.
                # Sabotage own mission.
                # Sabotage if only spy on the mission.
                # Default vote up to avoid deadlock.
                [(lambda: self.game.losses == 2,
                  self.spy_genotype[SpyGenotype.SabotageYesToWin]),
                 (lambda: self.game.leader == self,
                  self.spy_genotype[SpyGenotype.SabotageYesLeader]),
                 (lambda: {self} == ({self}.intersection(set(self.game.team))),
                  self.spy_genotype[SpyGenotype.SabotageYesOnlySpyOnMission]),
                 (lambda: True,
                  self.spy_genotype[SpyGenotype.SabotageYesDefault])],
                # No.
                # Do not sabotage a game with all spies.
                # Do not sabotage the first mission.
                # Default vote up to avoid deadlock.
                [(lambda: set(self.game.team) == set(self.spies),
                  self.spy_genotype[SpyGenotype.SabotageNoAllSpies]),
                 (lambda: self.game.turn == 1,
                  self.spy_genotype[SpyGenotype.SabotageNoMission1]),
                 (lambda: True,
                  self.spy_genotype[SpyGenotype.SabotageNoDefault])])

    def onTeamSelected(self, leader, team):
        """ Note players who have been on a mission. """
        for p in team:
            self.unselected.discard(p)

    def onMissionComplete(self, sabotaged):
        """ Note players who have lost a perfect record, failed 1 mission,
        or failed two missions. """
        if sabotaged:
            for p in self.game.team:
                self.perfect_record.discard(p)
                if p in self.failed_one_mission:
                    # Note players who've failed one mission.
                    self.failed_two_missions.add(
                        p)  # Note players who've failed two missions.
                self.failed_one_mission.add(p)

    def onGameComplete(self, win, spies):
        """ Update the genotype with the outcome of the game and persist
        the result. """
        if not self.spy:
            self.res_genotype.update(win)
            self.population.update_genotype(self.res_genotype, self.gen,
                                            self.root)
        else:
            self.spy_genotype.update(win)
            self.population.update_genotype(self.spy_genotype, self.gen,
                                            self.root)
    def onGameRevealed(self, players, spies):
        """ Create phenotypes that will be used for this games by acquiring
        them randomly from persistent storage.  Resistance play is made up of
        two phenotypes: Select and Vote.  Spy play is made up of three
        phenotypes: Select, Vote and Sabotage.  Phenotypes are executed and
        return a boolean decision based on the weights of the active expert
        rules at the current point in the game.
        """
        self.root = 'jm17290-src/'
        self.population = Population()
        self.gen = self.population.gen(self.root)
        self.population_size = self.population.size(self.root)
        # Bookkeeping sets driving the expert rules; presumably updated by
        # the bot's mission callbacks — confirm against the enclosing class.
        self.perfect_record = set(self.others())
        self.unselected = set(self.others())
        self.failed_one_mission = set()
        self.failed_two_missions = set()
        if not self.spy:
            # RESISTANCE.
            # Draw a random genotype (rule weights) from the current generation.
            self.res_genotype = self.population.genotype(
                random.randint(0, self.population_size - 1), self.gen,
                ResGenotype.Type, self.root)
            # SELECT - Phenotype
            self.res_select = Phenotype(
                # Yes.
                # Select player with perfect record.
                # Select previously unselected player.
                # Forgive one mission fail.
                # Default up vote to avoid deadlock.
                [(lambda: self.to_select in self.perfect_record,
                  self.res_genotype[ResGenotype.PickYesPerfectRecord]),
                 (lambda: self.to_select in self.unselected,
                  self.res_genotype[ResGenotype.PickYesUntested]),
                 (lambda: self.to_select in self.failed_one_mission,
                  self.res_genotype[ResGenotype.PickYesFailed1Mission]),
                 (lambda: True, self.res_genotype[ResGenotype.PickYesDefault])
                 ],
                # No.
                # Player failed one mission.
                # Player failed two missions.
                # Default up vote to avoid deadlock.
                [(lambda: self.to_select in self.failed_one_mission,
                  self.res_genotype[ResGenotype.PickNoFailed1Mission]),
                 (lambda: self.to_select in self.failed_two_missions,
                  self.res_genotype[ResGenotype.PickNoFailed2Missions]),
                 (lambda: True, self.res_genotype[ResGenotype.PickNoDefault])])
            # VOTE - Phenotype.
            self.res_vote = Phenotype(
                # Yes.
                # Vote last attempt to save the mission.
                # Vote for the first mission.
                # Approve own mission.
                # Default vote up to avoid deadlock.
                [(lambda: self.game.tries == 5,
                  self.res_genotype[ResGenotype.VoteYesAttempt5]),
                 (lambda: self.game.turn == 1,
                  self.res_genotype[ResGenotype.VoteYesMission1]),
                 (lambda: self.game.leader == self,
                  self.res_genotype[ResGenotype.VoteYesLeader]),
                 (lambda: True, self.res_genotype[ResGenotype.VoteYesDefault])
                 ],
                # No.
                # Reject if left out of team of three.
                # Reject if team member failed one mission.
                # Reject if team member failed two missions.
                # Default vote up to avoid deadlock.
                [(lambda: len(self.game.team) == 3 and not bool(
                    {self}.intersection(set(self.game.team))),
                  self.res_genotype[ResGenotype.VoteNoNotIncludedTeam3]),
                 (lambda: bool(
                     set(self.game.team).intersection(self.failed_one_mission)
                 ), self.res_genotype[ResGenotype.VoteNoFailed1Mission]),
                 (lambda: bool(
                     set(self.game.team).intersection(self.failed_two_missions)
                 ), self.res_genotype[ResGenotype.VoteNoFailed2Missions]),
                 (lambda: True, self.res_genotype[ResGenotype.VoteNoDefault])])
        else:
            # SPY
            self.spies = spies
            self.spy_genotype = self.population.genotype(
                random.randint(0, self.population_size - 1), self.gen,
                SpyGenotype.Type, self.root)
            # SELECT - Phenotype
            self.spy_selection = Phenotype(
                # Yes.
                # Select player with a perfect record.
                # Select previously unselected player.
                # Default vote up to avoid deadlock.
                [(lambda: self.to_select in self.perfect_record,
                  self.spy_genotype[SpyGenotype.PickYesPerfectRecord]),
                 (lambda: self.to_select in self.unselected,
                  self.spy_genotype[SpyGenotype.PickYesUntested]),
                 (lambda: True, self.spy_genotype[SpyGenotype.PickYesDefault])
                 ],
                # No.
                # Picked self, don't pick another spy.
                # Don't pick VERY suspicious player.
                # Default vote up to avoid deadlock.
                [(lambda: self.to_select in self.spies,
                  self.spy_genotype[SpyGenotype.PickNoAreSpy]),
                 (lambda: self.to_select in self.failed_two_missions,
                  self.spy_genotype[SpyGenotype.PickNoFailed2Missions]),
                 (lambda: True, self.spy_genotype[SpyGenotype.PickNoDefault])])
            # VOTE - Phenotype.
            self.spy_vote = Phenotype(
                # Yes.
                # Vote for final try to avoid detection.
                # Vote for first mission, pretend nothing known.
                # Vote for own mission.
                # Support spy on mission.
                # Support the win.
                # Vote up to avoid deadlock.
                [(lambda: self.game.tries == 5,
                  self.spy_genotype[SpyGenotype.VoteYesAttempt5]),
                 (lambda: self.game.turn == 1,
                  self.spy_genotype[SpyGenotype.VoteYesMission1]),
                 (lambda: self.game.leader == self,
                  self.spy_genotype[SpyGenotype.VoteYesLeader]),
                 (lambda: bool(
                     set(self.game.team).intersection(set(self.spies))),
                  self.spy_genotype[SpyGenotype.VoteYesAtLeastOneSpy]),
                 (lambda: bool(
                     set(self.game.team).intersection(set(self.spies))
                 ) and self.game.losses == 2,
                  self.spy_genotype[SpyGenotype.VoteYesAtLeastOneSpyOneToWin]),
                 (lambda: True, self.spy_genotype[SpyGenotype.VoteYesDefault])
                 ],
                # No.
                # Not included in team of three.
                # Vote against team with suspicious member to avoid detection.
                # Vote against team with VERY suspicious member to avoid
                # detection.
                # Vote against a team with no spies.
                # Vote against a team of all spies.
                # Default vote against to avoid deadlock.
                [(lambda: len(self.game.team) == 3 and not bool(
                    {self}.intersection(set(self.game.team))),
                  self.spy_genotype[SpyGenotype.VoteNoNotIncludedTeam3]),
                 (lambda: bool(
                     set(self.game.team).intersection(self.failed_one_mission)
                 ), self.spy_genotype[SpyGenotype.VoteNoFailed1Mission]),
                 (lambda: bool(
                     set(self.game.team).intersection(self.failed_two_missions)
                 ), self.spy_genotype[SpyGenotype.VoteNoFailed2Missions]),
                 (lambda: not bool(
                     set(self.game.team).intersection(set(self.spies))),
                  self.spy_genotype[SpyGenotype.VoteNoNoSpies]),
                 (lambda: set(self.game.team) == set(self.spies),
                  self.spy_genotype[SpyGenotype.VoteNoAllSpies]),
                 (lambda: True, self.spy_genotype[SpyGenotype.VoteNoDefault])])
            # SABOTAGE - Phenotype.
            self.spy_sabotage = Phenotype(
                # Yes.
                # Sabotage to win the game.
                # Sabotage own mission.
                # Sabotage if only spy on the mission.
                # Default vote up to avoid deadlock.
                [(lambda: self.game.losses == 2,
                  self.spy_genotype[SpyGenotype.SabotageYesToWin]),
                 (lambda: self.game.leader == self,
                  self.spy_genotype[SpyGenotype.SabotageYesLeader]),
                 (lambda: {self} == ({self}.intersection(set(self.game.team))),
                  self.spy_genotype[SpyGenotype.SabotageYesOnlySpyOnMission]),
                 (lambda: True,
                  self.spy_genotype[SpyGenotype.SabotageYesDefault])],
                # No.
                # Do not sabotage a game with all spies.
                # Do not sabotage the first mission.
                # Default vote up to avoid deadlock.
                [(lambda: set(self.game.team) == set(self.spies),
                  self.spy_genotype[SpyGenotype.SabotageNoAllSpies]),
                 (lambda: self.game.turn == 1,
                  self.spy_genotype[SpyGenotype.SabotageNoMission1]),
                 (lambda: True,
                  self.spy_genotype[SpyGenotype.SabotageNoDefault])])
# J.Madge 05.11.2017 'promote' and 'tournament_num' must add up to the population size. promote = 3 # J.Madge 05.11.2017 Number of genes to automatically advance to the next generation. tournament_num = 17 # J.Madge 05.11.2017 Number of tournaments to select remaining member of the population. tournament_size = 4 # J.Madge 05.11.2017 Number of genes to be randomly selected for each tournament. # J.Madge 05.11.2017 Cross at points to share selection/vote/sabotage strategies. res_cross_pos = [7, 15] spy_cross_pos = [6, 18, 25] res_mutations = 3 # J.Madge 05.11.2017 Number of mutations in child Resistance genes. spy_mutations = 5 # J.Madge 05.11.2017 Number of mutations in child Spy genes. mutation_lower = 0.8 # J.Madge 05.11.2017 Lower limit of the mutation operator. mutation_upper = 1.2 # J.Madge 05.11.2017 Upper limit of the mutation operator. p = Population() current_gen = p.gen() next_gen = current_gen + 1 # J.Madge 06.11.2017 Select the best Resistance and best Spy to represent this generation. best_res = p.top(1, current_gen, ResGenotype.Type)[0] best_res.set_index(current_gen) p.insert(best_res, 'Best') best_spy = p.top(1, current_gen, SpyGenotype.Type)[0] best_spy.set_index(current_gen) p.insert(best_spy, 'Best') # J.Madge 05.11.2017 Create database for the next generation. p.create(next_gen) # J.Madge 05.11.2017 RESISTANCE: Create the next generation.
from genetic_algorithm import Population

# Generation numbering starts at zero.
initial_generation = 0
# Twenty genotypes make up the population.
population_size = 20
# Genes are seeded with random values drawn from [0.4, 0.6].
initialise_lower_bound = 0.4
initialise_upper_bound = 0.6

p = Population()

# Persist the population's global state: its size and the number of the
# first generation.
p.create_state()
p.set_size(population_size)
p.set_gen(initial_generation)

# Build generation 0 and the 'Best' archive, then fill every genotype
# with initial values inside the configured bounds.
p.create(initial_generation)
p.create('Best')
p.initialise(initial_generation, initialise_lower_bound,
             initialise_upper_bound)
    """how a mutation on a parameter can be defined"""
    # A mutated parameter is simply re-drawn uniformly from [a, b].
    return random.uniform(a, b)


# ------------------------------------Data--------------------------------------#
# NOTE(review): presumably hours studied vs. pass/fail exam outcome (the
# classic logistic-regression example) — confirm with loss_function.
hours = [
    0.50, 0.75, 1.00, 1.25, 1.50, 1.75, 1.75, 2.00, 2.25, 2.50, 2.75, 3.00,
    3.25, 3.50, 4.00, 4.25, 4.50, 4.75, 5.00, 5.50
]
passed = [0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1]

if __name__ == "__main__":
    # initialize parameters
    p_size = 40  # population size
    param_bounds = [-10, 10]  # lower/upper bound for each parameter
    n_params = 2  # two parameters per individual
    per_p = 0.3
    per_k = 0.5
    per_s = 0.1
    p_mute = 0.05  # per-individual mutation probability
    # begin optimizations
    p = Population(p_size, loss_function, generate_individual, mutate)
    p.generate_population(*param_bounds, n_params)
    p.set_train(passed, hours)
    for i in range(1000):
        p.eval_pop_fitness()
        p.next_gen(per_p, per_s, per_k, *param_bounds, n_params)
        p.mutate(p_mute, *param_bounds)
    print("Average fitness: ", p.average_fitness())
    print("Final Parameters: ", p.best_fit())
class Casino:
    """Drives a population of betting AIs against a random number stream.

    Each round a number in [1, 100] is drawn, running statistics over the
    draw history are exposed as the game state, every AI places a bet, and
    profits/losses are settled against the PAYOUTS table.
    """

    NO_ITERATIONS = 1000      # bets per generation during training
    NO_GENERATIONS = 50       # generations to evolve during training
    TEST_ITERATIONS = 100     # bets during examination/test mode
    INITIAL_ITERATIONS = 1000 # warm-up draws to seed the running statistics
    DATA_LEN = 1000           # window length of the running statistics
    pop = None                # Population of AIs, created in execute()
    rng = 0                   # most recent random draw
    KEYS = ["-training", "-pop_size", "-initializion_size"]
    # Payout multiplier per bet threshold number.
    PAYOUTS = {5: 24.6250, 6: 19.7, 7: 16.4166, 8: 14.0714, 9: 12.3125,
               10: 10.9444, 11: 9.8500, 12: 8.9545, 13: 8.2083, 14: 7.5769,
               15: 7.0357, 16: 6.5666, 17: 6.1562, 18: 5.7941, 19: 5.4722,
               20: 5.1842, 21: 4.9250, 22: 4.6904, 23: 4.4772, 24: 4.2826,
               25: 4.1041, 26: 3.9400, 27: 3.7884, 28: 3.6481, 29: 3.5178,
               30: 3.3965, 31: 3.2833, 32: 3.1774, 33: 3.0781, 34: 2.9848,
               35: 2.8970, 36: 2.8142, 37: 2.7361, 38: 2.6621, 39: 2.5921,
               40: 2.5256, 41: 2.4625, 42: 2.4024, 43: 2.3452, 44: 2.2906,
               45: 2.2386, 46: 2.1888, 47: 2.1413, 48: 2.0957, 49: 2.0520,
               50: 2.0102, 51: 1.9700, 52: 1.9213, 53: 1.8942, 54: 1.8584,
               55: 1.8240, 56: 1.7909, 57: 1.7589, 58: 1.7280, 59: 1.6982,
               60: 1.6694, 61: 1.6416, 62: 1.6147, 63: 1.5887, 64: 1.5634,
               65: 1.5390, 66: 1.5153, 67: 1.4924, 68: 1.4701, 69: 1.4485,
               70: 1.4275, 71: 1.4071, 72: 1.3873, 73: 1.3680, 74: 1.3493,
               75: 1.3310, 76: 1.3133, 77: 1.2960, 78: 1.2792, 79: 1.2628,
               80: 1.2468, 81: 1.2312, 82: 1.2160, 83: 1.2012, 84: 1.1867,
               85: 1.1726, 86: 1.1588, 87: 1.1453, 88: 1.1321, 89: 1.1193,
               90: 1.1067, 91: 1.0944, 92: 1.0824, 93: 1.0706, 94: 1.0591,
               95: 1.0478, 96: 1.0368}

    def __init__(self, args):
        definitions = self.definitions(args)
        self.execute(definitions)

    def definitions(self, args):
        """Resolve [training, initialized, pop_size] from parsed CLI args,
        prompting interactively for any value that was not supplied."""
        if args.training is None:
            training = ""
            while training not in ["t", "e"]:
                training = input("enter t for training or e for examination/test")
            training = training == "t"
        else:
            training = args.training
        if args.initialized is None:
            num_ais = -1
            while num_ais < 0 or num_ais > 10:
                num_ais = int(input("how many ais you want to already be initalized"))
            # BUG FIX: 'initialized' was never assigned in this branch,
            # causing a NameError at the return statement below.
            initialized = num_ais
        else:
            initialized = args.initialized
        if args.pop_size is None:
            pop_size = -1
            while pop_size < 0 or pop_size > 100:
                pop_size = int(input("How many ais the population should be made of"))
        else:
            pop_size = args.pop_size
        return [training, initialized, pop_size]

    def execute(self, definitions):
        """Create the population and run the selected mode, then report."""
        training, initialized, pop_size = definitions
        self.pop = Population(save_size=initialized, population_size=pop_size)
        self.setupInitialState()
        if training:
            self.runTraining()
        else:
            self.runTest()
        self.results()

    def setupInitialState(self):
        """Warm up the running statistics with INITIAL_ITERATIONS draws."""
        self.data_results = Stats(self.DATA_LEN)
        print('Setting initial state')
        for _ in range(self.INITIAL_ITERATIONS):
            self.getRandom()
            self.data_results.add(self.rng)
        print('Initial state set')

    def runTraining(self):
        """Evolve the population for NO_GENERATIONS generations, then save."""
        for i in range(self.NO_GENERATIONS):
            print('Generation', i + 1)
            self.runGeneration()
        self.pop.savePop()

    def runTest(self):
        """Run TEST_ITERATIONS evaluation rounds without evolving."""
        for _ in range(self.TEST_ITERATIONS):
            self.runAis()
        self.pop.fitness()

    def runGeneration(self):
        """One generation: NO_ITERATIONS betting rounds, then evolve the AIs."""
        for _ in range(self.NO_ITERATIONS):
            self.runAis()
        self.pop.updateAis()

    def runAis(self):
        """One round: draw a number, refresh stats, and let every AI bet."""
        self.getRandom()
        self.updateStats()
        self.game_state = self.getGameState()
        # Bets are settled sequentially; a commented-out multiprocessing
        # pool was removed as dead code.
        self.handleAllBet()

    def handleAllBet(self):
        for ai in self.pop.population:
            self.handleAiBet(ai)

    def handleAiBet(self, ai):
        """Ask one AI for a bet (on a copy of the state) and settle it."""
        bet = ai.askForBet(self.game_state[:])
        profit = self.decideBet(bet)
        ai.betResult(profit)

    def decideBet(self, bet):
        """Settle a bet and return the AI's profit (negative on a loss).

        ``bet`` is (number, wage, lower): when ``lower > 0`` the AI wins if
        the drawn number is strictly below ``number``, otherwise it wins if
        the drawn number is strictly above ``number``.  A win pays
        ``(PAYOUTS[number] - 1) * wage``; a loss costs ``wage``.
        """
        number = bet[0]
        wage = bet[1]
        lower = bet[2]
        if lower > 0:
            return -wage if number >= self.rng else (self.PAYOUTS[number] - 1) * wage
        return -wage if number <= self.rng else (self.PAYOUTS[number] - 1) * wage

    def getRandom(self):
        """Draw the next number in [1, 100]."""
        self.rng = random.randint(1, 100)

    def getGameState(self):
        """Summary statistics of the draw history shown to the AIs."""
        return [self.data_results.getAverage(), self.data_results.getStdev(),
                self.data_results.getSkew()]

    def updateStats(self):
        # Fold the latest draw into the running statistics window.
        self.data_results.add(self.rng)

    def results(self):
        """Print final balances and plot each AI's balance history."""
        print("Finished execution")
        print([x.balance for x in self.pop.population])
        plt.plot([i for i in range(len(self.pop.population[0].record_balance))],
                 np.transpose([x.record_balance for x in self.pop.population]))
        plt.show()
        input("Enter any string to continue")