def run():
    # Load the config file.
    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, my_config)
    config = neat.Config(AgentGenome, neat.DefaultReproduction,
                         neat.DefaultSpeciesSet, neat.DefaultStagnation,
                         config_path)

    # Uses the extended NEAT population PopulationSyn that synchronizes with singularity.
    pop = neat.Population(config)

    # Add reporters.
    stats = neat.StatisticsReporter()
    pop.add_reporter(stats)
    pop.add_reporter(neat.StdOutReporter(True))
    # Save a checkpoint every 100 generations or 900 seconds.
    rep = neat.Checkpointer(100, 900)
    pop.add_reporter(rep)

    # Class for evaluating the population.
    ec = GenomeEvaluator(ts_f, vs_f)

    # Initialize genome fitness and gen_best for the first iteration only.
    for g in itervalues(pop.population):
        g.fitness = -10000000.0
        ec.genomes_h.append(g)
        gen_best = g

    # Initializations.
    avg_score_v = -10000000.0
    avg_score_v_ant = avg_score_v
    avg_score = avg_score_v
    iteration_counter = 0
    best_fitness = -2000.0
    pop_size = len(pop.population)
    # Sets the number of consecutive iterations.
    num_iterations = round(200 / len(pop.population)) + 1

    # Repeat NEAT iterations until solved or keyboard interrupt.
    while 1:
        try:
            # If it is not the first iteration, calculate training and validation scores.
            if iteration_counter > 0:
                avg_score = ec.training_validation_score(gen_best, config)
            if iteration_counter >= 0:
                # Synchronizes with singularity, migrating a maximum of 3 specimens.
                # pop.syn_singularity(4, my_url, stats, avg_score, rep.current_generation, config, ec.genomes_h)
                pop.species.speciate(config, pop.population, pop.generation)
                print("\nSpeciation after migration done")
                # Perform pending evaluations on the singularity network, max 2.
                # pop.evaluate_pending(2)

            # Increment the iteration counter.
            iteration_counter = iteration_counter + 1

            # Execute num_iterations consecutive iterations of the NEAT algorithm.
            gen_best = pop.run(ec.evaluate_genomes, 2)

            # Verify that the training score is enough to stop the NEAT algorithm.
            # TODO: change to validation score when generalization is ok.
            if avg_score < 2000000000:
                solved = False
            if solved:
                print("Solved.")
                # Save the winners.
                for n, g in enumerate(best_genomes):
                    name = 'winner-{0}'.format(n)
                    with open(name + '.pickle', 'wb') as f:
                        pickle.dump(g, f)
                break
        except KeyboardInterrupt:
            print("User break.")
            break

    env.close()
def run(config_file):
    # Load configuration.
    config = neat.Config(
        neat.DefaultGenome,
        neat.DefaultReproduction,
        neat.DefaultSpeciesSet,
        neat.DefaultStagnation,
        config_file,
    )

    # Create the population, which is the top-level object for a NEAT run.
    p = neat.Population(config)

    # Add a stdout reporter to show progress in the terminal.
    p.add_reporter(neat.StdOutReporter(True))
    stats = neat.StatisticsReporter()
    p.add_reporter(stats)

    # Run for up to `runs` generations, evaluating genomes in parallel.
    pe = neat.ParallelEvaluator(multiprocessing.cpu_count(), eval_genome)
    winner = p.run(pe.evaluate, runs)

    winner_net = neat.nn.FeedForwardNetwork.create(winner, config)

    outputs = []
    for xi, xo in zip(x_inputs, x_outputs):
        output = winner_net.activate(xi)
        outputs.append(output)
        print("input {!r}, expected output {!r}, got {!r}".format(xi, xo, output))

    xi = tuple([0, 0])
    xo = [tuple([0])]
    output = winner_net.activate(xi)
    print("input {!r}, expected output {!r}, got {!r}".format(xi, xo, output))

    visualize.draw_net(config, winner, True)
    visualize.plot_stats(stats, ylog=False, view=True)
    visualize.plot_species(stats, view=True)

    fig = plt.figure(figsize=(16, 9))
    ax = fig.gca(projection='3d')

    # Make data.
    X = np.arange(-32.768, 32.768, 0.25)
    Y = np.arange(-32.768, 32.768, 0.25)
    X, Y = np.meshgrid(X, Y)
    OZ = []
    for i in range(X.shape[0]):
        for j in range(X.shape[1]):
            a = np.array([X[i][j] / 32.768, Y[i][j] / 32.768])
            OZ.append(winner_net.activate(tuple(a)))
    OZ = np.array(OZ).reshape(X.shape)
    Z = OZ * 24

    # Plot the surface.
    surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,
                           linewidth=0, antialiased=False, alpha=0.4)

    # Customize the z axis.
    ax.set_zlim(-1.01, 25)
    ax.zaxis.set_major_locator(LinearLocator(10))
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))

    outputs = []
    for xi in x_inputs:
        output = winner_net.activate(xi)
        outputs.append(output)

    x = []
    y = []
    z = []
    for i in range(1000):
        x.append(x_inputs[i][0] * 32.768)
        y.append(x_inputs[i][1] * 32.768)
        z.append(outputs[i][0] * 24)
    ax.scatter(x, y, z, c='k', marker='o')

    # Add a color bar which maps values to colors.
    fig.colorbar(surf, shrink=0.5, aspect=5)
    plt.savefig("fitting_ackley2")
    return winner
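# The ParallelEvaluator above expects a top-level eval_genome(genome, config) -> float
# callable. The original evaluator is not included in this snippet; the following is a
# minimal sketch under the same assumptions (x_inputs/x_outputs are the normalized Ackley
# samples used for fitting), not the project's actual implementation.
def eval_genome(genome, config):
    net = neat.nn.FeedForwardNetwork.create(genome, config)
    error = 0.0
    for xi, xo in zip(x_inputs, x_outputs):
        output = net.activate(xi)
        error += (output[0] - xo[0]) ** 2
    # NEAT maximizes fitness, so return the negative mean squared error.
    return -error / len(x_inputs)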
def prescribe(start_date_str: str,
              end_date_str: str,
              path_to_prior_ips_file: str,
              path_to_cost_file: str,
              output_file_path) -> None:

    start_date = pd.to_datetime(start_date_str, format='%Y-%m-%d')
    end_date = pd.to_datetime(end_date_str, format='%Y-%m-%d')

    # Load historical data with basic preprocessing
    print("Loading historical data...")
    df = prepare_historical_df()

    # Restrict it to dates before the start_date
    df = df[df['Date'] <= start_date]

    # Fill in any missing case data using predictor given ips_df.
    # TODO: ignore ips_df for now, and instead assume we have case
    # data for all days and geos up until the start_date.

    # Create historical data arrays for all geos
    past_cases = {}
    past_ips = {}
    for geo in df['GeoID'].unique():
        geo_df = df[df['GeoID'] == geo]
        past_cases[geo] = np.maximum(0, np.array(geo_df[CASES_COL]))
        past_ips[geo] = np.array(geo_df[IP_COLS])

    # Gather values for scaling network output
    ip_max_values_arr = np.array([IP_MAX_VALUES[ip] for ip in IP_COLS])

    # Load prescriptors
    checkpoint = neat.Checkpointer.restore_checkpoint(PRESCRIPTORS_FILE)
    prescriptors = checkpoint.population.values()
    config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                         neat.DefaultSpeciesSet, neat.DefaultStagnation,
                         'config-prescriptor')

    # Load IP costs to condition prescriptions
    cost_df = pd.read_csv(path_to_cost_file)
    cost_df['RegionName'] = cost_df['RegionName'].fillna("")
    cost_df = add_geo_id(cost_df)
    geo_costs = {}
    for geo in cost_df['GeoID'].unique():
        costs = cost_df[cost_df['GeoID'] == geo]
        cost_arr = np.array(costs[IP_COLS])[0]
        geo_costs[geo] = cost_arr

    # Generate prescriptions
    prescription_dfs = []
    for prescription_idx, prescriptor in enumerate(prescriptors):
        print("Generating prescription", prescription_idx, "...")

        # Create net from genome
        net = neat.nn.FeedForwardNetwork.create(prescriptor, config)

        # Set up dictionary for keeping track of prescription
        df_dict = {'CountryName': [], 'RegionName': [], 'Date': []}
        for ip_col in sorted(IP_MAX_VALUES.keys()):
            df_dict[ip_col] = []

        # Set initial data
        eval_past_cases = deepcopy(past_cases)
        eval_past_ips = deepcopy(past_ips)

        # Generate prescriptions one day at a time, feeding resulting
        # predictions from the predictor back into the prescriptor.
        for date in pd.date_range(start_date, end_date):
            date_str = date.strftime("%Y-%m-%d")

            # Get prescription for all regions
            for geo in df['GeoID'].unique():

                # Prepare input data. Here we use log to place cases
                # on a reasonable scale; many other approaches are possible.
                X_cases = np.log(eval_past_cases[geo][-NB_LOOKBACK_DAYS:] + 1)
                X_ips = eval_past_ips[geo][-NB_LOOKBACK_DAYS:]
                X_costs = geo_costs[geo]
                X = np.concatenate([X_cases.flatten(),
                                    X_ips.flatten(),
                                    X_costs])

                # Get prescription
                prescribed_ips = net.activate(X)

                # Map prescription to integer outputs
                prescribed_ips = (prescribed_ips * ip_max_values_arr).round()

                # Add it to prescription dictionary
                country_name, region_name = geo.split('__')
                if region_name == 'nan':
                    region_name = np.nan
                df_dict['CountryName'].append(country_name)
                df_dict['RegionName'].append(region_name)
                df_dict['Date'].append(date_str)
                for ip_col, prescribed_ip in zip(IP_COLS, prescribed_ips):
                    df_dict[ip_col].append(prescribed_ip)

            # Create dataframe from prescriptions
            pres_df = pd.DataFrame(df_dict)

            # Make prediction given prescription for all countries
            pred_df = get_predictions(start_date_str, date_str, pres_df)

            # Update past data with new day of prescriptions and predictions
            pres_df['GeoID'] = pres_df['CountryName'] + '__' + pres_df['RegionName'].astype(str)
            pred_df['RegionName'] = pred_df['RegionName'].fillna("")
            pred_df['GeoID'] = pred_df['CountryName'] + '__' + pred_df['RegionName'].astype(str)
            new_pres_df = pres_df[pres_df['Date'] == date_str]
            new_pred_df = pred_df[pred_df['Date'] == date_str]
            for geo in df['GeoID'].unique():
                geo_pres = new_pres_df[new_pres_df['GeoID'] == geo]
                geo_pred = new_pred_df[new_pred_df['GeoID'] == geo]

                # Append array of prescriptions
                pres_arr = np.array([geo_pres[ip_col].values[0]
                                     for ip_col in IP_COLS]).reshape(1, -1)
                eval_past_ips[geo] = np.concatenate([eval_past_ips[geo], pres_arr])

                # It is possible that the predictor does not return values for some regions.
                # To make sure we generate full prescriptions, this script continues anyway.
                # Geos that are ignored in this way by the predictor will not be used in
                # quantitative evaluation. A list of such geos can be found in unused_geos.txt.
                if len(geo_pred) != 0:
                    eval_past_cases[geo] = np.append(eval_past_cases[geo],
                                                     geo_pred[PRED_CASES_COL].values[0])

        # Add prescription df to list of all prescriptions for this submission
        pres_df['PrescriptionIndex'] = prescription_idx
        prescription_dfs.append(pres_df)

    # Combine dfs for all prescriptions into a single df for the submission
    prescription_df = pd.concat(prescription_dfs)

    # Create the output path
    os.makedirs(os.path.dirname(output_file_path), exist_ok=True)

    # Save to a csv file
    prescription_df.to_csv(output_file_path, index=False)
    print('Prescriptions saved to', output_file_path)

    return
# Loading classifiers
lexiwinner_vn = load_object('lexiwinner_vn')
lexiwinner_n = load_object('lexiwinner_n')
lexiwinner_neu = load_object('lexiwinner_neu')
lexiwinner_p = load_object('lexiwinner_p')
lexiwinner_vp = load_object('lexiwinner_vp')

# Loading test_inputs
with open('lexitest_inputs.txt', 'rb') as f:
    test_inputs = pickle.load(f)

# Creating neural networks from winning genomes
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                     neat.DefaultSpeciesSet, neat.DefaultStagnation,
                     'settings.ini')
net_vn = neat.nn.FeedForwardNetwork.create(lexiwinner_vn, config)
net_n = neat.nn.FeedForwardNetwork.create(lexiwinner_n, config)
net_neu = neat.nn.FeedForwardNetwork.create(lexiwinner_neu, config)
net_p = neat.nn.FeedForwardNetwork.create(lexiwinner_p, config)
net_vp = neat.nn.FeedForwardNetwork.create(lexiwinner_vp, config)

# Creating solution set (the snippet was cut off mid-literal; the 'vp' entry and closing
# bracket below follow the pattern of the other classifiers)
solution = pd.DataFrame({
    'vn': [net_vn.activate(x)[0] for x in test_inputs],
    'n': [net_n.activate(x)[0] for x in test_inputs],
    'neu': [net_neu.activate(x)[0] for x in test_inputs],
    'p': [net_p.activate(x)[0] for x in test_inputs],
    'vp': [net_vp.activate(x)[0] for x in test_inputs],
})
# Fragment: trailing arguments of a gym environment registration call
# (the opening of the register(...) call is outside this snippet).
    max_episode_steps=1000,
    tags={"pg_complexity": 8000000})


def int_a(a):
    return np.array(a)


def fitness(rec):
    f = rec['reward'].sum(axis=1).mean()
    return f


# Load configuration
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                     neat.DefaultSpeciesSet, neat.DefaultStagnation,
                     os.path.join(os.path.dirname(__file__), 'config-miko'))

# Construct experiment
exp = NEATGymExperiment(
    'MiKo-v1',
    config,
    interpret_action=int_a,
    runs_per_genome=2,
    extract_fitness=fitness,
    mode='parallel',
    instances=7,
    # render_all=True,
    # network=neat.nn.GRUNetwork,
    # starting_gen=0
)
def run(config_file):
    # Load the configuration file, performing the necessary parsing to collect the settings.
    config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                         neat.DefaultSpeciesSet, neat.DefaultStagnation,
                         config_file)

    # Create a population of genomes based on the settings in the config file.
    p = neat.Population(config)

    # Add some terminal reporters to produce short progress reports during execution.
    p.add_reporter(neat.StdOutReporter(True))
    stats = neat.StatisticsReporter()
    p.add_reporter(stats)

    # Report to the console and save a file with the current state of the genome population,
    # to avoid losing progress if execution is interrupted (e.g. by a power failure).
    p.add_reporter(neat.Checkpointer(5))

    # Run the fitness evaluation method for the whole population: the first parameter is the
    # method and the second the number of generations to run. If the target fitness is reached
    # before that many generations, execution stops, since the primary goal is always to reach
    # the desired fitness value.
    winner = p.run(eval_genomes, 100)

    # Show which genome obtained the highest fitness across the evaluations, and/or the best
    # genome that reached a value equal to or greater than the one set in the config file.
    print('\nBest genome:\n{!s}'.format(winner))

    # Turn the genome into a phenotype for a visual analysis of its result.
    print('\nOutput:')
    winner_net = neat.nn.RecurrentNetwork.create(winner, config)

    # Negative keys name the neural network inputs; non-negative keys name the outputs.
    node_names = {
        -1: 'dintancia projetil 1', -2: 'distancia projetil 2', -3: 'distancia projetil 3',
        -4: 'distancia projetil 4', -5: 'distancia projetil 5', -6: 'distancia projetil 6',
        -7: 'distancia projetil 7', -8: 'distancia projetil 8', -9: 'distancia projetil 9',
        -10: 'distancia projetil 10', -11: 'distancia projetil 11', -12: 'distancia projetil 12',
        -13: 'distancia projetil 13', -14: 'distancia projetil 14', -15: 'distancia projetil 15',
        -16: 'distancia projetil 16', -17: 'distancia inimigo 1', -18: 'distancia inimigo 2',
        -19: 'direcao sprites 1', -20: 'direcao sprites 2',
        0: 'esquerda', 1: 'direita', 2: 'pular', 3: 'atirar', 4: 'release'
    }

    # Draw a visual representation of the "champion" phenotype.
    visualize.draw_net(config, winner, True, node_names=node_names)
    # Draw a 2D plot with fitness on the y axis and generations on the x axis,
    # showing how the fitness level evolves across generations.
    visualize.plot_stats(stats, ylog=False, view=True)
    # Draw the variation and creation of new species across generations.
    visualize.plot_species(stats, view=True)
# Fragment of the per-frame game loop inside eval_genomes (the enclosing loop is outside
# this snippet).
        counter += 1

        # If the game ends (the monkey collides with Charlie)
        if done:
            # Mark the game as finished
            done = True
            # Print the genome number and the score it obtained
            print(genome_id, current_score)
            # Update this genome's fitness
            genome.fitness = current_score


if __name__ == "__main__":
    # Initialize the neural network configuration and the genome characteristics
    config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                         neat.DefaultSpeciesSet, neat.DefaultStagnation,
                         'config-feedforward')

    p = neat.Population(config)
    p.add_reporter(neat.StdOutReporter(True))
    p.add_reporter(neat.StatisticsReporter())
    p.add_reporter(neat.Checkpointer(10))

    winner = p.run(eval_genomes)

    with open('winner.pkl', 'wb') as output:
        pickle.dump(winner, output, 1)
# Tail of plot_layers(genome, config, filename), which builds and renders the graphviz
# network (the function header is outside this snippet).
    g.graph_attr.update(splines="false", nodesep='1', ranksep='2')
    with g.subgraph(name="input") as c:
        for n in layers['input']:
            c.attr(rank='same')
            c.node(str(n), str(n), color="#2ecc71")
    for i in range(1, 1 + layers['n_layer']):
        with g.subgraph(name="hidden" + str(i)) as c:
            name_layer = 'hidden' + str(i)
            for n in layers[name_layer]:
                c.attr(rank='same')
                c.node(str(n), str(n), color="#3498db")
    with g.subgraph(name="output") as c:
        for n in layers['output']:
            c.attr(rank='same')
            c.node(str(n), str(n), color="red")
    for edge in layers['connections']:
        g.edge(str(edge[0]), str(edge[1]), arrowhead="none")
    g.render(filename, view=True)


with open('../../winner_genome', 'rb') as f:
    winner = pickle.load(f)

config_file = "../../config"
config = neat.Config(MyGenome, EliteReproduction,
                     neat.DefaultSpeciesSet, neat.DefaultStagnation,
                     config_file)

plot_layers(winner, config, 'prova')
def run():
    # Load the config file, which is assumed to live in
    # the same directory as this script.
    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, 'config')
    config = neat.Config(LanderGenome, neat.DefaultReproduction,
                         neat.DefaultSpeciesSet, neat.DefaultStagnation,
                         config_path)

    pop = neat.Population(config)
    stats = neat.StatisticsReporter()
    pop.add_reporter(stats)
    pop.add_reporter(neat.StdOutReporter(True))
    # Checkpoint every 25 generations or 900 seconds.
    pop.add_reporter(neat.Checkpointer(25, 900))

    # Run until the winner from a generation is able to solve the environment
    # or the user interrupts the process.
    ec = PooledErrorCompute()
    while 1:
        try:
            gen_best = pop.run(ec.evaluate_genomes, 5)

            # print(gen_best)

            visualize.plot_stats(stats, ylog=False, view=False, filename="fitness.svg")

            plt.plot(ec.episode_score, 'g-', label='score')
            plt.plot(ec.episode_length, 'b-', label='length')
            plt.grid()
            plt.legend(loc='best')
            plt.savefig("scores.svg")
            plt.close()

            mfs = sum(stats.get_fitness_mean()[-5:]) / 5.0
            print("Average mean fitness over last 5 generations: {0}".format(mfs))

            mfs = sum(stats.get_fitness_stat(min)[-5:]) / 5.0
            print("Average min fitness over last 5 generations: {0}".format(mfs))

            # Use the best genomes seen so far as an ensemble-ish control system.
            best_genomes = stats.best_unique_genomes(3)
            best_networks = []
            for g in best_genomes:
                best_networks.append(neat.nn.FeedForwardNetwork.create(g, config))

            solved = True
            best_scores = []
            for k in range(100):
                observation = env.reset()
                score = 0
                step = 0
                while 1:
                    step += 1
                    # Use the total reward estimates from all of the best networks
                    # to determine the best action given the current state.
                    votes = np.zeros((4, ))
                    for n in best_networks:
                        output = n.activate(observation)
                        votes[np.argmax(output)] += 1
                    best_action = np.argmax(votes)
                    observation, reward, done, info = env.step(best_action)
                    score += reward
                    env.render()
                    if done:
                        break

                ec.episode_score.append(score)
                ec.episode_length.append(step)

                best_scores.append(score)
                avg_score = sum(best_scores) / len(best_scores)
                print(k, score, avg_score)
                if avg_score < 200:
                    solved = False
                    break

            if solved:
                print("Solved.")

                # Save the winners.
                for n, g in enumerate(best_genomes):
                    name = 'winner-{0}'.format(n)
                    with open(name + '.pickle', 'wb') as f:
                        pickle.dump(g, f)

                    visualize.draw_net(config, g, view=False,
                                       filename=name + "-net.gv")
                    visualize.draw_net(config, g, view=False,
                                       filename=name + "-net-enabled.gv",
                                       show_disabled=False)
                    visualize.draw_net(config, g, view=False,
                                       filename=name + "-net-enabled-pruned.gv",
                                       show_disabled=False, prune_unused=True)
                break
        except KeyboardInterrupt:
            print("User break.")
            break

    env.close()
def run(n_generations, n_processes):
    # Load the config file, which is assumed to live in
    # the same directory as this script.
    config_path = os.path.join(os.path.dirname(__file__), "neat.cfg")
    config = neat.Config(
        neat.DefaultGenome,
        neat.DefaultReproduction,
        neat.DefaultSpeciesSet,
        neat.DefaultStagnation,
        config_path,
    )

    envs = [
        t_maze.TMazeEnv(init_reward_side=i, n_trials=100) for i in [1, 0, 1, 0]
    ]

    evaluator = MultiEnvEvaluator(make_net, activate_net, envs=envs,
                                  batch_size=batch_size, max_env_steps=1000)

    if n_processes > 1:
        pool = multiprocessing.Pool(processes=n_processes)

        def eval_genomes(genomes, config):
            fitnesses = pool.starmap(evaluator.eval_genome,
                                     ((genome, config) for _, genome in genomes))
            for (_, genome), fitness in zip(genomes, fitnesses):
                genome.fitness = fitness
    else:

        def eval_genomes(genomes, config):
            for i, (_, genome) in enumerate(genomes):
                try:
                    genome.fitness = evaluator.eval_genome(
                        genome, config, debug=DEBUG and i % 100 == 0)
                except Exception as e:
                    print(genome)
                    raise e

    pop = neat.Population(config)
    stats = neat.StatisticsReporter()
    pop.add_reporter(stats)
    reporter = neat.StdOutReporter(True)
    pop.add_reporter(reporter)
    logger = LogReporter("./logs/adaptive.json", evaluator.eval_genome)
    pop.add_reporter(logger)

    winner = pop.run(eval_genomes, n_generations)

    print(winner)
    final_performance = evaluator.eval_genome(winner, config)
    print("Final performance: {}".format(final_performance))
    generations = reporter.generation + 1
    return generations
def run():
    # Load the config file, which is assumed to live in
    # the same directory as this script.
    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, 'config')
    config = neat.Config(LanderGenome, neat.DefaultReproduction,
                         neat.DefaultSpeciesSet, neat.DefaultStagnation,
                         config_path)

    pop = neat.Population(config)
    stats = neat.StatisticsReporter()
    pop.add_reporter(stats)
    pop2 = neat.Population(config)
    stats2 = neat.StatisticsReporter()
    pop.add_reporter(stats)
    pop2.add_reporter(stats2)
    pop.add_reporter(neat.StdOutReporter(True))
    # Checkpoint every 25 generations or 900 seconds.
    rep = neat.Checkpointer(25, 900)
    pop.add_reporter(rep)

    # Assign a gen_best so the rest can be loaded from syn.
    for g in itervalues(pop.population):
        gen_best = g
        g.fitness = -10000000.0

    # Run until the winner from a generation is able to solve the environment
    # or the user interrupts the process.
    ec = PooledErrorCompute()
    temp = 0
    while 1:
        try:
            if temp >= 0:
                # TODO: SYNCHRONIZATION FUNCTION WITH SINGULARITY
                # Read the last checkpoint from syn into pop2: make a
                # getLastParam(process_hash, use_current) request to syn.
                # TODO: MAKE THE PROCESS CONFIGURABLE AND LOOKED UP BY HASH, not by id.
                res = requests.get(
                    my_url + "/processes/1?username=harveybc&pass_hash=$2a$04$ntNHmofQoMoajG89mTEM2uSR66jKXBgRQJnCgqfNN38aq9UkN4Y6q&process_hash=ph")
                cont = res.json()
                print('\ncurrent_block_performance =',
                      cont['result'][0]['current_block_performance'])
                print('\nlast_optimum_id =', cont['result'][0]['last_optimum_id'])
                last_optimum_id = cont['result'][0]['last_optimum_id']

                # If the reported performance pop2_champion_fitness > pop1_champion_fitness:
                best_fitness = gen_best.fitness
                print('\nbest_fitness =', best_fitness)
                if cont['result'][0]['current_block_performance'] > best_fitness:
                    # Make a GetParameter(id) request.
                    res_p = requests.get(
                        my_url + "/parameters/" + str(last_optimum_id) +
                        "?username=harveybc&pass_hash=$2a$04$ntNHmofQoMoajG89mTEM2uSR66jKXBgRQJnCgqfNN38aq9UkN4Y6q&process_hash=ph")
                    cont_param = res_p.json()
                    # Download the checkpoint from the link in the response if cont.parameter_link is set.
                    print('\ncont_param =', cont_param)
                    print('\nmigrations =')
                    if cont_param['result'][0]['parameter_link'] is not None:
                        genom_data = requests.get(
                            cont_param['result'][0]['parameter_link']).content
                        with open('remote_reps', 'wb') as handler:
                            handler.write(genom_data)
                            handler.close()
                        # Load the downloaded genomes into a new population pop2.
                        with open('remote_reps', 'rb') as f:
                            remote_reps = pickle.load(f)

                        # OP.MIGRATION: replace the worst genome of the closest pop1 species with
                        # the new pop2 champion, as in http://neo.lcc.uma.es/Articles/WRH98.pdf
                        # For each element of remote_reps, find the closest local genome; if the
                        # remote fitness > local, replace it.
                        for i in range(len(remote_reps)):
                            closer = None
                            min_dist = None
                            for g in itervalues(pop.population):
                                if g not in remote_reps:
                                    dist = g.distance(remote_reps[i], config.genome_config)
                                else:
                                    dist = 100000000  # do not count already migrated remote_reps
                                if closer is None or min_dist is None:
                                    closer = deepcopy(g)
                                    min_dist = dist
                                if dist < min_dist:
                                    closer = deepcopy(g)
                                    min_dist = dist
                            # For the best genome in position 0:
                            if i == 0:
                                tmp_genom = deepcopy(remote_reps[i])
                                # Hack: overwrite the original genome key with the replacing one.
                                tmp_genom.key = closer.key
                                pop.population[closer.key] = deepcopy(tmp_genom)
                                print(" gen_best=", closer.key)
                                pop.best_genome = deepcopy(tmp_genom)
                                gen_best = deepcopy(tmp_genom)
                            else:
                                # If the remote fitness > local, replace the local genome with the remote one.
                                if closer is not None:
                                    if closer not in remote_reps:
                                        # Added condition so the champion does NOT migrate, only the reps (trial).
                                        if closer.fitness is not None and remote_reps[i].fitness is not None:
                                            if remote_reps[i].fitness > closer.fitness:
                                                tmp_genom = deepcopy(remote_reps[i])
                                                # Hack: overwrite the original genome key with the replacing one.
                                                tmp_genom.key = closer.key
                                                pop.population[closer.key] = deepcopy(tmp_genom)
                                                print("Replaced=", closer.key)
                                                # Update gen_best and best_genome to the remote one.
                                                pop.best_genome = deepcopy(tmp_genom)
                                                gen_best = deepcopy(tmp_genom)
                                        if closer.fitness is None:
                                            tmp_genom = deepcopy(remote_reps[i])
                                            # Hack: overwrite the original genome key with the replacing one.
                                            tmp_genom.key = len(pop.population) + 1
                                            pop.population[tmp_genom.key] = tmp_genom
                                            print("Created Por closer.fitness=NONE : ", tmp_genom.key)
                                            # Update gen_best and best_genome to the remote one.
                                            pop.best_genome = deepcopy(tmp_genom)
                                            gen_best = deepcopy(tmp_genom)
                                    else:
                                        # If closer is in remote_reps, there is no other nearby
                                        # local genome, so just add the remote one.
                                        tmp_genom = deepcopy(remote_reps[i])
                                        # Hack: overwrite the original genome key with the replacing one.
                                        tmp_genom.key = len(pop.population) + 1
                                        pop.population[tmp_genom.key] = tmp_genom
                                        print("Created por Closer in remote_reps=", tmp_genom.key)
                                        # Update gen_best and best_genome to the remote one.
                                        pop.best_genome = deepcopy(tmp_genom)
                                        gen_best = deepcopy(tmp_genom)

                        # Run speciation.
                        pop.species.speciate(config, pop.population, pop.generation)
                        print("\nSpeciation after migration done")

                # If the reported performance is lower (but not equal) than pop1's:
                if cont['result'][0]['current_block_performance'] < best_fitness:
                    # Get remote_reps: make a GetParameter(id) request.
                    remote_reps = None
                    res_p = requests.get(
                        my_url + "/parameters/" + str(last_optimum_id) +
                        "?username=harveybc&pass_hash=$2a$04$ntNHmofQoMoajG89mTEM2uSR66jKXBgRQJnCgqfNN38aq9UkN4Y6q&process_hash=ph")
                    cont_param = res_p.json()
                    # Download the checkpoint from the link in the response if cont.parameter_link is set.
                    print('\ncont_param =', cont_param)
                    print('\nmigrations =')
                    if cont_param['result'][0]['parameter_link'] is not None:
                        genom_data = requests.get(
                            cont_param['result'][0]['parameter_link']).content
                        with open('remote_reps', 'wb') as handler:
                            handler.write(genom_data)
                            handler.close()
                        # Load the downloaded genomes into a new population pop2.
                        with open('remote_reps', 'rb') as f:
                            remote_reps = pickle.load(f)

                    # Save the best reps.
                    reps_local = []
                    reps = [gen_best]
                    # For each species, add its representative to reps_local.
                    for sid, s in iteritems(pop.species.species):
                        # print("\ns=", s)
                        if s.representative not in reps_local:
                            reps_local.append(pop.population[s.representative.key])
                            reps_local[len(reps_local) - 1] = deepcopy(
                                pop.population[s.representative.key])
                    # TODO: keep the best reps, only replacing them with the closest ones.
                    if remote_reps is None:
                        for l in reps_local:
                            reps.append(l)
                            reps[len(reps) - 1] = deepcopy(l)
                    else:
                        # For each local rep l:
                        for l in reps_local:
                            # Find the genome closest to l among the remote reps.
                            for i in range(len(remote_reps)):
                                closer = None
                                min_dist = None
                                for g in reps_local:
                                    if g not in remote_reps:
                                        dist = g.distance(remote_reps[i], config.genome_config)
                                    else:
                                        dist = 100000000  # do not count already migrated remote_reps
                                    if closer is None or min_dist is None:
                                        closer = deepcopy(g)
                                        min_dist = dist
                                    if dist < min_dist:
                                        closer = deepcopy(g)
                                        min_dist = dist
                                # If closer is already in reps:
                                if closer in reps:
                                    # Add l to reps if it was not already there.
                                    if l not in reps:
                                        reps.append(l)
                                        reps[len(reps) - 1] = deepcopy(l)
                                else:
                                    # If l has a higher fitness than closer:
                                    if closer.fitness is not None and l.fitness is not None:
                                        if l.fitness > pop.population[closer.key].fitness:
                                            # Add l to reps if it was not already there.
                                            if l not in reps:
                                                reps.append(l)
                                                reps[len(reps) - 1] = deepcopy(l)
                                        else:
                                            # Add closer to reps if it was not already there.
                                            if l not in reps:
                                                reps.append(pop.population[closer.key])
                                                reps[len(reps) - 1] = deepcopy(
                                                    pop.population[closer.key])

                    # Save a checkpoint with the representatives of each species and copy it
                    # to a location where it can be served via syn.
                    # rep.save_checkpoint(config, pop, neat.DefaultSpeciesSet, rep.current_generation)
                    print("\nreps=", reps)
                    filename = '{0}{1}'.format("reps-", rep.current_generation)
                    with open(filename, 'wb') as f:
                        pickle.dump(reps, f)

                    # Make a CreateParam request to syn.
                    form_data = {
                        "process_hash": "ph",
                        "app_hash": "ah",
                        "parameter_link": my_url + "/genoms/" + filename,
                        "parameter_text": pop.best_genome.key,
                        "parameter_blob": "",
                        "validation_hash": "",
                        "hash": "h",
                        "performance": best_fitness,
                        "redir": "1",
                        "username": "******",
                        "pass_hash": "$2a$04$ntNHmofQoMoajG89mTEM2uSR66jKXBgRQJnCgqfNN38aq9UkN4Y6q"
                    }
                    # TODO: MAKE THE ADDRESS CONFIGURABLE
                    res = requests.post(
                        my_url + "/parameters?username=harveybc&pass_hash=$2a$04$ntNHmofQoMoajG89mTEM2uSR66jKXBgRQJnCgqfNN38aq9UkN4Y6q&process_hash=ph",
                        data=form_data)
                    res_json = res.json()
                # TODO END: SYNCHRONIZATION FUNCTION WITH SINGULARITY

            temp = temp + 1
            gen_best = pop.run(ec.evaluate_genomes, 5)

            # print(gen_best)

            visualize.plot_stats(stats, ylog=False, view=False, filename="fitness.svg")

            plt.plot(ec.episode_score, 'g-', label='score')
            plt.plot(ec.episode_length, 'b-', label='length')
            plt.grid()
            plt.legend(loc='best')
            plt.savefig("scores.svg")
            plt.close()

            mfs = sum(stats.get_fitness_mean()[-3:]) / 3.0
            print("Average mean fitness over last 3 generations: {0}".format(mfs))

            mfs = sum(stats.get_fitness_stat(min)[-3:]) / 3.0
            print("Average min fitness over last 3 generations: {0}".format(mfs))

            # Use the best genome to evaluate the training environment.
            best_genomes = stats.best_unique_genomes(3)
            solved = True
            best_scores = []
            observation = env.reset()
            score = 0.0
            step = 0
            gen_best_nn = neat.nn.FeedForwardNetwork.create(gen_best, config)
            while 1:
                step += 1
                output = gen_best_nn.activate(nn_format(observation))
                best_action = np.argmax(output)
                observation, reward, done, info = env.step(best_action)
                score += reward
                env.render()
                if done:
                    break
            ec.episode_score.append(score)
            ec.episode_length.append(step)
            best_scores.append(score)
            avg_score = sum(best_scores) / len(best_scores)
            print("Training Set Score =", score, " avg_score=", avg_score)

            # Calculate the validation set score.
            best_genomes = stats.best_unique_genomes(3)
            solved = True
            best_scores = []
            observation = env_v.reset()
            score = 0.0
            step = 0
            gen_best_nn = neat.nn.FeedForwardNetwork.create(gen_best, config)
            while 1:
                step += 1
                output = gen_best_nn.activate(nn_format(observation))
                best_action = np.argmax(output)
                observation, reward, done, info = env_v.step(best_action)
                score += reward
                env_v.render()
                if done:
                    break
            best_scores.append(score)
            avg_score = sum(best_scores) / len(best_scores)
            print("Validation Set Score =", score, " avg_score=", avg_score)

            if avg_score < 2000000000:
                solved = False

            if solved:
                print("Solved.")

                # Save the winners.
                for n, g in enumerate(best_genomes):
                    name = 'winner-{0}'.format(n)
                    with open(name + '.pickle', 'wb') as f:
                        pickle.dump(g, f)

                    visualize.draw_net(config, g, view=False,
                                       filename=name + "-net.gv")
                    visualize.draw_net(config, g, view=False,
                                       filename=name + "-net-enabled.gv",
                                       show_disabled=False)
                    visualize.draw_net(config, g, view=False,
                                       filename=name + "-net-enabled-pruned.gv",
                                       show_disabled=False, prune_unused=True)
                break
        except KeyboardInterrupt:
            print("User break.")
            break

    env.close()
def evaluate(file,
             error_rates,
             error_mode,
             n_games,
             n_jobs,
             verbose,
             file_suffix='',
             transfer_to_distance=None):
    time_id = datetime.now()

    # Load the corresponding config files
    savedir = file[:file.rfind("/")]
    if not os.path.exists("%s/config.json" % savedir):
        raise ValueError("Configuration file does not exist (%s)." %
                         ("%s/config.json" % savedir))
    with open("%s/config.json" % savedir) as f:
        config = json.load(f)
    config = check_config(config)

    # Load the genome to be evaluated
    if not os.path.exists(file):
        raise ValueError("Genome file does not exist.")
    with open(file, "rb") as f:
        genome = pickle.load(f)

    if not os.path.exists("%s/population-config" % savedir):
        raise ValueError("Population configuration file does not exist.")
    population_config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                                    neat.DefaultSpeciesSet, neat.DefaultStagnation,
                                    "%s/population-config" % savedir)

    if config["Training"]["network_type"] == 'ffnn':
        if transfer_to_distance is None:
            net = SimpleFeedForwardNetwork.create(genome, population_config)
            code_distance = config["Physics"]["distance"]
        elif transfer_to_distance > config["Physics"]["distance"]:
            generate_config_file(savedir, config, transfer_to_distance)
            pop_config_transferred = neat.Config(
                neat.DefaultGenome, neat.DefaultReproduction,
                neat.DefaultSpeciesSet, neat.DefaultStagnation,
                savedir + "/population-config-temp-d" + str(transfer_to_distance))
            new_genome = pop_config_transferred.genome_type(0)
            new_genome.configure_new(pop_config_transferred.genome_config)
            new_genome.connections = {}
            new_genome.nodes = {}
            transplantate(pop_config_transferred.genome_config, new_genome,
                          transfer_to_distance, genome,
                          config["Physics"]["distance"])
            net = SimpleFeedForwardNetwork.create(new_genome, pop_config_transferred)
            code_distance = transfer_to_distance

    elif config["Training"]["network_type"] == 'cppn':
        # HyperNEAT: possibility of evaluating a CPPN trained on d=3 data on d>3 data
        if transfer_to_distance is None:
            code_distance = config["Physics"]["distance"]
            connection_weight_scale = 1
        elif transfer_to_distance > config["Physics"]["distance"]:
            code_distance = transfer_to_distance
            # As there are more connections in larger codes, scale down the
            # connection weights by this factor.
            connection_weight_scale = config["Physics"]["distance"]**2 / transfer_to_distance**2
            # connection_weight_scale = 0.01
        else:
            raise ValueError(
                "Transfer of knowledge can only be done to higher distance codes.")

        if config["Training"]["substrate_type"] == 0:
            substrate = SubstrateType0(
                code_distance, config["Training"]["rotation_invariant_decoder"])
        elif config["Training"]["substrate_type"] == 1:
            substrate = SubstrateType1(code_distance)

        # print(code_distance, connection_weight_scale)
        cppn_network = FeedForwardNetwork.create(genome, population_config)
        net = PhenotypeNetwork.create(cppn_network, substrate, connection_weight_scale)

    # DIRTY: to ensure that samples are generated according to transfer_to_distance
    config["Physics"]["distance"] = code_distance

    ## (PARALLEL) EVALUATION LOOP
    fitness = []
    results = {
        "fitness": [],
        "error_rate": [],
        "outcome": [],
        "nsteps": [],
        "initial_qubits_flips": []
    }

    # with statement to close the parallel processes properly
    with Pool(n_jobs) as pool:
        # Game evaluation
        for error_rate in error_rates:
            fitness.append(0)
            jobs = []
            for i in range(n_games):
                jobs.append(
                    pool.apply_async(get_fitness,
                                     (net, config, error_rate, error_mode)))

            for job in jobs:
                output, errors_id = job.get(timeout=None)
                fitness[-1] += output["fitness"]
                for k, v in output.items():
                    results[k].append(v)
                results["initial_qubits_flips"].append(errors_id)

            fitness[-1] /= n_games
            print("Evaluation on error_rate=%.2f is done, %.2f success." %
                  (error_rate, fitness[-1]))

    elapsed = datetime.now() - time_id
    print("Total running time:", elapsed.seconds, ":", elapsed.microseconds)

    # Always overwrite the result of evaluation.
    # Synthesis report
    if transfer_to_distance is not None:
        file_suffix += ".transfered_distance%i" % transfer_to_distance
    savefile = "%s_evaluation.ngames=%i.errormode=%i.%s.csv" % (
        file.replace(".pkl", ""), n_games, error_mode, file_suffix)
    if os.path.exists(savefile):
        print("Deleting evaluation file %s" % savefile)
        os.remove(savefile)
    print([error_rates, fitness])
    df = pd.DataFrame(list(zip(error_rates, fitness)),
                      columns=["error_rate", "mean_fitness"])
    df.to_csv(savefile)

    # Detailed report
    savefile = "%s_detailed_results_evaluation.ngames=%i.%s.csv" % (
        file.replace(".pkl", ""), n_games, file_suffix)
    if os.path.exists(savefile):
        print("Deleting evaluation file %s" % savefile)
        os.remove(savefile)
    pd.DataFrame.from_dict(results).to_csv(savefile)

    return error_rates, fitness
# Per-genome evaluation body; the enclosing eval_genomes(genomes, config) loop shown here
# is implied by the p.run(eval_genomes, 200) call below, not part of the original snippet.
def eval_genomes(genomes, config):
    for genome_id, genome in genomes:
        net = neat.nn.FeedForwardNetwork.create(genome, config)
        genome.fitness = 0
        for _ in range(100):
            action = net.activate(observation)
            observation, reward, done, info = env.step(action)
            genome.fitness += reward
            if done:
                observation = env.reset()
                break


# Load configuration.
config = neat.Config(neat.DefaultGenome, neat.MctsReproductionWeightEvolution,
                     neat.DefaultSpeciesSet, neat.DefaultStagnation,
                     'config-feedforward')

# Create the population, which is the top-level object for a NEAT run.
p = neat.Population(config)

# Add a stdout reporter to show progress in the terminal.
p.add_reporter(neat.StdOutReporter(False))

# Run until a solution is found.
winner = p.run(eval_genomes, 200)

# Display the winning genome.
print('\nBest genome:\n{!s}'.format(winner))

# Show output of the most fit genome against training data.
def run():
    # Load the config file
    local_dir = os.path.dirname(__file__)
    if type == "rnn":
        config_path = os.path.join(local_dir, 'config-ctrnn')
    else:
        config_path = os.path.join(local_dir, 'config-feed')
    config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                         neat.DefaultSpeciesSet, neat.DefaultStagnation,
                         config_path)

    pop = neat.Population(config)
    stats = neat.StatisticsReporter()
    pop.add_reporter(stats)
    pop.add_reporter(neat.StdOutReporter(True))

    if 1:
        winner = pop.run(eval_genomes, num_generation)
    else:
        pe = neat.ParallelEvaluator(4, eval_genome)
        winner = pop.run(pe.evaluate, num_generation)

    # Save the winner.
    with open('results/winner-rnn', 'wb') as f:
        pickle.dump(winner, f)

    eval_genome(winner, config, True, fixed=0)
    print(stats)

    # plt.figure("NEAT (Population's average/std. dev and best fitness)")
    # plt.rcParams["figure.figsize"] = [6, 4]
    plt.rcParams.update({'font.size': 15})
    plt.figure("NEAT fitnesses")  # (Population's average/std. dev and best fitness)

    avg_fitnesses = {
        "total": [],
        "rotation": [],
        "energy": [],
        "time": [],
        "accuracy": []
    }
    for i in range(0, num_generation):
        avg_fitnesses["total"].append(
            np.mean(fitnesses["total"][i * pop_size:i * pop_size + pop_size - 1]))
        avg_fitnesses["time"].append(
            np.mean(fitnesses["time"][i * pop_size:i * pop_size + pop_size - 1]))
        avg_fitnesses["rotation"].append(
            np.mean(fitnesses["rotation"][i * pop_size:i * pop_size + pop_size - 1]))
        avg_fitnesses["energy"].append(
            np.mean(fitnesses["energy"][i * pop_size:i * pop_size + pop_size - 1]))
        avg_fitnesses["accuracy"].append(
            np.mean(fitnesses["accuracy"][i * pop_size:i * pop_size + pop_size - 1]))

    # print(avg_fitnesses["accuracy"])
    # print(avg_fitnesses)
    # plt.plot([i for i in range(0, len(avg_fitnesses["total"]))], avg_fitnesses["total"], 'b-', label="fitness")
    plt.plot([i for i in range(0, len(avg_fitnesses["energy"]))],
             avg_fitnesses["energy"],
             color="#000000",
             label="energy",
             linestyle='-')
    # plt.plot(generation, avg_fitness - stdev_fitness, 'g-.', label="-1 sd")
    # plt.plot(generation, avg_fitness + stdev_fitness, 'g-.', label="+1 sd")
    # plt.plot(generation, best_fitness, 'r-', label="best")
    # plt.title("Population's average/std. dev and best fitness")
    plt.xlabel("Generations")
    plt.ylabel("Fitness", labelpad=0)
    plt.grid()
    plt.legend(loc="best")
    plt.savefig("energy_" + type_optimization + ".png")
    view = True
    if view:
        plt.show()
    # plt.close()
    fig = None

    plt.plot([i for i in range(0, len(avg_fitnesses["accuracy"]))],
             avg_fitnesses["accuracy"],
             color="#808080",
             linestyle="-",
             label="accuracy",
             lw=0.9)
    plt.plot([i for i in range(0, len(avg_fitnesses["time"]))],
             avg_fitnesses["time"],
             color="#D3D3D3",
             linestyle="-",
             label="time",
             lw=0.9)
    plt.plot([i for i in range(0, len(avg_fitnesses["rotation"]))],
             avg_fitnesses["rotation"],
             color="#000000",
             linestyle="-",
             label="rotation",
             lw=0.9)
    plt.xlabel("Generations")
    plt.ylabel("Fitness")
    plt.grid()
    plt.legend(loc="best")
    plt.savefig("accuracy_time_rotation_" + type_optimization + ".png")
    view = True
    if view:
        plt.show()
    fig = None

    if 1:
        visualize.plot_stats(stats, ylog=True, view=False,
                             filename="results/rnn-fitness.svg")
        visualize.plot_species(stats, view=False,
                               filename="results/rnn-speciation.svg")

        node_names = {
            0: 'omega',
            1: 'theta',
            2: 'psi',
            3: 'fi',
            4: 'ro',
            5: 'epsilon',
            -3: 'x',
            -2: 'y',
            -1: 'z'
        }
        visualize.draw_net(config, winner, view=False,
                           node_names=node_names,
                           filename="results/winner-rnn.gv")
        visualize.draw_net(config, winner, view=False,
                           node_names=node_names,
                           filename="results/winner-rnn-enabled.gv",
                           show_disabled=False)
        visualize.draw_net(config, winner, view=False,
                           node_names=node_names,
                           filename="results/winner-rnn-enabled-pruned.gv",
                           show_disabled=False,
                           prune_unused=True)
xor_outputs = [(0.0, ), (1.0, ), (1.0, ), (0.0, )]


def eval_genomes(genomes, config):
    for genome_id, genome in genomes:
        genome.fitness = 4.0
        net = neat.nn.FeedForwardNetwork.create(genome, config)
        for xi, xo in zip(xor_inputs, xor_outputs):
            output = net.activate(xi)
            # Subtract the squared error between the output and the expected output.
            genome.fitness -= (output[0] - xo[0])**2


# Load configuration.
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                     neat.DefaultSpeciesSet, neat.DefaultStagnation,
                     './temp.conf')

# Create the population, which is the top-level object for a NEAT run.
p = neat.Population(config)

# Add a stdout reporter to show progress in the terminal.
p.add_reporter(neat.StdOutReporter(False))

# Run until a solution is found.
winner = p.run(eval_genomes, n=6)

# Display the winning genome.
print('\nBest genome:\n{!s}'.format(winner))

# Show output of the most fit genome against training data.
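# The snippet above ends at the "show output" comment. In the stock neat-python XOR
# example that comment is followed by code along these lines; this continuation is a
# sketch of the usual pattern, not part of the original source.
winner_net = neat.nn.FeedForwardNetwork.create(winner, config)
for xi, xo in zip(xor_inputs, xor_outputs):
    output = winner_net.activate(xi)
    print("input {!r}, expected output {!r}, got {!r}".format(xi, xo, output))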
def __init__(self, agent_type, name, ea_framework, idx):
    self.nets = []
    self.genomes = []
    self.genome_ids = []
    self.configs = []
    self.fitnesses = []
    self.prev_nets = []
    self.prev_genomes = []
    self.prev_genome_ids = []
    self.prev_configs = []
    self.prev_fitnesses = []
    self.best_nets = []
    self.ea_framework = ea_framework
    self.evolved = False
    self.human_left = 0.0
    self.human_right = 0.0
    self.idx = idx
    self.type = agent_type  # type = 0, type = 1

    # Both evolution events are controlled by the ea_framework.
    self.evolution_event = threading.Event()
    self.evolution_end_event = threading.Event()
    self.evolution_event.daemon = True
    self.evolution_end_event.daemon = True

    self.name = name
    self.neat = None
    self.orientation = None
    self.position = None

    local_dir = os.path.dirname(__file__)
    if agent_type == 0:
        config_path = os.path.join(local_dir, 'config_predator')
    else:
        config_path = os.path.join(local_dir, 'config_prey')
    config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                         neat.DefaultSpeciesSet, neat.DefaultStagnation,
                         config_path)

    if not LOAD_WEIGHT:
        p = neat.Population(config)
        self.neat = p
    else:
        p = neat.Checkpointer.restore_checkpoint(
            '/home/jaqq/catkin_ws/src/robobo/robobo_gazebo/scripts/data/' + DATA +
            '/checkpoints/' + self.name + '/neat-checkpoint-' + str(LOADED_GENERATION))
        # p = neat.Checkpointer.restore_checkpoint('/home/jaqq/Documents/robobo_bak/scripts_without_random/checkpoints/' + self.name + '/neat-checkpoint-' + str(LOADED_GENERATION))
        self.neat = p

        pkl_file = open(
            '/home/jaqq/catkin_ws/src/robobo/robobo_gazebo/scripts/data/' + DATA +
            '/generations/' + self.name + "/" + str(self.neat.generation - 1) + '.pkl', 'rb')
        # pkl_file = open('/home/jaqq/Documents/robobo_bak/scripts_without_random/generations/' + self.name + "/" + str(self.neat.generation - 1) + '.pkl', 'rb')
        data_dict = pickle.load(pkl_file)

        self.nets = data_dict['nets']
        self.genomes = data_dict['genomes']
        self.genome_ids = data_dict['genome_ids']
        self.configs = data_dict['configs']
        self.fitnesses = data_dict['fitnesses']
        self.best_nets = data_dict['best_nets']

    if not EVALUATION:
        self.neat.add_reporter(neat.StdOutReporter(True))
        stats = neat.StatisticsReporter()
        self.neat.add_reporter(stats)

        save_dir = "checkpoints/" + self.name
        save_prefix = save_dir + "/neat-checkpoint-"
        self.neat.add_reporter(neat.Checkpointer(1, filename_prefix=save_prefix))
        if not os.path.exists(save_dir):
            os.mkdir(save_dir)

        self.fitness_dir = "fitness/" + self.name
        self.fitness_path = self.fitness_dir + "/fitness.txt"
        if not os.path.exists(self.fitness_dir):
            os.mkdir(self.fitness_dir)
        if os.path.exists(self.fitness_path):
            os.remove(self.fitness_path)
# Fragment of the per-generation reporting code that writes the best genome's stats
# (the enclosing loop is outside this snippet).
            res.write('{0:3.3f},{1:3.3f},{2:3.3f},{3},{4:3.6f}\n'.format(
                best_every_generation[best_id][0],
                best_every_generation[best_id][1],
                best_every_generation[best_id][2],
                best_every_generation[best_id][3],
                best_every_generation[best_id][4]))
            res.close()

            # Delete the nets that are not in the current population
            # net_id = set(net_dict.keys())
            # net_to_del = net_id - genomes_id_set
            # for genome_id in net_to_del:
            #     del net_dict[genome_id]


# Load configuration.
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                     neat.DefaultSpeciesSet, neat.DefaultStagnation,
                     'config-mnist')

if torch.cuda.is_available():
    gpu = True
    print("Running on GPU!")
else:
    gpu = False
    print("Running on CPU!")

# Reset result files
res = open("result.csv", "w")
info = open("info.txt", "w")
best = open("best.txt", "w")
res.close()
info.close()
import visualize, neat, gym, sys

if len(sys.argv) < 2:
    print("Usage: {} path-to-dir".format(sys.argv[0]))
    exit()

dire = sys.argv[1]
print("Dir: {}".format(dire))
dire = './' + dire + '/'

config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                     neat.DefaultSpeciesSet, neat.DefaultStagnation,
                     dire + "fc.config")

winner = None


def get_winner(p):
    # Return the highest-fitness genome in the population (and its fitness).
    max_fitness = -9999999999
    best_genome = None
    for v in p.population:
        genome = p.population[v]
        if genome.fitness > max_fitness:
            max_fitness = genome.fitness
            best_genome = genome
    print(max_fitness)
    return [max_fitness, best_genome]


mf = -999999999
def eval_genomes(genomes, config):
    for genome_id, genome in genomes:
        net = neat.nn.FeedForwardNetwork.create(genome, config)
        state = env.reset()
        total_reward = 0
        done = False
        while not done:
            action = np.argmax(net.activate(state))
            state, reward, done, _ = env.step(action)
            total_reward += reward
        genome.fitness = total_reward


config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                     neat.DefaultSpeciesSet, neat.DefaultStagnation,
                     'GymNEAT')
p = neat.Population(config)
p.add_reporter(neat.StdOutReporter(False))
winner = p.run(eval_genomes, 1000)

net = neat.nn.FeedForwardNetwork.create(winner, config)
state = env.reset()
total_reward = 0
done = False
while not done:
    env.render()
    action = np.argmax(net.activate(state))
    state, reward, done, _ = env.step(action)
    total_reward += reward
print('Total Reward:', total_reward)
def main():
    # Create gym-retro environment for the game of choice: DRMBM
    env = retro.make(game='DrRobotniksMeanBeanMachine-Genesis')
    imgarray = []

    def eval_genomes(genomes, config):
        for genome_id, genome in genomes:
            # Set the observation variable
            ob = env.reset()
            # Create a random action (generic)
            ac = env.action_space.sample()
            # Get the x, y and colors of the input space (from the emulator)
            inx, iny, inc = env.observation_space.shape
            # Divide the input by 8
            inx, iny, inc = int(inx / 8), int(iny / 8), int(inc / 8)

            # Create the NEAT net
            net = neat.nn.recurrent.RecurrentNetwork.create(genome, config)

            current_max_fitness = 0
            fitness_current = 0
            frame = 0
            counter = 0
            xpos = 0
            xpos_max = 0
            done = False

            while not done:
                env.render()
                frame += 1
                ob = cv2.resize(ob, (inx, iny))
                ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)
                ob = np.reshape(ob, (inx, iny))
                # Cut the image that the NN sees in half
                ob = ob[0:ob.shape[0], 0:int(ob.shape[1] / 2)]
                imgarray = np.ndarray.flatten(ob)

                nnOutput = net.activate(imgarray)

                # Advance the emulator by 1 step
                ob, rew, done, info = env.step(nnOutput)

                fitness_current += rew
                if fitness_current > current_max_fitness:
                    current_max_fitness = fitness_current
                    counter = 0
                else:
                    counter += 1

                if done or counter == 250:
                    done = True
                    print(genome_id, fitness_current)

                genome.fitness = fitness_current

    # Config file necessary to create a NEAT run
    config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                         neat.DefaultSpeciesSet, neat.DefaultStagnation,
                         'config-feedforward.txt')

    # Create the starting population
    p = neat.Population(config)

    # Compute statistics for the run
    p.add_reporter(neat.StdOutReporter(True))
    stats = neat.StatisticsReporter()
    p.add_reporter(stats)
    p.add_reporter(neat.Checkpointer(10))

    winner = p.run(eval_genomes)

    with open('winner.pkl', 'wb') as output:
        pickle.dump(winner, output, 1)
def run(file_number):
    # Load the config files, which are assumed to live in
    # the same directory as this script.
    global clearinfo
    global stage_list
    global actorconfig_path
    global actorconfig

    stage_list = []
    stage_list.append(int(file_number))
    print(stage_list)

    local_dir = os.path.dirname(__file__)
    actorconfig_path = os.path.join(local_dir, 'config-feed-action')
    actorconfig = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                              neat.DefaultSpeciesSet, neat.DefaultStagnation,
                              actorconfig_path)
    config_path = os.path.join(local_dir, 'config-action-multi')
    config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                         neat.DefaultSpeciesSet, neat.DefaultStagnation,
                         config_path)

    with open('Network/noconnect', 'rb') as f:
        c = pickle.load(f)
        NoNets.append(c)

    for i in range(15):
        index = i + 1
        with open('Network/winner-enemyNo' + str(index), 'rb') as f:
            c = pickle.load(f)
            EnemyNets.append(c)
    print('Enemy Nets standby complete.')
    sleep(5)

    for i in range(15):
        index = i + 1
        with open('Network/winner-jumpNo' + str(index), 'rb') as f:
            c = pickle.load(f)
            JumpNets.append(c)
    print('Jump Nets standby complete.')

    pop = neat.Population(config)
    stats = neat.StatisticsReporter()
    pop.add_reporter(stats)
    pop.add_reporter(neat.StdOutReporter(True))

    winner = pop.run(eval_genomes, 200)

    if trainend == False:
        clearinfo.append((-1, round(nowBest, 3)))
        with open('Results/multisingles' + str(file_number) + '.txt', 'a') as f:
            f.write("(-1," + str(round(nowBest, 3)) + ")")
            f.write('\n')

    # Save the winner.
    # with open('Network/winner-multi-No' + file_number, 'wb') as f:
    #     pickle.dump(winner, f)
    # print(winner)

    print("Now Multi Stage with Single Stage Networks version Clear info is ")
    print(clearinfo)

    # visualize.plot_stats(stats, ylog=False, view=False, filename="feed-fitness-fs-sigmoid-50hidden-50.svg")
    # visualize.plot_species(stats, view=False, filename="feed-speciation-fs-sigmoid-50hidden-150.svg")

    node_names = {-1: '(-3,-3)', -2: '(-2,-3)', -3: '(-1,-3)', -4: '(0,-3)', -5: '(1,-3)', -6: '(2,-3)', -7: '(3,-3)',
                  -8: '(-3,-2)', -9: '(-2,-2)', -10: '(-1,-2)', -11: '(0,-2)', -12: '(1,-2)', -13: '(2,-2)', -14: '(3,-2)',
                  -15: '(-3,-1)', -16: '(-2,-1)', -17: '(-1,-1)', -18: '(0,-1)', -19: '(1,-1)', -20: '(2,-1)', -21: '(3,-1)',
                  -22: '(-3,0)', -23: '(-2,0)', -24: '(-1,0)', -25: '(0,0)', -26: '(1,0)', -27: '(2,0)', -28: '(3,0)',
                  -29: '(-3,1)', -30: '(-2,1)', -31: '(-1,1)', -32: '(0,1)', -33: '(1,1)', -34: '(2,1)', -35: '(3,1)',
                  -36: '(-3,2)', -37: '(-2,2)', -38: '(-1,2)', -39: '(0,2)', -40: '(1,2)', -41: '(2,2)', -42: '(3,2)',
                  -43: '(-3,3)', -44: '(-2,3)', -45: '(-1,3)', -46: '(0,3)', -47: '(1,3)', -48: '(2,3)', -49: '(3,3)',
                  -50: 'vx', -51: 'vy', -52: 'now_mode', -53: 'on_floor?', -54: 'now_dive?', -55: 'now_HP',
                  -56: 'can_rejump?', -57: 'can_attack?',
                  0: 'None', 1: '←', 2: '↑', 3: '→', 4: '↓', 5: '↑+←', 6: '↑+→', 7: '↓+→', 8: '↓+←', 9: 'Z', 10: '←+Z',
                  11: '↑+Z', 12: '→+Z', 13: '↓+Z', 14: '↑+←+Z', 15: '↑+→+Z', 16: '↓+→+Z', 17: '↓+←+Z', 18: 'C',
                  19: '←+C', 20: '→+C'}
def run(config_file, rep_type='Tournament', restore_checkpoint=False):
    last_best_pso_cost = 0

    # Load configuration.
    if rep_type == 'Tournament':
        rep_class = TournamentReproduction
    else:
        rep_class = neat.DefaultReproduction
    gen_class = neat.DefaultGenome
    pop_class = TimePopulation
    config = neat.Config(gen_class, rep_class,
                         neat.DefaultSpeciesSet, neat.DefaultStagnation,
                         config_file)

    # Create the population, which is the top-level object for a NEAT run.
    if restore_checkpoint:
        p = neat.Checkpointer.restore_checkpoint('neat-checkpoint-35')
    else:
        p = pop_class(config)
        p.allow_regeneration(False)

    name_run = "output.txt"

    # Add a stdout reporter to show progress in the terminal.
    p.add_reporter(FileReporter(True, name_run))
    p.add_reporter(neat.StdOutReporter(True))
    stats = neat.StatisticsReporter()
    p.add_reporter(stats)
    p.add_reporter(MyCheckpointer(checkpoint_interval=50))

    pe = neat.ParallelEvaluator(n_workers, evaluator.eval_genome)

    winner = None
    for i in range(n_max_gen):
        winner = p.run(pe.evaluate, step_neat_gen)
        if last_best_pso_cost < winner.fitness:
            with open('last_winner', 'wb') as f:
                pickle.dump(winner, f)

        best_genome_weight = []
        genome_key_set = []
        for key_id in winner.connections.keys():
            genome_key_set.append(key_id)
            best_genome_weight.append(winner.connections[key_id].weight)

        fitness = fitness_manager
        dimension = len(best_genome_weight)
        bounds = get_bounds(dimension)
        options = {'c1': 0.5, 'c2': 0.3, 'w': 0.9, 'k': 2, 'p': 2}
        optimizer = MyGlobalBestPSO(n_particles=step_pso_pop,
                                    dimensions=dimension,
                                    options=options,
                                    bounds=bounds)
        optimizer.set_reporter_name(name_run)
        optimizer.swarm.position[0] = best_genome_weight
        cost, pos = optimizer.optimize(fitness, iters=step_pso_gen)
        last_best_pso_cost = -cost

        if -cost > p.best_genome.fitness:
            p.population[p.best_genome.key] = insert_weights(
                pos, p.population[p.best_genome.key])

    # Save the winner
    with open('winner_genome', 'wb') as f:
        pickle.dump(winner, f)
gym.register(id='tiling-pattern11x11-block-sum-v0',
             entry_point='gym_multi_robot.envs:WeightedSumTilingPatternEnv',
             kwargs={'env_storage_path': 'tiles11x11_block.pickle',
                     'game_cls': DifferenceTilingPatternGame})

if __name__ == '__main__':
    """ Experiment where the fitness is obtained by summing the fitness at every timestep,
    multiplied by the timestep to give a higher importance to later grids.
    """
    env = gym.make('tiling-pattern11x11-block-sum-v0')
    runner = NEATSwarmExperimentRunner(env, num_steps)

    # Create learning configuration.
    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, config_name)
    config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                         neat.DefaultSpeciesSet, neat.DefaultStagnation,
                         config_path)
    config.fitness_threshold = 3000 * 100 * 3000

    # Create and run experiment.
    experiment = SingleExperiment(config, runner, num_generations, experiment_name)

    for i in range(num_runs):
        experiment.run(experiment_name + str(i))
def call_simulate(self, data):
    if data.data[0] == '-':
        return

    local_dir = os.path.dirname(__file__)
    config_file = os.path.join(local_dir, 'config-feedforward')
    config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                         neat.DefaultSpeciesSet, neat.DefaultStagnation,
                         config_file)

    gene_dir = os.getenv("WALKYTO_PATH")
    gene_id = self.string_decoder(data)
    if gene_id == -1 or gene_id == '-1':
        return
    # os.write(sys.__stderr__.fileno(), "%s\n" % gene_id)

    gene_f = open('%s/src/genes/%s' % (gene_dir, gene_id), 'rb')
    genome = pickle.load(gene_f)
    gene_f.close()

    net = neat.nn.FeedForwardNetwork.create(genome, config)  # nn.recurrent.RecurrentNetwork ************** param3

    # -------------------------------------------------------------------------------------------------
    now = rospy.Time.now()
    duration = rospy.Duration(180)
    then = now + duration
    # self.gazebo_init()
    pos_init = self.get_pose()
    # -------------------------------------------------------------------------------------------------
    dur = rospy.Duration(0.3)
    gap = rospy.Duration(0)
    MAX_TORQUE = 3.0

    while then > now:
        joint_efforts = net.activate(self.joint_states)
        for i in range(10):
            if joint_efforts[i] > MAX_TORQUE:
                joint_efforts[i] = MAX_TORQUE

        self.efforts_caller(joint_efforts, dur)  # 0.05 sec
        rospy.sleep(dur)
        # print "input:", self.joint_states
        # print "output:", joint_efforts

        now = rospy.Time.now()
        if gap == rospy.Duration(0):
            if then - now > duration + rospy.Duration(5):
                gap = then - now
        elif gap > rospy.Duration(0):
            now = now + gap - duration
        # print self.dup_num, gap.to_sec(), then.to_sec(), now.to_sec()
    # -------------------------------------------------------------------------------------------------

    pos_end = self.get_pose()
    dist = (pos_end.x - pos_init.x)
    self.fitness = dist
def get_config():
    local_dir = os.path.dirname(__file__)
    path = os.path.join(local_dir, config_path)
    return neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                       neat.DefaultSpeciesSet, neat.DefaultStagnation,
                       path)
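# Minimal usage sketch for get_config() above. The eval_genomes callback and the
# generation count are placeholders for illustration, not part of the source.
def eval_genomes(genomes, config):
    for genome_id, genome in genomes:
        genome.fitness = 0.0  # replace with a real evaluation


if __name__ == '__main__':
    population = neat.Population(get_config())
    population.add_reporter(neat.StdOutReporter(True))
    winner = population.run(eval_genomes, 50)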
def run(config_file):
    # Load configuration.
    config = neat.Config(
        neat.DefaultGenome,
        neat.DefaultReproduction,
        neat.DefaultSpeciesSet,
        neat.DefaultStagnation,
        config_file,
    )

    # Create the population, which is the top-level object for a NEAT run.
    p = neat.Population(config)

    # Add a stdout reporter to show progress in the terminal.
    p.add_reporter(neat.StdOutReporter(True))
    stats = neat.StatisticsReporter()
    p.add_reporter(stats)

    # Run for up to 2000 generations.
    pe = neat.ParallelEvaluator(multiprocessing.cpu_count(), eval_genome)
    winner = p.run(pe.evaluate, 2000)

    # Display the winning genome.
    print("\nBest genome:\n{!s}".format(winner))

    # Show output of the most fit genome against training data.
    print("\nOutput:")
    winner_net = neat.nn.FeedForwardNetwork.create(winner, config)
    for xi, xo in zip(xor_inputs, xor_outputs):
        output = winner_net.activate(xi)
        print("input {!r}, expected output {!r}, got {!r}".format(xi, xo, output))

    df = pd.read_csv('csvs/' + inputs + 'lynx_test_X.csv', sep=' ', header=None)
    new_test_X = df.values
    X_test_inputs = []
    for i in range(len(new_test_X)):
        X_test_inputs.append(tuple(new_test_X[i]))

    predictions_enn = []
    for xi in X_test_inputs:
        output = winner_net.activate(xi)
        predictions_enn.append(output)
    np.savetxt('csvs/' + inputs + 'predictions_enn.csv',
               np.array(predictions_enn), delimiter=',')

    real_y = pd.read_csv('csvs/' + inputs + 'lynx_test_Y.csv', sep=' ', header=None)
    mse = np.sum((np.array(real_y) - predictions_enn)**2) / len(predictions_enn)
    mae = np.average(np.abs(np.array(real_y) - predictions_enn))
    print("MSE:", mse)
    print("MAE:", mae)

    node_names = {
        -1: "t-7",
        -2: "t-6",
        -3: "t-5",
        -4: "t-4",
        -5: "t-3",
        -6: "t-2",
        -7: "t-1",
        0: "Target"
    }
    visualize.draw_net(config, winner, True, node_names=node_names, fmt="png")
    visualize.plot_stats(stats, ylog=False, view=False)
    visualize.plot_species(stats, view=False)