Code example #1
def run():
    t0 = time.time()

    # Get the path to the config file, which is assumed to live in
    # the same directory as this script.
    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, 'xor2_config')

    # Use a pool of three workers to evaluate fitness in parallel.
    pe = parallel.ParallelEvaluator(fitness, 3)

    pop = population.Population(config_path)
    pop.run(pe.evaluate, 400)

    print("total evolution time {0:.3f} sec".format((time.time() - t0)))
    print("time per generation {0:.3f} sec".format(
        ((time.time() - t0) / pop.generation)))

    print('Number of evaluations: {0:d}'.format(pop.total_evaluations))

    # Verify network output against training data.
    print('\nBest network output:')
    winner = pop.statistics.best_genome()
    net = nn.create_feed_forward_phenotype(winner)
    for i, inputs in enumerate(xor_inputs):
        output = net.serial_activate(inputs)  # feed-forward pass for one input pattern
        print("{0:1.5f} \t {1:1.5f}".format(xor_outputs[i], output[0]))

    # Visualize the winner network and plot statistics.
    visualize.plot_stats(pop.statistics)
    visualize.plot_species(pop.statistics)
    visualize.draw_net(winner, view=True)
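
The `fitness` function handed to the ParallelEvaluator is not part of this snippet. A minimal sketch of what such a per-genome evaluator might look like with this old neat-python API, reusing the same `xor_inputs`/`xor_outputs` globals and scoring a genome as one minus its summed squared error (an assumption, not the original author's code):

def fitness(genome):
    # Build a feed-forward network from the genome and score it against
    # the XOR truth table; higher fitness means lower total error.
    net = nn.create_feed_forward_phenotype(genome)
    error = 0.0
    for inputs, expected in zip(xor_inputs, xor_outputs):
        output = net.serial_activate(inputs)
        error += (output[0] - expected) ** 2
    return 1.0 - error
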
Code example #2
def run():
    local_dir = os.path.dirname(__file__)
    pop = population.Population(os.path.join(local_dir, 'nn_config'))
    pe = parallel.ParallelEvaluator(eval_fitness)
    pop.run(pe.evaluate, 1000)

    print('Number of evaluations: {0}'.format(pop.total_evaluations))

    # Display the most fit genome.
    print('\nBest genome:')
    winner = pop.statistics.best_genome()
    print(winner)

    # Verify network output against a few randomly-generated sequences.
    winner_net = nn.create_recurrent_phenotype(winner)
    for n in range(4):
        print('\nRun {0} output:'.format(n))
        seq = [random.choice((0, 1)) for _ in range(N)]
        winner_net.reset()
        for s in seq:
            winner_net.activate([s, 0])

        for s in seq:
            output = winner_net.activate([0, 1])
            print("expected {0:1.5f} got {1:1.5f}".format(s, output[0]))

    # Visualize the winner network and plot/log statistics.
    visualize.draw_net(winner, view=True, filename="nn_winner.gv")
    visualize.draw_net(winner, view=True, filename="nn_winner-enabled.gv", show_disabled=False)
    visualize.draw_net(winner, view=True, filename="nn_winner-enabled-pruned.gv", show_disabled=False, prune_unused=True)
    visualize.plot_stats(pop.statistics)
    visualize.plot_species(pop.statistics)
    statistics.save_stats(pop.statistics)
    statistics.save_species_count(pop.statistics)
    statistics.save_species_fitness(pop.statistics)
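
The `eval_fitness` function passed to the ParallelEvaluator is likewise not shown. A rough sketch of how an evaluator for this sequence-recall task could look, assuming the same `N` and the same input convention as above (data on the first input, a recall flag on the second); `num_trials` is a hypothetical constant:

def eval_fitness(genome):
    net = nn.create_recurrent_phenotype(genome)
    error = 0.0
    for _ in range(num_trials):
        seq = [random.choice((0, 1)) for _ in range(N)]
        net.reset()
        # Present the sequence on the data input; the recall flag stays 0.
        for s in seq:
            net.activate([s, 0])
        # Ask the network to replay the sequence with the recall flag set.
        for s in seq:
            output = net.activate([0, 1])
            error += (output[0] - s) ** 2
    # NEAT maximizes fitness, so return the negated error.
    return -error
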
Code example #3
def test_minimal():
    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, 'test_configuration')

    pop = Population(config_path)
    pe = parallel.ParallelEvaluator(eval_fitness, 4)
    pop.run(pe.evaluate, 400)
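
For a smoke test like this, the evaluator only needs to return some number; a placeholder such as the following (hypothetical, not from the original test module) is enough to exercise the parallel machinery:

def eval_fitness(genome):
    # Constant fitness: the test only checks that evolution runs,
    # not that it learns anything.
    return 1.0
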
Code example #4
def train_model(features, num_generations):
    timestamp = time.strftime("%Y%m%d-%H%M%S")
    print("########################## Time Stamp ==== " + timestamp)
    t0 = time.time()

    print("## Train a NEAT model")
    timestr = time.strftime("%Y%m%d-%H%M%S")

    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, 'bnp_config')

    # Use a pool of three workers to evaluate fitness in parallel.
    pe = parallel.ParallelEvaluator(fitness, 3, progress_bar=True, verbose=1)

    pop = population.Population(config_path)
    pop.run(pe.evaluate, num_generations)

    print("total evolution time {0:.3f} sec".format((time.time() - t0)))
    print("time per generation {0:.3f} sec".format(
        ((time.time() - t0) / pop.generation)))
    print('Number of evaluations: {0:d}'.format(pop.total_evaluations))

    # Verify network output against training data.
    print("## Test against verification data.")
    winner = pop.statistics.best_genome()

    net = nn.create_feed_forward_phenotype(winner)
    p_train = net.array_activate(X_train[features].values)
    p_valid = net.array_activate(X_valid[features].values)

    score_train = sklearn.metrics.log_loss(y_train, p_train[:, 0])
    score_valid = sklearn.metrics.log_loss(y_valid, p_valid[:, 0])
    print("Score based on training data set = ", score_train)
    print("Score based on validating data set = ", score_valid)

    # Visualize the winner network and plot statistics.
    visualize.plot_stats(pop.statistics)
    visualize.plot_species(pop.statistics)
    visualize.draw_net(winner, view=True)

    print("## Predicting test data")
    preds = net.array_activate(test[features].values)
    test[test_col_name] = preds
    test[[id_col_name,
          test_col_name]].to_csv("../predictions/pred_" + timestr + ".csv",
                                 index=False)
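
The `fitness` function given to the ParallelEvaluator is not shown here either. Given the rest of the script, a plausible sketch would score each genome by negative log loss on the training split (so that higher fitness means lower loss); `X_train`, `y_train`, and `features` are the same globals the snippet already relies on, and the function itself is an assumption:

def fitness(genome):
    net = nn.create_feed_forward_phenotype(genome)
    p = net.array_activate(X_train[features].values)
    # Negative log loss: NEAT maximizes fitness, so lower loss is better.
    return -sklearn.metrics.log_loss(y_train, p[:, 0])
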
Code example #5
            fitness += 1.0

        fitnesses.append(fitness)

    # The genome's fitness is its worst performance across all runs.
    return min(fitnesses)


# Load the config file, which is assumed to live in
# the same directory as this script.
local_dir = os.path.dirname(__file__)
config = Config(os.path.join(local_dir, 'ctrnn_config'))
config.node_gene_type = ctrnn.CTNodeGene

pop = population.Population(config)
pe = parallel.ParallelEvaluator(evaluate_genome)
pop.run(pe.evaluate, 2000)

# Save the winner.
print('Number of evaluations: {0:d}'.format(pop.total_evaluations))
winner = pop.statistics.best_genome()
with open('ctrnn_winner_genome', 'wb') as f:
    pickle.dump(winner, f)

print(winner)

# Plot the evolution of the best/average fitness.
visualize.plot_stats(pop.statistics, ylog=True, filename="ctrnn_fitness.svg")
# Visualize speciation.
visualize.plot_species(pop.statistics, filename="ctrnn_speciation.svg")
# Visualize the best network.
visualize.draw_net(winner, view=True)
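
Because the champion genome is pickled above, it can be reloaded in a later session without re-running evolution; a minimal usage sketch:

with open('ctrnn_winner_genome', 'rb') as f:
    winner = pickle.load(f)
print(winner)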