Code example #1
def evaluate_genome(g):
    net = nn.create_feed_forward_phenotype(g)

    fitnesses = []

    for runs in range(runs_per_net):
        sim = cart_pole.CartPole()

        # Run the given simulation for up to num_steps time steps.
        fitness = 0.0
        for s in range(num_steps):
            inputs = sim.get_scaled_state()
            action = net.serial_activate(inputs)

            # Apply action to the simulated cart-pole
            force = cart_pole.discrete_actuator_force(action)
            sim.step(force)

            # Stop if the network fails to keep the cart within the position or angle limits.
            # The per-run fitness is the number of time steps the network can balance the pole
            # without exceeding these limits.
            if abs(sim.x) >= sim.position_limit or abs(sim.theta) >= sim.angle_limit_radians:
                break

            fitness += 1.0

        fitnesses.append(fitness)

    # The genome's fitness is its worst performance across all runs.
    return min(fitnesses)
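Because evaluate_genome takes a single genome and returns one float, it has exactly the shape ParallelEvaluator expects in the later examples. A minimal sketch of wiring it into a run, assuming the same population and parallel modules; the config file name, worker count, and generation count are illustrative placeholders, not taken from the original:

# Hypothetical wiring of evaluate_genome into a parallel run; the config
# path, worker count, and generation count are placeholder values.
pe = parallel.ParallelEvaluator(evaluate_genome, 4)
pop = population.Population('cartpole_config')
pop.run(pe.evaluate, 200)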
Code example #2
def run():
    t0 = time.time()

    # Get the path to the config file, which is assumed to live in
    # the same directory as this script.
    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, 'xor2_config')

    # Use a pool of three workers to evaluate fitness in parallel.
    pe = parallel.ParallelEvaluator(fitness, 3)

    pop = population.Population(config_path)
    pop.run(pe.evaluate, 400)

    print("total evolution time {0:.3f} sec".format((time.time() - t0)))
    print("time per generation {0:.3f} sec".format(((time.time() - t0) / pop.generation)))

    print('Number of evaluations: {0:d}'.format(pop.total_evaluations))

    # Verify network output against training data.
    print('\nBest network output:')
    winner = pop.statistics.best_genome()
    net = nn.create_feed_forward_phenotype(winner)
    outputs = net.array_activate(xor_inputs)
    
    print("Expected XOR output : ", xor_outputs)
    print("Generated output : ", outputs)
    
    # Visualize the winner network and plot statistics.
    visualize.plot_stats(pop.statistics)
    visualize.plot_species(pop.statistics)
    visualize.draw_net(winner, view=True)
Code example #3
def run():
    t0 = time.time()

    # Get the path to the config file, which is assumed to live in
    # the same directory as this script.
    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, 'xor2_config')

    # Use a pool of three workers to evaluate fitness in parallel.
    pe = parallel.ParallelEvaluator(fitness, 3)

    pop = population.Population(config_path)
    pop.run(pe.evaluate, 400)

    print("total evolution time {0:.3f} sec".format((time.time() - t0)))
    print("time per generation {0:.3f} sec".format(
        ((time.time() - t0) / pop.generation)))

    print('Number of evaluations: {0:d}'.format(pop.total_evaluations))

    # Verify network output against training data.
    print('\nBest network output:')
    winner = pop.statistics.best_genome()
    net = nn.create_feed_forward_phenotype(winner)
    for i, inputs in enumerate(xor_inputs):
        output = net.serial_activate(inputs)  # serial activation
        print("{0:1.5f} \t {1:1.5f}".format(xor_outputs[i], output[0]))

    # Visualize the winner network and plot statistics.
    visualize.plot_stats(pop.statistics)
    visualize.plot_species(pop.statistics)
    visualize.draw_net(winner, view=True)
Code example #4
File: xor2.py  Project: machinebrains/neat-society
def eval_fitness(genomes):
    for g in genomes:
        net = nn.create_feed_forward_phenotype(g)

        sum_square_error = 0.0
        for inputs, expected in zip(xor_inputs, xor_outputs):
            # Serial activation propagates the inputs through the entire network.
            output = net.serial_activate(inputs)
            sum_square_error += (output[0] - expected) ** 2

        # When the output matches expected for all inputs, fitness will reach
        # its maximum value of 1.0.
        g.fitness = 1 - sum_square_error
Code example #5
def eval_fitness(genomes):
    for g in genomes:
        net = nn.create_feed_forward_phenotype(g)

        sum_square_error = 0.0
        for inputs, expected in zip(xor_inputs, xor_outputs):
            # Serial activation propagates the inputs through the entire network.
            output = net.serial_activate(inputs)
            sum_square_error += (output[0] - expected)**2

        # When the output matches expected for all inputs, fitness will reach
        # its maximum value of 1.0.
        g.fitness = 1 - sum_square_error
Code example #6
def eval_mono_image(genome, width, height):
    net = nn.create_feed_forward_phenotype(genome)
    image = []
    for r in range(height):
        y = -2.0 + 4.0 * r / (height - 1)
        row = []
        for c in range(width):
            x = -2.0 + 4.0 * c / (width - 1)
            output = net.serial_activate([x, y])
            gray = 255 if output[0] > 0.0 else 0
            row.append(gray)
        image.append(row)

    return image
Code example #7
def eval_gray_image(genome, width, height):
    net = nn.create_feed_forward_phenotype(genome)
    image = []
    for r in range(height):
        y = -1.0 + 2.0 * r / (height - 1)
        row = []
        for c in range(width):
            x = -1.0 + 2.0 * c / (width - 1)
            output = net.serial_activate([x, y])
            gray = int(round((output[0] + 1.0) * 255 / 2.0))
            gray = max(0, min(255, gray))
            row.append(gray)
        image.append(row)

    return image
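The nested list returned here is plain Python data. To actually look at the result, one option (not part of the original example) is to convert it with NumPy and write it out through Pillow; both libraries are assumptions here:

# Hypothetical helper for inspecting the output of eval_gray_image.
import numpy as np
from PIL import Image

def save_gray_image(image, path):
    arr = np.array(image, dtype=np.uint8)  # height x width gray values in 0-255
    Image.fromarray(arr, mode='L').save(path)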
Code example #8
def train_model(features, num_generations):
    timestamp = time.strftime("%Y%m%d-%H%M%S")
    print("########################## Time Stamp ==== " + timestamp)
    t0 = time.time()

    print("## Train a NEAT model")
    timestr = time.strftime("%Y%m%d-%H%M%S")

    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, 'bnp_config')

    # Use a pool of three workers to evaluate fitness in parallel.
    pe = parallel.ParallelEvaluator(fitness, 3, progress_bar=True, verbose=1)

    pop = population.Population(config_path)
    pop.run(pe.evaluate, num_generations)

    print("total evolution time {0:.3f} sec".format((time.time() - t0)))
    print("time per generation {0:.3f} sec".format(
        ((time.time() - t0) / pop.generation)))
    print('Number of evaluations: {0:d}'.format(pop.total_evaluations))

    # Verify network output against training data.
    print("## Test against verification data.")
    winner = pop.statistics.best_genome()

    net = nn.create_feed_forward_phenotype(winner)
    p_train = net.array_activate(X_train[features].values)
    p_valid = net.array_activate(X_valid[features].values)

    score_train = sklearn.metrics.log_loss(y_train, p_train[:, 0])
    score_valid = sklearn.metrics.log_loss(y_valid, p_valid[:, 0])
    print("Score based on training data set = ", score_train)
    print("Score based on validating data set = ", score_valid)

    # Visualize the winner network and plot statistics.
    visualize.plot_stats(pop.statistics)
    visualize.plot_species(pop.statistics)
    visualize.draw_net(winner, view=True)

    print("## Predicting test data")
    preds = net.array_activate(test[features].values)
    test[test_col_name] = preds
    test[[id_col_name,
          test_col_name]].to_csv("../predictions/pred_" + timestr + ".csv",
                                 index=False)
Code example #9
def fitness(genome):
    """
    This function will be run in parallel by ParallelEvaluator.  It takes one
    argument (a single genome) and should return one float (that genome's fitness).

    Note that this function needs to be in module scope for multiprocessing.Pool
    (which is what ParallelEvaluator uses) to find it.  Because of this, make
    sure you check for __main__ before executing any code (as we do here in the
    last two lines in the file), otherwise you'll have made a fork bomb
    instead of a neuroevolution demo. :)
    """
    net = nn.create_feed_forward_phenotype(genome)

    error = 0.0
    outputs = net.array_activate(xor_inputs)
    sum_square_errors = (xor_outputs - outputs) ** 2
    error_sum = np.sum(sum_square_errors)
    return 1.0 - np.sqrt(error_sum / xor_sample_size)
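The docstring above points at a __main__ guard in the last two lines of the file, which this excerpt does not show. A minimal sketch of what such a guard typically looks like, assuming a run() entry point like the one in the earlier examples:

# Hypothetical guard: keeps the worker processes spawned by
# multiprocessing.Pool from re-running the evolution when they
# import this module.
if __name__ == '__main__':
    run()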
Code example #10
def fitness(genome):
    """
    This function will be run in parallel by ParallelEvaluator.  It takes one
    argument (a single genome) and should return one float (that genome's fitness).

    Note that this function needs to be in module scope for multiprocessing.Pool
    (which is what ParallelEvaluator uses) to find it.  Because of this, make
    sure you check for __main__ before executing any code (as we do here in the
    last two lines in the file), otherwise you'll have made a fork bomb
    instead of a neuroevolution demo. :)
    """
    net = nn.create_feed_forward_phenotype(genome)

    error = 0.0
    outputs = net.array_activate(xor_inputs)
    sum_square_errors = (xor_outputs - outputs)**2
    error_sum = np.sum(sum_square_errors)
    return 1.0 - np.sqrt(error_sum / xor_sample_size)
Code example #11
def train_model(features, num_generations):
    timestamp = time.strftime("%Y%m%d-%H%M%S")
    print("########################## Time Stamp ==== " + timestamp)
    t0 = time.time()
    
    print("## Train a NEAT model")
    timestr = time.strftime("%Y%m%d-%H%M%S")
    
    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, 'bnp_config')
    
    # Use a pool of three workers to evaluate fitness in parallel.
    pe = parallel.ParallelEvaluator(fitness, 3, progress_bar=True, verbose=1)

    pop = population.Population(config_path)
    pop.run(pe.evaluate, num_generations)
    
    print("total evolution time {0:.3f} sec".format((time.time() - t0)))
    print("time per generation {0:.3f} sec".format(((time.time() - t0) / pop.generation)))
    print('Number of evaluations: {0:d}'.format(pop.total_evaluations))
    
    # Verify network output against training data.
    print("## Test against verification data.")
    winner = pop.statistics.best_genome()
    
    net = nn.create_feed_forward_phenotype(winner)
    p_train = net.array_activate(X_train[features].values)
    p_valid = net.array_activate(X_valid[features].values)
    
    score_train = sklearn.metrics.log_loss(y_train, p_train[:, 0])
    score_valid = sklearn.metrics.log_loss(y_valid, p_valid[:, 0])
    print("Score based on training data set = ", score_train)
    print("Score based on validating data set = ", score_valid)

    
    # Visualize the winner network and plot statistics.
    visualize.plot_stats(pop.statistics)
    visualize.plot_species(pop.statistics)
    visualize.draw_net(winner, view=True)
    
    print("## Predicting test data")
    preds = net.array_activate(test[features].values)
    test[test_col_name] = preds
    test[[id_col_name, test_col_name]].to_csv("../predictions/pred_" + timestr + ".csv", index=False)
Code example #12
def eval_color_image(genome, width, height):
    net = nn.create_feed_forward_phenotype(genome)
    image = []
    for r in range(height):
        y = -1.0 + 2.0 * r / (height - 1)
        row = []
        for c in range(width):
            x = -1.0 + 2.0 * c / (width - 1)
            output = net.serial_activate([x, y])
            red = int(round((output[0] + 1.0) * 255 / 2.0))
            green = int(round((output[1] + 1.0) * 255 / 2.0))
            blue = int(round((output[2] + 1.0) * 255 / 2.0))
            red = max(0, min(255, red))
            green = max(0, min(255, green))
            blue = max(0, min(255, blue))
            row.append((red, green, blue))
        image.append(row)

    return image
Code example #13
def fitness(genome):
    """
    This function will be run in parallel by ParallelEvaluator.  It takes one
    argument (a single genome) and should return one float (that genome's fitness).

    Note that this function needs to be in module scope for multiprocessing.Pool
    (which is what ParallelEvaluator uses) to find it.  Because of this, make
    sure you check for __main__ before executing any code (as we do here in the
    last two lines in the file), otherwise you'll have made a fork bomb
    instead of a neuroevolution demo. :)
    """
    net = nn.create_feed_forward_phenotype(genome)

    sum_square_error = 0.0
    for inputData, outputData in zip(xor_inputs, xor_outputs):
        # serial activation
        output = net.serial_activate(inputData)
        sum_square_error += (output[0] - outputData)**2

    return 1 - math.sqrt(sum_square_error / len(xor_outputs))
Code example #14
def fitness(genome):
    """
    This function will be run in parallel by ParallelEvaluator.  It takes one
    argument (a single genome) and should return one float (that genome's fitness).

    Note that this function needs to be in module scope for multiprocessing.Pool
    (which is what ParallelEvaluator uses) to find it.  Because of this, make
    sure you check for __main__ before executing any code (as we do here in the
    last two lines in the file), otherwise you'll have made a fork bomb
    instead of a neuroevolution demo. :)
    """
    net = nn.create_feed_forward_phenotype(genome)

    sum_square_error = 0.0
    for inputData, outputData in zip(xor_inputs, xor_outputs):
        # serial activation
        output = net.serial_activate(inputData)
        sum_square_error += (output[0] - outputData) ** 2

    return 1 - math.sqrt(sum_square_error / len(xor_outputs))
Code example #15
        # its maximum value of 1.0.
        g.fitness = 1 - sum_square_error


pop = population.Population('xor2_config')
pop.run(eval_fitness, 300)

print('Number of evaluations: {0}'.format(pop.total_evaluations))

# Display the most fit genome.
winner = pop.statistics.best_genome()
print('\nBest genome:\n{!s}'.format(winner))

# Verify network output against training data.
print('\nOutput:')
winner_net = nn.create_feed_forward_phenotype(winner)
for inputs, expected in zip(xor_inputs, xor_outputs):
    output = winner_net.serial_activate(inputs)
    print("expected {0:1.5f} got {1:1.5f}".format(expected, output[0]))

# Visualize the winner network and plot/log statistics.
visualize.plot_stats(pop.statistics)
visualize.plot_species(pop.statistics)
visualize.draw_net(winner, view=True, filename="xor2-all.gv")
visualize.draw_net(winner,
                   view=True,
                   filename="xor2-enabled.gv",
                   show_disabled=False)
visualize.draw_net(winner,
                   view=True,
                   filename="xor2-enabled-pruned.gv",
                   show_disabled=False,
                   prune_unused=True)
Code example #16
import pickle

from cart_pole import CartPole, discrete_actuator_force
from movie import make_movie

from neatsociety import nn

# load the winner
with open('nn_winner_genome', 'rb') as f:
    c = pickle.load(f)

print('Loaded genome:')
print(c)

net = nn.create_feed_forward_phenotype(c)
sim = CartPole()

print()
print("Initial conditions:")
print("        x = {0:.4f}".format(sim.x))
print("    x_dot = {0:.4f}".format(sim.dx))
print("    theta = {0:.4f}".format(sim.theta))
print("theta_dot = {0:.4f}".format(sim.dtheta))
print()

# Run the given simulation for up to 100k time steps.
num_balanced = 0
for s in range(10 ** 5):
    inputs = sim.get_scaled_state()
    action = net.serial_activate(inputs)

    # Apply action to the simulated cart-pole (same pattern as the
    # evaluation loop in code example #1).
    force = discrete_actuator_force(action)
    sim.step(force)

    # Stop if the network fails to keep the cart within the position or angle limits.
    if abs(sim.x) >= sim.position_limit or abs(sim.theta) >= sim.angle_limit_radians:
        break

    num_balanced += 1
Code example #17
def fitness(genome):
    net = nn.create_feed_forward_phenotype(genome)
    output = net.array_activate(X_train[features].values)
    logloss_error = sklearn.metrics.log_loss(y_train, output[:, 0])
    return 1.0 - logloss_error
Code example #18
File: xor2.py  Project: machinebrains/neat-society
        # its maximum value of 1.0.
        g.fitness = 1 - sum_square_error


pop = population.Population('xor2_config')
pop.run(eval_fitness, 300)

print('Number of evaluations: {0}'.format(pop.total_evaluations))

# Display the most fit genome.
winner = pop.statistics.best_genome()
print('\nBest genome:\n{!s}'.format(winner))

# Verify network output against training data.
print('\nOutput:')
winner_net = nn.create_feed_forward_phenotype(winner)
for inputs, expected in zip(xor_inputs, xor_outputs):
    output = winner_net.serial_activate(inputs)
    print("expected {0:1.5f} got {1:1.5f}".format(expected, output[0]))

# Visualize the winner network and plot/log statistics.
visualize.plot_stats(pop.statistics)
visualize.plot_species(pop.statistics)
visualize.draw_net(winner, view=True, filename="xor2-all.gv")
visualize.draw_net(winner, view=True, filename="xor2-enabled.gv", show_disabled=False)
visualize.draw_net(winner, view=True, filename="xor2-enabled-pruned.gv", show_disabled=False, prune_unused=True)
statistics.save_stats(pop.statistics)
statistics.save_species_count(pop.statistics)
statistics.save_species_fitness(pop.statistics)

Code example #19
def fitness(genome):
    net = nn.create_feed_forward_phenotype(genome)
    output = net.array_activate(X_train[features].values)
    logloss_error = sklearn.metrics.log_loss(y_train, output[:, 0])
    return 1.0 - logloss_error