Example #1
def run():
    local_dir = os.path.dirname(__file__)
    pop = population.Population(os.path.join(local_dir, 'nn_config'))
    pe = parallel.ParallelEvaluator(eval_fitness)
    pop.run(pe.evaluate, 1000)

    print('Number of evaluations: {0}'.format(pop.total_evaluations))

    # Display the most fit genome.
    print('\nBest genome:')
    winner = pop.statistics.best_genome()
    print(winner)

    # Verify network output against a few randomly-generated sequences.
    winner_net = nn.create_recurrent_phenotype(winner)
    for n in range(4):
        print('\nRun {0} output:'.format(n))
        seq = [random.choice((0, 1)) for _ in range(N)]
        winner_net.reset()
        # Feed the sequence with the second ("recall") input held at zero.
        for s in seq:
            winner_net.activate([s, 0])

        # Then raise the recall input to one and read the network's
        # reproduction of the sequence, one element per activation.
        for s in seq:
            output = winner_net.activate([0, 1])
            print("expected {0:1.5f} got {1:1.5f}".format(s, output[0]))

    # Visualize the winner network and plot/log statistics.
    visualize.draw_net(winner, view=True, filename="nn_winner.gv")
    visualize.draw_net(winner, view=True, filename="nn_winner-enabled.gv", show_disabled=False)
    visualize.draw_net(winner, view=True, filename="nn_winner-enabled-pruned.gv", show_disabled=False, prune_unused=True)
    visualize.plot_stats(pop.statistics)
    visualize.plot_species(pop.statistics)
    statistics.save_stats(pop.statistics)
    statistics.save_species_count(pop.statistics)
    statistics.save_species_fitness(pop.statistics)
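The run() excerpt above relies on module-level names defined elsewhere in the script: the sequence length N, the eval_fitness callback (see Example #2), and the old neat-python 0.8-style modules. A minimal sketch of that assumed setup follows; the value of N is an assumption, and visualize may be part of the package or a helper module shipped next to the examples depending on the library version.

# Sketch of the assumed module-level setup for Example #1 (not from the original source).
import os
import random

from neat import nn, parallel, population, statistics, visualize  # visualize moved out of the package in later releases

N = 4  # length of each random 0/1 sequence; the actual value is an assumption


if __name__ == '__main__':
    run()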
Example #2
def eval_fitness(g):
    net = nn.create_recurrent_phenotype(g)

    error = 0.0
    for _ in range(num_tests):
        # Create a random sequence, and feed it to the network with the
        # second input set to zero.
        seq = [random.choice((0, 1)) for _ in range(N)]
        net.reset()
        for s in seq:
            inputs = [s, 0]
            net.activate(inputs)

        # Set the second input to one, and get the network output.
        for s in seq:
            inputs = [0, 1]
            output = net.activate(inputs)

            error += (output[0] - s) ** 2

    # Fitness is the negative root-mean-square error over every recalled value,
    # so perfect recall scores 0 and worse recall grows more negative.
    return -(error / (N * num_tests)) ** 0.5
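In this older neat-python API, pop.run() hands the whole genome list to its fitness callback; parallel.ParallelEvaluator (Example #1) plays that role around the single-genome eval_fitness above, and N and num_tests are assumed module-level constants. A serial equivalent is a thin wrapper, sketched here:

# Serial alternative to parallel.ParallelEvaluator (sketch).
def eval_genomes_serial(genomes):
    for g in genomes:
        g.fitness = eval_fitness(g)

# pop.run(eval_genomes_serial, 1000)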
Example #3
def eval_fitness(genomes,
                 fitness_function=None,
                 evaluate_function=None,
                 cleaner=None):

    print('\nStarting evaluation...\n\n')

    tot = len(genomes)

    # evaluate the genotypes one by one
    for i, g in enumerate(genomes):

        print('evaluating', i + 1, '/', tot, '\n')
        net = nn.create_recurrent_phenotype(g)

        # run the simulation to evaluate the model
        results = evaluate_function(net)

        # Optional debug output (disabled): distance, distance from leader,
        # average distance from leader, race position, laps, duration, damage,
        # penalty, damage per meter, average speed.

        fitness = fitness_function(results)

        print('\tFITNESS =', fitness, '\n')

        g.fitness = fitness

    print('\n... finished evaluation\n\n')

    if cleaner is not None:
        # at the end of the generation, clean the files we don't need anymore
        cleaner()
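Because pop.run() passes only the genome list, the extra keyword arguments have to be bound beforehand. Example #5 does this with a lambda; functools.partial is an equivalent alternative, sketched below (my_fitness, my_evaluate and my_cleaner are illustrative placeholders, not names from the original project):

from functools import partial

# Sketch: bind the keyword arguments so the callable matches pop.run()'s expectation.
evaluator = partial(eval_fitness,
                    fitness_function=my_fitness,
                    evaluate_function=my_evaluate,
                    cleaner=my_cleaner)
# pop.run(evaluator, 1)  # one generation per call, as in Example #5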
Example #4
def eval_fitness(genomes, fitness_function=None, evaluate_function=None,
                 cleaner=None, timelimit=None):
    
    print('\nStarting evaluation...\n\n')
    
    tot = len(genomes)
    
    # evaluate the genotypes one by one
    for i, g in enumerate(genomes):

        print('evaluating', i + 1, '/', tot, '\n')
        net = nn.create_recurrent_phenotype(g)

        # run the simulation to evaluate the model
        values = evaluate_function(net)
        
        if values is None:
            # The simulation returned no telemetry: assign a strongly negative fitness.
            fitness = -100
        else:
            last_result = []
            later_time = 0

            if timelimit is not None:
                # Keep the latest sample recorded at or before the time limit
                # (each sample's first element, val[0], is the elapsed time).
                for val in values:
                    if later_time < val[0] <= timelimit:
                        last_result = val
                        later_time = val[0]
                    elif val[0] > timelimit:
                        break

                if last_result[0] < timelimit:
                    # The run ended before the time limit: scale the average
                    # speed (index 6) down as if the car stood still afterwards.
                    last_result[6] *= last_result[0] / timelimit
                    last_result[0] = timelimit
            else:
                last_result = values[-1]

            duration, distance, laps, distance_from_start, damage, penalty, avg_speed = last_result[:7]

            if timelimit is not None:
                avg_speed *= duration / timelimit
                duration = timelimit
            
            if fitness_function is None:
                #fitness = distance - 0.08*damage - 200*penalty
                #fitness = avg_speed * duration - 0.08 * damage - 200 * penalty
                fitness = avg_speed * duration - 0.2 * damage - 300 * penalty
                if laps >= 2:
                    fitness += 50.0 * avg_speed  # distance / (duration + 1)
            else:
                fitness = fitness_function(*last_result[:7])
            
            #fitness = distance - 1000.0 * damage/ (math.fabs(distance) if distance != 0.0 else 1.0) - 100 * penalty
            print('\tDistance = ', distance)
            print('\tEstimated Distance = ', avg_speed*duration)
            print('\tDamage = ', damage)
            print('\tPenalty = ', penalty)
            print('\tAvgSpeed = ', avg_speed)
            
        
        print('\tFITNESS =', fitness, '\n')
        
        g.fitness = fitness

    print('\n... finished evaluation\n\n')
    
    if cleaner is not None:
        # at the end of the generation, clean the files we don't need anymore
        cleaner()
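When a fitness_function is supplied, it is called with the seven telemetry values in the order unpacked above: duration, distance, laps, distance_from_start, damage, penalty, avg_speed. A purely illustrative custom scorer with made-up weights:

# Illustrative scorer for Example #4's fitness_function hook (weights are placeholders).
def distance_minus_damage(duration, distance, laps, distance_from_start,
                          damage, penalty, avg_speed):
    # Reward distance covered, penalize damage and rule penalties.
    return distance - 0.1 * damage - 100.0 * penalty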
Example #5
def run(output_dir, neat_config=None, generations=20, port=3001, frequency=None,
        unstuck=False, evaluation=None, checkpoint=None, configuration=None,
        timelimit=None):

    if output_dir is None:
        print('Error! No output dir has been set')
        return
    
    if neat_config is None:
        neat_config = os.path.join(output_dir, 'nn_config')

    if evaluation is None:
        fitness_function = get_fitness_function(os.path.join(output_dir, 'fitness.py'))
    else:
        fitness_function = get_fitness_function(evaluation)

    results_path, models_path, debug_path, checkpoints_path, EVAL_FUNCTION = \
        simulation.initialize_experiments(output_dir, configuration=configuration,
                                          unstuck=unstuck, port=port)
    
    best_model_file = os.path.join(output_dir, 'best.pickle')
    
    if frequency is None:
        frequency = generations
    
    pop = population.Population(neat_config)
    
    if checkpoint is not None:
        print('Loading from ', checkpoint)
        pop.load_checkpoint(checkpoint)
    
    for gen in range(1, generations + 1):

        # Advance the population one generation at a time so that models,
        # checkpoints and plots can be saved every `frequency` generations.
        pop.run(lambda individuals: eval_fitness(individuals,
                                                 fitness_function=fitness_function,
                                                 evaluate_function=lambda net: EVAL_FUNCTION(net, pop.generation > 13),
                                                 cleaner=lambda: simulation.clean_temp_files(results_path, models_path),
                                                 timelimit=timelimit),
                1)

        if gen % frequency == 0:
            print('Saving best net in {}'.format(best_model_file))
            best_genome = get_best_genome(pop)
            with open(best_model_file, 'wb') as f:
                pickle.dump(nn.create_recurrent_phenotype(best_genome), f)
            
            new_checkpoint = os.path.join(checkpoints_path, 'neat_gen_{}.checkpoint'.format(pop.generation))
            print('Storing to ', new_checkpoint)
            pop.save_checkpoint(new_checkpoint)
            
            print('Plotting statistics')
            visualize.plot_stats(pop.statistics, filename=os.path.join(output_dir, 'avg_fitness.svg'))
            visualize.plot_species(pop.statistics, filename=os.path.join(output_dir, 'speciation.svg'))
            
            print('Save network view')
            visualize.draw_net(best_genome, view=False,
                               filename=os.path.join(output_dir, "nn_winner-enabled-pruned.gv"),
                               show_disabled=False, prune_unused=True)

            visualize.draw_net(best_genome, view=False, filename=os.path.join(output_dir, "nn_winner.gv"))
            visualize.draw_net(best_genome, view=False, filename=os.path.join(output_dir, "nn_winner-enabled.gv"),
                               show_disabled=False)

    print('Number of evaluations: {0}'.format(pop.total_evaluations))

    print('Saving best net in {}'.format(best_model_file))
    with open(best_model_file, 'wb') as f:
        pickle.dump(nn.create_recurrent_phenotype(get_best_genome(pop)), f)
    
    # Display the most fit genome.
    # print('\nBest genome:')
    winner = pop.statistics.best_genome()
    # print(winner)

    # Visualize the winner network and plot/log statistics.
    visualize.draw_net(winner, view=True, filename=os.path.join(output_dir, "nn_winner.gv"))
    visualize.draw_net(winner, view=True, filename=os.path.join(output_dir, "nn_winner-enabled.gv"), show_disabled=False)
    visualize.draw_net(winner, view=True, filename=os.path.join(output_dir, "nn_winner-enabled-pruned.gv"), show_disabled=False, prune_unused=True)
    visualize.plot_stats(pop.statistics, filename=os.path.join(output_dir, 'avg_fitness.svg'))
    visualize.plot_species(pop.statistics, filename=os.path.join(output_dir, 'speciation.svg'))
    statistics.save_stats(pop.statistics, filename=os.path.join(output_dir, 'fitness_history.csv'))
    statistics.save_species_count(pop.statistics, filename=os.path.join(output_dir, 'speciation.csv'))
    statistics.save_species_fitness(pop.statistics, filename=os.path.join(output_dir, 'species_fitness.csv'))
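A hypothetical command-line entry point for run(); the argparse flag names below are assumptions for illustration, not taken from the original project:

if __name__ == '__main__':
    import argparse

    # Illustrative CLI wrapper around run(); flag names are placeholders.
    parser = argparse.ArgumentParser(description='Run NEAT training for the driving task')
    parser.add_argument('output_dir')
    parser.add_argument('--generations', type=int, default=20)
    parser.add_argument('--port', type=int, default=3001)
    parser.add_argument('--checkpoint', default=None)
    parser.add_argument('--timelimit', type=float, default=None)
    args = parser.parse_args()

    run(args.output_dir,
        generations=args.generations,
        port=args.port,
        checkpoint=args.checkpoint,
        timelimit=args.timelimit)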