Example #1
from typing import List, Tuple
import copy

# Module, Configuration, workers, nsga_ii, weighted_overfit_score and
# generation_finished are assumed to be imported from the surrounding
# code base; they are not defined in this snippet.


def try_finish(population: List[Module],
               config: Configuration,
               moo_ops) -> Tuple[List[Module], bool]:
    print(f"--> Possible final solution discovered. Checking...")

    # Changing settings of training steps:
    original_training_settings = copy.deepcopy(config.training)
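    # A deep copy is needed: the nested training-settings object is mutated
    # in place below and must be restored verbatim afterwards.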
    config.training.use_restart = False
    config.training.fixed_epochs = True
    config.training.epochs = 100

    # Finding the best networks:
    best = population[:config.compute_capacity(maximum=False)]

    # Performing training step:
    best = workers.start(best, config)

    # Reset settings and return:
    config.training = original_training_settings

    best.sort(key=weighted_overfit_score(config), reverse=True)
    if any(ind.test_acc() >= config.training.acceptable_scores for ind in best):
        generation_finished(best, config, "--> Found final solution:")
        config.results.store_generation(best, config.generation + 1)
        return best, True
    else:
        # A final solution was not found... Keep the best individuals:
        population = best + population
        population = nsga_ii(
            population,
            moo_ops.classification_objectives(config),
            moo_ops.classification_domination_operator(
                moo_ops.classification_objectives(config)
            ),
            config
        )
        # NSGA-II sorts ascending, so the strongest individuals sit at the
        # end of the list; drop the overflow from the front:
        overflow = len(population) - config.population_size
        population, removed = population[overflow:], population[:overflow]
        generation_finished(population, config, "--> Leaderboards after final solution try failed:")
        generation_finished(removed, config, "--> Removed after final solution try failed:")
        return population, False
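
Note that try_finish returns a (population, solved) tuple rather than the plain
list its original annotation suggested. A minimal, purely illustrative sketch of
consuming that return value, assuming population, config and the moo module
from Example #2 below:

# Illustrative only: how the tuple return value might be consumed.
population, solved = try_finish(population, config, moo)
if solved:
    print("Final solution accepted; population holds the trained networks.")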
Example #2
import copy

# Configuration, moo, selection, mutator, crossover, recombine, workers,
# evaluation, nsga_ii, weighted_overfit_score, generation_finished and
# initialize_population are assumed imports from the surrounding code base.


def main(config: Configuration):
    # 0.1 How many nets can be trained for each generation?
    solved = False
    compute_capacity = config.compute_capacity()

    # 0.2 Initializing multi objective optimisation sorting:
    moo_objectives = moo.classification_objectives(config)
    domination_operator = moo.classification_domination_operator(
        moo.classification_objectives(config))

    patterns, nets = initialize_population(config, compute_capacity)

    i = 0
    while not solved:

        # 3. Evolve for <x> generations:
        for generation in range(config.generations * i,
                                config.generations * (i + 1)):
            config.generation = generation

            # 3.1 Select some patterns for mutation (tournament selection):
            selected = selection.tournament(patterns,
                                            size=len(patterns) // 2)

            # 3.2 Perform Mutations + Crossover on selected patterns
            mutations, crossovers = selection.divide(selected)
            patterns = (patterns +
                        mutator.apply(mutations) +
                        crossover.apply(crossovers))

            # 3.3 Evaluate new patterns. Fitness calculation
            nets = recombine.combine(patterns,
                                     compute_capacity,
                                     config.min_size,
                                     config.max_size,
                                     include_optimal=True)
            nets = workers.start(nets, config)
            patterns = evaluation.inherit_results(patterns, nets)

            # 3.4 Rank all patterns using multi-objective optimisation:
            #     diversity in position, 2D vs 1D, scores, etc.
            patterns = nsga_ii(patterns, moo_objectives, domination_operator,
                               config)

            # 3.5 Survival of the fittest (elitism). NSGA-II sorts ascending,
            #     so the best patterns sit at the end of the list:
            patterns = patterns[-config.population_size:]

            # 3.6 Feedback:
            print(f"--> Generation {generation} Leaderboards")
            generation_finished(patterns, config, "    - Patterns:")
            generation_finished(nets, config, "    - Neural networks:")
            config.results.store_generation(patterns, generation)
            config.results.store_generation(nets, generation)

        # To finish up, the best combination of patterns needs to be trained
        # for much longer than during fitness evaluation; the previous steps
        # only verify that the combination of layers is promising.
        #
        # This may take several attempts. Once a good result is obtained, the
        # algorithm stops and returns the final structure with trained
        # weights.

        print("Testing best combination of patterns")

        # Changing settings of training steps:
        original_training_settings = copy.deepcopy(config.training)
        config.training.use_restart = False
        config.training.fixed_epochs = True
        config.training.epochs = 300

        # Finding the best combined network (the sort is descending, so the
        # network with the lowest weighted overfit score ends up last):
        config.type = "ea-nas"
        nets.sort(key=weighted_overfit_score(config), reverse=True)
        config.type = "PatternNets"
        best_net = nets[-1]

        # Performing training step:
        best_net = workers.start([best_net], config)[0]

        # Reset settings and return:
        config.training = original_training_settings

        if best_net.test_acc() >= config.training.acceptable_scores:
            print("Found good network! ")
            solved = True

        config.type = "ea-nas"
        generation_finished([best_net], config, "--> Found final solution:")
        config.type = "PatternNets"
        patterns = evaluation.inherit_results(patterns, nets)
        i += 1
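
Neither example shows how a Configuration is constructed. A hypothetical entry
point, for illustration only (default construction is an assumption; the real
project may load settings from a file or command line instead):

if __name__ == "__main__":
    # Assumed: Configuration() yields usable default settings.
    main(Configuration())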