Example #1
def stat_basing_on_pop(pop, record_valid_only, optimal_in_theory=None):
    """
    Return statistics based on the population.
    :param pop: population to evaluate
    :param optimal_in_theory: theoretical optimal front, or None if unknown
    :param record_valid_only: if True, compute the frontier from valid individuals only
    :return:
        * hyper_volume
        * spread
        * IGD
        * frontier_size
        * valid_rate
    """
    if len(pop) == 0:
        return 0, 1, 1, 0, 0

    # filter() returns an iterator in Python 3; materialize it so len() works below
    vpop = list(filter(lambda p: p.fitness.correct, pop))

    if record_valid_only and len(vpop) == 0:
        return 0, 1, 1, 0, 0

    front = _get_frontier(vpop) if record_valid_only else _get_frontier(pop)

    front_objs = [f.fitness.values for f in front]
    # assumes objectives are normalized to [0, 1], so [1, ..., 1] is the worst corner
    reference_point = [1] * len(front_objs[0])
    hv = HyperVolume(reference_point).compute(front_objs)  # custom implementation, not DEAP's built-in

    # sort by the second objective so the extremes of the front come first/last
    sorted_front = sorted(front, key=lambda f: f.fitness.values[1], reverse=True)
    first, last = sorted_front[0], sorted_front[-1]
    spread = diversity(front, first, last)
    if optimal_in_theory is None:  # theoretical optimum unknown; flag IGD
        IGD = -1
    else:
        IGD = convergence(front, optimal_in_theory)
    frontier_size = len(front)
    valid_rate = len(vpop) / len(pop)

    return round(hv, 3), round(spread, 3), round(IGD, 3), frontier_size, valid_rate
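
For comparison, DEAP ships equivalent metric helpers in deap.benchmarks.tools. Below is a minimal sketch using them instead of the custom HyperVolume class above; front (a list of evaluated DEAP individuals, sorted by the first objective) and optimal_front (a list of objective-value points) are assumed, as is the [0, 1] normalization of objectives:

from deap.benchmarks.tools import convergence, diversity, hypervolume

def quick_stats(front, optimal_front):
    # hypervolume() reads fitness values from the individuals themselves;
    # [1, ..., 1] is the worst corner under the normalization assumption
    n_obj = len(front[0].fitness.values)
    return {
        "hyper_volume": hypervolume(front, [1.0] * n_obj),
        "spread": diversity(front, optimal_front[0], optimal_front[-1]),
        "IGD": convergence(front, optimal_front),
    }
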
Example #2
        pop = toolbox.select(pop + offspring, MU)
        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(invalid_ind), **record)
        print(logbook.stream)

    return pop, logbook

if __name__ == "__main__":
    with open("pareto_front/zdt1_front.json") as optimal_front_data:
        optimal_front = json.load(optimal_front_data)
    # Use 500 of the 1000 points in the json file
    optimal_front = sorted(optimal_front[i] for i in range(0, len(optimal_front), 2))

    pop, stats = main()
    pop.sort(key=lambda x: x.fitness.values)

    print(stats)
    print("Convergence: ", convergence(pop, optimal_front))
    print("Diversity: ", diversity(pop, optimal_front[0], optimal_front[-1]))

    import matplotlib.pyplot as plt
    import numpy

    front = numpy.array([ind.fitness.values for ind in pop])
    optimal_front = numpy.array(optimal_front)
    plt.scatter(optimal_front[:,0], optimal_front[:,1], c="r")
    plt.scatter(front[:,0], front[:,1], c="b")
    plt.axis("tight")
    plt.show()
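
The snippet above starts mid-function: its first lines close a generational loop. For context, here is a sketch of the loop they belong to, modeled on DEAP's examples/ga/nsga2.py (the toolbox registrations and the MU, NGEN, CXPB constants are assumed to match that example):

import random
from deap import tools

def main(seed=None):
    random.seed(seed)
    pop = toolbox.population(n=MU)

    # Evaluate the initial population
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    for ind, fit in zip(invalid_ind, toolbox.map(toolbox.evaluate, invalid_ind)):
        ind.fitness.values = fit
    pop = toolbox.select(pop, len(pop))  # no selection yet, just crowding distance

    for gen in range(1, NGEN):
        # Vary the population
        offspring = tools.selTournamentDCD(pop, len(pop))
        offspring = [toolbox.clone(ind) for ind in offspring]
        for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
            if random.random() <= CXPB:
                toolbox.mate(ind1, ind2)
            toolbox.mutate(ind1)
            toolbox.mutate(ind2)
            del ind1.fitness.values, ind2.fitness.values

        # Re-evaluate only the individuals whose fitness was invalidated
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        for ind, fit in zip(invalid_ind, toolbox.map(toolbox.evaluate, invalid_ind)):
            ind.fitness.values = fit
        # ...followed by the select/record/print lines shown above
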

Example #3
# Notebook-style session; assumes json, numpy as np, matplotlib.pyplot as plt,
# and main/convergence/diversity are already imported from the NSGA-II example
popfinal.sort(key=lambda x: x.fitness.values)
popfinal  # echo the sorted final population

# C:\Users\langzx\Desktop\deapexample\deap-master\examples\ga\pareto_front

from pathlib import Path
data_folder = Path(
    "C:/Users/langzx/Desktop/deapexample/deap-master/examples/ga/pareto_front")
with open(data_folder / "zdt1_front.json") as optimal_front_data:
    optimal_front = json.load(optimal_front_data)
optimal_front
optimal_front = sorted(optimal_front[i]
                       for i in range(0, len(optimal_front), 2))
pop, stats = main()
pop.sort(key=lambda x: x.fitness.values)
convergence(pop, optimal_front)
print(stats)
print("Convergence: ", convergence(pop, optimal_front))
print("Diversity: ", diversity(pop, optimal_front[0], optimal_front[-1]))
front = np.array([ind.fitness.values for ind in pop])
front
optimal_front = np.array(optimal_front)
plt.scatter(optimal_front[:, 0], optimal_front[:, 1], c="r")
plt.scatter(front[:, 0], front[:, 1], c="b")
plt.axis("tight")
plt.show()
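
Since data_folder is already a pathlib.Path, the front can also be loaded in one expression, which opens and closes the file automatically:

# Equivalent one-liner: Path.read_text() handles opening and closing the file
optimal_front = json.loads((data_folder / "zdt1_front.json").read_text())
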
Example #4
def runNSGA2(seed, function, nReps, NEval, NPop, refPoint):

    print('Running NSGA-II\n')

    with open(''.join(['../dev/pareto_front/zdt', function[3],
                       '_front.json'])) as optimal_front_data:
        optimal_front = json.load(optimal_front_data)
    # Use 500 of the 1000 points in the json file
    # optimal_front = sorted(optimal_front[i] for i in range(0, len(optimal_front), 2))

    hvValues = []
    conv = []
    diver = []
    fronts = []

    NGEN = NEval // NPop  # integer division: number of generations

    random.seed(seed)

    for nexec in range(nReps):

        print('Starting execution %d ...' % (nexec + 1))

        start = time.time()

        pop, stats, hv = main(function, NGEN, NPop, refPoint)
        hvValues.append(hv)
        #pop.sort(key=lambda x: x.fitness.values)

        # print(stats)
        conv.append(convergence(pop, optimal_front))
        #diver.append(diversity(pop, optimal_front[0], optimal_front[-1]))
        print('Convergence metric = ', conv[nexec])
        #print("Diversity: ", diver[nexec])

        front = numpy.array([ind.fitness.values for ind in pop])
        fronts.append(front.tolist())
        #optimal_front = numpy.array(optimal_front)
        #plt.scatter(optimal_front[:,0], optimal_front[:,1], c="r")
        #plt.scatter(front[:,0], front[:,1], c="b")
        #plt.axis("tight")

        end = time.time()
        print('Execution %d completed in %f seconds\n' % (nexec + 1, end - start))

##        if(nReps == 1):
##            plt.show()
##        else:
##            plt.savefig(''.join(['results/figures/NSGA2_',function,'_exec',str(nexec),'.png']), bbox_inches='tight')

    finalHV = [x[-1] for x in hvValues]
    print('Average hypervolume =', sum(finalHV) / nReps)
    print('Best hypervolume =', max(finalHV))

    with open(''.join(['../dev/files/Pop_', function, '_NSGA2.json']),
              'w') as outfile:
        json.dump(fronts, outfile)

    with open(''.join(['../dev/files/HV_', function, '_NSGA2.json']),
              'w') as outfile:
        json.dump(hvValues, outfile)

    with open(''.join(['../dev/files/conv_', function, '_NSGA2.json']),
              'w') as outfile:
        json.dump(conv, outfile)

##    with open(''.join(['../dev/files/diver_',function,'_NSGA2.json']),'w') as outfile:
##        json.dump(diver,outfile)

    print('\nNSGA-II finished all experiments\n')
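
The three json.dump calls above make each batch of executions easy to post-process. A small sketch of reading the results back; the file-name pattern follows the writes above, and the 'zdt1' value for function is hypothetical:

import json

function = 'zdt1'  # hypothetical; must match the value passed to runNSGA2
with open('../dev/files/HV_' + function + '_NSGA2.json') as f:
    hvValues = json.load(f)   # one per-generation hypervolume trace per execution
with open('../dev/files/conv_' + function + '_NSGA2.json') as f:
    conv = json.load(f)       # one convergence value per execution
print('executions:', len(hvValues))
print('mean final hypervolume:', sum(x[-1] for x in hvValues) / len(hvValues))
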
    #print "best cover front      : ", ga.Hypervolume(best_pareto_cover, refpoint)
    print "average hypervolume: ", sum_hypervolume /NTEST
    #print "average best cover front       : ", sumHYPER_bestcover/NTEST

    print "\n"
    aux=ga.Cover2sets(optimalpareto, best_pareto_hypervolume)
    print "optimal coverage: ", aux

    print "\n"
    #rebuild Fronts
    optimal=[]
    best_pareto_hypervolume.sort(key=lambda x: x.fitness.values)
    #optimalpareto.sort(key=lambda x: x.fitness.values)
    for i in optimalpareto:
        optimal.append([i.fitness.values[0], i.fitness.values[1]])
    print("Convergence: ", convergence(best_pareto_hypervolume, optimal))
    print("Diversity: ", diversity(best_pareto_hypervolume, optimal[0], optimal[-1]))
    #converge and divers

    #print "best hypervolume front: ", aux[1]
    #print "best cover front      : ", aux[2]
    # print "average best hypervolume front : ", sumCOVER_besthypervolume/NTEST
    # print "average best cover front       : ", sumCOVER_bestcover/NTEST
    print "\n*************\n\n\n"
    print(lst_hyper)

    #save solution to file
    #my_world.WriteFileSolution(optimalpareto, 0) #0=Pareto; 1=Best Hypercube; 2=Best Cover
    #solution=my_world.GetFileSolution(0) #0=Pareto; 1=Best Hypercube; 2=Best Cover

    #my_world.WriteFileSolution(best_pareto_hypervolume, 1) #0=Pareto; 1=Best Hypercube; 2=Best Cover
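
ga.Cover2sets is project-specific, but the name and usage suggest Zitzler's two-set coverage C(A, B): the fraction of front B weakly dominated by some member of A. A minimal sketch under that assumption, for a minimization problem over objective-value tuples:

# Hypothetical sketch of two-set coverage C(A, B); this is an assumption about
# what ga.Cover2sets computes, not its actual implementation
def coverage(a_front, b_front):
    def weakly_dominates(p, q):
        # p weakly dominates q if p is no worse than q in every objective
        return all(pi <= qi for pi, qi in zip(p, q))
    covered = sum(1 for q in b_front
                  if any(weakly_dominates(p, q) for p in a_front))
    return covered / len(b_front)
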
Example #6
    def test010_(self):
        print("**** TEST {} ****".format(whoami()))
        NDIM = 30
        BOUND_LOW, BOUND_UP = 0.0, 1.0
        BOUND_LOW_STR, BOUND_UP_STR = '0.0', '1.0' 
        RES_STR = '0.01'
        NGEN = 250
        POPSIZE = 40
        MU = 100
        CXPB = 0.9
        
        # Create variables
        var_names = [str(num) for num in range(NDIM)]
        myLogger.setLevel("CRITICAL")
        basis_set = [Variable.from_range(name, BOUND_LOW_STR, RES_STR, BOUND_UP_STR) for name in var_names]
        myLogger.setLevel("DEBUG")
        
        # Create DSpace
        thisDspace = DesignSpace(basis_set)
        
        # Create OSpace
        objective_names = ('obj1','obj3')
        objective_goals = ('Max', 'Min')
        this_obj_space = ObjectiveSpace(objective_names, objective_goals)
        mapping = Mapping(thisDspace, this_obj_space)
                
        # Statistics and logging
        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("avg", np.mean, axis=0)
        stats.register("std", np.std, axis=0)
        stats.register("min", np.min, axis=0)
        stats.register("max", np.max, axis=0)        
        logbook = tools.Logbook()
        logbook.header = "gen", "evals", "std", "min", "avg", "max"
        
        creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
        
        toolbox = base.Toolbox()
        
        #--- Eval
        toolbox.register("evaluate", benchmarks.mj_zdt1_decimal)
        
        #--- Operators
        toolbox.register("mate", tools.cxSimulatedBinaryBounded, 
                         low=BOUND_LOW, up=BOUND_UP, eta=20.0)
        toolbox.register("mutate", tools.mutPolynomialBounded, low=BOUND_LOW, up=BOUND_UP, 
                         eta=20.0, indpb=1.0/NDIM)
        toolbox.register("select", tools.selNSGA2)
        
        # Create the population
        mapping.assign_individual(Individual2)
        mapping.assign_fitness(creator.FitnessMin)
        pop = mapping.get_random_population(POPSIZE)
        
        # Evaluate first pop. The results of map() are not collected; the custom
        # Mapping machinery is assumed to assign fitness in place, so the lazy
        # Python 3 map must be forced to run.
        invalid_ind = [ind for ind in pop if not ind.fitness.valid]
        list(toolbox.map(toolbox.evaluate, invalid_ind))
        logging.debug("Evaluated {} individuals".format(len(invalid_ind)))
        
        # Check that they are evaluated
        invalid_ind = [ind for ind in pop if not ind.fitness.valid]
        assert not invalid_ind
        
        pop = toolbox.select(pop, len(pop))
        logging.debug("Crowding distance applied to initial population of {}".format(len(pop)))
        
        myLogger.setLevel("CRITICAL")
        for gen in range(1, NGEN):
            # Vary the population
            offspring = tools.selTournamentDCD(pop, len(pop))
            offspring = [toolbox.clone(ind) for ind in offspring]
            logging.debug("Selected and cloned {} offspring".format(len(offspring)))
            
            pairs = list(zip(offspring[::2], offspring[1::2]))  # zip is lazy; len() below needs a list
            for ind1, ind2 in pairs:
                if random.random() <= CXPB:
                    toolbox.mate(ind1, ind2)
                    
                toolbox.mutate(ind1)
                toolbox.mutate(ind2)
                del ind1.fitness.values, ind2.fitness.values
            logging.debug("Operated over {} pairs".format(len(pairs)))

            # Evaluate the individuals with an invalid fitness
            invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
            processed_ind = list(toolbox.map(toolbox.evaluate, invalid_ind))  # force the lazy map
            logging.debug("Evaluated {} individuals".format(len(processed_ind)))
            
        
            # Select the next generation population
            pop = toolbox.select(pop + offspring, MU)
            record = stats.compile(pop)
            logbook.record(gen=gen, evals=len(invalid_ind), **record)
            print(logbook.stream)
        
        ###
        with open(r"C:\Users\jon\git\deap1\examples\ga\pareto_front\zdt1_front.json") as optimal_front_data:
            optimal_front = json.load(optimal_front_data)
        # Use 500 of the 1000 points in the json file
        optimal_front = sorted(optimal_front[i] for i in range(0, len(optimal_front), 2))
                
        pop.sort(key=lambda x: x.fitness.values)
        print(stats)
        print("Convergence: ", convergence(pop, optimal_front))
        print("Diversity: ", diversity(pop, optimal_front[0], optimal_front[-1]))

        
        front = np.array([ind.fitness.values for ind in pop])
        optimal_front = np.array(optimal_front)
        plt.scatter(optimal_front[:,0], optimal_front[:,1], c="r")
        print(front)
        plt.scatter(front[:,0], front[:,1], c="b")
        plt.axis("tight")
        #plt.savefig('C:\ExportDir\test1.png')
        plt.savefig('C:\\ExportDir\\out.pdf', transparent=True, bbox_inches='tight', pad_inches=0)
        #plt.show()
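
Note that this test never collects the results of toolbox.map, presumably because the custom Mapping/Individual2 machinery assigns fitness as a side effect of evaluation. The standard DEAP idiom (used in Example #7 below) collects the fitnesses and assigns them explicitly:

# Standard DEAP evaluation idiom, shown for contrast with the side-effect style above
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
    ind.fitness.values = fit
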
        
        
Example #7
def main(config, verbose):
    # Don't bother with determinism since tournament is stochastic!

    # Set last time to start time (start_time is assumed to be a module-level global)
    last_time = start_time

    # MP
    processes = multiprocessing.cpu_count() // 2
    pool = multiprocessing.Pool(processes=processes)

    # Build network
    network = build_network_partial(config)

    # Build environments and randomize
    envs = [build_environment(config) for _ in config["env"]["h0"]]
    for env in envs:
        randomize_env(env, config)

    # Objectives
    # Time to land, final height, final velocity, spikes per second
    valid_objectives = [
        "time to land",
        "time to land scaled",
        "final height",
        "final velocity",
        "final velocity squared",
        "spikes",
    ]
    assert (
        len(config["evo"]["objectives"]) >= 3
    ), "At least 3 objectives are required"
    assert len(config["evo"]["objectives"]) == len(
        config["evo"]["obj weights"]
    ), "There should be as many weights as objectives"
    assert all(
        [obj in valid_objectives for obj in config["evo"]["objectives"]]
    ), "Invalid objective"

    # Optimal front and reference point for hypervolume
    optimal_front = config["evo"]["obj optimal"]
    hyperref = config["evo"]["obj worst"]
    optim_performance = []

    # Set up DEAP
    creator.create("Fitness", base.Fitness, weights=config["evo"]["obj weights"])
    creator.create("Individual", list, fitness=creator.Fitness)

    toolbox = base.Toolbox()
    toolbox.register(
        "individual", tools.initRepeat, container=creator.Individual, func=network, n=1
    )
    toolbox.register(
        "population", tools.initRepeat, container=list, func=toolbox.individual
    )
    toolbox.register(
        "evaluate",
        partial(evaluate, valid_objectives, config, envs, config["env"]["h0"]),
    )
    toolbox.register("mate", crossover_none)
    toolbox.register(
        "mutate",
        partial(
            mutate_call_network,
            config["evo"]["genes"],
            config["evo"]["types"],
            mutation_rate=config["evo"]["mutation rate"],
        ),
    )
    toolbox.register("select", tools.selNSGA2)
    toolbox.register("map", pool.map)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean, axis=0)
    stats.register("median", np.median, axis=0)
    stats.register("std", np.std, axis=0)
    stats.register("min", np.min, axis=0)
    stats.register("max", np.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = ("gen", "evals", "avg", "median", "std", "min", "max")

    # Initialize population
    # Pareto front: the set of individuals not dominated by any other, i.e. no
    # other individual is at least as good on every objective and better on one
    population = toolbox.population(n=config["evo"]["pop size"])
    hof = tools.ParetoFront()  # hall of fame!

    # Evaluate initial population
    fitnesses = toolbox.map(toolbox.evaluate, population)
    for ind, fit in zip(population, fitnesses):
        ind.fitness.values = fit

    # This is just to assign the crowding distance (needed for selTournamentDCD())
    # to the individuals, no actual selection is done
    population = toolbox.select(population, len(population))

    # Update hall of fame
    hof.update(population)

    # Log first record
    record = stats.compile(population)
    logbook.record(
        gen=0, evals=len(population), **{k: v.round(2) for k, v in record.items()}
    )

    # Log convergence (of first front) and hypervolume
    pareto_fronts = tools.sortNondominated(population, len(population))
    current_time = time.time()
    minutes = (current_time - last_time) / 60
    last_time = time.time()
    time_past = (current_time - start_time) / 60
    conv = convergence(pareto_fronts[0], optimal_front)
    hyper = hypervolume(pareto_fronts[0], hyperref)
    optim_performance.append([0, time_past, minutes, conv, hyper])
    print(
        f"gen: 0, time past: {time_past:.2f} min, minutes: {minutes:.2f} min, convergence: {conv:.3f}, hypervolume: {hyper:.3f}"
    )

    if verbose:
        # Plot relevant part of population fitness
        last_fig = []
        for dims in config["evo"]["plot"]:
            last_fig.append(
                vis_relevant(
                    population, hof, config["evo"]["objectives"], dims, verbose=verbose
                )
            )

        # Create folders for parameters of individuals
        # Only save hall of fame
        os.makedirs(f"{config['log location']}hof_000/")

        # And log the initial performance
        # Figures
        for i, last in enumerate(last_fig):
            if last[2]:
                last[0].savefig(f"{config['fig location']}relevant{i}_000.png")
        # Parameters
        for i, ind in enumerate(hof):
            torch.save(
                ind[0].state_dict(),
                f"{config['log location']}hof_000/individual_{i:03}.net",
            )
        # Fitnesses
        pd.DataFrame(
            [ind.fitness.values for ind in hof], columns=config["evo"]["objectives"]
        ).to_csv(f"{config['log location']}hof_000/fitnesses.csv", index=False, sep=",")

    # Begin the evolution!
    for gen in range(1, config["evo"]["gens"]):
        # Randomize environments (in-place) for this generation
        # Each individual in a generation experiences the same environments,
        # but re-seeding per individual is not done to prevent identically-performing
        # agents (and thus thousands of HOFs, due to stepping nature of SNNs)
        for env in envs:
            randomize_env(env, config)

        # Selection: Pareto front + best of the rest
        pareto_fronts = tools.sortNondominated(population, len(population))
        selection = pareto_fronts[0]
        others = list(chain(*pareto_fronts[1:]))
        # We need a multiple of 4 for selTournamentDCD()
        if len(others) % 4:
            others.extend(random.sample(selection, 4 - (len(others) % 4)))
        selection.extend(tools.selTournamentDCD(others, len(others)))

        # Get offspring: mutate selection
        # TODO: maybe add crossover? Which is usually done binary,
        #  so maybe not that useful..
        offspring = [
            toolbox.mutate(toolbox.clone(ind)) for ind in selection[: len(population)]
        ]

        # Re-evaluate last generation/population, because their conditions are random
        # and we want to test each individual against as many as possible
        fitnesses = toolbox.map(toolbox.evaluate, population)
        for ind, fit in zip(population, fitnesses):
            ind.fitness.values = fit

        # And evaluate the entire new offspring, for the same reason
        fitnesses = toolbox.map(toolbox.evaluate, offspring)
        for ind, fit in zip(offspring, fitnesses):
            ind.fitness.values = fit

        # Update the hall of fame with the offspring,
        # so we get the best of population + offspring in there
        # Also include population, because we re-evaluated it
        hof.update(population + offspring)

        # Select the population for the next generation
        # from the last generation and its offspring
        population = toolbox.select(population + offspring, config["evo"]["pop size"])

        # Log stuff, but don't print!
        record = stats.compile(population)
        logbook.record(
            gen=gen,
            evals=len(offspring) + len(population),
            **{k: v.round(2) for k, v in record.items()},
        )

        # Log convergence (of first front) and hypervolume
        pareto_fronts = tools.sortNondominated(population, len(population))
        current_time = time.time()
        minutes = (current_time - last_time) / 60
        last_time = time.time()
        time_past = (current_time - start_time) / 60
        conv = convergence(pareto_fronts[0], optimal_front)
        hyper = hypervolume(pareto_fronts[0], hyperref)
        optim_performance.append([gen, time_past, minutes, conv, hyper])
        print(
            f"gen: {gen}, time past: {time_past:.2f} min, minutes: {minutes:.2f} min, convergence: {conv:.3f}, hypervolume: {hyper:.3f}"
        )

        if verbose:
            # Plot relevant part of population fitness
            for i, last, dims in zip(
                range(len(last_fig)), last_fig, config["evo"]["plot"]
            ):
                last_fig[i] = vis_relevant(
                    population,
                    hof,
                    config["evo"]["objectives"],
                    dims,
                    last=last,
                    verbose=verbose,
                )

            # Log every so many generations
            if not gen % config["log interval"] or gen == config["evo"]["gens"] - 1:
                # Create directory
                if not os.path.exists(f"{config['log location']}hof_{gen:03}/"):
                    os.makedirs(f"{config['log location']}hof_{gen:03}/")

                # Save population figure
                for i, last in enumerate(last_fig):
                    if last[2]:
                        last[0].savefig(
                            f"{config['fig location']}relevant{i}_{gen:03}.png"
                        )

                # Save parameters of hall of fame individuals
                for i, ind in enumerate(hof):
                    torch.save(
                        ind[0].state_dict(),
                        f"{config['log location']}hof_{gen:03}/individual_{i:03}.net",
                    )

                # Save fitnesses
                pd.DataFrame(
                    [ind.fitness.values for ind in hof],
                    columns=config["evo"]["objectives"],
                ).to_csv(
                    f"{config['log location']}hof_{gen:03}/fitnesses.csv",
                    index=False,
                    sep=",",
                )

                # Save logbook
                pd.DataFrame(logbook).to_csv(
                    f"{config['log location']}logbook.csv", index=False, sep=","
                )

                # Save optimization performance
                pd.DataFrame(
                    optim_performance,
                    columns=[
                        "gen",
                        "time past",
                        "minutes",
                        "convergence",
                        "hypervolume",
                    ],
                ).to_csv(
                    f"{config['log location']}optim_performance.csv",
                    index=False,
                    sep=",",
                )

    # Close multiprocessing pool
    pool.close()
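
main() reads a fairly deep configuration dict. Below is a hypothetical skeleton inferred only from the keys accessed above; every value is a placeholder, not a recommendation:

# Hypothetical config skeleton; keys mirror the lookups in main()
config = {
    "log location": "logs/run1/",
    "fig location": "logs/run1/figs/",
    "log interval": 10,
    "env": {"h0": [2.0, 4.0, 6.0]},         # one environment per starting height
    "evo": {
        "objectives": ["time to land", "final height", "final velocity"],
        "obj weights": (-1.0, -1.0, -1.0),  # minimize all three
        "obj optimal": [[0.0, 0.0, 0.0]],   # reference front for convergence()
        "obj worst": [1.0, 1.0, 1.0],       # reference point for hypervolume()
        "pop size": 100,
        "gens": 200,
        "plot": [(0, 1)],                   # objective index pairs to visualize
        "genes": [],                        # project-specific mutation spec
        "types": [],
        "mutation rate": 0.3,
    },
}
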