Example #1
0
def hvolume_evolution(R: dict,
                      individuals_gen: list,
                      a: np.ndarray,
                      phase="test",
                      start=-1) -> np.ndarray:
    """Compute the hypervolume of each EARN generation.

    :param R: Dict storing ensembles evaluated (key = ensemble id, value = Source.system_evaluator.Results object)
    :param individuals_gen: [ (ids, fit), (ids, fit) ... ]. Each element stores the ids and fitness value of the ensembles present in the generation
    :param a: Array of integers. Indicates which objectives take into account to compute the hypervolume (1) or not (0)
    :param phase: test or val
    :param start: From which iteration of EARN compute the hypervolume
    :return: 1-D array with one hypervolume value per selected generation
    """

    assert phase in ["test", "val"]
    assert start < len(individuals_gen)
    assert 0 < a.shape[0] < 4

    # NN evaluations: keep only the plain-model entries.
    # NOTE(review): the "< 3" threshold on the val/test results is assumed
    # to distinguish single NNs from ensembles — confirm against Results.
    R_models = {k: r for k, r in R.items()
                if len(r.val if phase == "val" else r.test) < 3}
    limits = make_limits_dict()
    update_limit_dict(limits, R_models, phase)

    # Reference point
    ref = np.array([1.] * np.count_nonzero(a)
                   )  # Worst solution possible regarding NN pool of 32 models

    # Return evolution of the hypervolume
    hvolume = []
    for gen in individuals_gen[start:]:
        Rgen = [R[g] for g in gen[0]]            # results of this generation's ids
        obj = f(Rgen, limits, phase)             # objective matrix (one row per ensemble)
        obj = obj[:, a > 0]                      # keep only the selected objectives
        hvolume.append(compute_hvolume(obj, ref))

    return np.array(hvolume)
Example #2
0
    def __str__(self):
        """Advance the internal counter and render a fixed-width progress bar."""
        self.i += 1
        filled = self.i % self.steps
        bar = "#" * filled + " " * (self.steps - filled)
        return "[[%s]]" % bar


if __name__ == "__main__":

    # Parse CLI arguments and derive a temp subdirectory from the dataset path.
    args = argument_parse(sys.argv[1:])
    # NOTE(review): the [12:-15] slice assumes a fixed dataset-path layout — confirm.
    os.environ['TMP'] = 'Definitions/Classifiers/tmp/' + args.dataset[12:-15]

    # Load 32 DNNs
    S_initial = []    # one single-classifier system per .pkl file found
    S_eval_dict = {}  # classifier id -> validation evaluation results
    limits = make_limits_dict()

    classifier_path = os.path.join(os.environ['FCM'], 'Definitions',
                                   'Classifiers', args.dataset)
    classifier_files = [f for f in os.listdir(classifier_path) if ".pkl" in f]
    for c_id in classifier_files:
        # NOTE(review): this rebinds the name `sys`, shadowing the sys module
        # for the remainder of the script — consider renaming the local.
        sys = sb.SystemBuilder(verbose=False)
        c_file = os.path.join(classifier_path, c_id)
        sys.add_classifier(mutil.make_classifier(c_id, c_file))
        sys.set_start(c_id)
        S_initial.append(sys)
        S_eval_dict[c_id] = evaluate(sys, sys.get_start(), phases=["val"])
    # Normalization limits computed from the single-model validation results.
    update_limit_dict(limits, S_eval_dict, phase="val")

    # Initialize Q-Learning table
    Qtable = {}
Example #3
0
    # Ensure the temporary working directory exists. exist_ok=True avoids the
    # check-then-create race of the exists()+makedirs pattern.
    os.makedirs(os.path.join(os.environ['FCM'], os.environ['TMP']),
                exist_ok=True)
    # Seed the RNG from the OS entropy source (non-deterministic runs).
    random.seed()

    # Initial population
    P = generate_initial_population()
    R = evaluate_population(P)
    P_all = []
    individuals_fitness_per_generation = []

    # Evaluation Results Dictionary
    R_dict = {}
    R_dict_old = {}
    # Map each individual's system id to its evaluation results.
    R_dict_models = dict(zip([p.get_sysid() for p in P], R))
    R_dict_old.update(R_dict_models)
    limits = fit_fun.make_limits_dict()
    fit_fun.update_limit_dict(limits, R_dict_models, phase="val")

    # Fitness of the initial population.
    # NOTE(review): no phase argument here, unlike other call sites that pass
    # phase="val" — confirm the function's default matches the intent.
    fit = fit_fun.f2_time_param_penalization(R, args.a, limits)

    # Start the loop over generations
    iteration = 0
    # Per-iteration adjustment used by the (currently disabled) DHM schedule below.
    p_update = args.pm / (args.iterations + 10)
    while iteration < args.iterations:
        """
        # Dynamic decreasing high mutation ratio (DHM)
        args.pm -= p_update
        args.pc += p_update
        """

        start = time.time()
Example #4
0
def multinode_earn(comm):
    """Distributed EARN evolutionary loop over MPI.

    Rank 0 acts as the manager: it creates the initial population, broadcasts
    it each epoch, gathers every rank's offspring, and performs selection.
    Every rank (manager included) generates and evaluates its own share of
    the offspring.

    :param comm: MPI communicator (e.g. mpi4py's MPI.COMM_WORLD). All ranks
        must call this function so the bcast/gather collectives match.
    """
    r = comm.Get_rank()
    s = comm.Get_size()

    # Placeholders so non-root ranks have the names bound before the bcast.
    P = P_fit = R_dict = limit = None
    individuals_fitness_per_generation = []

    if r == 0:
        # Manager: build and evaluate the initial population.
        P = main.generate_initial_population()
        R = main.evaluate_population(P, phases=["val", "test"])
        # Evaluation results keyed by system id. This dict doubles as the
        # archive of every individual kept across generations (grown below).
        R_dict = dict(zip([p.get_sysid() for p in P], R))
        limit = fit_fun.make_limits_dict()
        fit_fun.update_limit_dict(limit, R_dict, phase="val")
        print(str(limit))
        P_fit = fit_fun.f2_time_param_penalization(R,
                                                   main.args.a,
                                                   limit,
                                                   phase="val")

    for epoch in range(main.args.iterations):

        # 1) Send population and fitness to all nodes
        if r == 0:
            start = time.time()
            manager_data = {
                'individuals': P,
                'fitness': P_fit,
                'O': main.args.offspring // s,  # offspring share per rank
                'limits': limit
            }
        else:
            manager_data = None
        manager_data = comm.bcast(manager_data, root=0)

        # 2) Nodes receive data
        P = manager_data['individuals']
        P_fit = manager_data['fitness']
        O = manager_data['O']
        limit = manager_data['limits']

        # 3) Every Node: Generate and evaluate offspring and fitness
        P_offspring_worker = main.generate_offspring(P, P_fit, O)
        R_offspring_worker = main.evaluate_population(P_offspring_worker,
                                                      phases=["val", "test"])
        fit_offspring_worker = fit_fun.f2_time_param_penalization(
            R_offspring_worker, main.args.a, limit, phase="val")
        worker_data = {
            'offspring': P_offspring_worker,
            'fitness': fit_offspring_worker,
            'R': R_offspring_worker
        }

        # 4) Send work back to manager node (rank=0)
        workers_data = comm.gather(worker_data, root=0)

        # 5) Manager Node: Receive work and select best individuals for next generations
        if r == 0:
            P_offspring = []
            R_offspring = []
            fit_offspring = []
            for work in workers_data:
                P_offspring += work['offspring']
                R_offspring += work['R']
                fit_offspring += work['fitness']

            # Parents compete with the merged offspring.
            P_generation = P + P_offspring
            R_generation = R + R_offspring
            fit_generation = P_fit + fit_offspring

            if main.args.selection == "nfit":
                best = selection.most_fit_selection(fit_generation,
                                                    main.args.population)
            else:
                best = selection.roulette_selection(fit_generation,
                                                    main.args.population)

            P = [P_generation[i] for i in best]
            R = [R_generation[i] for i in best]
            P_fit = [fit_generation[i] for i in best]

            # Archive the survivors' evaluation results
            for i, p in enumerate(P):
                R_dict[p.get_sysid()] = R[i]

            # Save which individuals alive every iteration
            ids = [p.get_sysid() for p in P]
            individuals_fitness_per_generation += [(ids, P_fit)]

            print("Iteration %d" % epoch)
            print("TIME: Seconds per generation: %f " % (time.time() - start))

    if r == 0:
        save_results(R_dict, individuals_fitness_per_generation)