def eval_individual(self, individual):
    """
    Function for evaluating an individual.

    :param individual: DEAP individual.
    :return: Fitness values (acc, m2e, i_d).
    """
    desc, hypers = individual.descriptor
    model = MNM(desc, hypers["btch_sz"], self.train_inputs, self.train_outputs,
                loss_func_weights={"o0": hypers["wo0"], "o1": hypers["wo1"], "o2": hypers["wo2"]})
    model.epoch_train(hypers["btch_sz"], 400, 0)
    a = model.predict(self.test_inputs, [], new=True)[0]
    m2e = np.mean(self.evaluation["o0"](a["o0"], self.test_outputs["o0"]))
    acc = np.mean(self.evaluation["o1"](a["o1"][:, 0], np.argmax(self.test_outputs["o1"], axis=1)))
    i_d = -np.mean(self.evaluation["o2"](a["o2"]))
    tf.reset_default_graph()
    del model
    return acc, m2e, i_d
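# Hedged usage sketch (assumption, not part of the original code): eval_individual
# returns a three-objective tuple (acc, m2e, i_d), so it plugs directly into a DEAP
# multi-objective loop. Individuals are assumed to carry a `.descriptor` attribute
# holding the (descriptor, hypers) pair that eval_individual unpacks, and the weight
# signs (maximize accuracy, minimize MSE, maximize the negated divergence) are an
# assumption about the intended optimization directions.
def _deap_loop_sketch(evaluator, population):
    from deap import base, creator, tools

    creator.create("VALPFitness", base.Fitness, weights=(1.0, -1.0, 1.0))
    toolbox = base.Toolbox()
    toolbox.register("evaluate", evaluator.eval_individual)
    toolbox.register("select", tools.selNSGA2)  # NSGA-II selection over the three objectives

    for ind in population:  # Each individual is assumed to expose .descriptor and .fitness
        ind.fitness.values = toolbox.evaluate(ind)
    return toolbox.select(population, k=len(population))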
def eval_valp(individual):
    """
    Given a descriptor, this function creates a VALP and evaluates it. Used only
    for development.

    :param individual: VALP descriptor.
    :return: --
    """
    model = MNM(individual.model_descriptor, batch_size, data_inputs, data_outputs, loss_weights)
    loss = model.epoch_train(batch_size, 40000, 5)
    a = model.predict({"i0": x_test}, [], new=True)[0]
def test_clone_morphism(d_m):
    d_m, _, _ = network_clone_morphism(d_m, "n1")
    model_m = MNM(d_m, 150, data_inputs["Train"], data_outputs["Train"], loss_weights, init=False)
    model_m.load_weights("1")
    a_m = model_m.predict({"i0": x_test}, new=True)[0]
    acc_m = accuracy_score(a_m["o1"], np.argmax(c_test, axis=1))
    mse_m = mean_squared_error(a_m["o0"], y_test)
    print(acc_m, mse_m)
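# Hedged check sketch (assumption, not original code): a clone morphism is expected
# to be function-preserving, so predictions on the regression output should (nearly)
# coincide before and after applying it. The network name "n1", the weight file "1",
# and the globals (data_inputs, loss_weights, x_test) mirror test_clone_morphism
# above; the tolerance is arbitrary.
def _check_clone_morphism_preserves_function(d):
    model = MNM(d, 150, data_inputs["Train"], data_outputs["Train"], loss_weights, init=False)
    model.load_weights("1")
    before = model.predict({"i0": x_test}, new=True)[0]

    d_m, _, _ = network_clone_morphism(deepcopy(d), "n1")
    model_m = MNM(d_m, 150, data_inputs["Train"], data_outputs["Train"], loss_weights, init=False)
    model_m.load_weights("1")
    after = model_m.predict({"i0": x_test}, new=True)[0]

    assert np.allclose(before["o0"], after["o0"], atol=1e-5)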
def reload(path="", seed=0):
    """
    This function reloads an already trained and saved VALP (according to the
    specified first seed). The relevance of the different networks in the VALP is
    measured too.

    :return: The list of nets present in the VALP, the values associated to their
        importance in the model (the lower the value, the more important the net),
        the ranked version of these values, the model descriptor, and the
        hyperparameters.
    """
    name = str(seed)
    hypers = np.load(path + "hypers" + str(seed) + ".npy", allow_pickle=True).item()
    assert isinstance(hypers, dict)

    np.random.seed(seed)
    tf.random.set_random_seed(seed)
    random.seed(seed)

    orig_res = np.load(path + "orig_results" + str(seed) + ".npy")
    orig_res[2] = 50 + orig_res[2]

    desc = MNMDescriptor(10, inp_dict, outp_dict, name=name)
    desc.load(path + "model_" + str(seed) + ".txt")
    model = MNM(desc, hypers["btch_sz"], data_inputs["Train"], data_outputs["Train"],
                loss_func_weights={"o0": hypers["wo0"], "o1": hypers["wo1"], "o2": hypers["wo2"]},
                name=name, load=False, init=False, random_seed=seed)
    model.initialize(load=True, load_path=path)

    # List of network names, their associated probabilities (of being mutated), and their rankings.
    nets, probs, ranks = network_relevance(model, orig_res)
    del model

    return nets, probs, ranks, desc, hypers
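# Hedged note on expected artifacts (inferred from the np.load/desc.load calls above,
# not stated in the original code): reload() looks for these files under `path`:
#
#   path + "hypers<seed>.npy"        # hyperparameter dict
#   path + "orig_results<seed>.npy"  # original evaluation results
#   path + "model_<seed>.txt"        # VALP descriptor
#
# plus the weight files read by model.initialize(load=True, load_path=path). Note
# that train_init() below saves the .npy files with a longer suffix
# ("<seed>_<intelligent_training>_<n_networks>_.npy"), so they may need renaming
# before reload() can find them.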
def modify(nets, probs, ranks, desc, hypers, seed=0, seed2=0):
    """
    Main function for applying a modification to a VALP. It also evaluates the
    modified VALP and saves the results.

    :param nets: List of nets in a VALP.
    :param probs: Probabilities of modifying each of the networks.
    :param ranks: Rankings of the probability values.
    :param desc: VALP descriptor.
    :param hypers: VALP hyperparameters.
    :return: --
    """
    name = str(seed)
    np.random.seed(seed2)
    tf.random.set_random_seed(seed2)
    random.seed(seed2)

    if not rnd:  # If randomness is not applied
        print(ranks.sum(axis=1))
        if (ranks.sum(axis=1) == 0).any():  # If any network ranks among the bottom three in importance for all objectives
            # Only keep a network as modifiable if it ranks among the three least important networks in all three objectives
            probs = (ranks.sum(axis=1) == 0) * probs
            # Renormalize the probabilities once the networks more important than the bottom three have been taken away
            probs = probs / np.sum(probs)
            trainables, res, mutation, comp, reaching_outs = reducing_mutations(nets, probs, desc)
        else:
            trainables, res, mutation, comp, reaching_outs = increasing_mutations(nets, probs, desc)
    else:  # Random application
        comp = np.random.choice(nets)
        _, in_conns, out_conns, _ = desc.get_net_context(comp)
        conns = in_conns + out_conns  # Check whether this raises an error
        reaching_outs = list(set([x for x in desc.reachable[comp] if "o" in x]))  # Outputs affected by the mutation
        mutations = [con for con in conns if is_deletable(desc, con)]
        mutations += ["add_con", "divide_con", "reinit"]
        if is_bypassable(desc, comp):
            mutations += ["bypass"]
        mutation = np.random.choice(mutations)
        res, trainables = mutate(mutation, desc, comp, conns)

    print(mutation)
    model = MNM(desc, hypers["btch_sz"], data_inputs["Train"], data_outputs["Train"],
                loss_func_weights={"o0": hypers["wo0"], "o1": hypers["wo1"], "o2": hypers["wo2"]},
                name=name, load=None, init=False, random_seed=seed2, lr=0.001)
    model.initialize(load=True, load_path="", variables=trainables)
    model.convergence_train(hypers["btch_sz"], iter_lim // 100, conv_param, proportion,
                            iter_lim // 20, display_step=-1)
    results = evaluate_model(model)
    del model

    if rnd == 1:
        n = "resultsrandom"
    else:
        n = "results"
    np.save(n + str(seed) + "_" + str(seed2) + ".npy",
            np.concatenate((results, [res, mutation, comp], reaching_outs)))
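# Hedged end-to-end sketch (not part of the original file): how reload() and modify()
# compose into a single mutation step. The seed values are illustrative; the global
# `rnd` selects between relevance-guided and random mutation inside modify().
def _mutation_step_sketch(first_seed=0, second_seed=1):
    nets, probs, ranks, desc, hypers = reload(path="", seed=first_seed)
    modify(nets, probs, ranks, desc, hypers, seed=first_seed, seed2=second_seed)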
def train_init():
    """
    This function trains random VALPs. It is used for generating the random initial
    VALPs to which the mutation operators can be applied.

    :return: -- (The structure, hyperparameters, weights, and performance of the
        VALP are saved in files whose names include the seed (the first one) used
        to generate them.)
    """
    np.random.seed(seed)
    tf.random.set_random_seed(seed)
    random.seed(seed)

    name = str(seed)
    desc = MNMDescriptor(5, inp_dict, outp_dict, name=name)
    desc = recursive_creator(desc, 0, 0, seed)
    hypers = {}
    for hyper in hyps:
        hypers[hyper] = np.random.choice(hyps[hyper])

    model = MNM(desc, hypers["btch_sz"], data_inputs["Train"], data_outputs["Train"],
                loss_func_weights={"o0": hypers["wo0"], "o1": hypers["wo1"], "o2": hypers["wo2"]},
                name=name, lr=hypers["lr"], opt=hypers["opt"], random_seed=seed)

    if intelligent_training == 2:
        loss_weights = model.sequential_training(hypers["btch_sz"], iter_lim // 50, conv_param,
                                                 proportion, iter_lim, display_step=-1)
    else:
        loss_weights = model.autoset_training(hypers["btch_sz"], iter_lim // 50, conv_param,
                                              proportion, iter_lim, display_step=-1,
                                              incr=incr, decr=decr, scaling=scale)

    # ####### Save model characteristics.
    model.descriptor.save(path="")
    model.save_weights(path="")
    results = evaluate_model(model)

    suffix = str(seed) + "_" + str(intelligent_training) + "_" + str(n_networks) + "_" + ".npy"
    np.save("hypers" + suffix, hypers)
    np.save("orig_results" + suffix, results)
    np.save("loss_weights" + suffix, loss_weights)
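# Hedged driver sketch (assumption, not original code): train_init() reads the
# module-level `seed` (as well as intelligent_training, n_networks, etc.), so
# generating a batch of random initial VALPs could look like this.
def _generate_initial_valps(first_seed, n_valps):
    global seed
    for seed in range(first_seed, first_seed + n_valps):
        reset_graph(seed)  # Fresh TF graph and reseeded RNGs per VALP, as elsewhere in this code
        train_init()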
            net).producing.type:
        return True
    if "amples" in desc.comp_by_ind(net).taking.type and \
            "alues" in desc.comp_by_ind(net).producing.type:
        return True
    return False


if __name__ == "__main__":

    loss_weights, (data_inputs, inp_dict), (data_outputs, outp_dict), \
        (x_train, c_train, y_train, x_test, c_test, y_test) = diol()

    d = MNMDescriptor(5, inp_dict, outp_dict, name="1")
    d = recursive_creator(d, 0, 0, seed=0)
    d.print_model_graph("huehue1")
    model = MNM(d, 150, data_inputs["Train"], data_outputs["Train"], loss_weights, init=False)
    # model.load_weights("1")
    # model.save_weights("1")
    a = model.predict({"i0": x_test}, new=True)[0]
    acc = accuracy_score(a["o1"], np.argmax(c_test, axis=1))
    mse = mean_squared_error(a["o0"], y_test)
    print(acc, mse)
    test_clone_morphism(d)
def hill_climbing(seed, evals_remaining, local):
    """
    Perform Hill Climbing (HC).

    :param seed: Random seed.
    :param evals_remaining: Number of evaluations allowed in total.
    :param local: Number of evaluations allowed in this HC run (before restarting, until reaching evals_remaining).
    :return: -- (The data related to the HC search is saved.)
    """
    global pareto
    global three_objectives

    criterion = improve_two_obectives  # Acceptance criterion; is_non_dominated is the alternative
    reset_no = -1
    reset_graph(seed)
    dom = [False, -1]
    # This will contain the data to be saved
    data = [[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
             datetime.datetime.now().timestamp()]]

    while evals_remaining > 0:
        three_objectives = np.array([999, 999, 999])
        pareto = []
        reset_no += 1
        trial = 0

        # Create and evaluate the first random VALP
        pivot = MNMDescriptor(10, inp_dict, outp_dict)
        pivot = recursive_creator(pivot, 0, 0)
        # pivot.print_model_graph("Pivot")
        g_2 = tf.Graph()
        with g_2.as_default():
            model = MNM(pivot, btch_sz, data_inputs, data_outputs, loss_weights)
            model.convergence_train(btch_sz, min_iter, conv_param, max_iter,
                                    {"i0": x_tt, "o1": c_tt, "o0": y_tt, "o2": x_tt}, sync=1)
            model.save_weights(str(evals_remaining))

        pivot_fit = evaluate(model)
        criterion(pivot_fit)
        pivot.save("descriptors/Seed" + str(seed) + "_Eval" + str(evals_remaining) +
                   "_local" + str(trial) + "_reset" + str(reset_no) +
                   "_acc" + str(pivot_fit[0]) + "_mse" + str(pivot_fit[1]) +
                   "_sam" + str(pivot_fit[2]) + ".txt")
        data = data + [[evals_remaining, trial, reset_no,
                        pivot_fit[0], pivot_fit[1], pivot_fit[2],
                        pivot_fit[3], pivot_fit[4], pivot_fit[5],
                        -1, 1, datetime.datetime.now().timestamp()]]

        # Perform local search
        while trial < local and evals_remaining > 0:
            new = deepcopy(pivot)
            op = np.random.randint(len(ops))  # Operation chosen randomly

            # Perform the change and evaluate again
            res = ops[op](new, dom[1])
            # print(res, ops[op].__name__)
            # new.print_model_graph("Eval" + str(evals_remaining) + str(ops[op].__name__) + " " + str(res) + "_Last" + str(last_impr))
            if res == -1:
                continue
            elif op == 0 and os.path.isfile(res + ".npy"):
                os.remove(res + ".npy")

            log = str(ops[op]) + " " + str(res)
            fix_in_out_sizes(new, loaded=True)
            evals_remaining -= 1
            trial += 1

            try:
                with g_2.as_default():
                    model = MNM(new, btch_sz, data_inputs, data_outputs, loss_weights, init=False)
                    model.load_weights()
                    model.convergence_train(btch_sz, min_iter, conv_param, max_iter,
                                            {"i0": x_tt, "o1": c_tt, "o0": y_tt, "o2": x_tt}, sync=1)
                loss = evaluate(model)
                dom = criterion(loss)  # Check whether the new VALP should be accepted or not
                data = data + [[evals_remaining, trial, reset_no,
                                loss[0], loss[1], loss[2], loss[3], loss[4], loss[5],
                                op, int(dom[0]), datetime.datetime.now().timestamp()]]
            except Exception as e:
                # print("huehue", log, e)
                model.save_weights(str(evals_remaining))
                with g_2.as_default():
                    model.sess.close()
                # raise e

            if dom[0]:  # In case the new VALP should be accepted,
                model.save_weights(str(evals_remaining))
                pivot = new
                new.save("descriptors/Seed" + str(seed) + "_Eval" + str(evals_remaining) +
                         "_local" + str(trial) + "_reset" + str(reset_no) +
                         "_acc" + str(loss[0]) + "_mse" + str(loss[1]) +
                         "_sam" + str(loss[2]) + ".txt")
                trial = 0
            model.sess.close()

    np.save("Data" + str(seed) + ".npy", data)
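# Hedged sketch (assumption, not the project's actual criterion): the general shape
# of an acceptance function such as is_non_dominated, returning the [accepted, payload]
# pair that hill_climbing unpacks as `dom`. It assumes all three objectives are to be
# minimized and that `front` holds the current Pareto front as numpy arrays;
# improve_two_obectives presumably follows the same contract with a different rule.
def _is_non_dominated_sketch(fit, front):
    fit = np.asarray(fit[:3])
    for other in front:
        if np.all(other <= fit) and np.any(other < fit):
            return [False, -1]  # Dominated by a front member: reject
    # Drop front members the new point dominates, then add it
    front[:] = [other for other in front if not (np.all(fit <= other) and np.any(fit < other))]
    front.append(fit)
    return [True, -1]  # Non-dominated: accept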
data_outputs["o2"] = x_train

load_model()
min_iter = 100
max_iter = 5000
conv_param = 1.0
sd = args.integers[0]
while sd < args.integers[0] + 2:
    hill_climbing(sd, 50, 55)
    sd += 1

three_objectives = np.array([0.9, 0.05, 18])
loss_weights, (data_inputs, inp_dict), (data_outputs, outp_dict), \
    (x_train, c_train, y_train, x_test, c_test, y_test) = diol()

reset_graph(1)
pvt = MNMDescriptor(10, inp_dict, outp_dict)
pvt.load("model.txt")
mdl = MNM(pvt, 150, data_inputs, data_outputs, loss_weights, init=False)
mdl.load_weights()
b = improve_two_obectives(np.array([0.1, 0.04, 7]))
pvt.print_model_graph("huehue0")

for i in range(1, 500):
    reset_graph(i)
    piv = deepcopy(pvt)
    print(bypass(piv, b[1]))