Example #1
    def __init__(self, descriptor, batch_size, inputs, outputs, loss_func_weights, name="", load=None, init=True, lr=0.001, opt=0, random_seed=None):

        if random_seed is not None:
            np.random.seed(random_seed)
            tf.random.set_random_seed(random_seed)
            random.seed(random_seed)

        self.name = name

        self.descriptor = descriptor
        if load is not None:
            if isinstance(load, str):
                self.descriptor.load(load + "model_" + name + ".txt")
            else:
                self.descriptor.load("model_" + name + ".txt")
        elif not descriptor.constructed:
            self.descriptor = recursive_creator(descriptor, 0, 0)  # Create a new descriptor if it is empty and not loaded

        self.inputs = {}  # ID: placeholder
        self.outputs = {}  # ID: placeholder
        self.components = {}  # ID: Component
        self.predictions = {}  # ID: Data (where the result is placed, what has to be sess.run-ed)
        self.input_data = {}  # ID: Data (numpy data, from which learning is done)
        self.output_data = {}  # ID: Data (numpy data, from which learning is done)
        self.lr = lr
        self.opt = opt
        self.loss_func_weights = loss_func_weights
        self.subopts = {}

        self.initialized = []  # List of initialized components (to know what to build, when recursively creating tf DNNs)
        self.sess = tf.Session()  # config=tf.ConfigProto(device_count={'GPU': 0})
        self.optimizer = None  # To be sess.run-ed to train all the objectives of the VALP
        self.optimizer_samp = None  # To be sess.run-ed to train only the sampling output (the VAE performs multiple training steps per data piece)
        self.loss_function = 0  # General loss function
        self.loss_function_sample = 0  # loss function containing only the sampling and KL losses
        self.batch_size = batch_size
        self.example_num = inputs[random.choice(list(inputs.keys()))].shape[0]
        self.loss_weights = {}  # Beta parameter. Implemented as tf variables, in case we want to dynamically modify it
        self.b_assign = {}  # Operations to update the value of the Beta parameters
        self.b_ph = {}  # Placeholders where the data will be placed

        self.sub_losses = {}  # All the loss functions separated (mainly for debugging)

        for model_input in descriptor.inputs:
            self.add_input(inputs[model_input], model_input)

        for outp in descriptor.outputs:
            self.add_output(outputs[outp], outp)

        if init:  # If the tf variables have to be initialized (most of the time)
            if load is not None:  # If the weights have to be loaded
                if isinstance(load, str):  # specific path
                    self.load(load)
                elif load:  # or default
                    self.load("/")
            else:
                self.initialize(load)  # Random initialization
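
For context, a minimal construction sketch matching this signature; it assumes MNMDescriptor, recursive_creator, and the data arrays are importable from the VALP codebase, and the batch size, weights, and data names here are placeholders rather than values from the original code:

# assumption: MNM, MNMDescriptor and recursive_creator come from the VALP codebase,
# and x_train/y_train/c_train are numpy arrays keyed by the usual "i0"/"o0".."o2" IDs
desc = MNMDescriptor(10, inp_dict, outp_dict)
desc = recursive_creator(desc, 0, 0)
model = MNM(desc, 50, {"i0": x_train}, {"o0": y_train, "o1": c_train, "o2": x_train},
            loss_func_weights={"o0": 1, "o1": 1, "o2": 1}, lr=0.001, random_seed=0)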
Example #2
    def init_individual(self, init_ind, no_batch, no_drop):
        """
        Creation of a single individual
        :param init_ind: DEAP function for transforming a VALP descriptor + evolvable hyperparameters into a DEAP individual
        :param no_batch: Boolean, whether networks can apply batch normalization or not
        :param no_drop: Boolean, whether networks can apply dropout or not
        :return: a DEAP individual
        """

        desc = MNMDescriptor(10, inp_dict, outp_dict)
        desc = recursive_creator(desc, 0, 0)
        hypers = {}
        for hyper in self.ev_hypers:
            hypers[hyper] = np.random.choice(self.ev_hypers[hyper])

        return init_ind([desc, hypers])
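
A hedged sketch of how init_ind is typically supplied through DEAP's creator/toolbox machinery; the fitness weights and the evolver instance name are assumptions, not taken from this code:

from deap import base, creator

creator.create("Fitness", base.Fitness, weights=(-1.0, -1.0))  # assumed: two minimized objectives
creator.create("Individual", list, fitness=creator.Fitness)
toolbox = base.Toolbox()
# evolver is a hypothetical instance of the class defining init_individual
toolbox.register("individual", evolver.init_individual, creator.Individual, True, True)
ind = toolbox.individual()  # a DEAP individual wrapping [descriptor, hypers]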
def train_init():
    """
    This function trains random VALPs. It is used for generating random initial VALPs to which mutation operators can be applied.
    :return: -- (the structure, hyperparameters, weights, and performance of the VALP are saved to files whose names include the seed used to generate them)
    """
    np.random.seed(seed)
    tf.random.set_random_seed(seed)
    random.seed(seed)

    name = str(seed)
    desc = MNMDescriptor(5, inp_dict, outp_dict, name=name)
    desc = recursive_creator(desc, 0, 0, seed)
    hypers = {}
    for hyper in hyps:
        hypers[hyper] = np.random.choice(hyps[hyper])

    model = MNM(desc,
                hypers["btch_sz"],
                data_inputs["Train"],
                data_outputs["Train"],
                loss_func_weights={
                    "o0": hypers["wo0"],
                    "o1": hypers["wo1"],
                    "o2": hypers["wo2"]
                },
                name=name,
                lr=hypers["lr"],
                opt=hypers["opt"],
                random_seed=seed)
    if intelligent_training == 2:
        loss_weights = model.sequential_training(hypers["btch_sz"],
                                                 iter_lim // 50,
                                                 conv_param,
                                                 proportion,
                                                 iter_lim,
                                                 display_step=-1)
    else:
        loss_weights = model.autoset_training(hypers["btch_sz"],
                                              iter_lim // 50,
                                              conv_param,
                                              proportion,
                                              iter_lim,
                                              display_step=-1,
                                              incr=incr,
                                              decr=decr,
                                              scaling=scale)

    # ####### Save model characteristics.

    model.descriptor.save(path="")
    model.save_weights(path="")

    results = evaluate_model(model)

    np.save(
        "hypers" + str(seed) + "_" + str(intelligent_training) + "_" +
        str(n_networks) + "_" + ".npy", hypers)

    np.save(
        "orig_results" + str(seed) + "_" + str(intelligent_training) + "_" +
        str(n_networks) + "_" + ".npy", results)

    np.save(
        "loss_weights" + str(seed) + "_" + str(intelligent_training) + "_" +
        str(n_networks) + "_" + ".npy", loss_weights)
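
Because hypers (and possibly the other two objects) are Python dicts, np.save pickles them, so reading them back needs allow_pickle; a small sketch under the same seed/intelligent_training/n_networks naming used above:

tag = str(seed) + "_" + str(intelligent_training) + "_" + str(n_networks) + "_"
hypers = np.load("hypers" + tag + ".npy", allow_pickle=True).item()  # .item() unwraps the dict
results = np.load("orig_results" + tag + ".npy", allow_pickle=True)
loss_weights = np.load("loss_weights" + tag + ".npy", allow_pickle=True)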
Example #4
    btch_sz = 50
    loss_weights = {"o0": 1, "o1": 1, "o2": 1}

    accs = []
    mses = []
    images = []
    conds = []
    total = 0

    for seed in range(0, 500):

        reset_graph(seed)

        model_descriptor = MNMDescriptor(max_comp=10, model_inputs=inp_dict, model_outputs=outp_dict)

        model_descriptor = recursive_creator(model_descriptor, 0, conv_prob=0)
        model_descriptor.print_model_graph()
        model = MNM(model_descriptor, btch_sz, data_inputs, data_outputs, loss_weights)

        start = time.time()
        print("Seed:", str(seed), "Started at", time.asctime(time.localtime(start)))

        loss = model.epoch_train(batch_size=btch_sz, epochs=40000, sync=0)
        # model.save()
        # a, = model.predict({"i0": x_test}, [], new=False)
        a, = model.predict({"i0": x_test}, [], new=True)

        last_run = time.time() - start
        total += last_run

        print("Ended at", time.asctime(time.localtime(time.time())), "Total time spent", timedelta(seconds=last_run))
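
The accs/mses lists declared above are presumably filled from these predictions; a sketch scoring a the same way Example #5 does, assuming the same o0/o1 output IDs and the c_test/y_test arrays from diol():

from sklearn.metrics import accuracy_score, mean_squared_error

accs.append(accuracy_score(a["o1"], np.argmax(c_test, axis=1)))
mses.append(mean_squared_error(a["o0"], y_test))
print("Seed", seed, "acc", accs[-1], "mse", mses[-1])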
Example #5
                net).producing.type:
        return True
    if "amples" in desc.comp_by_ind(
            net).taking.type and "alues" in desc.comp_by_ind(
                net).producing.type:
        return True
    return False
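
The substring tests above ("amples", "alues") appear chosen so that capitalized and lowercase type names both match; a tiny self-contained illustration of that idiom (the function name is hypothetical):

def connects_samples_to_values(taking_type, producing_type):
    # "amples" matches both "Samples" and "samples"; "alues" matches "Values" and "values"
    return "amples" in taking_type and "alues" in producing_type

print(connects_samples_to_values("Samples", "values"))  # True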


if __name__ == "__main__":
    loss_weights, (data_inputs,
                   inp_dict), (data_outputs,
                               outp_dict), (x_train, c_train, y_train, x_test,
                                            c_test, y_test) = diol()
    d = MNMDescriptor(5, inp_dict, outp_dict, name="1")
    d = recursive_creator(d, 0, 0, seed=0)
    d.print_model_graph("huehue1")
    model = MNM(d,
                150,
                data_inputs["Train"],
                data_outputs["Train"],
                loss_weights,
                init=False)
    #model.load_weights("1")
    #model.save_weights("1")
    a = model.predict({"i0": x_test}, new=True)[0]
    acc = accuracy_score(a["o1"], np.argmax(c_test, axis=1))
    mse = mean_squared_error(a["o0"], y_test)
    print(acc, mse)
    test_clone_morphism(d)
Example #6
    parser.add_argument('integers',
                        metavar='int',
                        type=int,
                        choices=range(3000),
                        nargs='+',
                        help='an integer in the range 0..2999')

    args = parser.parse_args()
    batch_size = 50

    loss_weights, (data_inputs,
                   inp_dict), (data_outputs,
                               outp_dict), (x_train, c_train, y_train, x_test,
                                            c_test, y_test) = diol()
    md = MNMDescriptor(30, inp_dict, outp_dict)
    descriptor = recursive_creator(md, 0)
    descriptor.save("basic.txt")
    descriptor.load("basic.txt")
    descriptor.print_model_graph("basic")

    #descriptor = overkill_conn(descriptor)
    #descriptor.save("overkill.txt")
    #descriptor.print_model_graph("overkill")
    #descriptor.load("overkill.txt")

    o2_agent = []
    o1_agent = []
    o0_agent = []
    get_agent(descriptor, "o2", o2_agent)
    get_agent(descriptor, "o1", o1_agent)
    get_agent(descriptor, "o0", o0_agent)
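
get_agent is called here for its side effect: judging from the empty lists passed in, it appears to fill its third argument with the components behind each output (an assumption, since its body is not shown). A quick inspection sketch:

for out_name, agents in (("o0", o0_agent), ("o1", o1_agent), ("o2", o2_agent)):
    print(out_name, "depends on", agents)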
Example #7
def hill_climbing(seed, evals_remaining, local):
    """
    Perform Hill Climbing (HC)
    :param seed: Random seed
    :param evals_remaining: Number of evaluations allowed in total
    :param local: Number of evaluations allowed in this HC run (before restarting until reaching evals_remaining)
    :return: -- Save the data related to the HC search
    """
    global pareto
    global three_objectives
    chriterion = improve_two_obectives  # is_non_dominated

    reset_no = -1
    reset_graph(seed)
    dom = [False, -1]

    data = [[
        -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
        datetime.datetime.now().timestamp()
    ]]  # This will contain the data to be saved
    while evals_remaining > 0:
        three_objectives = np.array([999, 999, 999])
        pareto = []
        reset_no += 1
        trial = 0
        # Create and evaluate first random VALP
        pivot = MNMDescriptor(10, inp_dict, outp_dict)
        pivot = recursive_creator(pivot, 0, 0)
        # pivot.print_model_graph("Pivot")
        g_2 = tf.Graph()
        with g_2.as_default():
            model = MNM(pivot, btch_sz, data_inputs, data_outputs,
                        loss_weights)
            model.convergence_train(btch_sz,
                                    min_iter,
                                    conv_param,
                                    max_iter, {
                                        "i0": x_tt,
                                        "o1": c_tt,
                                        "o0": y_tt,
                                        "o2": x_tt
                                    },
                                    sync=1)
            model.save_weights(str(evals_remaining))

        pivot_fit = evaluate(model)
        chriterion(pivot_fit)

        pivot.save("descriptors/Seed" + str(seed) + "_Eval" +
                   str(evals_remaining) + "_local" + str(trial) + "_reset" +
                   str(reset_no) + "_acc" + str(pivot_fit[0]) + "_mse" +
                   str(pivot_fit[1]) + "_sam" + str(pivot_fit[2]) + ".txt")

        data = data + [[
            evals_remaining, trial, reset_no, pivot_fit[0], pivot_fit[1],
            pivot_fit[2], pivot_fit[3], pivot_fit[4], pivot_fit[5], -1, 1,
            datetime.datetime.now().timestamp()
        ]]

        # Perform local search
        while trial < local and evals_remaining > 0:

            new = deepcopy(pivot)
            op = np.random.randint(len(ops))  # Operation chosen randomly

            # Perform the change and evaluate again
            res = ops[op](new, dom[1])
            #print(res, ops[op].__name__)
            # new.print_model_graph("Eval" + str(evals_remaining) + str(ops[op].__name__) + " " + str(res) + "_Last" + str(last_impr))
            if res == -1:
                continue
            elif op == 0 and os.path.isfile(res + ".npy"):
                os.remove(res + ".npy")
            log = str(ops[op]) + " " + str(res)
            fix_in_out_sizes(new, loaded=True)
            evals_remaining -= 1

            trial += 1
            try:
                with g_2.as_default():
                    model = MNM(new,
                                btch_sz,
                                data_inputs,
                                data_outputs,
                                loss_weights,
                                init=False)
                    model.load_weights()
                    model.convergence_train(btch_sz,
                                            min_iter,
                                            conv_param,
                                            max_iter, {
                                                "i0": x_tt,
                                                "o1": c_tt,
                                                "o0": y_tt,
                                                "o2": x_tt
                                            },
                                            sync=1)
                    loss = evaluate(model)
                    dom = chriterion(
                        loss)  # Check whether it should be accepted or not
                    data = data + [[
                        evals_remaining, trial, reset_no, loss[0], loss[1],
                        loss[2], loss[3], loss[4], loss[5], op,
                        int(dom[0]),
                        datetime.datetime.now().timestamp()
                    ]]
            except Exception as e:
                #print("huehue", log, e)
                model.save_weights(str(evals_remaining))
                with g_2.as_default():
                    model.sess.close()
                # raise e

            if dom[0]:  # In case it should be accepted,
                model.save_weights(str(evals_remaining))
                pivot = new
                new.save("descriptors/Seed" + str(seed) + "_Eval" +
                         str(evals_remaining) + "_local" + str(trial) +
                         "_reset" + str(reset_no) + "_acc" + str(loss[0]) +
                         "_mse" + str(loss[1]) + "_sam" + str(loss[2]) +
                         ".txt")
                trial = 0

            model.sess.close()

    np.save("Data" + str(seed) + ".npy", data)
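
The saved log reloads as a plain 2-D float array; the column layout below is read off the rows appended in the loop above (the first row is a -1 sentinel), and the file name assumes the same seed variable:

data = np.load("Data" + str(seed) + ".npy")
# Columns: evals_remaining, trial, reset_no, the six fitness values,
# operator index (-1 for the random pivot), accepted flag, timestamp.
rows = data[1:]                    # skip the sentinel row
accepted = rows[rows[:, 10] == 1]  # mutations that the criterion accepted
print(accepted.shape[0], "accepted steps out of", rows.shape[0])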