Example #1
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import OneHotEncoder
# train_cnn_ae, Evolving, ConvDescriptor, TConvDescriptor and load_fashion
# come from EvoFlow; their import lines are omitted in the original snippet.


def eval_cnn_ae(preds, placeholders, sess, graph, inputs, _, __):
    """Fitness of an individual: MSE between the reconstruction and the input."""
    with graph.as_default():
        res = sess.run(preds["n1"], feed_dict={placeholders["i0"]: inputs["i0"]})
        sess.close()
        if np.isnan(res).any():
            return 288,  # fixed penalty fitness when the network diverges (NaN output)
        else:
            return mean_squared_error(np.reshape(res, (-1)), np.reshape(inputs["i0"], (-1))),


if __name__ == "__main__":

    x_train, y_train, x_test, y_test = load_fashion()

    # Scale to [0, 1] and replicate the grayscale channel to get the
    # 3-channel inputs declared in n_inputs ([28, 28, 3])
    x_train = np.expand_dims(x_train, axis=3)/255
    x_train = np.concatenate((x_train, x_train, x_train), axis=3)

    x_test = np.expand_dims(x_test, axis=3)/255
    x_test = np.concatenate((x_test, x_test, x_test), axis=3)

    OHEnc = OneHotEncoder(categories='auto')

    y_train = OHEnc.fit_transform(np.reshape(y_train, (-1, 1))).toarray()

    y_test = OHEnc.transform(np.reshape(y_test, (-1, 1))).toarray()  # reuse the encoder fitted on the training labels
    # Here we define a convolutional-transposed convolutional network combination
    e = Evolving(loss=train_cnn_ae,
                 desc_list=[ConvDescriptor, TConvDescriptor],
                 x_trains=[x_train],
                 y_trains=[y_train],
                 x_tests=[x_test],
                 y_tests=[y_test],
                 evaluation=eval_cnn_ae,
                 batch_size=150,
                 population=2,
                 generations=10,
                 n_inputs=[[28, 28, 3], [7, 7, 1]],
                 n_outputs=[[49], [28, 28, 3]],
                 cxp=0,
                 mtp=1,
                 hyperparameters={"lrate": [0.1, 0.5, 1], "optimizer": [0, 1, 2]},
                 no_batch_norm=False,
                 no_dropout=False)
    a = e.evolve()

    print(a[-1])
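The NaN-penalty rule in eval_cnn_ae can be exercised without a TensorFlow session; a minimal numpy-only sketch of the same fitness logic (fitness_from_reconstruction is an illustrative name, not part of EvoFlow):

import numpy as np
from sklearn.metrics import mean_squared_error

def fitness_from_reconstruction(res, original):
    # Same rule as eval_cnn_ae above: fixed penalty for diverged (NaN)
    # outputs, otherwise MSE between the flattened arrays
    if np.isnan(res).any():
        return 288,
    return mean_squared_error(np.reshape(res, (-1)), np.reshape(original, (-1))),

x = np.random.rand(4, 28, 28, 3)
print(fitness_from_reconstruction(x + 0.1, x))                  # (0.01,)
print(fitness_from_reconstruction(np.full_like(x, np.nan), x))  # (288,)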
Example #2
    generations = args.integers[4]
    epochs = args.integers[5]
    z_size = args.integers[6]
    x_train = create_data(n_gauss, n_samples)
    # Min-max normalize each feature to [0, 1]
    x_train = x_train - np.min(x_train, axis=0)
    x_train = x_train / np.max(x_train, axis=0)

    x_test = create_data(n_gauss, n_samples)
    x_test = x_test - np.min(x_test, axis=0)
    x_test = x_test / np.max(x_test, axis=0)
    # The GAN evolutionary process is an ordinary two-network (discriminator + generator) evolution
    e = Evolving(loss=gan_train,
                 desc_list=[MLPDescriptor, MLPDescriptor],
                 x_trains=[x_train],
                 y_trains=[x_train],
                 x_tests=[x_test],
                 y_tests=[x_test],
                 evaluation=gan_eval,
                 batch_size=50,
                 population=population,
                 generations=generations,
                 n_inputs=[[2], [z_size]],
                 n_outputs=[[1], [2]],
                 cxp=0.5,
                 mtp=0.5,
                 no_dropout=True,
                 no_batch_norm=True)
    res = e.evolve()

    print(res[0])
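create_data is referenced but not defined in this snippet. A plausible stand-in that draws 2-D points from a Gaussian mixture, matching the declared input shape n_inputs=[[2], [z_size]] (the implementation below is an assumption; the real EvoFlow helper may differ):

import numpy as np

def create_data(n_gauss, n_samples):
    # Hypothetical stand-in: n_samples points from a mixture of n_gauss
    # isotropic 2-D Gaussians with random centers in [-1, 1]^2
    centers = np.random.uniform(-1, 1, size=(n_gauss, 2))
    which = np.random.randint(0, n_gauss, size=n_samples)
    return centers[which] + np.random.normal(scale=0.05, size=(n_samples, 2))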
Example #3
from metrics import ret_evals

if __name__ == "__main__":

    x_train, y_train, x_test, y_test = load_fashion()

    OHEnc = OneHotEncoder(categories='auto')

    y_train = OHEnc.fit_transform(np.reshape(y_train, (-1, 1))).toarray()

    y_test = OHEnc.transform(np.reshape(y_test, (-1, 1))).toarray()  # reuse the encoder fitted on the training labels

    e = Evolving(loss="XEntropy",
                 desc_list=[MLPDescriptor],
                 x_trains=[x_train],
                 y_trains=[y_train],
                 x_tests=[x_test],
                 y_tests=[y_test],
                 evaluation="Accuracy_error",
                 n_inputs=[[28, 28]],
                 n_outputs=[[10]],
                 batch_size=150,
                 population=500,
                 generations=100,
                 iters=2000,
                 n_layers=10,
                 max_layer_size=100)
    a = e.evolve()
    np.save("simple_res.npy", np.array(ret_evals()))
    print(a)
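The evaluation history saved to simple_res.npy can be reloaded for later inspection, e.g.:

import numpy as np

evals = np.load("simple_res.npy")
print(evals.shape)  # layout depends on what ret_evals() records
print(evals.min())  # lowest recorded value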
Example #4
    :param outputs: Data outputs for the metric
    :param _: hyperparameters; the learning rate and optimizer selection are evolved, so they are unused at evaluation time
    :return: fitness of the model (as a tuple)
    """
    with graph.as_default():
        res = sess.run(tf.nn.softmax(preds["n1"]), feed_dict={placeholders["i0"]: inputs["i0"]})
        sess.close()

        return accuracy_error(res, outputs["o0"]),


if __name__ == "__main__":

    x_train, y_train, x_test, y_test = load_fashion()

    OHEnc = OneHotEncoder(categories='auto')

    y_train = OHEnc.fit_transform(np.reshape(y_train, (-1, 1))).toarray()

    y_test = OHEnc.transform(np.reshape(y_test, (-1, 1))).toarray()  # reuse the encoder fitted on the training labels
    # When calling the function, we indicate the training function, what we want to evolve (two MLPs), input and output data for training and
    # testing, fitness function, batch size, population size, number of generations, input and output dimensions of the networks, crossover and
    # mutation probability, the hyperparameters being evolved (name and possibilities), and whether batch normalization and dropout should be
    # present in evolution
    e = Evolving(loss=train_sequential, desc_list=[MLPDescriptor, MLPDescriptor], x_trains=[x_train], y_trains=[y_train], x_tests=[x_test],
                 y_tests=[y_test], evaluation=eval_sequential, batch_size=150, population=10, generations=10, n_inputs=[[28, 28], [10]],
                 n_outputs=[[10], [10]], cxp=0.5, mtp=0.5, hyperparameters={"lrate": [0.1, 0.5, 1], "optimizer": [0, 1, 2]},
                 no_batch_norm=True, no_dropout=True)
    a = e.evolve()
    print(a[-1])
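The evolved "optimizer" gene takes the integer values [0, 1, 2], which presumably index a fixed table of TF1 optimizers. A hypothetical mapping, for illustration only (EvoFlow's actual table and ordering are not shown in this snippet):

import tensorflow as tf

# Hypothetical optimizer table for the evolved gene; the real mapping
# may use different optimizers or a different order
OPTIMIZERS = [tf.train.AdamOptimizer,
              tf.train.GradientDescentOptimizer,
              tf.train.RMSPropOptimizer]

def make_optimizer(hypers):
    # hypers is the evolved hyperparameter dict, e.g. {"lrate": 0.1, "optimizer": 2}
    return OPTIMIZERS[hypers["optimizer"]](learning_rate=hypers["lrate"])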
Example #5
    y_train = OHEnc.fit_transform(np.reshape(y_train, (-1, 1))).toarray()

    y_test = OHEnc.transform(np.reshape(y_test, (-1, 1))).toarray()  # reuse the encoder fitted on the training labels

    e = Evolving(loss=train_wann,
                 desc_list=[MLPDescriptor],
                 x_trains=[x_train],
                 y_trains=[y_train],
                 x_tests=[x_test],
                 y_tests=[y_test],
                 evaluation=eval_wann,
                 batch_size=150,
                 population=500,
                 generations=10000,
                 n_inputs=[[28, 28]],
                 n_outputs=[[2]],
                 cxp=0,
                 mtp=1,
                 no_batch_norm=True,
                 no_dropout=True,
                 hyperparameters={
                     "weight1": np.arange(-2, 2, 0.5),
                     "weight2": np.arange(-2, 2, 0.5),
                     "start": ["0", "1"],
                     "p1": ["01", "10"],
                     "p2": ["001", "010", "011", "101", "110", "100"]
                 })  # The weights, which are also evolved
    a = e.evolve()
    np.save("simple_res_rand.npy", np.array(ret_evals()))
    print(a[-1])
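For reference, this hyperparameter space is small enough to enumerate exactly: each weight gene ranges over np.arange(-2, 2, 0.5) (8 values), "start" has 2 options, "p1" has 2 and "p2" has 6:

import numpy as np

weights = np.arange(-2, 2, 0.5)           # 8 candidate values per weight gene
combos = len(weights) ** 2 * 2 * 2 * 6    # weight1 * weight2 * start * p1 * p2
print(combos)                             # 1536 combinations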
Example #6
    fashion_y_train = OHEnc.fit_transform(np.reshape(fashion_y_train,
                                                     (-1, 1))).toarray()

    fashion_y_test = OHEnc.transform(np.reshape(fashion_y_test,
                                                (-1, 1))).toarray()  # encoder fitted on the fashion training labels

    mnist_y_train = OHEnc.fit_transform(np.reshape(mnist_y_train,
                                                   (-1, 1))).toarray()

    mnist_y_test = OHEnc.transform(np.reshape(mnist_y_test,
                                              (-1, 1))).toarray()  # encoder refitted on the MNIST training labels

    # In this case, we provide two data inputs and outputs
    e = Evolving(loss=train,
                 desc_list=[MLPDescriptor, MLPDescriptor],
                 x_trains=[fashion_x_train, mnist_x_train],
                 y_trains=[fashion_y_train, mnist_y_train],
                 x_tests=[fashion_x_test, mnist_x_test],
                 y_tests=[fashion_y_test, mnist_y_test],
                 evaluation=eval,
                 batch_size=150,
                 population=10,
                 generations=10,
                 n_inputs=[[28, 28], [28, 28]],
                 n_outputs=[[10], [10]],
                 sel=2)
    res = e.evolve()

    print(res[0])
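The train and eval functions are not shown in this snippet. Modeled on the single-network eval functions elsewhere in this listing, a two-dataset eval would plausibly return one accuracy error per network; a sketch under that assumption (the key names n1/i1/o1 extend EvoFlow's apparent n<k>/i<k>/o<k> convention):

import tensorflow as tf

def eval(preds, placeholders, sess, graph, inputs, outputs, _):
    # Hypothetical two-headed fitness: one accuracy error per dataset
    with graph.as_default():
        res = sess.run([tf.nn.softmax(preds["n0"]), tf.nn.softmax(preds["n1"])],
                       feed_dict={placeholders["i0"]: inputs["i0"],
                                  placeholders["i1"]: inputs["i1"]})
        sess.close()
    return accuracy_error(res[0], outputs["o0"]), accuracy_error(res[1], outputs["o1"])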
Example #7
File: wann.py Project: r3v1/EvoFlow
    :param inputs: Data inputs for the model
    :param outputs: Data outputs for the metric
    :param _: hyperparameters; the learning rate and optimizer selection are evolved, so they are unused at evaluation time
    :return: fitness of the model (as a tuple)
    """
    with graph.as_default():

        # Weights are initialized at random: in a weight-agnostic network,
        # performance comes from the architecture rather than trained weights
        sess.run(tf.global_variables_initializer())
        res = sess.run(tf.nn.softmax(preds["n0"]), feed_dict={placeholders["i0"]: inputs["i0"]})
        sess.close()

        return accuracy_error(res, outputs["o0"]),


if __name__ == "__main__":

    x_train, y_train, x_test, y_test = load_fashion()

    OHEnc = OneHotEncoder(categories='auto')

    y_train = OHEnc.fit_transform(np.reshape(y_train, (-1, 1))).toarray()

    y_test = OHEnc.transform(np.reshape(y_test, (-1, 1))).toarray()  # reuse the encoder fitted on the training labels

    e = Evolving(loss=train_wann, desc_list=[MLPDescriptor], x_trains=[x_train], y_trains=[y_train], x_tests=[x_test], y_tests=[y_test],
                 evaluation=eval_wann, batch_size=150, population=500, generations=10000, n_inputs=[[28, 28]], n_outputs=[[10]], cxp=0, mtp=1,
                 no_batch_norm=True, no_dropout=True)
    a = e.evolve()

    print(a[-1])
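accuracy_error comes from EvoFlow's metrics module and its body is not shown here; a plausible definition (an assumption, not the project's source) is the misclassification rate on one-hot targets:

import numpy as np

def accuracy_error(probs, onehot_labels):
    # Hypothetical: fraction of misclassified samples; lower is better,
    # which matches its use as a fitness to be minimized
    return np.mean(np.argmax(probs, axis=1) != np.argmax(onehot_labels, axis=1))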
Example #8
        # (fragment) tail of the fitness: the elementwise KL-divergence term
        # p * log(p / q), summed over classes; reconstructed in full after this example
        aux_preds[w] *
        np.log(aux_preds[w] / predictions[w]) if aux_preds[w] > 0 else 0
        for w in range(predictions.shape[0])
    ]),


if __name__ == "__main__":

    mobile_graph, model = load_model()  # The model and its graph are used as global variables

    x_train, _, x_test, _ = load_fashion()
    # The GAN evolutionary process is an ordinary two-network (discriminator + generator) evolution
    e = Evolving(loss=gan_train,
                 desc_list=[MLPDescriptor, MLPDescriptor],
                 x_trains=[x_train],
                 y_trains=[x_train],
                 x_tests=[x_test],
                 y_tests=[x_test],
                 evaluation=gan_eval,
                 batch_size=150,
                 population=10,
                 generations=10,
                 n_inputs=[[28, 28], [10]],
                 n_outputs=[[1], [784]],
                 cxp=0.5,
                 mtp=0.5)
    res = e.evolve()

    print(res[0])
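The fragment at the top of this example is the tail of a KL-divergence computation; completed here as a standalone function (the enclosing np.sum and the signature are reconstructions, not the project's exact source):

import numpy as np

def kl_fitness(aux_preds, predictions):
    # KL(aux_preds || predictions), skipping zero-probability terms, returned
    # as a one-element fitness tuple exactly as in the fragment above
    return np.sum([
        aux_preds[w] * np.log(aux_preds[w] / predictions[w]) if aux_preds[w] > 0 else 0
        for w in range(predictions.shape[0])
    ]),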