Example #1
               epochs=iters, batch_size=batch_size, verbose=0)
    
    #tf.keras.utils.plot_model(model, "multi_input_and_output_model.png", show_shapes=True)
        
    pred_0, pred_1 = model.predict([test_inputs['i0'], test_inputs['i1']])
        
    res_0 = tf.nn.softmax(pred_0)
    res_1 = tf.nn.softmax(pred_1)

    # Return the accuracy error for each of the two outputs (see the accuracy_error sketch after this example)
    return accuracy_error(res_0, test_outputs["o0"]), accuracy_error(res_1, test_outputs["o1"])


if __name__ == "__main__":

    fashion_x_train, fashion_y_train, fashion_x_test, fashion_y_test, fashion_x_val, fashion_y_val = load_fashion()
    mnist_x_train, mnist_y_train, mnist_x_test, mnist_y_test, mnist_x_val, mnist_y_val = load_mnist()

    OHEnc = OneHotEncoder()

    # Fit the encoder on the training labels only and reuse that encoding
    # for the test and validation splits.
    fashion_y_train = OHEnc.fit_transform(np.reshape(fashion_y_train, (-1, 1))).toarray()
    fashion_y_test = OHEnc.transform(np.reshape(fashion_y_test, (-1, 1))).toarray()
    fashion_y_val = OHEnc.transform(np.reshape(fashion_y_val, (-1, 1))).toarray()

    mnist_y_train = OHEnc.fit_transform(np.reshape(mnist_y_train, (-1, 1))).toarray()
    mnist_y_test = OHEnc.transform(np.reshape(mnist_y_test, (-1, 1))).toarray()
    mnist_y_val = OHEnc.transform(np.reshape(mnist_y_val, (-1, 1))).toarray()

    # In this case, we provide two input datasets and two output datasets (Fashion-MNIST and MNIST)
    e = Evolving(evaluation=evaluation, desc_list=[MLPDescriptor, MLPDescriptor],
                 x_trains=[fashion_x_train, mnist_x_train], y_trains=[fashion_y_train, mnist_y_train], 
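The evaluation function in this example scores each output with an accuracy_error helper (imported from deatf.auxiliary_functions in the full script). As a point of reference, here is a minimal sketch of such a metric, assuming it returns one minus the classification accuracy of the argmax predictions; accuracy_error_sketch and its exact behaviour are illustrative, not the library's actual implementation.

import numpy as np

def accuracy_error_sketch(probs, onehot_true):
    # Decode predicted and true class labels from probabilities / one-hot targets
    pred_labels = np.argmax(np.asarray(probs), axis=1)
    true_labels = np.argmax(np.asarray(onehot_true), axis=1)
    # Return an error to minimize: 1 - accuracy
    return 1.0 - float(np.mean(pred_labels == true_labels))
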
Example #2
The Fashion-MNIST dataset is used, which is why the input size is 28x28 and the output size is 10.
"""
import sys
sys.path.append('..')

import numpy as np

from deatf.auxiliary_functions import load_fashion
from deatf.network import MLPDescriptor
from deatf.evolution import Evolving

from sklearn.preprocessing import OneHotEncoder

if __name__ == "__main__":

    x_train, y_train, x_test, y_test, x_val, y_val = load_fashion()

    OHEnc = OneHotEncoder()

    # Fit on the training labels, then reuse the same encoding for test and validation.
    y_train = OHEnc.fit_transform(np.reshape(y_train, (-1, 1))).toarray()
    y_test = OHEnc.transform(np.reshape(y_test, (-1, 1))).toarray()
    y_val = OHEnc.transform(np.reshape(y_val, (-1, 1))).toarray()

    e = Evolving(evaluation="XEntropy",
                 desc_list=[MLPDescriptor],
                 compl=False,
                 x_trains=[x_train],
                 y_trains=[y_train],
                 x_tests=[x_val],
                 y_tests=[y_val],
                 n_inputs=[[28, 28]],
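Example #2 delegates evaluation to the built-in "XEntropy" loss instead of a custom function. For reference, here is a standalone sketch of what softmax cross-entropy computes on one-hot targets; this is generic TensorFlow shown only to illustrate the metric, not the library's internal code.

import tensorflow as tf

def xentropy_sketch(logits, onehot_true):
    # Softmax cross-entropy per sample, averaged over the batch
    per_sample = tf.nn.softmax_cross_entropy_with_logits(labels=onehot_true, logits=logits)
    return tf.reduce_mean(per_sample)
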
Example #3
    for epoch in range(iters):

        # Take the next mini-batch and advance the index, wrapping around the training set
        image_batch = batch(train_inputs["i0"], batch_size, aux_ind)
        aux_ind = (aux_ind + batch_size) % train_inputs["i0"].shape[0]
        train_step(image_batch)

    # Sample random noise and generate images with the evolved generator
    noise = np.random.normal(size=(150, 10))

    generated_images = g_model(noise, training=False)

    # Single-objective fitness, returned as a one-element tuple
    return generator_loss(generated_images).numpy(),


if __name__ == "__main__":

    x_train, _, x_test, _, x_val, _ = load_fashion()

    # The GAN is evolved as a standard two-DNN evolution: one MLP for the
    # discriminator (28x28 -> 1) and one for the generator (10 -> 784)

    e = Evolving(evaluation=eval_gan,
                 desc_list=[MLPDescriptor, MLPDescriptor],
                 x_trains=[x_train],
                 y_trains=[x_train],
                 x_tests=[x_val],
                 y_tests=[x_val],
                 n_inputs=[[28, 28], [10]],
                 n_outputs=[[1], [784]],
                 population=5,
                 generations=5,
                 batch_size=150,
                 iters=50,
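The training loop above draws mini-batches through a batch helper and advances aux_ind with modular arithmetic. A minimal sketch of such a helper, assuming it slices batch_size consecutive samples starting at the given index and wraps around the end of the array; the real batch function in deatf.auxiliary_functions may differ.

import numpy as np

def batch_sketch(data, batch_size, start):
    # Indices of batch_size consecutive samples, wrapping past the end of the dataset
    idx = np.arange(start, start + batch_size) % data.shape[0]
    return data[idx]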