Example #1
    y_val = OHEnc.fit_transform(np.reshape(y_val, (-1, 1))).toarray()

    e = Evolving(evaluation="XEntropy",
                 desc_list=[MLPDescriptor],
                 compl=False,
                 x_trains=[x_train],
                 y_trains=[y_train],
                 x_tests=[x_val],
                 y_tests=[y_val],
                 n_inputs=[[28, 28]],
                 n_outputs=[[10]],
                 population=5,
                 generations=5,
                 batch_size=200,
                 iters=50,
                 lrate=0.1,
                 cxp=0,
                 mtp=1,
                 seed=0,
                 max_num_layers=10,
                 max_num_neurons=100,
                 max_filter=4,
                 max_stride=3,
                 evol_alg='mu_plus_lambda',
                 sel='tournament',
                 sel_kwargs={'tournsize': 3},
                 evol_kwargs={},
                 batch_norm=False,
                 dropout=False)

    a = e.evolve()
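This fragment picks up mid-script; a plausible preamble, reusing the `load_mnist` loader seen in Example #6 (the loader and import paths are assumptions), would be:

import numpy as np
from sklearn.preprocessing import OneHotEncoder

# Assumed context: load the data, then one-hot encode the remaining labels
# the same way the fragment above encodes y_val.
x_train, y_train, x_test, y_test, x_val, y_val = load_mnist()

OHEnc = OneHotEncoder()
y_train = OHEnc.fit_transform(np.reshape(y_train, (-1, 1))).toarray()
y_test = OHEnc.fit_transform(np.reshape(y_test, (-1, 1))).toarray()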
Example #2
    e = Evolving(evaluation=eval_sequential,
                 desc_list=[MLPDescriptor, MLPDescriptor],
                 x_trains=[x_train],
                 y_trains=[y_train],
                 x_tests=[x_val],
                 y_tests=[y_val],
                 n_inputs=[[28, 28], [10]],
                 n_outputs=[[10], [10]],
                 population=5,
                 generations=5,
                 batch_size=150,
                 iters=50,
                 lrate=0.1,
                 cxp=0.5,
                 mtp=0.5,
                 seed=0,
                 max_num_layers=10,
                 max_num_neurons=100,
                 max_filter=4,
                 max_stride=3,
                 evol_alg='mu_plus_lambda',
                 sel='best',
                 sel_kwargs={},
                 hyperparameters={
                     "lrate": [0.1, 0.5, 1],
                     "optimizer": [0, 1, 2]
                 },
                 batch_norm=False,
                 dropout=False)
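
The snippet stops before launching the search; as in the other examples, it would continue with:

    a = e.evolve()  # (last generation, logbook, hall of fame)
    print(a[-1])    # the hall of fame contains the best individuals found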
Example #3
    ev = evaluate(probabilities)

    # Extract the numeric suffix that Keras appends to duplicate model
    # names ('model', 'model_1', ...); the base name 'model' maps to 0.
    index = [int(''.join(re.findall(r'\d', model.name))) if model.name != 'model' else 0, 0]

    interactive_loss(probabilities, wanted_blocks, bounds, index, False)
    #f.write('Final ev: {}'.format(ev))

    return ev,


if __name__ == "__main__":
    
    bounds = [9,9,9]
    wanted_blocks = [-1,164, 169, 173]
    
    train_noise = np.random.normal(size=(5000, np.array(bounds).prod()))
    test_noise = np.random.normal(size=(2000,  np.array(bounds).prod()))
    val_noise = np.random.normal(size=(2000,  np.array(bounds).prod()))
    
    e = Evolving(desc_list=[MLPDescriptor], x_trains=[train_noise], y_trains=[train_noise], 
                 x_tests=[val_noise], y_tests=[val_noise], evaluation=eval_model, 
                 batch_size=200, population=5, generations=5, iters=500, 
                 n_inputs=[train_noise.shape[1:]], n_outputs=[bounds+[len(wanted_blocks)]],
                 evol_alg='mu_comm_lambda', sel='best', 
                 cxp=0., mtp=1., hyperparameters={"lrate": [0.1, 0.5, 1], "optimizer": [0, 1, 2]}, 
                 batch_norm=True, dropout=True)
    
    a = e.evolve()

    print(a[-1])
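
Note that the noise serves as both input and target here: the real objective is computed inside `eval_model`, which ends with the DEAP-style `return ev,` shown above. A quick shape check of the noise (plain NumPy):

import numpy as np

bounds = [9, 9, 9]
noise = np.random.normal(size=(5000, np.array(bounds).prod()))
print(noise.shape)  # (5000, 729): one flattened 9x9x9 volume per sample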
Example #4
def test(dataset_name, descriptors=[], eval_func=None, batch_size=150, population=5, 
         generations=10, iters=100, seed=None, max_filter=4, max_stride=3,
         lrate=0.01, cxp=0, mtp=1, evol_alg='mu_plus_lambda', sel='best', sel_kwargs={},
         max_num_layers=10, max_num_neurons=20, hyperparameters={}, 
         is_time_series=False, series_input_width=30, series_label_width=1):
    """
    Responsible of load the desired dataset, prepare it to fit it into
    the models and it to them. Also it call the evolutionary process in
    order to evolve those generated models.
    
    :param dataset_name: String with the name of one of the available datasets.
    :param descriptors: List with the descriptors that will be used in the test.
    :param eval_func: Evaluation function that will be used to se the performance of the models.
    :param batch_size: Batch size that will be taken form the data during training.
    :param population: Number of individuals that will be evaluated in each generation 
                       of the evolution algorithm.
    :param generations: Number of generations that the evolution algorithm will be executed.
    :param iters: Number of iterations that each model will be trained.
    :param seed: Seed of the random processes.
    :param lrate: Learning rate.
    :param cxp: Crossover probability.
    :param mtp: Mutation probability.
    :param evol_alg: Evolutionary algorithm that will be used (strig or a function).
    :param sel: Selection method that will be used (strig or a function).
    :param max_num_layers: Maximum number of layer allowed in the initialization of the networks.
    :param max_num_neurons: Maximum number of neurons in each layer allowed in the 
                            initialization of the networks. 
    :param hyperparameters: Dictionary with the hyperparameters to be evolved.
    :param is_time_series: Boolean that indicates if the processed data is a time series or not.
    :param series_input_width: If is a series of data, the width that the input data will have. 
    :param series_label_width: If is a series of data, the width that the labels will have.
    :return: The last generation, a log book (stats) and the hall of fame (the best 
                 individuals found).
    """
    
    x_train, x_test, x_val, y_train, y_test, y_val, mode = load_dataset(dataset_name, 
            is_time_series=is_time_series, series_input_width=series_input_width, series_label_width=series_label_width)
    
    if not isinstance(y_train[0], float):
        OHEnc = OneHotEncoder()
    
        # Fit the encoder on the training labels only and reuse it for
        # the test and validation splits.
        y_train = OHEnc.fit_transform(np.reshape(y_train, (-1, 1))).toarray()
        y_test = OHEnc.transform(np.reshape(y_test, (-1, 1))).toarray()
        y_val = OHEnc.transform(np.reshape(y_val, (-1, 1))).toarray()
        
    else:
        y_train = np.reshape(y_train, (-1, 1))
        y_test = np.reshape(y_test, (-1, 1))
        y_val = np.reshape(y_val, (-1, 1))

    input_shape = x_train.shape[1:]
    output_shape = y_val.shape[1:]
    if len(output_shape) == 1:
        # Flat outputs are lifted to a near-square HxWx1 shape so that
        # convolutional descriptors can also be used.
        output_shape = [int(math.sqrt(output_shape[0])) + 1, int(math.sqrt(output_shape[0])) + 1, 1]
    
    if eval_func is None:
        eval_func = select_evaluation(mode)
    
    e = Evolving(evaluation=eval_func,
                 desc_list=descriptors,
                 x_trains=[x_train], y_trains=[y_train],
                 x_tests=[x_val], y_tests=[y_val],
                 n_inputs=[input_shape],
                 n_outputs=[output_shape],
                 batch_size=batch_size,
                 population=population,
                 generations=generations,
                 iters=iters,
                 seed=seed,
                 lrate=lrate,
                 cxp=cxp,
                 mtp=mtp,
                 evol_alg=evol_alg,
                 sel=sel,
                 sel_kwargs=sel_kwargs,
                 max_num_layers=max_num_layers,
                 max_num_neurons=max_num_neurons,
                 max_filter=max_filter,
                 max_stride=max_stride,
                 hyperparameters=hyperparameters)
     
    a = e.evolve()
    return a
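
A minimal invocation of `test` might look as follows; the dataset name is an assumption, and the return value unpacks as described in the docstring:

# Hypothetical call; MLPDescriptor is the descriptor used throughout these examples.
last_gen, logbook, hall_of_fame = test('mnist',
                                       descriptors=[MLPDescriptor],
                                       generations=5,
                                       iters=50,
                                       seed=0)
print(hall_of_fame[0])  # best individual found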
Example #5
def test_TCNN(dataset_name,
              eval_func=None,
              batch_size=150,
              population=5,
              generations=10,
              iters=100,
              max_num_layers=10,
              max_num_neurons=20,
              evol_alg='mu_plus_lambda',
              sel='best',
              lrate=0.01,
              cxp=0,
              mtp=1,
              seed=None,
              sel_kwargs={},
              max_filter=4,
              max_stride=3):
    """
    Tests the TCNN network with the specified dataset and parameter selection.

    :param dataset_name: Name of the dataset that will be used in the genetic algorithm.
    :param eval_func: Evaluation function for evaluating each network.
    :param batch_size: Batch size of the data during the training of the networks.
    :param population: Number of individuals in the populations in the genetic algorithm.
    :param generations: Number of generations that will be done in the genetic algorithm.
    :param iters: Number of iterations that each network will be trained.
    :param max_num_layers: Maximum number of layers allowed in the networks.
    :param max_num_neurons: Maximum number of neurons allowed in the networks.
    :param max_filter: Maximum size of the filter allowed in the networks.
    :param max_stride: Maximum size of the stride allowed in the networks.
    :param evol_alg: Evolutionary algorithm that will be used during the genetic algorithm.
    :param sel: Selection method that will be used during the genetic algorithm.
    :param sel_kwargs: Arguments for selection method.
    :param lrate: Learning rate that will be used during training.
    :param cxp: Crossover probability that will be used during the genetic algorithm.
    :param mtp: Mutation probability that will be used during the genetic algorithm.
    :param seed: Seed that will be used in every random method.
    :return: The last generation, a logbook (stats) and the hall of fame (the best
             individuals found).
    """

    x_train, x_test, x_val, _, _, _, mode = load_dataset(dataset_name)

    x_train = x_train[:5000] / 255
    x_test = x_test[:2500] / 255
    x_val = x_val[:2500] / 255

    train_noise = np.random.normal(size=(x_train.shape[0], 7, 7, 1))
    test_noise = np.random.normal(size=(x_test.shape[0], 7, 7, 1))
    val_noise = np.random.normal(size=(x_val.shape[0], 7, 7, 1))

    input_shape = train_noise.shape[1:]
    output_shape = x_train.shape[1:]

    if eval_func is None:
        eval_func = select_evaluation(mode)

    e = Evolving(evaluation=eval_func,
                 desc_list=[TCNNDescriptor],
                 x_trains=[train_noise],
                 y_trains=[x_train],
                 x_tests=[val_noise],
                 y_tests=[x_val],
                 n_inputs=[input_shape],
                 n_outputs=[output_shape],
                 batch_size=batch_size,
                 population=population,
                 generations=generations,
                 iters=iters,
                 seed=seed,
                 lrate=lrate,
                 cxp=cxp,
                 mtp=mtp,
                 evol_alg=evol_alg,
                 sel=sel,
                 sel_kwargs=sel_kwargs,
                 max_num_layers=max_num_layers,
                 max_num_neurons=max_num_neurons,
                 max_filter=max_filter,
                 max_stride=max_stride,
                 hyperparameters={
                     "lrate": [0.1, 0.5, 1],
                     "optimizer": [0, 1, 2]
                 })

    a = e.evolve()
    return a
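
A sketch of calling `test_TCNN`; the dataset name is an assumption:

# Hypothetical run: evolve TCNNs that map the 7x7 noise prepared above to images.
result = test_TCNN('fashion_mnist', generations=5, iters=50)
print(result[-1])  # per the docstring, the hall of fame comes last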
Example #6
if __name__ == "__main__":

    fashion_x_train, fashion_y_train, fashion_x_test, fashion_y_test, fashion_x_val, fashion_y_val = load_fashion()
    mnist_x_train, mnist_y_train, mnist_x_test, mnist_y_test, mnist_x_val, mnist_y_val = load_mnist()

    # One encoder per dataset: fit on the training labels, then reuse it
    # for the test and validation splits.
    fashion_OHEnc = OneHotEncoder()

    fashion_y_train = fashion_OHEnc.fit_transform(np.reshape(fashion_y_train, (-1, 1))).toarray()
    fashion_y_test = fashion_OHEnc.transform(np.reshape(fashion_y_test, (-1, 1))).toarray()
    fashion_y_val = fashion_OHEnc.transform(np.reshape(fashion_y_val, (-1, 1))).toarray()

    mnist_OHEnc = OneHotEncoder()
    mnist_y_train = mnist_OHEnc.fit_transform(np.reshape(mnist_y_train, (-1, 1))).toarray()
    mnist_y_test = mnist_OHEnc.transform(np.reshape(mnist_y_test, (-1, 1))).toarray()
    mnist_y_val = mnist_OHEnc.transform(np.reshape(mnist_y_val, (-1, 1))).toarray()

    # In this case, we provide two data inputs and outputs
    e = Evolving(evaluation=evaluation, desc_list=[MLPDescriptor, MLPDescriptor],
                 x_trains=[fashion_x_train, mnist_x_train], y_trains=[fashion_y_train, mnist_y_train], 
                 x_tests=[fashion_x_val, mnist_x_val], y_tests=[fashion_y_val, mnist_y_val], 
                 n_inputs=[[28, 28], [28, 28]], n_outputs=[[10], [10]],
                 population=5, generations=5, batch_size=150, iters=50, 
                 lrate=0.1, cxp=0, mtp=1, seed=0,
                 max_num_layers=10, max_num_neurons=100, max_filter=4, max_stride=3,
                 evol_alg='mu_plus_lambda', sel='best', sel_kwargs={}, 
                 hyperparameters={}, 
                 batch_norm=True, dropout=True)

    res = e.evolve()

    print(res[0])  # the last generation (final population)
Example #7
    # NOTE: dividing by 1 is a no-op; pixel data is usually normalized
    # with / 255 instead.
    x_test = x_test / 1
    x_val = x_val / 1

    e = Evolving(evaluation=ae_eval,
                 desc_list=[MLPDescriptor],
                 x_trains=[x_train],
                 y_trains=[x_train],
                 x_tests=[x_val],
                 y_tests=[x_val],
                 n_inputs=[[784]],
                 n_outputs=[[784]],
                 population=5,
                 generations=5,
                 batch_size=150,
                 iters=50,
                 lrate=0.1,
                 cxp=0,
                 mtp=1,
                 seed=0,
                 max_num_layers=10,
                 max_num_neurons=100,
                 max_filter=4,
                 max_stride=3,
                 evol_alg='mu_plus_lambda',
                 sel='best',
                 sel_kwargs={},
                 hyperparameters={"lrate": [0.1, 0.5, 1]},
                 batch_norm=True,
                 dropout=True)

    a = e.evolve()
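
The autoencoder feeds the data as both input and target, so it must already be flat 784-vectors; a plausible preparation step (assumed, not shown in this fragment):

    x_train = np.reshape(x_train, (-1, 784))  # assumed: flatten 28x28 images
    x_val = np.reshape(x_val, (-1, 784))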
Example #8
e = Evolving(evaluation=eval_wann,
             desc_list=[MLPDescriptor],
             x_trains=[x_train],
             y_trains=[y_train],
             x_tests=[x_val],
             y_tests=[y_val],
             n_inputs=[[28, 28]],
             n_outputs=[[2]],
             population=5,
             generations=5,
             batch_size=150,
             iters=50,
             lrate=0.1,
             cxp=0,
             mtp=1,
             seed=0,
             max_num_layers=10,
             max_num_neurons=100,
             max_filter=4,
             max_stride=3,
             evol_alg='mu_plus_lambda',
             sel='best',
             sel_kwargs={},
             hyperparameters={  # the network weights themselves are evolved here
                 "weight1": np.arange(-2, 2, 0.5),
                 "weight2": np.arange(-2, 2, 0.5),
                 "start": ["0", "1"],
                 "p1": ["01", "10"],
                 "p2": ["001", "010", "011", "101", "110", "100"]
             },
             batch_norm=False,
             dropout=False)
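
The two weight grids above are plain NumPy ranges, so each weight gene picks one of eight candidate values:

import numpy as np

print(np.arange(-2, 2, 0.5))  # [-2.  -1.5 -1.  -0.5  0.   0.5  1.   1.5]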
Example #9
def test_RNN(dataset_name,
             eval_func=None,
             batch_size=150,
             population=5,
             generations=10,
             iters=100,
             max_num_layers=10,
             max_num_neurons=20,
             evol_alg='mu_plus_lambda',
             sel='best',
             lrate=0.01,
             cxp=0,
             mtp=1,
             seed=None,
             sel_kwargs={},
             max_filter=4,
             max_stride=3,
             hyperparameters={},
             is_time_series=True,
             series_input_width=30,
             series_label_width=1):
    """
    Tests the RNN network with the specified dataset and parameter selection.

    :param dataset_name: Name of the dataset that will be used in the genetic algorithm.
    :param eval_func: Evaluation function for evaluating each network.
    :param batch_size: Batch size of the data during the training of the networks.
    :param population: Number of individuals in the populations in the genetic algorithm.
    :param generations: Number of generations that will be done in the genetic algorithm.
    :param iters: Number of iterations that each network will be trained.
    :param max_num_layers: Maximum number of layers allowed in the networks.
    :param max_num_neurons: Maximum number of neurons allowed in the networks.
    :param max_filter: Maximum size of the filter allowed in the networks.
    :param max_stride: Maximum size of the stride allowed in the networks.
    :param evol_alg: Evolutionary algorithm that will be used during the genetic algorithm.
    :param sel: Selection method that will be used during the genetic algorithm.
    :param sel_kwargs: Arguments for selection method.
    :param lrate: Learning rate that will be used during training.
    :param cxp: Crossover probability that will be used during the genetic algorithm.
    :param mtp: Mutation probability that will be used during the genetic algorithm.
    :param seed: Seed that will be used in every random method.
    :param hyperparameters: Hyperparameters that will be evolved during the genetic algorithm.
    :param is_time_series: Boolean that indicates if the data is a time series.
    :param series_input_width: Width of the input windows when the data is a time series.
    :param series_label_width: Width of the label windows when the data is a time series.
    :return: The last generation, a logbook (stats) and the hall of fame (the best
             individuals found).
    """

    x_train, x_test, x_val, y_train, y_test, y_val, mode = load_dataset(
        dataset_name,
        is_time_series=is_time_series,
        series_input_width=series_input_width,
        series_label_width=series_label_width)

    # RNN inputs need exactly two non-batch dimensions (time steps, features).
    if len(x_train.shape[1:]) == 3:
        x_train = x_train.reshape(x_train.shape[:-1])
        x_test = x_test.reshape(x_test.shape[:-1])
        x_val = x_val.reshape(x_val.shape[:-1])
    elif len(x_train.shape[1:]) == 1:
        x_train = x_train.reshape(list(x_train.shape) + [1])
        x_test = x_test.reshape(list(x_test.shape) + [1])
        x_val = x_val.reshape(list(x_val.shape) + [1])

    input_shape = list(x_train.shape[1:])

    if len(y_train.shape) == 1:
        # Integer labels: output width equals the number of classes.
        output_shape = [y_train.max() + 1]
    else:
        output_shape = y_train.shape[1:]

    if eval_func is None:
        eval_func = select_evaluation(mode)

    e = Evolving(evaluation=eval_func,
                 desc_list=[RNNDescriptor],
                 x_trains=[x_train],
                 y_trains=[y_train],
                 x_tests=[x_val],
                 y_tests=[y_val],
                 n_inputs=[input_shape],
                 n_outputs=[output_shape],
                 batch_size=batch_size,
                 population=population,
                 generations=generations,
                 iters=iters,
                 seed=seed,
                 lrate=lrate,
                 cxp=cxp,
                 mtp=mtp,
                 evol_alg=evol_alg,
                 sel=sel,
                 sel_kwargs=sel_kwargs,
                 max_num_layers=max_num_layers,
                 max_num_neurons=max_num_neurons,
                 max_filter=max_filter,
                 max_stride=max_stride,
                 hyperparameters=hyperparameters)

    a = e.evolve()
    return a
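
Given the signature above, a univariate time-series run could be launched as follows; the dataset name is an assumption:

pop, log, hof = test_RNN('temperature_series',
                         series_input_width=30,
                         series_label_width=1,
                         generations=5,
                         iters=50)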
Example #10
    layer_size = 4
    center_layer = int(layer_size/2)-1

    bounds = [layer_size, max_layers, layer_size]

    #wanted_blocks = [23,45,64,22,33]
    wanted_blocks = [-1, 23, 45, 64, 22, 200, 105, 75, 61]
    n_wanted_blocks = len(wanted_blocks)
    
    aux_data = np.zeros(bounds)

    # This auxiliary Evolving object is only used for creating the data and the initial population.
    e = Evolving(desc_list=[MLPDescriptor], x_trains=[aux_data], y_trains=[aux_data], 
                 x_tests=[aux_data], y_tests=[aux_data], evaluation=eval_model, 
                 batch_size=150, population=num_models, generations=10, iters=10, 
                 max_num_layers=max_layers, max_num_neurons=max_neurons,
                 n_inputs=[[np.prod(bounds)]], n_outputs=[[n_wanted_blocks*np.prod(bounds)]],
                 cxp=0., mtp=1., hyperparameters={"lrate": [0.1, 0.5, 1], "optimizer": [0, 1, 2]}, 
                 batch_norm=True, dropout=True)
    
    data, noisy_data, net_population = creating_data_and_population(e, wanted_blocks, bounds, center_layer, layer_size)

    x_train = noisy_data

    y_train = np.copy(data)
    # Relabel the raw block ids as consecutive class indices 0..n_wanted_blocks-1.
    for i, block in enumerate(wanted_blocks):
        y_train = np.where(y_train == block, i, y_train)
    
    num_samples = list(y_train.shape) + [-1]  # presumably kept for a later reshape
    y_train = y_train.flatten()
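
To make the relabelling loop concrete, a standalone toy check (values chosen for illustration; note the loop assumes no block id collides with an already-assigned index):

import numpy as np

wanted_blocks = [-1, 23, 45, 64, 22, 200, 105, 75, 61]
sample = np.array([-1, 23, 200, 61])
labels = np.copy(sample)
for i, block in enumerate(wanted_blocks):
    labels = np.where(labels == block, i, labels)
print(labels)  # [0 1 5 8]: block ids mapped to class indices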