def runNeuralNetwork(networkArchitecture, addBatchNormalization=False):
    writeLog("starting process for: " + networkArchitecture)

    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       rotation_range=40,
                                       width_shift_range=0.2,
                                       height_shift_range=0.2,
                                       fill_mode='nearest',
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)

    test_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode='binary')

    validation_generator = test_datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode='binary')

    # img rgb => 3 channels => depth 3
    if K.image_data_format() == 'channels_first':
        input_shape = (3, img_width, img_height)
    else:
        input_shape = (img_width, img_height, 3)

    # to free memory
    if K.backend() == 'tensorflow':
        K.clear_session()

    learningRate = getLearningRate(networkArchitecture)

    model = createModelForNeuralNetwork(networkArchitecture, input_shape,
                                        addBatchNormalization)
    model = getBestModel(model, learningRate, train_generator,
                         validation_generator)

    scores = model.evaluate_generator(validation_generator,
                                      steps=nb_validation_samples // batch_size)

    accuracy = scores[1]

    writeLog("Accuracy on test data is: " + str(accuracy))

    del train_generator
    del validation_generator
    del train_datagen
    del test_datagen
    del model
    gc.collect()

    return scores[1]
def runBests(bests, addBatchNormalization):
    for arch in bests:
        firstRun = runNeuralNetwork(arch, addBatchNormalization)
        # secondRun = runNeuralNetworkCifar(arch, addDropout, addBatchNormalization)
        # thirdRun = runNeuralNetworkCifar(arch, addDropout, addBatchNormalization)
        # result = (firstRun + secondRun + thirdRun)/3

        writeLog(arch + " => addBatchNormalization: " +
                 str(addBatchNormalization))
        writeLog(arch + " => firstRun: " + str(firstRun))
def createModelForNeuralNetwork(networkArchitecture, input_shape,
                                addBatchNormalization):

    layerQuantity = getLayerQuantity(networkArchitecture)
    convQuantity = getConvQuant(networkArchitecture)
    pool = hasPool(networkArchitecture)
    fcQuantity = getFCquantity(networkArchitecture)
    addDropout = hasDropout(networkArchitecture)

    if (not pool and layerQuantity == 3
            and convQuantity == 3):  # add pooling to the largest architecture
        writeLog("adding pooling to a very large architecture")
        pool = True

    model = Sequential()

    filterLength = 32
    for layer in range(layerQuantity):
        for conv in range(convQuantity):

            model.add(
                Conv2D(filterLength, (3, 3),
                       activation='relu',
                       padding='same',
                       input_shape=input_shape))
            if ((conv + layer) % 2) == 1:
                # double the filter count every two convolutional layers
                filterLength = filterLength * 2

            if addBatchNormalization:
                model.add(BatchNormalization())

        if pool:
            model.add(MaxPooling2D(pool_size=(2, 2)))
            if addDropout:
                model.add(Dropout(0.25))

    model.add(Flatten())
    for fc in range(fcQuantity):
        model.add(Dense(64))
        model.add(Activation('relu'))

    if addDropout:
        model.add(Dropout(0.5))

    if num_classes == 2:
        model.add(Dense(1))
        model.add(Activation('sigmoid'))
    else:
        model.add(Dense(num_classes, activation='softmax'))

    writeModelSummaryLog(model)
    return model
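# Illustration (not part of the original code): a worked trace of the
# filter-doubling rule above for 3 blocks of 3 convolutions, the "largest
# architecture" case. The filter count doubles whenever (conv + layer) is
# odd, i.e. every two convolutional layers.
def traceFilterSchedule(layerQuantity=3, convQuantity=3):
    filters = 32
    schedule = []
    for layer in range(layerQuantity):
        for conv in range(convQuantity):
            schedule.append(filters)
            if (conv + layer) % 2 == 1:
                filters *= 2
    return schedule  # [32, 32, 64, 64, 128, 128, 256, 256, 512]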
def log_history(history):
    acc = "acc = " + str(history.history['acc'])
    val_acc = "val_acc = " + str(history.history['val_acc'])
    loss = "loss = " + str(history.history['loss'])
    val_loss = "val_loss = " + str(history.history['val_loss'])
    writeLog(acc)
    writeLog(val_acc)
    writeLog(loss)
    writeLog(val_loss)
    return
Example 5
def print_final_stats():
    """
    Prints a final review of the overall evolutionary process.

    :return: Nothing.
    """

    if hasattr(params['FITNESS_FUNCTION'], "training_test"):
        print("\n\nBest:\n  Training fitness:\t",
              trackers.best_ever.training_fitness)
        print("  Test fitness:\t\t", trackers.best_ever.test_fitness)
    else:
        print("\n\nBest:\n  Fitness:\t", trackers.best_ever.fitness)

    print("  Phenotype:", trackers.best_ever.phenotype)
    writeLog(" Best Phenotype: " + trackers.best_ever.phenotype)
    print("  Genome:", trackers.best_ever.genome)
    writeLog(" Best Genome: " + str(trackers.best_ever.genome))
    print_generation_stats()
Example 6
def generate_new_genome_and_phenotype():
    writeLog('Creating new individual values')

    depths = range(params['BNF_GRAMMAR'].min_ramp + 1,
                   params['MAX_INIT_TREE_DEPTH'] + 1)
    size = params['POPULATION_SIZE']
    if size < len(depths):
        depths = depths[:int(size)]

    max_depth = depths[int(len(depths) / 2)]

    # Initialise an instance of the tree class
    ind_tree = Tree(str(params['BNF_GRAMMAR'].start_rule["symbol"]), None)

    # Generate a tree
    genome, output, nodes, depth = pi_grow(ind_tree, max_depth)
    # Get remaining individual information
    phenotype, invalid, used_cod = "".join(output), False, len(genome)

    return phenotype, nodes, genome, depth, used_cod, invalid
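# Illustration (assumed usage, not part of the original code): the tuple
# returned above maps onto an individual the same way the evaluate() method
# shown later in this listing applies it after a failed fitness evaluation.
def resetIndividual(ind):
    phenotype, nodes, genome, depth, used_cod, invalid = \
        generate_new_genome_and_phenotype()
    ind.phenotype, ind.nodes, ind.genome = phenotype, nodes, genome
    ind.depth, ind.used_codons, ind.invalid = depth, used_cod, invalid
    return ind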
Example 7
def search_loop():
    """
    This is a standard search process for an evolutionary algorithm. Loop over
    a given number of generations.
    
    :return: The final population after the evolutionary process has run for
    the specified number of generations.
    """
    if params['MULTICORE']:
        # initialise the worker pool once if multicore is enabled
        params['POOL'] = Pool(processes=params['CORES'],
                              initializer=pool_init,
                              initargs=(params, ))  # , maxtasksperchild=1)

    # Initialise population
    individuals = initialisation(params['POPULATION_SIZE'])

    # Evaluate initial population
    individuals = evaluate_fitness(individuals)

    # Generate statistics for run so far
    get_stats(individuals)
    writeLog("Inicialization...")
    for counter, i in enumerate(individuals):
        print(str(counter) + " - " + i.__str__())
        writeLog(str(counter) + " - " + i.__str__())
    # import pdb; pdb.set_trace()
    # Traditional GE
    for generation in range(1, (params['GENERATIONS'] + 1)):
        writeLog("Generation: " + str(generation))
        stats['gen'] = generation

        # New generation
        individuals = params['STEP'](individuals)
        for counter, i in enumerate(individuals):
            print(str(counter) + " - " + i.__str__())
            writeLog(str(counter) + " - " + i.__str__())
        # import pdb; pdb.set_trace()

    if params['MULTICORE']:
        # Close the workers pool (otherwise they'll live on forever).
        params['POOL'].close()

    return individuals
Example 8
def search_loop_from_state():
    """
    Run the evolutionary search process from a loaded state. Pick up where
    it left off previously.

    :return: The final population after the evolutionary process has run for
    the specified number of generations.
    """
    individuals = trackers.state_individuals
    writeLog("Inicializing from previous state...")
    for counter, i in enumerate(individuals):
        print(str(counter) + " - " + i.__str__())
        writeLog(str(counter) + " - " + i.__str__())

    if params['MULTICORE']:
        # initialise the worker pool once if multicore is enabled
        params['POOL'] = Pool(processes=params['CORES'],
                              initializer=pool_init,
                              initargs=(params, ))  # , maxtasksperchild=1)

    # Traditional GE
    for generation in range(stats['gen'] + 1, (params['GENERATIONS'] + 1)):
        writeLog("Generation: " + str(generation))
        stats['gen'] = generation

        # New generation
        individuals = params['STEP'](individuals)
        for counter, i in enumerate(individuals):
            print(str(counter) + " - " + i.__str__())
            writeLog(str(counter) + " - " + i.__str__())
        # import pdb; pdb.set_trace()

    if params['MULTICORE']:
        # Close the workers pool (otherwise they'll live on forever).
        params['POOL'].close()

    return individuals
def runBests(bests, addDropout, addBatchNormalization, useDataAugmentation):
    for arch in bests:
        firstRun = runNeuralNetworkCifar(arch, addDropout,
                                         addBatchNormalization,
                                         useDataAugmentation)
        secondRun = runNeuralNetworkCifar(arch, addDropout,
                                          addBatchNormalization,
                                          useDataAugmentation)
        thirdRun = runNeuralNetworkCifar(arch, addDropout,
                                         addBatchNormalization,
                                         useDataAugmentation)
        result = (firstRun + secondRun + thirdRun) / 3

        writeLog(arch + " => addDropout: " + str(addDropout) +
                 "; addBatchNormalization: " + str(addBatchNormalization) +
                 "; useDataAugmentation: " + str(useDataAugmentation))
        writeLog(arch + " => firstRun: " + str(firstRun) + "; secondRun: " +
                 str(secondRun) + "; thirdRun: " + str(thirdRun))
        writeLog(arch + " => media: " + result)
Example 10
    def evaluate(self):
        """
        Evaluates the phenotype using the fitness function set in the params
        dictionary. For regression/classification problems, allows for
        evaluation on either training or test distributions. Sets fitness
        value.

        :return: Nothing unless multicore evaluation is being used. In that
        case, returns self.
        """

        # Evaluate fitness using specified fitness function.
        # import pdb; pdb.set_trace()
        tries = 0
        while True:
            try:
                # import pdb; pdb.set_trace()
                if (tries < 5):
                    self.fitness = runNeuralNetwork(self.phenotype)
                else:
                    writeLog('5 attempts reached for ' + self.phenotype)
                    self.fitness = 0
                break

            except Exception as error:
                # import pdb; pdb.set_trace()
                writeLog('[individual.py] Caught this error: ' + repr(error))
                writeLog('tries ' + str(tries))
                tries += 1
                # generate new individual
                phenotype, nodes, genome, depth, used_cod, invalid = \
                    generate_new_genome_and_phenotype()
                self.phenotype, self.nodes, self.genome = phenotype, nodes, genome
                self.depth, self.used_codons, self.invalid = depth, used_cod, invalid

        # self.fitness = runNeuralNetwork(self.phenotype)#params['FITNESS_FUNCTION'](self)
        # import pdb; pdb.set_trace()

        if params['MULTICORE']:
            return self
def runNeuralNetwork(networkArchitecture, use_step_decay=False):
    writeLog("starting neuralNetwork_assuncao process for: " +
             networkArchitecture)

    # load data
    data_all = load_data_from_nii_files(data_paths)
    images = transform_list_to_array(data_all)
    #print(images.shape)
    #mask = load_mask_file(data_mask_path)

    # preprocessing
    #masked_imgs = apply_mask_to_data_files(data_all, mask)
    #images = apply_zscore(masked_imgs)

    img_quantity = images.shape[0]
    indexes_dis, indexes_con = get_shuffled_index_list(img_quantity)
    img_quantity_balanced = indexes_dis.shape[0]
    train_indexes, test_indexes = split_train_and_test_index_set(
        indexes_dis, indexes_con, img_quantity_balanced)
    X_train, X_test = split_data_into_training_and_test_sets(
        images, train_indexes, test_indexes)

    y_train_as_3D, y_test_as_3D = create_outcome_variables(
        labels, train_indexes, test_indexes)
    y_train, y_test = reshape_outcome_variables_to_categorical(
        y_train_as_3D, y_test_as_3D)

    data_shape = tuple(X_train.shape[1:])
    k.clear_session()
    model = createModelForNeuralNetwork(networkArchitecture, data_shape)

    # learning_rate = 1e-5
    # adam = Adam(lr=learning_rate)
    optimizer = getLearningOptFromNetwork(networkArchitecture)
    loss = 'binary_crossentropy'

    model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
    # import pdb; pdb.set_trace() # debug

    # train the model
    start = time.time()

    if (use_step_decay):
        # adjust the learning rate at specific epochs during training
        lrate = LearningRateScheduler(step_decay)
        callbacks_list = [lrate]

        model_info = model.fit(X_train,
                               y_train,
                               validation_data=(X_test, y_test),
                               epochs=epochs,
                               batch_size=batch_size,
                               callbacks=callbacks_list)
    else:
        model_info = model.fit(X_train,
                               y_train,
                               validation_data=(X_test, y_test),
                               epochs=epochs,
                               batch_size=batch_size)

    evaluation = model.evaluate(X_test, y_test)

    end = time.time()
    log_execution_time(start, end)
    log_history(model_info)

    loss_result = (evaluation[0])
    accuracy_result = (evaluation[1] * 100)

    writeLog('Loss in Test set:        %.02f' % loss_result)
    writeLog('Accuracy in Test set:    %.02f' % accuracy_result)

    #show_activation('conv2d_1', model, X_train)

    memory_clean(model)

    return accuracy_result
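# step_decay is passed to LearningRateScheduler above but is not defined in
# this listing; a common sketch (the initial rate, drop factor and drop
# interval below are assumptions, not the original values):
import math

def step_decay(epoch):
    initial_lrate = 0.01  # assumed starting learning rate
    drop = 0.5            # assumed multiplicative decay factor
    epochs_drop = 10.0    # assumed number of epochs between drops
    return initial_lrate * math.pow(drop, math.floor((1 + epoch) / epochs_drop))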
def log_execution_time(start_time, end_time):
    diff = int(end_time - start_time)
    minutes, seconds = diff // 60, diff % 60
    writeLog("Model took minutes to train " + str(minutes) + ':' +
             str(seconds).zfill(2))
    return
def runNeuralNetworkCifar(networkArchitecture,
                          addDropout=False,
                          addBatchNormalization=False,
                          useDataAugmentation=False):

    writeLog("starting process for: " + networkArchitecture)
    # load cifar data
    (train_features, train_labels), (test_features,
                                     test_labels) = cifar10.load_data()
    # split the held-out data into test (test_features_test) and validation (test_features_val) sets
    test_features_val, test_features_test, test_labels_val, test_labels_test = train_test_split(
        test_features, test_labels, test_size=0.2, random_state=42)

    num_classes = len(np.unique(train_labels))

    train_features = train_features.astype("float") / 255.0
    test_features_val = test_features_val.astype("float") / 255.0
    test_features_test = test_features_test.astype("float") / 255.0

    # convert the labels from integers to vectors
    lb = LabelBinarizer()
    train_labels = lb.fit_transform(train_labels)
    test_labels_val = lb.transform(test_labels_val)
    test_labels_test = lb.transform(test_labels_test)

    input_shape = (32, 32, 3)
    batch_size = 128
    epochs = 10

    # to free memory
    if K.backend() == 'tensorflow':
        K.clear_session()

    # Create the model according to the networkArchitecture
    model = createModelForNeuralNetwork(networkArchitecture, input_shape,
                                        num_classes, addDropout,
                                        addBatchNormalization)

    # Compile the model
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # train the model
    start = time.time()

    if (useDataAugmentation):
        # adding data augmentation
        datagen = ImageDataGenerator(zoom_range=0.2, horizontal_flip=True)

        model_info = model.fit_generator(
            datagen.flow(train_features, train_labels, batch_size=batch_size),
            steps_per_epoch=train_features.shape[0] // batch_size,
            epochs=epochs,
            validation_data=(test_features_val, test_labels_val),
            verbose=0)

    else:
        model_info = model.fit(train_features,
                               train_labels,
                               batch_size=batch_size,
                               epochs=epochs,
                               validation_data=(test_features_val,
                                                test_labels_val),
                               verbose=0)

    end = time.time()
    timeMsg = "Model took seconds to train " + str((end - start))
    print(timeMsg)
    writeLog(timeMsg)
    # compute test accuracy
    accuracyValue = accuracy(test_features_test, test_labels_test, model)
    accMsg = "Accuracy on test data is: " + str(accuracyValue)
    print(accMsg)
    writeLog(accMsg)
    return accuracyValue
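# The accuracy() helper called above is not part of this listing; a minimal
# sketch, assuming one-hot encoded labels as produced by LabelBinarizer:
import numpy as np

def accuracy(test_x, test_y, model):
    predictions = model.predict(test_x)
    predicted_classes = np.argmax(predictions, axis=1)
    true_classes = np.argmax(test_y, axis=1)
    return np.mean(predicted_classes == true_classes)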
Example 14
#                    David Fagan, Stefan Forstenlechner,
#                    and Erik Hemberg
# Hereby licensed under the GNU GPL v3.
""" Python GE implementation """

from utilities.algorithm.general import check_python_version

check_python_version()

from stats.stats import get_stats
from algorithm.parameters import params, set_params
import sys

from writeFileHelper import writeLog


def mane():
    """ Run program """

    # Run evolution
    individuals = params['SEARCH_LOOP']()

    # Print final review
    get_stats(individuals, end=True)


if __name__ == "__main__":
    writeLog('Starting Process....')
    set_params(sys.argv[1:])  # exclude the ponyge.py arg itself
    mane()
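# writeLog is imported from writeFileHelper, which is not included in this
# listing; a minimal sketch of what such a helper might do (the file name
# and timestamp format are assumptions):
import datetime

def writeLog(message):
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    with open('execution.log', 'a') as log_file:
        log_file.write(timestamp + ' ' + str(message) + '\n')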
Example 15
def runNeuralNetwork(networkArchitecture,
                     data_dir,
                     epochs=100,
                     batch_size=32,
                     img_width=120,
                     img_height=120):
    writeLog("starting neuralNetwork_assuncao_outrasBases process for: " +
             networkArchitecture)

    #####################################
    # basic configuration - don't change
    #####################################
    input_shape = getInputShape(img_width, img_height)
    train_data_dir = data_dir + '/train'
    validation_data_dir = data_dir + '/validation'
    num_classes = getNumberOfClasses(train_data_dir)
    nb_train_samples = getQuantityOfFilesInAFolder(
        train_data_dir)  # split evenly across the classes
    nb_validation_samples = getQuantityOfFilesInAFolder(
        validation_data_dir)  # split evenly across the classes
    # print("nb_train_samples: " + str(nb_train_samples) + "; nb_validation_samples: " + str(nb_validation_samples))
    # print("num_classes: "+str(num_classes))
    #####################################

    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       rotation_range=40,
                                       width_shift_range=0.2,
                                       height_shift_range=0.2,
                                       fill_mode='nearest',
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)

    test_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode='binary')
    # class_mode='sparse' would match the sparse_categorical_crossentropy loss used below

    validation_generator = test_datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode='binary')

    # to free memory
    if K.backend() == 'tensorflow':
        K.clear_session()

    model = createModelForNeuralNetwork(networkArchitecture,
                                        input_shape,
                                        numClasses=num_classes)

    optimizer = getLearningOptFromNetwork(networkArchitecture)

    model.compile(optimizer=optimizer,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    # adjust the learning rate at specific epochs during training
    lrate = LearningRateScheduler(step_decay)
    callbacks_list = [lrate]

    # model training
    start = time.time()

    history = model.fit_generator(
        train_generator,
        steps_per_epoch=nb_train_samples // batch_size,
        epochs=epochs,
        validation_data=validation_generator,
        validation_steps=nb_validation_samples // batch_size,
        callbacks=callbacks_list  #[earlyStopping, saveBestModel]
    )

    writeLog("[INFO] evaluating network...")
    scores = model.evaluate_generator(validation_generator,
                                      steps=nb_validation_samples // batch_size)

    end = time.time()
    logRunTime(start, end)
    logHistoryLog(history)

    # writeLog("[INFO] evaluating network...")
    # predictions = model.predict(test_features_val, batch_size=batch_size)

    accuracy = scores[1]
    writeLog("Accuracy on test data is: " + str(accuracy))

    memoryClean(train_generator, validation_generator, train_datagen,
                test_datagen, model)

    return accuracy
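# getInputShape, getNumberOfClasses and getQuantityOfFilesInAFolder are used
# above but not defined in this listing; minimal sketches, assuming one
# sub-directory per class (getInputShape mirrors the channels_first check
# from the first example):
import os

def getInputShape(img_width, img_height):
    if K.image_data_format() == 'channels_first':
        return (3, img_width, img_height)
    return (img_width, img_height, 3)

def getNumberOfClasses(folder):
    return len([d for d in os.listdir(folder)
                if os.path.isdir(os.path.join(folder, d))])

def getQuantityOfFilesInAFolder(folder):
    return sum(len(files) for _, _, files in os.walk(folder))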
Example 16
def runNeuralNetwork(networkArchitecture,
                     epochs=400,
                     batch_size=128,
                     useDataAugmentation=False):
    writeLog("starting neuralNetwork_assuncao process for: " +
             networkArchitecture)
    # load cifar data
    (train_features, train_labels), (test_features,
                                     test_labels) = cifar10.load_data()
    # split the held-out data into test (test_features_test) and validation (test_features_val) sets
    test_features_val, test_features_test, test_labels_val, test_labels_test = train_test_split(
        test_features, test_labels, test_size=0.2, random_state=42)

    train_features = train_features.astype("float") / 255.0
    test_features_val = test_features_val.astype("float") / 255.0
    test_features_test = test_features_test.astype("float") / 255.0

    # convert the labels from integers to vectors
    lb = LabelBinarizer()
    train_labels = lb.fit_transform(train_labels)
    test_labels_val = lb.transform(test_labels_val)
    test_labels_test = lb.transform(test_labels_test)

    # to free memory
    if K.backend() == 'tensorflow':
        K.clear_session()

    # Create the model according to the networkArchitecture
    input_shape = (32, 32, 3)  # CIFAR-10 images are 32x32 RGB
    model = createModelForNeuralNetwork(networkArchitecture, input_shape)

    optimizer = getLearningOptFromNetwork(networkArchitecture)

    # Compile the model
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # train the model
    start = time.time()

    # adjust the learning rate at specific epochs during training
    lrate = LearningRateScheduler(step_decay)
    callbacks_list = [lrate]

    if (useDataAugmentation):
        # adding data augmentation
        datagen = ImageDataGenerator(zoom_range=0.2, horizontal_flip=True)

        model_info = model.fit_generator(
            datagen.flow(train_features, train_labels, batch_size=batch_size),
            steps_per_epoch=train_features.shape[0] // batch_size,
            epochs=epochs,
            validation_data=(test_features_val, test_labels_val),
            callbacks=callbacks_list,
            verbose=0)

    else:
        model_info = model.fit(train_features,
                               train_labels,
                               batch_size=batch_size,
                               epochs=epochs,
                               validation_data=(test_features_val,
                                                test_labels_val),
                               callbacks=callbacks_list,
                               verbose=1)

    writeLog("[INFO] evaluating network...")
    # predictions = model.predict(test_features_val, batch_size=batch_size)

    end = time.time()

    logRunTime(start, end)
    logHistoryLog(model_info)

    # compute test accuracy
    accuracyValue = accuracy(test_features_test, test_labels_test, model)
    writeLog("Accuracy on test data is: " + str(accuracyValue))

    memoryClean(model)

    return accuracyValue
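# memoryClean is not defined in this listing; a sketch based on the explicit
# cleanup used in the first example (drop references, collect garbage and
# clear the Keras/TensorFlow session):
import gc

def memoryClean(*objects):
    del objects  # drop the local references to the passed-in objects
    gc.collect()
    if K.backend() == 'tensorflow':
        K.clear_session()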