# Example #1
def run_cifar_user_input_bidi():
    """Interactively configure a convex-version CIFAR experiment and run it.

    Prompts for the maximum number of convolution layers, the number of
    mutation actions and the mutation coefficient, fills in a ``Status``
    object with fixed hyper-parameters, and hands it to ``program.run``.
    Results are NOT persisted (``save2database`` is False).
    """
    status = program.Status()

    # Architecture limits for generated networks.
    status.max_layer_conv2d = int(
        input("Enter max convolution layers model: "))
    status.max_filter = 530
    status.max_filter_dense = 270
    status.max_kernel_dense = 9
    status.max_pool_layer = 4
    status.max_parents = 2

    # DNA validity limits, keyed by the condition functions themselves.
    condition_table = {
        DNA_conditions.max_filter: status.max_filter,
        DNA_conditions.max_filter_dense: status.max_filter_dense,
        DNA_conditions.max_kernel_dense: status.max_kernel_dense,
        DNA_conditions.max_layer: status.max_layer_conv2d,
        DNA_conditions.min_filter: 3,
        DNA_conditions.max_pool_layer: status.max_pool_layer,
        DNA_conditions.max_parents: status.max_parents,
        DNA_conditions.no_con_last_layer: 1,
    }

    def check_dna(DNA):
        # Validate a candidate DNA against the limits declared above.
        return DNA_conditions.dict2condition(DNA, condition_table)

    def dropout_rate(base_p, total_layers, index_layer, isPool=False):
        # 0.05 dropout on every conv layer except the input layer and pool
        # layers; the next-to-last layer always gets 0.05.
        hits_regular_layer = index_layer != 0 and isPool == False
        hits_next_to_last = index_layer == total_layers - 2
        return 0.05 if (hits_regular_layer or hits_next_to_last) else 0

    settings = ExperimentSettings.ExperimentSettings()
    settings.version = directions_version.CONVEX_VERSION
    settings.dropout_function = dropout_rate
    settings.eps_batchorm = 0.001
    settings.momentum = 0.9
    settings.enable_activation = True
    settings.enable_last_activation = True
    settings.enable_augmentation = True
    settings.enable_track_stats = True
    settings.dropout_value = 0.05
    settings.weight_decay = 0.0005
    settings.evalLoss = True

    # Batch size; iterations per epoch derived from CIFAR's 50000 train images.
    status.S = 64
    status.iterations_per_epoch = math.ceil(50000 / status.S)

    status.condition = check_dna
    status.dt_Max = 0.05
    status.dt_min = 0.0000001
    status.clear_period = 200000
    status.max_iter = 400000
    status.restart_period = 18 * status.iterations_per_epoch
    status.max_layer = 8

    from utilities.Abstract_classes.classes.uniform_random_selector_2 import (
        centered_random_selector as Selector)
    status.mutations = ((1, 0, 0, 0), (1, 0, 0, 0), (0, 1, 0, 0), (0, 1, 0, 0),
                        (4, 0, 0, 0), (0, 0, 1), (0, 0, -1), (0, 0, 1, 1),
                        (0, 0, -1, -1), (0, 0, 2))
    status.num_actions = int(input("num_actions : "))

    status.Selector_creator = Selector
    status.log_size = 200
    status.min_log_size = 100
    status.version = directions_version.CONVEX_VERSION
    status.cuda = True

    # Data augmentation: crop + horizontal flip + random erase, plus RICAP.
    aug = AugmentationSettings.AugmentationSettings()
    enabled_transforms = {
        aug.baseline_customRandomCrop: True,
        aug.randomHorizontalFlip: True,
        aug.randomErase_1: True
    }
    settings.transformations_compose = aug.generateTransformCompose(
        enabled_transforms, False)
    settings.ricap = Augmentation_Utils.Ricap(beta=0.3)

    settings.cuda = status.cuda
    status.mutation_coefficient = float(input("mutation_coefficient : "))
    status.save2database = False

    # Seed DNA for the search.
    status.Center = DNAs.DNA_base

    status.settings = settings
    program.run(status)
def run_cifar_user_input_bidi(test_name, mutations_actions,
                              const_direction_version):
    """Configure and run a CIFAR experiment with caller-supplied mutations.

    Parameters
    ----------
    test_name : str or None
        Experiment name used when saving; prompted interactively when None.
    mutations_actions : tuple
        Mutation action tuples assigned to ``status.mutations``.
    const_direction_version
        Directions version assigned to ``settings.version`` (and mirrored
        into ``status.version``).
    """
    import Geometric.Products.Product_f_cifar_save_2 as program
    #import Product_f_cifar_save_nm as program

    save = True  #bool(input('Insert any input to save'))
    status = program.Status()

    # Architecture limits for generated networks.
    status.max_layer_conv2d = int(input("Enter max conv2d layers: "))
    status.max_filter = 530
    status.max_filter_dense = 270
    status.max_kernel_dense = 9
    status.max_pool_layer = 4
    status.max_parents = 2

    # DNA validity limits, keyed by the condition functions themselves.
    list_conditions = {
        DNA_conditions.max_filter: status.max_filter,
        DNA_conditions.max_filter_dense: status.max_filter_dense,
        DNA_conditions.max_kernel_dense: status.max_kernel_dense,
        DNA_conditions.max_layer: status.max_layer_conv2d,
        DNA_conditions.min_filter: 3,
        DNA_conditions.max_pool_layer: status.max_pool_layer,
        DNA_conditions.max_parents: status.max_parents,
        DNA_conditions.no_con_last_layer: 1,
    }

    def condition(DNA):
        # Validate a candidate DNA against the limits declared above.
        return DNA_conditions.dict2condition(DNA, list_conditions)

    def dropout_function(base_p, total_layers, index_layer, isPool=False):
        # 0.05 dropout on every conv layer except the input layer and pool
        # layers; the next-to-last layer always gets 0.05.
        value = 0
        if index_layer != 0 and isPool == False:
            value = 0.05

        if index_layer == total_layers - 2:
            value = 0.05

        return value

    settings = ExperimentSettings.ExperimentSettings()

    settings.version = const_direction_version  #directions_version.POOL_VERSION
    settings.dropout_function = dropout_function
    settings.eps_batchorm = 0.001
    settings.momentum = 0.9

    custom = False  #bool(input('No input for defaults :'))
    if custom == True:
        # BUG FIX: this was `ENABLE_ACTIVATIO` (typo), which left
        # ENABLE_ACTIVATION unbound and raised NameError below; the prompt
        # was also a copy-paste of the last-layer question.
        ENABLE_ACTIVATION = int(
            input("Enable activation? (1 = yes, 0 = no): "))
        ENABLE_LAST_ACTIVATION = int(
            input("Enable last layer activation? (1 = yes, 0 = no): "))
        ENABLE_AUGMENTATION = int(
            input("Enable Data augmentation? (1 = yes, 0 = no): "))
        ENABLE_TRACK = int(
            input("Enable tracking var/mean batchnorm? (1 = yes, 0 = no): "))
        settings.dropout_value = float(input("dropout value: "))
        settings.weight_decay = float(input('weight_decay: '))
    else:
        ENABLE_ACTIVATION = 1
        ENABLE_LAST_ACTIVATION = 1
        ENABLE_AUGMENTATION = 1
        ENABLE_TRACK = 1
        settings.dropout_value = 0.05
        settings.weight_decay = 0.0005

    # Convert the 0/1 console flags to booleans (same semantics as the old
    # value-toggling blocks: anything non-zero enables the feature).
    settings.enable_activation = ENABLE_ACTIVATION != 0
    settings.enable_last_activation = ENABLE_LAST_ACTIVATION == 1
    settings.enable_augmentation = ENABLE_AUGMENTATION != 0
    settings.enable_track_stats = ENABLE_TRACK != 0

    # Batch size; iterations per epoch derived from CIFAR's 50000 train images.
    status.S = 64
    e = 50000 / status.S
    status.iterations_per_epoch = math.ceil(e)

    status.condition = condition
    status.dt_Max = 0.05
    status.dt_min = 0.0000001
    status.clear_period = 200000
    status.max_iter = 400000
    status.restart_period = 18 * status.iterations_per_epoch
    status.max_layer = 8

    from utilities.Abstract_classes.classes.uniform_random_selector_2 import (
        centered_random_selector as Selector)
    status.mutations = mutations_actions
    status.num_actions = int(input("num_actions : "))

    status.Selector_creator = Selector
    status.log_size = 200  #int(input("Log size : "))
    status.min_log_size = 100
    status.version = settings.version
    status.cuda = True  #bool(input("Any input for cuda : "))

    settings.evalLoss = True  #bool(input("Any input to activate EvalLoss : "))

    # NOTE: transform-compose augmentation is intentionally disabled in this
    # variant; only RICAP is applied.
    settings.ricap = Augmentation_Utils.Ricap(beta=0.3)

    settings.cuda = status.cuda

    status.mutation_coefficient = float(input("mutation_coefficient : "))
    if save:
        if test_name is not None:
            status.experiment_name = test_name
        else:
            status.experiment_name = input("insert experiment name : ")
        status.save_space_period = 200  #int(input("save_space_period : "))
        status.save_net_period = 600  #int(input("save_space_net_period : "))
    status.save2database = save

    # Seed DNA for the search.
    status.Center = DNAs.DNA_base
    status.settings = settings
    program.run(status)
import DAO.database.dao.TestModelDAO as TestModelDAO
import random
import os
import utilities.NetworkStorage as NetworkStorage
import utilities.MemoryManager as MemoryManager
from commands import CommandCreateDataGen
import utilities.ExperimentSettings as ExperimentSettings

# Module-level script: fetch stored test models from the database and
# regenerate the data needed to evaluate them.
testModelDAO = TestModelDAO.TestModelDAO()
memoryManager = MemoryManager.MemoryManager()

# Fixed settings used when reloading the stored networks; must match the
# settings the networks were trained with.
settings = ExperimentSettings.ExperimentSettings()
settings.version = "convex"
settings.batch_size = 64
settings.cuda = True
settings.eps_batchorm = 0.001
settings.dropout_value = 0.05
settings.weight_decay = 0.0005
settings.momentum = 0.9
settings.enable_activation = True
settings.enable_last_activation = True
settings.enable_track_stats = True

# Which test to inspect and the time cutoff for the query.
test_id = int(input("Enter id test: "))
max_alai_time = int(input("Enter max alai time: "))

test_models = testModelDAO.findByLimitAlai(idTest=test_id,
                                           limit_alai_time=max_alai_time)

dataCreator = CommandCreateDataGen.CommandCreateDataGen(cuda=True)
# NOTE(review): this call is truncated in this chunk — its remaining
# arguments continue outside the visible source.
dataCreator.execute(compression=2,
def TestMemoryManager():
    """Interactive smoke test of MemoryManager save/load/delete round-trips.

    Trains a base network briefly on CIFAR, then exercises the cycle
    save -> load -> mutate -> delete-old -> retrain -> save twice: first
    with an "increase filters" mutation, then with an "add pool layer"
    mutation. Accuracy is printed after each stage, and input() pauses
    between stages so memory usage can be inspected externally.
    """
    epochs = 0.2  # fraction of an epoch to train per training call
    batch_size = 64

    def dropout_function(base_p, total_layers, index_layer, isPool=False):
        # base_p dropout on every conv layer except the input layer and
        # pool layers; the next-to-last layer always gets base_p.
        # Prints each decision for manual verification.
        value = 0
        if index_layer != 0 and isPool == False:
            value = base_p

        if index_layer == total_layers - 2:
            value = base_p

        print("conv2d: ", index_layer, " - dropout: ", value, " - isPool: ",
              isPool)

        return value

    # Training hyper-parameters (convex directions version).
    settings = ExperimentSettings.ExperimentSettings()
    settings.momentum = 0.9
    settings.dropout_value = 0.05
    settings.weight_decay = 0.0005
    settings.enable_activation = True
    settings.enable_last_activation = True
    settings.enable_track_stats = True
    settings.version = directions_version.CONVEX_VERSION
    settings.eps_batchorm = 0.001
    settings.dropout_function = dropout_function
    settings.ricap = Augmentation.Ricap(beta=0.3)

    # CIFAR data generator with augmentation enabled.
    dataGen = GeneratorFromCIFAR.GeneratorFromCIFAR(2,
                                                    batch_size,
                                                    threads=0,
                                                    dataAugmentation=True)
    dataGen.dataConv2d()
    memoryManager = MemoryManager.MemoryManager()

    mutation_manager = MutationManager.MutationManager(
        directions_version=settings.version)

    dna = test_DNAs.DNA_base

    # Learning-rate (dt) schedule sized to the requested training length
    # (50000 train images / batch_size iterations per epoch).
    e = 50000 / batch_size
    e = math.ceil(e)
    print("e: ", e)
    print("total iterations: ", int(epochs * e))
    dt_array = Alaising(1.2, 99, int(epochs * e))

    input("press to continue: before load network")
    network = nw_dendrites.Network(
        dna,
        cuda_flag=True,
        momentum=settings.momentum,
        weight_decay=settings.weight_decay,
        enable_activation=settings.enable_activation,
        enable_track_stats=settings.enable_track_stats,
        dropout_value=settings.dropout_value,
        dropout_function=settings.dropout_function,
        enable_last_activation=settings.enable_last_activation,
        version=settings.version,
        eps_batchnorm=settings.eps_batchorm)

    input("press to continue: before training network")

    network.training_custom_dt(dataGenerator=dataGen,
                               dt_array=dt_array,
                               ricap=settings.ricap,
                               evalLoss=True)

    network.generate_accuracy(dataGen)
    print("net acc: ", network.get_accuracy())

    # Round-trip 1: save the trained network, reload it by DNA, and check
    # that accuracy survives the round trip.
    input("press to continue: before save network")
    memoryManager.saveTempNetwork(network)
    input("press to continue: after save network")

    input("press to continue: before load temp network")
    network_loaded = memoryManager.loadTempNetwork(dna, settings)
    input("press to continue: after load temp network")

    network_loaded.generate_accuracy(dataGen)
    print("loaded acc: ", network_loaded.get_accuracy())

    # Mutation 1: increase filters in layer 1, then delete the pre-mutation
    # network before evaluating/training the mutated one.
    input("press to continue: before mutate network (add filters layer 1)")
    dna_mutate = direction_dna.increase_filters(1, network_loaded.dna)
    network_mutate = mutation_manager.execute_mutation(network_loaded,
                                                       dna_mutate)
    input("press to continue: after mutate network")

    input("press to continue: before delete old network")
    memoryManager.deleteNetwork(network_loaded)
    input("press to continue: after delete old network")

    network_mutate.generate_accuracy(dataGen)
    print("mutated acc: ", network_mutate.get_accuracy())
    input("press to conitnue: before training mutate network")
    network_mutate.training_custom_dt(dataGenerator=dataGen,
                                      dt_array=dt_array,
                                      ricap=settings.ricap,
                                      evalLoss=True)
    input("press to conitnue: after training mutate network")
    network_mutate.generate_accuracy(dataGen)
    print("mutate net acc: ", network_mutate.get_accuracy())

    # Round-trip 2: save the mutated network and reload it by its new DNA.
    input("press to continue: before save network")
    memoryManager.saveTempNetwork(network_mutate)
    input("press to continue: after save network")

    input("press to continue: before load network")
    network_loaded = memoryManager.loadTempNetwork(dna_mutate, settings)
    input("press to continue: after load network")

    network_loaded.generate_accuracy(dataGen)
    print("loaded acc: ", network_loaded.get_accuracy())

    # Mutation 2: add a pool layer at position 1 and repeat the
    # delete/evaluate/train cycle.
    input("press to continue: before mutate network (add layer pool 1)")
    dna_mutate_2 = direction_dna.add_pool_layer(1, network_loaded.dna)
    network_mutate = mutation_manager.execute_mutation(network_loaded,
                                                       dna_mutate_2)
    input("press to continue: after mutate network")

    input("press to continue: before delete old network")
    memoryManager.deleteNetwork(network_loaded)
    input("press to continue: after delete old network")

    network_mutate.generate_accuracy(dataGen)
    print("mutated acc: ", network_mutate.get_accuracy())
    input("press to conitnue: before training mutate network")
    network_mutate.training_custom_dt(dataGenerator=dataGen,
                                      dt_array=dt_array,
                                      ricap=settings.ricap,
                                      evalLoss=True)
    input("press to conitnue: after training mutate network")
    network_mutate.generate_accuracy(dataGen)
    print("mutate net acc: ", network_mutate.get_accuracy())

    # Final save of the twice-mutated network.
    input("press to continue: before save network")
    memoryManager.saveTempNetwork(network_mutate)
    input("press to continue: after save network")