Example #1
def add_net(self, key):
    node = self.key2node(key)
    if not node:
        self.add_node(key)
        network = nw.Network(key, cuda_flag=False)
        self.link_node(key, network)
        self.charge_node(key)
        print('added net')
Example #2
def add_net(self, key):
    node = self.key2node(key)
    if not node:
        self.add_node(key)
        network = nw.Network(
            key,
            cuda_flag=self.cuda,
            momentum=0.9,
            weight_decay=0.0005,
        )
        self.link_node(key, network)
        self.charge_node(key)
        print('added net')
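Taken together, the calls in this listing use a consistent nw.Network constructor. The sketch below summarizes that signature as inferred from usage on this page; the defaults shown are assumptions, not the library's documented values.

# Signature inferred from the calls on this page (not authoritative).
network = nw.Network(
    dna,                          # DNA tuple (or graph key) describing the architecture
    cuda_flag=False,              # move the model to the GPU when True
    momentum=0.9,                 # SGD momentum
    weight_decay=0.0005,          # L2 regularization
    enable_activation=True,
    enable_track_stats=True,      # BatchNorm running-statistics tracking
    dropout_value=0.05,
    dropout_function=None,        # optional per-layer dropout schedule
    enable_last_activation=True,
    version=None,                 # a directions_version constant
    eps_batchnorm=0.001,          # BatchNorm epsilon
)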
Example #3
def createNetwork(dna, settings):
    # Build a network whose hyperparameters all come from the settings object.
    return nw.Network(dna=dna,
                      cuda_flag=settings.cuda,
                      momentum=settings.momentum,
                      weight_decay=settings.weight_decay,
                      enable_activation=settings.enable_activation,
                      enable_track_stats=settings.enable_track_stats,
                      dropout_value=settings.dropout_value,
                      dropout_function=settings.dropout_function,
                      enable_last_activation=settings.enable_last_activation,
                      version=settings.version,
                      eps_batchnorm=settings.eps_batchorm)
Example #4
def add_net(self, key):
    node = self.key2node(key)
    if not node:
        self.add_node(key)
        settings = self.settings
        network = nw.Network(
            key,
            cuda_flag=settings.cuda,
            momentum=settings.momentum,
            weight_decay=settings.weight_decay,
            enable_activation=settings.enable_activation,
            enable_track_stats=settings.enable_track_stats,
            dropout_value=settings.dropout_value,
            dropout_function=settings.dropout_function,
            enable_last_activation=settings.enable_last_activation,
            version=settings.version,
            eps_batchnorm=settings.eps_batchorm)
        self.link_node(key, network)
        self.charge_node(key)
        print('added net')
Example #5
def execute_mutation(old_network, new_dna):

    # Build the mutated network, inheriting every hyperparameter from the
    # old network.
    network = nw.Network(new_dna,
                         cuda_flag=old_network.cuda_flag,
                         momentum=old_network.momentum,
                         weight_decay=old_network.weight_decay,
                         enable_activation=old_network.enable_activation,
                         enable_track_stats=old_network.enable_track_stats,
                         dropout_value=old_network.dropout_value,
                         dropout_function=old_network.dropout_function,
                         enable_last_activation=old_network.enable_last_activation,
                         version=old_network.version,
                         eps_batchnorm=old_network.eps_batchnorm)

    network.loss_history = old_network.loss_history[-200:]

    length_new_dna = __generateLenghtDNA(new_dna)
    length_old_dna = __generateLenghtDNA(old_network.dna)

    old_network.set_grad_flag(False)
    network.set_grad_flag(False)


    if length_new_dna == length_old_dna:
        #print("default mutation process")
        __init_mutation(old_network=old_network, network=network, lenghtDna=length_new_dna)

    elif length_new_dna > length_old_dna: # add layer
        #print("add layer mutation")
        index_layer = __getTargetIndex(old_dna=old_network.dna, new_dna=new_dna, direction_function=direction_dna.add_layer)

        if index_layer is None:
            index_layer = __getTargetIndex(old_dna=old_network.dna, new_dna=new_dna, direction_function=direction_dna.add_pool_layer)

        __init_add_layer_mutation(old_network=old_network, network=network, lenghtold_dna=length_old_dna, added_layer_index=index_layer)

    elif length_old_dna > length_new_dna: # remove layer
        #print("remove layer mutation")
        index_layer = __getTargetIndex(old_dna=old_network.dna, new_dna=new_dna, direction_function=direction_dna.remove_layer)
        __init_remove_layer_mutation(old_network=old_network, network=network, lengthnew_dna=length_new_dna, removed_layer_index=index_layer)

    old_network.set_grad_flag(True)
    network.set_grad_flag(True)

    return network
Example #6
def file2net(file, DNA, cuda_flag=False):
    net = nw.Network(DNA, cuda_flag=cuda_flag)
    final_path = os.path.join("temporary_nets", file)
    net.load_model(final_path)
    return net
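A minimal usage sketch, assuming a checkpoint was previously written under temporary_nets/; the file name below is hypothetical, and PARENT_DNA refers to the tuple defined in Test_Mutacion further down this page.

# Hypothetical usage: reload a net saved earlier under temporary_nets/.
net = file2net("net_0001", PARENT_DNA, cuda_flag=True)
print("loaded DNA: ", net.dna)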
Example #7
def execute_mutation(old_network, new_dna):

    file_manager = FileManager.FileManager()
    file_manager.setFileName("dnas_mutation_error.txt")

    try:
        # Create the new neural network with the desired new structure.
        network = nw.Network(
            new_dna,
            cuda_flag=old_network.cuda_flag,
            momentum=old_network.momentum,
            weight_decay=old_network.weight_decay,
            enable_activation=old_network.enable_activation,
            enable_track_stats=old_network.enable_track_stats,
            dropout_value=old_network.dropout_value,
            dropout_function=old_network.dropout_function,
            enable_last_activation=old_network.enable_last_activation,
            version=old_network.version,
            eps_batchnorm=old_network.eps_batchnorm)

        network.loss_history = old_network.loss_history[-200:]

        # Compute the number of layers in the new neural network.
        length_new_dna = __generateLenghtDNA(new_dna)

        # Compute the number of layers in the old neural network.
        length_old_dna = __generateLenghtDNA(old_network.dna)

        # Disable gradient tracking on both neural networks.
        old_network.set_grad_flag(False)
        network.set_grad_flag(False)

        # Run the parameter modification-and-transfer process, depending on
        # whether both networks have the same number of layers or not.
        if length_new_dna == length_old_dna:
            __init_mutation(old_network=old_network,
                            network=network,
                            lenghtDna=length_new_dna)

        elif length_new_dna > length_old_dna:
            index_layer, mutation_type = __getTargetIndex(
                old_dna=old_network.dna, new_dna=new_dna)
            __init_add_layer_mutation(old_network=old_network,
                                      network=network,
                                      lenghtold_dna=length_old_dna,
                                      added_layer_index=index_layer,
                                      mutation_type=mutation_type)

        elif length_old_dna > length_new_dna:  # remove layer
            #print("remove layer mutation")
            index_layer = __getTargetRemoved(old_dna=old_network.dna,
                                             new_dna=new_dna)
            __init_remove_layer_mutation(old_network=old_network,
                                         network=network,
                                         lengthnew_dna=length_new_dna,
                                         removed_layer_index=index_layer)

    except Exception:
        file_manager.appendFile("## MUTATION ##")
        file_manager.appendFile("old DNA: " + str(old_network.dna))
        file_manager.appendFile("new DNA: " + str(new_dna))

        print("#### ERROR DNAs  ####")
        print("OLD")
        print(old_network.dna)
        print("NEW")
        print(new_dna)
        raise

    old_network.set_grad_flag(True)
    network.set_grad_flag(True)

    return network
Example #8
def run(status):
    loaded_network = bool(
        input("press Enter to train a new network, "
              "or type anything to run the loaded one: "))
    print("Loaded network: ", loaded_network)
    status.Transfer = tran.TransferRemote(status, 'remote2local.txt',
                                          'local2remote.txt')
    #status.Transfer.readLoad()
    print(f'status.settings is {status.settings}')

    create_objects(status, loaded_network)
    print('The value of typos after loading is')
    print(status.typos)
    print("objects created")
    status.print_DNA()
    status.Transfer.un_load()
    status.Transfer.write()
    k = 0

    test_id = 0
    testDao = test_dao.TestDAO()
    testResultDao = test_result_dao.TestResultDAO()
    testModelDao = test_model_dao.TestModelDAO()
    print("cuda=", status.cuda)

    print("max layers: ", status.max_layer_conv2d)

    settings = status.settings

    if not loaded_network:
        network = nw.Network(
            status.Center,
            cuda_flag=settings.cuda,
            momentum=settings.momentum,
            weight_decay=settings.weight_decay,
            enable_activation=settings.enable_activation,
            enable_track_stats=settings.enable_track_stats,
            dropout_value=settings.dropout_value,
            dropout_function=settings.dropout_function,
            enable_last_activation=settings.enable_last_activation,
            version=settings.version,
            eps_batchnorm=settings.eps_batchorm)

        print("starting pre-training")

        print("iterations per epoch = ", status.iterations_per_epoch)
        dt_array = status.Alai.get_increments(20 * status.iterations_per_epoch)

        if status.save2database:
            test_id = testDao.insert(testName=status.experiment_name,
                                     dt=status.dt_Max,
                                     dt_min=status.dt_min,
                                     batch_size=status.S,
                                     max_layers=status.max_layer_conv2d,
                                     max_filters=status.max_filter,
                                     max_filter_dense=status.max_filter_dense,
                                     max_kernel_dense=status.max_kernel_dense,
                                     max_pool_layer=status.max_pool_layer,
                                     max_parents=status.max_parents)

        network.training_custom_dt(dataGenerator=status.Data_gen,
                                   dt_array=dt_array,
                                   ricap=settings.ricap,
                                   evalLoss=settings.evalLoss)

    else:

        path = os.path.join("saved_models", "product_database",
                            "7_test_final_experiment_model_6044")
        network = NetworkStorage.load_network(fileName=None,
                                              settings=settings,
                                              path=path)
        network.generate_accuracy(status.Data_gen)
        acc = network.get_accuracy()
        print("Acc loaded network: ", acc)
        print("Alai time loaded: ", status.Alai.computeTime())
        L_1 = status.Alai.computeTime() // status.save_space_period
        L_2 = status.Alai.computeTime() // status.save_net_period
        print("L_1= ", L_1)
        print("L_2= ", L_2)
        time.sleep(2)

        if status.save2database:
            test_id = testDao.insert(testName=status.experiment_name,
                                     dt=status.dt_Max,
                                     dt_min=status.dt_min,
                                     batch_size=status.S,
                                     max_layers=status.max_layer_conv2d,
                                     max_filters=status.max_filter,
                                     max_filter_dense=status.max_filter_dense,
                                     max_kernel_dense=status.max_kernel_dense,
                                     max_pool_layer=status.max_pool_layer,
                                     max_parents=status.max_parents)

    status.stream.add_node(network.dna)
    status.stream.link_node(network.dna, network)

    if status.save2database and not loaded_network:
        dna_graph = status.Dynamics.phase_space.DNA_graph
        testResultDao.insert(idTest=test_id,
                             iteration=0,
                             dna_graph=dna_graph,
                             current_alai_time=status.Alai.computeTime(),
                             reset_count=status.Alai.reset_count)

        save_model(status, 0, testModelDao, test_id, TrainingType.PRE_TRAINING)

    #update(status)
    while False:  # intentionally disabled loop
        update(status)
        status.Transfer.un_load()
        status.Transfer.write()
        transfer = status.Transfer.status_transfer
        k = k + 1
        pass

    L_1 = 1
    L_2 = 1
    # Layer-count thresholds at which a checkpoint is saved exactly once.
    checkpoint_thresholds = [17, 18, 19, 24, 25, 26, 27,
                             30, 33, 36, 39, 42, 45, 48, 51]
    pending_checkpoints = {threshold: True for threshold in checkpoint_thresholds}

    if loaded_network:
        for threshold in (17, 18, 19):
            pending_checkpoints[threshold] = False
        L_1 = status.Alai.computeTime() // status.save_space_period
        L_2 = status.Alai.computeTime() // status.save_net_period

    while k < status.max_iter:
        #\begin{with gui}
        #status.Transfer.readLoad()
        #\end{with gui}
        #\begin{without gui}
        status.active = True
        #\end{without gui}
        if status.active:
            update(status)
            print(f'The iteration number is: {k}')
            #if k % 20 == 0:
            #    status.print_accuracy()
            #status.print_energy()
            status.print_predicted_actions()
            if status.Alai:
                status.Alai.update()
            #status.print_particles()
            #status.print_particles()
            #status.print_max_particles()
            #print(status.typos)
            #status.print_signal()
            #status.print_difussion_filed()
    #        print_nets(status)
    #        time.sleep(0.5)

            center_dna = status.Dynamics.phase_space.center()
            layers_count = 0
            if center_dna is not None:
                layers_count = countLayers(center_dna)

            print("current layers: ", layers_count)
            if status.save2database:

                if status.Alai.computeTime() >= L_1 * status.save_space_period:
                    print("saving space: ", L_1)
                    L_1 += 1
                    dna_graph = status.Dynamics.phase_space.DNA_graph
                    testResultDao.insert(
                        idTest=test_id,
                        iteration=k + 1,
                        dna_graph=dna_graph,
                        current_alai_time=status.Alai.computeTime(),
                        reset_count=status.Alai.reset_count)

                if status.Alai.computeTime() >= L_2 * status.save_net_period:
                    print("saving model: ", L_2)
                    L_2 += 1
                    save_model(status, k + 1, testModelDao, test_id,
                               TrainingType.MUTATION)

                # Save at most one layer-count checkpoint per iteration: the
                # lowest pending threshold that layers_count has reached.
                for threshold in checkpoint_thresholds:
                    if layers_count >= threshold and pending_checkpoints[threshold]:
                        pending_checkpoints[threshold] = False
                        save_checkpoint(status, testResultDao, layers_count,
                                        test_id, testModelDao, k)
                        break

                if layers_count >= 39:
                    print("STOPPED MAX LAYERS: ", layers_count)
                    break
        else:
            #print('inactive')
            pass
        k = k + 1
Example #9
def Test_Mutacion():
    memoryManager = MemoryManager.MemoryManager()

    augSettings = AugmentationSettings.AugmentationSettings()

    list_transform = {
        augSettings.randomHorizontalFlip: True,
        augSettings.translate: True,
    }

    # DNA encoding (inferred from these examples): (-1, ...) input shape,
    # (0, in, out, kh, kw[, pool]) convolution, (1, in, out) linear layer,
    # (2, ) output, and (3, a, b) a directed connection between layers.
    PARENT_DNA = ((-1, 1, 3, 32, 32), (0, 3, 64, 3, 3), (0, 64, 128, 3, 3, 2),
                  (0, 192, 256, 3, 3, 2), (0, 256, 256, 13, 13), (1, 256, 10),
                  (2, ), (3, -1, 0), (3, 0, 1), (3, 1, 2), (3, 0, 2),
                  (3, 2, 3), (3, 3, 4), (3, 4, 5))

    MUTATE_DNA = direction_dna.spread_convex_dendrites(1, PARENT_DNA)
    print("MUTATED DNA: ", MUTATE_DNA)
    transform_compose = augSettings.generateTransformCompose(
        list_transform, False)
    dataGen = GeneratorFromCIFAR.GeneratorFromCIFAR(
        2,
        64,
        threads=0,
        dataAugmentation=True,
        transforms_mode=transform_compose)
    dataGen.dataConv2d()

    version = directions_version.POOL_VERSION

    mutation_manager = MutationManager.MutationManager(
        directions_version=version)

    parent_network = nw_dendrites.Network(dna=PARENT_DNA,
                                          cuda_flag=True,
                                          momentum=0.9,
                                          weight_decay=0,
                                          enable_activation=True,
                                          enable_track_stats=True,
                                          dropout_value=0.2,
                                          dropout_function=None,
                                          version=version)

    parent_network.training_cosine_dt(dataGenerator=dataGen,
                                      max_dt=0.001,
                                      min_dt=0.001,
                                      epochs=1,
                                      restart_dt=1)

    parent_network.generate_accuracy(dataGen)
    print("original acc: ", parent_network.get_accuracy())
    mutate_network = mutation_manager.execute_mutation(parent_network,
                                                       MUTATE_DNA)
    mutate_network.generate_accuracy(dataGen)
    print("mutated acc: ", mutate_network.get_accuracy())
    mutate_network.training_cosine_dt(dataGenerator=dataGen,
                                      max_dt=0.001,
                                      min_dt=0.001,
                                      epochs=1,
                                      restart_dt=1)

    mutate_network.generate_accuracy(dataGen)
    print("mutated acc after training: ", mutate_network.get_accuracy())
Example #10
def TestMemoryManager():

    epochs = 0.2
    batch_size = 64

    def dropout_function(base_p, total_layers, index_layer, isPool=False):
        # No dropout on the first convolution or on pooling layers; every
        # other layer (and always the second-to-last one) gets base_p.
        value = 0
        if index_layer != 0 and not isPool:
            value = base_p

        if index_layer == total_layers - 2:
            value = base_p

        print("conv2d: ", index_layer, " - dropout: ", value, " - isPool: ",
              isPool)

        return value

    settings = ExperimentSettings.ExperimentSettings()
    settings.momentum = 0.9
    settings.dropout_value = 0.05
    settings.weight_decay = 0.0005
    settings.enable_activation = True
    settings.enable_last_activation = True
    settings.enable_track_stats = True
    settings.version = directions_version.CONVEX_VERSION
    settings.eps_batchorm = 0.001
    settings.dropout_function = dropout_function
    settings.ricap = Augmentation.Ricap(beta=0.3)

    dataGen = GeneratorFromCIFAR.GeneratorFromCIFAR(2,
                                                    batch_size,
                                                    threads=0,
                                                    dataAugmentation=True)
    dataGen.dataConv2d()
    memoryManager = MemoryManager.MemoryManager()

    mutation_manager = MutationManager.MutationManager(
        directions_version=settings.version)

    dna = test_DNAs.DNA_base

    # Iterations per epoch: CIFAR has 50,000 training images.
    e = math.ceil(50000 / batch_size)
    print("e: ", e)
    print("total iterations: ", int(epochs * e))
    dt_array = Alaising(1.2, 99, int(epochs * e))

    input("press to continue: before load network")
    network = nw_dendrites.Network(
        dna,
        cuda_flag=True,
        momentum=settings.momentum,
        weight_decay=settings.weight_decay,
        enable_activation=settings.enable_activation,
        enable_track_stats=settings.enable_track_stats,
        dropout_value=settings.dropout_value,
        dropout_function=settings.dropout_function,
        enable_last_activation=settings.enable_last_activation,
        version=settings.version,
        eps_batchnorm=settings.eps_batchorm)

    input("press to continue: before training network")

    network.training_custom_dt(dataGenerator=dataGen,
                               dt_array=dt_array,
                               ricap=settings.ricap,
                               evalLoss=True)

    network.generate_accuracy(dataGen)
    print("net acc: ", network.get_accuracy())

    input("press to continue: before save network")
    memoryManager.saveTempNetwork(network)
    input("press to continue: after save network")

    input("press to continue: before load temp network")
    network_loaded = memoryManager.loadTempNetwork(dna, settings)
    input("press to continue: after load temp network")

    network_loaded.generate_accuracy(dataGen)
    print("loaded acc: ", network_loaded.get_accuracy())

    input("press to continue: before mutate network (add filters layer 1)")
    dna_mutate = direction_dna.increase_filters(1, network_loaded.dna)
    network_mutate = mutation_manager.execute_mutation(network_loaded,
                                                       dna_mutate)
    input("press to continue: after mutate network")

    input("press to continue: before delete old network")
    memoryManager.deleteNetwork(network_loaded)
    input("press to continue: after delete old network")

    network_mutate.generate_accuracy(dataGen)
    print("mutated acc: ", network_mutate.get_accuracy())
    input("press to conitnue: before training mutate network")
    network_mutate.training_custom_dt(dataGenerator=dataGen,
                                      dt_array=dt_array,
                                      ricap=settings.ricap,
                                      evalLoss=True)
    input("press to conitnue: after training mutate network")
    network_mutate.generate_accuracy(dataGen)
    print("mutate net acc: ", network_mutate.get_accuracy())

    input("press to continue: before save network")
    memoryManager.saveTempNetwork(network_mutate)
    input("press to continue: after save network")

    input("press to continue: before load network")
    network_loaded = memoryManager.loadTempNetwork(dna_mutate, settings)
    input("press to continue: after load network")

    network_loaded.generate_accuracy(dataGen)
    print("loaded acc: ", network_loaded.get_accuracy())

    input("press to continue: before mutate network (add layer pool 1)")
    dna_mutate_2 = direction_dna.add_pool_layer(1, network_loaded.dna)
    network_mutate = mutation_manager.execute_mutation(network_loaded,
                                                       dna_mutate_2)
    input("press to continue: after mutate network")

    input("press to continue: before delete old network")
    memoryManager.deleteNetwork(network_loaded)
    input("press to continue: after delete old network")

    network_mutate.generate_accuracy(dataGen)
    print("mutated acc: ", network_mutate.get_accuracy())
    input("press to conitnue: before training mutate network")
    network_mutate.training_custom_dt(dataGenerator=dataGen,
                                      dt_array=dt_array,
                                      ricap=settings.ricap,
                                      evalLoss=True)
    input("press to conitnue: after training mutate network")
    network_mutate.generate_accuracy(dataGen)
    print("mutate net acc: ", network_mutate.get_accuracy())

    input("press to continue: before save network")
    memoryManager.saveTempNetwork(network_mutate)
    input("press to continue: after save network")
Example #11
def Test_Convex():
    augSettings = AugmentationSettings.AugmentationSettings()

    list_transform = {
        augSettings.randomHorizontalFlip: True,
        augSettings.translate: True,
    }

    version = directions_version.CONVEX_VERSION

    mutation_manager = MutationManager.MutationManager(
        directions_version=version)
    transform_compose = augSettings.generateTransformCompose(
        list_transform, False)
    dataGen = GeneratorFromCIFAR.GeneratorFromCIFAR(
        2,
        128,
        threads=0,
        dataAugmentation=True,
        transforms_mode=transform_compose)
    dataGen.dataConv2d()

    DNA = ((-1, 1, 3, 32, 32), (0, 3, 32, 3, 3), (0, 32, 64, 3, 3, 2),
           (0, 64, 128, 3, 3, 2), (0, 128, 256, 3, 3), (0, 256, 128, 2, 2),
           (0, 128, 128, 3, 3, 2), (0, 256, 256, 3, 3), (0, 384, 256, 3, 3),
           (0, 256, 128, 3, 3), (0, 384, 128, 3, 3), (0, 128, 128, 3, 3),
           (0, 256, 256, 3, 3), (0, 256, 128, 8, 8), (1, 128, 10), (2, ),
           (3, -1, 0), (3, 0, 1), (3, 1, 2), (3, 2, 3), (3, 3, 4), (3, 4, 5),
           (3, 3, 6), (3, 5, 7), (3, 6, 7), (3, 7, 8), (3, 8, 9), (3, 3, 9),
           (3, 4, 10), (3, 10, 11), (3, 9, 11), (3, 11, 12), (3, 13, 14))
    MUTATE_DNA = ((-1, 1, 3, 32, 32), (0, 3, 32, 3, 3), (0, 32, 64, 3, 3, 2),
                  (0, 64, 128, 3, 3, 2), (0, 128, 256, 3, 3),
                  (0, 256, 128, 2, 2), (0, 128, 128, 3, 3, 2),
                  (0, 256, 256, 3, 3), (0, 128, 128, 3, 3),
                  (0, 384, 256, 3, 3), (0, 256, 128, 3, 3),
                  (0, 384, 128, 3, 3), (0, 128, 128, 3, 3),
                  (0, 256, 256, 3, 3), (0, 256, 128, 8, 8), (1, 128, 10),
                  (2, ), (3, -1, 0), (3, 0, 1), (3, 1, 2), (3, 2, 3),
                  (3, 3, 4), (3, 4, 5), (3, 3, 6), (3, 5, 7), (3, 7, 8),
                  (3, 6, 8), (3, 8, 9), (3, 9, 10), (3, 3, 10), (3, 4, 11),
                  (3, 11, 12), (3, 10, 12), (3, 12, 13), (3, 14, 15))

    # Unused reference DNA (a bare tuple expression with no runtime effect):
    # ((-1, 1, 3, 32, 32), (0, 3, 32, 3, 3), (0, 32, 64, 3, 3, 2),
    #  (0, 64, 128, 3, 3, 2), (0, 128, 256, 3, 3), (0, 256, 256, 8, 8),
    #  (1, 128, 10), (2, ), (3, -1, 0), (3, 0, 1), (3, 1, 2), (3, 2, 3),
    #  (3, 3, 4), (3, 5, 6))

    parent_network = nw_dendrites.Network(dna=DNA,
                                          cuda_flag=True,
                                          momentum=0.9,
                                          weight_decay=0,
                                          enable_activation=True,
                                          enable_track_stats=True,
                                          dropout_value=0,
                                          dropout_function=None,
                                          version=version)

    print("starting mutation")
    mutate_network = mutation_manager.execute_mutation(parent_network,
                                                       MUTATE_DNA)
Example #12
def run(status):

    fileManager = FileManager.FileManager()
    fileManager.setFileName(FileNames.SAVED_MODELS)
    fileManager.writeFile("")

    status.Transfer = tran.TransferRemote(status, 'remote2local.txt',
                                          'local2remote.txt')
    print(f'status.settings is {status.settings}')

    create_objects(status)
    print('The value of typos after loading is')
    print(status.typos)
    print("objects created")
    status.print_DNA()
    status.Transfer.un_load()
    status.Transfer.write()
    k = 0

    print("max convolution layers: ", status.max_layer_conv2d)

    settings = status.settings

    network = nw.Network(
        status.Center,
        cuda_flag=settings.cuda,
        momentum=settings.momentum,
        weight_decay=settings.weight_decay,
        enable_activation=settings.enable_activation,
        enable_track_stats=settings.enable_track_stats,
        dropout_value=settings.dropout_value,
        dropout_function=settings.dropout_function,
        enable_last_activation=settings.enable_last_activation,
        version=settings.version,
        eps_batchnorm=settings.eps_batchorm)

    print("starting pre-training")
    dt_array = status.Alai.get_increments(20 * status.iterations_per_epoch)

    network.training_custom_dt(dataGenerator=status.Data_gen,
                               dt_array=dt_array,
                               ricap=settings.ricap,
                               evalLoss=settings.evalLoss)

    status.stream.add_node(network.dna)
    status.stream.link_node(network.dna, network)

    last_center_dna = None
    while k < status.max_iter:

        status.active = True

        if status.active:
            update(status)

            print(f'The iteration number is: {k}')
            status.print_predicted_actions()

            if status.Alai:
                status.Alai.update()

            center_dna = status.Dynamics.phase_space.center()
            layers_count = 0

            if center_dna is not None:
                layers_count = countLayers(center_dna)

                if center_dna != last_center_dna:
                    last_center_dna = center_dna
                    save_model(dna=center_dna,
                               alaiTime=status.Alai.computeTime(),
                               fileManager=fileManager)

            print("current layers: ", layers_count)

            if layers_count >= status.max_layer_conv2d:
                print("STOPPED MAX LAYERS: ", layers_count)
                break
        k = k + 1
Example #13
def load_network(fileName,
                 settings: utilities.ExperimentSettings.ExperimentSettings,
                 path=None):

    if path is None:
        path = os.path.join("saved_models", "cifar", fileName)

    checkpoint = torch.load(path)

    # Prefer an explicitly-set experiment setting; fall back to the value
    # stored in the checkpoint when the setting is None.
    def resolve(setting, checkpoint_key):
        return checkpoint[checkpoint_key] if setting is None else setting

    momentum = resolve(settings.momentum, 'momentum')
    weight_decay = resolve(settings.weight_decay, 'weight_decay')
    enable_activation = resolve(settings.enable_activation,
                                'enable_activation')
    enable_last_activation = resolve(settings.enable_last_activation,
                                     'enable_last_activation')
    dropout_value = resolve(settings.dropout_value, 'dropout_value')
    enable_track_stats = resolve(settings.enable_track_stats,
                                 'enable_track_stats')
    version = resolve(settings.version, 'version')
    # Note: the settings attribute is spelled "eps_batchorm" in this codebase.
    eps_batchnorm = resolve(settings.eps_batchorm, 'eps_batchnorm')

    network = nw.Network(dna=checkpoint['dna'],
                         cuda_flag=settings.cuda,
                         momentum=momentum,
                         weight_decay=weight_decay,
                         enable_activation=enable_activation,
                         enable_last_activation=enable_last_activation,
                         dropout_value=dropout_value,
                         dropout_function=settings.dropout_function,
                         enable_track_stats=enable_track_stats,
                         version=version,
                         eps_batchnorm=eps_batchnorm)

    network.load_parameters(checkpoint=checkpoint)
    return network
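A minimal usage sketch, assuming ExperimentSettings initializes its fields to None (so any field left unset falls back to the value stored in the checkpoint); the file name below is hypothetical.

# Hypothetical usage: override momentum, take everything else from the checkpoint.
settings = utilities.ExperimentSettings.ExperimentSettings()
settings.cuda = True
settings.momentum = 0.9
settings.dropout_function = None

network = load_network("model_cifar_01", settings)  # reads saved_models/cifar/model_cifar_01
network.generate_accuracy(dataGen)                  # dataGen built as in the examples above
print("acc: ", network.get_accuracy())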