def create_objects(status):
    # Build the CIFAR data generator and switch it to conv2d mode.
    status.Data_gen = GeneratorFromCIFAR.GeneratorFromCIFAR(
        status.Comp, status.S, cuda=status.cuda)
    status.Data_gen.dataConv2d()
    dataGen = status.Data_gen
    x = dataGen.size[1]
    y = dataGen.size[2]
    max_layers = status.max_layer
    max_filters = status.max_filter

    def condition(DNA):
        return max_filter(max_layer(DNA, max_layers), max_filters)

    version = status.typos_version
    # DNA of the initial network: the center of the DNA graph.
    center = ((-1, 1, 3, x, y), (0, 3, 15, 3, 3), (0, 18, 15, 3, 3),
              (0, 33, 15, x, y), (1, 15, 10), (2,),
              (3, -1, 0), (3, 0, 1), (3, -1, 1), (3, 1, 2), (3, 0, 2),
              (3, -1, 2), (3, 2, 3), (3, 3, 4))
    selector = status.Selector_creator(condition=condition, directions=version)
    status.Selector = selector
    creator = status.Creator
    selector.update(center)
    actions = selector.get_predicted_actions()
    space = DNA_Graph(center, 1, (x, y), condition, actions, version, creator)
    if status.Alai:
        stream = TorchStream(status.Data_gen, status.log_size,
                             min_size=status.min_log_size, Alai=status.Alai)
    else:
        stream = TorchStream(status.Data_gen, status.log_size,
                             min_size=status.min_log_size)
    status.stream = stream
    Phase_space = DNA_Phase_space(space, stream=stream)
    Dynamics = Dynamic_DNA(space, Phase_space, status.dx,
                           Creator=creator, Selector=selector,
                           update_velocity=velocity_updater,
                           update_space=space_updater, version=version)
    Phase_space.create_particles(status.n)
    Phase_space.beta = status.beta
    Phase_space.alpha = status.alpha
    Phase_space.influence = status.influence
    status.Dynamics = Dynamics
    status.objects = Dynamics.objects
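# Illustrative only, not part of the original module: a minimal sketch of the
# `status` object this first create_objects variant expects, listing exactly the
# attributes it reads above. The SimpleNamespace container and every concrete
# value are assumptions; Selector_creator and Creator stand for the selector
# factory and network creator supplied elsewhere in the project.
from types import SimpleNamespace

def make_status_sketch(Selector_creator, Creator, cuda=True):
    return SimpleNamespace(
        Comp=2, S=64, cuda=cuda,               # GeneratorFromCIFAR arguments
        max_layer=10, max_filter=130,          # bounds enforced by condition()
        typos_version=None,                    # directions version for the selector
        Selector_creator=Selector_creator,
        Creator=Creator,
        Alai=None,                             # optional annealing schedule for the stream
        log_size=200, min_log_size=100,        # TorchStream logging window
        dx=0.0001, n=5,                        # step size and particle count
        beta=2, alpha=50, influence=9,         # phase-space coefficients
    )

# Usage (sketch): create_objects(make_status_sketch(my_selector_factory, my_creator))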
def create_objects(status, loaded_network):
    settings = status.settings
    # Annealing schedule; a resumed run starts with time = 1 and reset_count = 1.
    status.Alai = Alai(min=status.dt_min, max=status.dt_Max,
                       max_time=status.restart_period)
    if loaded_network:
        status.Alai.time = 14077 - 14076
        status.Alai.reset_count = 1
    status.Data_gen = GeneratorFromCIFAR.GeneratorFromCIFAR(
        status.Comp, status.S, cuda=status.cuda, threads=status.threads,
        dataAugmentation=settings.enable_augmentation,
        transforms_mode=settings.transformations_compose)
    status.Data_gen.dataConv2d()
    dataGen = status.Data_gen
    x = dataGen.size[1]
    y = dataGen.size[2]
    condition = status.condition
    mutations = status.mutations
    version = status.version
    center = status.Center
    num_actions = status.num_actions
    selector = status.Selector_creator(condition=condition, directions=version,
                                       num_actions=num_actions, mutations=mutations)
    status.Selector = selector
    creator = status.Creator
    selector.update(center)
    actions = selector.get_predicted_actions()
    space = DNA_Graph(center, 1, (x, y), condition, actions, version, creator,
                      selector=status.Selector, num_morphisms=5)
    if status.Alai:
        stream = TorchStream(status.Data_gen, status.log_size,
                             min_size=status.min_log_size, Alai=status.Alai,
                             status=status)
    else:
        stream = TorchStream(status.Data_gen, status.log_size,
                             min_size=status.min_log_size, status=status)
    status.stream = stream
    Phase_space = DNA_Phase_space(space, stream=stream, status=status)
    Dynamics = Dynamic_DNA(
        space, Phase_space, status.dx, Creator=creator, Selector=selector,
        update_velocity=velocity_updater, update_space=space_updater,
        version=version, mutation_coefficient=status.mutation_coefficient,
        clear_period=status.clear_period,
        update_force_field=status.update_force_field,
    )
    Phase_space.create_particles(status.n)
    Phase_space.beta = status.beta
    Phase_space.alpha = status.alpha
    Phase_space.influence = status.influence
    status.Dynamics = Dynamics
    status.objects = Dynamics.objects
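# Illustrative only, not part of the original module: the settings fields this
# second variant reads on top of the first one (enable_augmentation and
# transformations_compose), shown on the project's own ExperimentSettings class.
# The concrete values are assumptions.
def make_settings_sketch():
    settings = ExperimentSettings.ExperimentSettings()
    settings.enable_augmentation = True        # forwarded as dataAugmentation=...
    settings.transformations_compose = None    # forwarded as transforms_mode=...
    return settings

# Usage (sketch): status.settings = make_settings_sketch()
#                 create_objects(status, loaded_network=False)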
def TestMemoryManager():
    # Interactive test of MemoryManager: train, save, reload, mutate, and retrain
    # a network, pausing at each step so memory usage can be inspected.
    epochs = 0.2
    batch_size = 64

    def dropout_function(base_p, total_layers, index_layer, isPool=False):
        value = 0
        if index_layer != 0 and not isPool:
            value = base_p
        if index_layer == total_layers - 2:
            value = base_p
        print("conv2d: ", index_layer, " - dropout: ", value, " - isPool: ", isPool)
        return value

    settings = ExperimentSettings.ExperimentSettings()
    settings.momentum = 0.9
    settings.dropout_value = 0.05
    settings.weight_decay = 0.0005
    settings.enable_activation = True
    settings.enable_last_activation = True
    settings.enable_track_stats = True
    settings.version = directions_version.CONVEX_VERSION
    settings.eps_batchorm = 0.001
    settings.dropout_function = dropout_function
    settings.ricap = Augmentation.Ricap(beta=0.3)

    dataGen = GeneratorFromCIFAR.GeneratorFromCIFAR(2, batch_size, threads=0,
                                                    dataAugmentation=True)
    dataGen.dataConv2d()
    memoryManager = MemoryManager.MemoryManager()
    mutation_manager = MutationManager.MutationManager(
        directions_version=settings.version)
    dna = test_DNAs.DNA_base

    e = 50000 / batch_size
    e = math.ceil(e)
    print("e: ", e)
    print("total iterations: ", int(epochs * e))
    dt_array = Alaising(1.2, 99, int(epochs * e))

    input("press to continue: before load network")
    network = nw_dendrites.Network(
        dna, cuda_flag=True, momentum=settings.momentum,
        weight_decay=settings.weight_decay,
        enable_activation=settings.enable_activation,
        enable_track_stats=settings.enable_track_stats,
        dropout_value=settings.dropout_value,
        dropout_function=settings.dropout_function,
        enable_last_activation=settings.enable_last_activation,
        version=settings.version, eps_batchnorm=settings.eps_batchorm)
    input("press to continue: before training network")
    network.training_custom_dt(dataGenerator=dataGen, dt_array=dt_array,
                               ricap=settings.ricap, evalLoss=True)
    network.generate_accuracy(dataGen)
    print("net acc: ", network.get_accuracy())

    input("press to continue: before save network")
    memoryManager.saveTempNetwork(network)
    input("press to continue: after save network")

    input("press to continue: before load temp network")
    network_loaded = memoryManager.loadTempNetwork(dna, settings)
    input("press to continue: after load temp network")
    network_loaded.generate_accuracy(dataGen)
    print("loaded acc: ", network_loaded.get_accuracy())

    input("press to continue: before mutate network (add filters layer 1)")
    dna_mutate = direction_dna.increase_filters(1, network_loaded.dna)
    network_mutate = mutation_manager.execute_mutation(network_loaded, dna_mutate)
    input("press to continue: after mutate network")

    input("press to continue: before delete old network")
    memoryManager.deleteNetwork(network_loaded)
    input("press to continue: after delete old network")
    network_mutate.generate_accuracy(dataGen)
    print("mutated acc: ", network_mutate.get_accuracy())

    input("press to continue: before training mutated network")
    network_mutate.training_custom_dt(dataGenerator=dataGen, dt_array=dt_array,
                                      ricap=settings.ricap, evalLoss=True)
    input("press to continue: after training mutated network")
    network_mutate.generate_accuracy(dataGen)
    print("mutate net acc: ", network_mutate.get_accuracy())

    input("press to continue: before save network")
    memoryManager.saveTempNetwork(network_mutate)
    input("press to continue: after save network")

    input("press to continue: before load network")
    network_loaded = memoryManager.loadTempNetwork(dna_mutate, settings)
    input("press to continue: after load network")
    network_loaded.generate_accuracy(dataGen)
    print("loaded acc: ", network_loaded.get_accuracy())

    input("press to continue: before mutate network (add layer pool 1)")
    dna_mutate_2 = direction_dna.add_pool_layer(1, network_loaded.dna)
    network_mutate = mutation_manager.execute_mutation(network_loaded, dna_mutate_2)
    input("press to continue: after mutate network")

    input("press to continue: before delete old network")
    memoryManager.deleteNetwork(network_loaded)
    input("press to continue: after delete old network")
    network_mutate.generate_accuracy(dataGen)
    print("mutated acc: ", network_mutate.get_accuracy())

    input("press to continue: before training mutated network")
    network_mutate.training_custom_dt(dataGenerator=dataGen, dt_array=dt_array,
                                      ricap=settings.ricap, evalLoss=True)
    input("press to continue: after training mutated network")
    network_mutate.generate_accuracy(dataGen)
    print("mutate net acc: ", network_mutate.get_accuracy())

    input("press to continue: before save network")
    memoryManager.saveTempNetwork(network_mutate)
    input("press to continue: after save network")
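# Condensed, non-interactive sketch of the cycle TestMemoryManager walks through
# above, using only calls that appear in the test (saveTempNetwork, loadTempNetwork,
# execute_mutation, deleteNetwork, generate_accuracy). Looping over a list of
# mutation directions is an illustrative assumption, not part of the original test.
def memory_cycle_sketch(memoryManager, mutation_manager, network, settings, dataGen):
    memoryManager.saveTempNetwork(network)              # park the trained net on disk
    dna = network.dna
    directions = (lambda d: direction_dna.increase_filters(1, d),
                  lambda d: direction_dna.add_pool_layer(1, d))
    for mutate in directions:
        loaded = memoryManager.loadTempNetwork(dna, settings)   # restore by DNA key
        new_dna = mutate(loaded.dna)
        mutated = mutation_manager.execute_mutation(loaded, new_dna)
        memoryManager.deleteNetwork(loaded)             # drop the parent copy
        mutated.generate_accuracy(dataGen)
        memoryManager.saveTempNetwork(mutated)
        dna = new_dna
    return dna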
def Test_Mutacion():
    memoryManager = MemoryManager.MemoryManager()
    augSettings = AugmentationSettings.AugmentationSettings()
    list_transform = {
        augSettings.randomHorizontalFlip: True,
        augSettings.translate: True,
    }
    PARENT_DNA = ((-1, 1, 3, 32, 32), (0, 3, 64, 3, 3), (0, 64, 128, 3, 3, 2),
                  (0, 192, 256, 3, 3, 2), (0, 256, 256, 13, 13), (1, 256, 10),
                  (2,), (3, -1, 0), (3, 0, 1), (3, 1, 2), (3, 0, 2),
                  (3, 2, 3), (3, 3, 4), (3, 4, 5))
    MUTATE_DNA = direction_dna.spread_convex_dendrites(1, PARENT_DNA)
    print("MUTATED DNA: ", MUTATE_DNA)
    transform_compose = augSettings.generateTransformCompose(list_transform, False)
    dataGen = GeneratorFromCIFAR.GeneratorFromCIFAR(
        2, 64, threads=0, dataAugmentation=True, transforms_mode=transform_compose)
    dataGen.dataConv2d()
    version = directions_version.POOL_VERSION
    mutation_manager = MutationManager.MutationManager(directions_version=version)
    parent_network = nw_dendrites.Network(
        dna=PARENT_DNA, cuda_flag=True, momentum=0.9, weight_decay=0,
        enable_activation=True, enable_track_stats=True, dropout_value=0.2,
        dropout_function=None, version=version)
    parent_network.training_cosine_dt(dataGenerator=dataGen, max_dt=0.001,
                                      min_dt=0.001, epochs=1, restart_dt=1)
    parent_network.generate_accuracy(dataGen)
    print("original acc: ", parent_network.get_accuracy())
    mutate_network = mutation_manager.execute_mutation(parent_network, MUTATE_DNA)
    mutate_network.generate_accuracy(dataGen)
    print("mutated acc: ", mutate_network.get_accuracy())
    mutate_network.training_cosine_dt(dataGenerator=dataGen, max_dt=0.001,
                                      min_dt=0.001, epochs=1, restart_dt=1)
    mutate_network.generate_accuracy(dataGen)
    print("mutated acc after training: ", mutate_network.get_accuracy())
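# How the DNA tuples above appear to be encoded (an inference from this test and
# the mutations it applies, not from project documentation): (-1, 1, 3, 32, 32)
# describes the 3x32x32 input, (0, in, out, kh, kw[, flag]) a convolution whose
# optional sixth field seems to mark pooling, (1, in, classes) the linear
# classifier, (2,) the output marker, and (3, src, dst) a dendrite from layer src
# to layer dst, with -1 denoting the input. Concatenation shows up in the channel
# counts: layer 2 takes 64 + 128 = 192 inputs because both (3, 0, 2) and (3, 1, 2)
# feed it. A small hypothetical helper to split a DNA by tuple type:
def split_dna(dna):
    layers = [gene for gene in dna if gene[0] != 3]
    dendrites = [gene for gene in dna if gene[0] == 3]
    return layers, dendrites

# Usage (sketch): layers, dendrites = split_dna(PARENT_DNA)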
def Test_Convex():
    augSettings = AugmentationSettings.AugmentationSettings()
    list_transform = {
        augSettings.randomHorizontalFlip: True,
        augSettings.translate: True,
    }
    version = directions_version.CONVEX_VERSION
    mutation_manager = MutationManager.MutationManager(directions_version=version)
    transform_compose = augSettings.generateTransformCompose(list_transform, False)
    dataGen = GeneratorFromCIFAR.GeneratorFromCIFAR(
        2, 128, threads=0, dataAugmentation=True, transforms_mode=transform_compose)
    dataGen.dataConv2d()
    DNA = ((-1, 1, 3, 32, 32), (0, 3, 32, 3, 3), (0, 32, 64, 3, 3, 2),
           (0, 64, 128, 3, 3, 2), (0, 128, 256, 3, 3), (0, 256, 128, 2, 2),
           (0, 128, 128, 3, 3, 2), (0, 256, 256, 3, 3), (0, 384, 256, 3, 3),
           (0, 256, 128, 3, 3), (0, 384, 128, 3, 3), (0, 128, 128, 3, 3),
           (0, 256, 256, 3, 3), (0, 256, 128, 8, 8), (1, 128, 10), (2,),
           (3, -1, 0), (3, 0, 1), (3, 1, 2), (3, 2, 3), (3, 3, 4), (3, 4, 5),
           (3, 3, 6), (3, 5, 7), (3, 6, 7), (3, 7, 8), (3, 8, 9), (3, 3, 9),
           (3, 4, 10), (3, 10, 11), (3, 9, 11), (3, 11, 12), (3, 13, 14))
    MUTATE_DNA = ((-1, 1, 3, 32, 32), (0, 3, 32, 3, 3), (0, 32, 64, 3, 3, 2),
                  (0, 64, 128, 3, 3, 2), (0, 128, 256, 3, 3), (0, 256, 128, 2, 2),
                  (0, 128, 128, 3, 3, 2), (0, 256, 256, 3, 3), (0, 128, 128, 3, 3),
                  (0, 384, 256, 3, 3), (0, 256, 128, 3, 3), (0, 384, 128, 3, 3),
                  (0, 128, 128, 3, 3), (0, 256, 256, 3, 3), (0, 256, 128, 8, 8),
                  (1, 128, 10), (2,),
                  (3, -1, 0), (3, 0, 1), (3, 1, 2), (3, 2, 3), (3, 3, 4),
                  (3, 4, 5), (3, 3, 6), (3, 5, 7), (3, 7, 8), (3, 6, 8),
                  (3, 8, 9), (3, 9, 10), (3, 3, 10), (3, 4, 11), (3, 11, 12),
                  (3, 10, 12), (3, 12, 13), (3, 14, 15))
    # Unused DNA literal left in the original test; kept here for reference only.
    # ((-1, 1, 3, 32, 32), (0, 3, 32, 3, 3), (0, 32, 64, 3, 3, 2),
    #  (0, 64, 128, 3, 3, 2), (0, 128, 256, 3, 3), (0, 256, 256, 8, 8),
    #  (1, 128, 10), (2,), (3, -1, 0), (3, 0, 1), (3, 1, 2), (3, 2, 3),
    #  (3, 3, 4), (3, 5, 6))
    parent_network = nw_dendrites.Network(
        dna=DNA, cuda_flag=True, momentum=0.9, weight_decay=0,
        enable_activation=True, enable_track_stats=True, dropout_value=0,
        dropout_function=None, version=version)
    print("starting mutation")
    mutate_network = mutation_manager.execute_mutation(parent_network, MUTATE_DNA)
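# Sketch of a manual entry point for these tests; the original module may drive
# them differently, so this guard is an assumption added only for illustration.
# TestMemoryManager pauses on input() between steps, and all three tests expect
# CUDA plus the CIFAR generator to be available.
if __name__ == '__main__':
    Test_Convex()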