Example #1
    def test_assembles_with_pretrained_module_as_first_module(self):
        train, _ = mnist_configure(self.classes)

        self.sub_module.keras_operation = assemble(self.sub_module,
                                                   self.in_shape, self.classes)
        train([self.sub_module], 1, 1024)

        sub_module_copy = deepcopy(self.sub_module)
        sub_module_copy = mutation_ops.insert(sub_module_copy,
                                              sub_module_copy.children[2],
                                              sub_module_copy.children[0],
                                              dense.Dropout())
        sub_module_copy = mutation_ops.transfer_predecessor_weights(
            sub_module_copy, self.in_shape, self.classes)

        self.module = mutation_ops.insert(self.module,
                                          self.op1,
                                          self.op2,
                                          self.sub_module,
                                          between=True)
        self.module = mutation_ops.insert(self.module,
                                          self.sub_module,
                                          self.op4,
                                          sub_module_copy,
                                          between=False)
        self.module = mutation_ops.remove(self.module, self.op1)

        self.module.keras_operation = assemble(self.module, self.in_shape,
                                               self.classes)
        train([self.module], 1, 1024)
Example #2
    def test_keeps_weights_after_copy_and_insert(self):
        in_shape = (784, )
        classes = 10

        # Creating keras module:
        self.module.keras_tensor = assemble(self.module, in_shape, classes)

        module_copy = deepcopy(self.module)
        module_copy = mutation_ops.insert(module_copy, module_copy.children[0],
                                          module_copy.children[-1], Dropout())

        transfer_predecessor_weights(module_copy, in_shape, classes)

        weights, bias = self.module.children[0].keras_operation.get_weights()
        copied_weights, copied_bias = module_copy.children[
            0].keras_operation.get_weights()

        for row, vals in enumerate(weights):
            for col, val in enumerate(vals):
                self.assertEqual(val, copied_weights[row][col],
                                 "Weights not the same after copy")

        for i, val in enumerate(bias):
            self.assertEqual(val, copied_bias[i],
                             "Bias not the same after copy")

        train, _ = mnist_configure(classes)
        train([module_copy])
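Note: Keras' get_weights() returns NumPy arrays, so the element-wise comparison loops above can be collapsed into single assertions. A minimal sketch, reusing the weights/copied_weights and bias/copied_bias variables from the test:

    import numpy as np

    # get_weights() returns ndarrays; kernel and bias can each be compared in one call.
    np.testing.assert_array_equal(weights, copied_weights,
                                  "Weights not the same after copy")
    np.testing.assert_array_equal(bias, copied_bias,
                                  "Bias not the same after copy")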
Example #3
    def test_keeps_weights_after_copy(self):
        in_shape = (784, )
        classes = 10

        # Creating keras module:
        self.module.keras_tensor = assemble(self.module, in_shape, classes)

        module_copy = deepcopy(self.module)

        # Precondition for this copy method: the layers in the assembled Keras model
        # need to be the same objects as those referenced by the module's children.
        transfer_predecessor_weights(module_copy, in_shape, classes)
        self.assertEqual(module_copy.keras_operation.layers[1],
                         module_copy.children[0].keras_operation)

        weights, bias = self.module.children[0].keras_operation.get_weights()
        copied_weights, copied_bias = module_copy.children[
            0].keras_operation.get_weights()

        for row, vals in enumerate(weights):
            for col, val in enumerate(vals):
                self.assertEqual(val, copied_weights[row][col],
                                 "Weights not the same after copy")

        for i, val in enumerate(bias):
            self.assertEqual(val, copied_bias[i],
                             "Bias not the same after copy")

        train, _ = mnist_configure(classes)
        train([module_copy])
Example #4
    def test_assemble_with_pooling_op(self):
        with open("./datasets/cifar10-home-ssh.json", "r") as f:
            config = json.load(f)
        server = config["servers"][0]

        individ = Module()
        individ = mutation_ops.append(individ, Conv3x3())
        individ = mutation_ops.append(individ, Conv3x3())
        individ = mutation_ops.append(individ, dense.DenseL())
        individ = mutation_ops.insert(individ, individ.children[1],
                                      individ.children[2], dense.DenseL())
        individ = mutation_ops.insert(individ, individ.children[1],
                                      individ.children[2], MaxPooling2x2())
        individ = mutation_ops.insert(individ, individ.children[1],
                                      individ.children[2], AvgPooling2x2())
        individ = mutation_ops.append(individ, dense.DenseL())

        training, evaluation, name, inputs = cifar10.configure(
            config["classes_in_classifier"], server)

        model = assemble(individ, config["input_format"],
                         config["classes_in_classifier"])

        training_history = training(
            model=model,
            device="/gpu:0",  # server['device'],
            epochs=0,
            batch_size=1000)
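For reference, json.load returns a plain dict, so the config entries above are accessed by key. A rough sketch of the assumed shape of ./datasets/cifar10-home-ssh.json (illustrative values, not taken from the actual file):

    # Assumed structure of the JSON config; only the keys used in this test are shown.
    example_config = {
        "servers": [{"device": "/gpu:0"}],  # training servers; the test reads the first one
        "classes_in_classifier": 10,        # CIFAR-10 has 10 classes
        "input_format": [32, 32, 3],        # input shape for CIFAR-10 images
    }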
Example #5
    def test_training_integration(self):
        import os
        os.chdir("..")
        from src.buildingblocks.module import Module
        from src.ea_nas.evolutionary_operations import mutation_operators as op

        from src.buildingblocks.ops.convolution import Conv3x3
        from src.buildingblocks.ops.dense import (
            DenseL as DenseLarge,
            Dropout,
        )
        from src.buildingblocks.ops.pooling import MaxPooling2x2

        # Additional imports needed by the body of this test:
        import keras
        import tensorflow as tf
        from sklearn.utils import shuffle
        # `assemble` is also used below and is assumed to be imported at module level.

        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'

        module = Module()
        module = op.append(module, Conv3x3())
        module = op.append(module, Dropout())
        module = op.append(module, Conv3x3())
        module = op.append(module, Dropout())
        module = op.append(module, Conv3x3())
        module = op.append(module, Dropout())
        module = op.append(module, MaxPooling2x2())
        module = op.append(module, DenseLarge())
        module = op.append(module, Dropout())
        module = op.append(module, DenseLarge())

        (x_train, y_train), (x_test,
                             y_test) = keras.datasets.cifar10.load_data()
        x_val = x_train[45000:] / 255
        y_val = y_train[45000:]
        x_train = x_train[:45000] / 255
        y_train = y_train[:45000]
        x_test = x_test / 255

        y_train = keras.utils.to_categorical(y_train, num_classes=10)
        y_test = keras.utils.to_categorical(y_test, num_classes=10)
        y_val = keras.utils.to_categorical(y_val, num_classes=10)

        # shuffle() keeps the x/y correspondence and returns them in the order passed in,
        # so data holds the images and labels the targets for fit() below.
        data, labels = shuffle(x_train, y_train)
        with tf.device("/GPU:0"):
            keras.backend.set_session(
                tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(
                    allow_growth=False,
                    per_process_gpu_memory_fraction=1.0,
                ),
                                                 allow_soft_placement=True,
                                                 log_device_placement=True)))
            model = assemble(module, in_shape=(32, 32, 3), classes=10)
            metrics = model.fit(
                data,
                labels,
                epochs=10,
                batch_size=250,
                verbose=0,
                validation_data=(x_val, y_val),
            )

            results = model.evaluate(x_test, y_test)
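As a hypothetical follow-up (not part of the original test), the evaluation result could be unpacked as below, assuming assemble() compiles the model with an accuracy metric, in which case evaluate() returns [loss, accuracy]:

            # Hypothetical: unpack loss and accuracy from model.evaluate().
            test_loss, test_acc = results
            print("test loss: {:.4f}, test accuracy: {:.4f}".format(test_loss, test_acc))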
Example #6
    def test_assembles_with_first_layer_as_module(self):
        # Genotype after assembly:
        # sub_module -> op2    ->     op3
        #                 \          /
        #                  -> op4 ->

        self.module = mutation_ops.insert(self.module,
                                          self.op1,
                                          self.op2,
                                          self.sub_module,
                                          between=True)
        self.module = mutation_ops.remove(self.module, self.op1)

        train, _ = mnist_configure(self.classes)
        self.module.keras_operation = assemble(self.module, self.in_shape,
                                               self.classes)
        train([self.module], 1, 1024)
Example #7
    def test_assembles_with_branched_network(self):
        train, _ = mnist_configure(self.classes)
        self.module.keras_operation = assemble(self.module, self.in_shape,
                                               self.classes)
        train([self.module], 1, 1024)