def test_generate_model_from_phenotype_3():
    allel1 = ConvNetworkBlock(block_type="Conv", filter_size=3, num_filter=8, has_pooling_layer=True)
    allel2 = ConvNetworkBlock(block_type="Conv", filter_size=5, num_filter=16, has_pooling_layer=False)
    phenotype = Phenotype(coding_network_blocks=[allel1, allel2])

    # Expected model:
    # input_layer = layers.Input(shape=(32, 32, 1))
    # model = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(input_layer)
    # model = layers.MaxPooling2D(pool_size=(2, 2))(model)
    # model = layers.Conv2D(16, (5, 5), activation='relu', padding='same')(model)
    # model = layers.Conv2D(16, (5, 5), activation='relu', padding='same')(model)
    # model = layers.UpSampling2D((2,2)) (model)
    # model = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(model)
    # output_layer = layers.Conv2D(1, (3, 3), activation=None, padding='same')(model)
    # autoencoder_model = models.Model(input_layer, output_layer)
    # autoencoder_model.compile(loss='mse',optimizer="adam")
    # expected_config = autoencoder_model.get_config()

    model = generate_model_from_phenotype(phenotype, input_layer_shape=(None, 32, 32, 1))

    config = model.get_config()

    # Check input layer
    assert config["layers"][0]["class_name"] == 'InputLayer'

    # Check encoding layer
    assert config["layers"][1]["class_name"] == 'Conv2D'
    assert config["layers"][1]["config"]["kernel_size"] == (3, 3)
    assert config["layers"][1]["config"]["activation"] == "relu"
    assert config["layers"][1]["config"]["padding"] == "same"
    assert config["layers"][1]["config"]["filters"] == 8
    assert config["layers"][2]["class_name"] == 'MaxPooling2D'
    assert config["layers"][2]["config"]["pool_size"] == (2, 2)
    assert config["layers"][3]["class_name"] == 'Conv2D'
    assert config["layers"][3]["config"]["kernel_size"] == (5, 5)
    assert config["layers"][3]["config"]["padding"] == "same"
    assert config["layers"][3]["config"]["filters"] == 16

    # Check decoding layer
    assert config["layers"][4]["class_name"] == 'Conv2D'
    assert config["layers"][4]["config"]["kernel_size"] == (5, 5)
    assert config["layers"][4]["config"]["padding"] == "same"
    assert config["layers"][4]["config"]["filters"] == 16
    assert config["layers"][5]["class_name"] == 'UpSampling2D'
    assert config["layers"][5]["config"]["size"] == (2, 2)
    assert config["layers"][6]["class_name"] == 'Conv2D'
    assert config["layers"][6]["config"]["activation"] == "relu"
    assert config["layers"][6]["config"]["kernel_size"] == (3, 3)
    assert config["layers"][6]["config"]["padding"] == "same"
    assert config["layers"][6]["config"]["filters"] == 8

    # Check output layer
    assert config["layers"][7]["class_name"] == 'Conv2D'
    assert config["layers"][7]["config"]["kernel_size"] == (3, 3)
    assert config["layers"][7]["config"]["padding"] == "same"
    assert config["layers"][7]["config"]["filters"] == 1

    # check the output layer is actually of the same size as the input layer
    assert tuples_are_equal(model.layers[0].output_shape[0], model.layers[7].output_shape)
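

# `tuples_are_equal` comes from the surrounding project and its definition is not
# part of this snippet. A minimal sketch inferred from its use above (the real
# helper may differ, e.g. in how it treats None batch dimensions):
def tuples_are_equal(tuple_a, tuple_b):
    # Normalise lists vs. tuples before comparing element-wise.
    return tuple(tuple_a) == tuple(tuple_b)

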
def test_generate_model_from_phenotype_2():
    block = ConvNetworkBlock(block_type="Conv", filter_size=3, num_filter=8, has_pooling_layer=True)
    phenotype = Phenotype(coding_network_blocks=[block])

    # For a single allele, the model is expected to have 6 layers:
    # 1. Input layer
    # 2. Conv layer (specified by the allele)
    # 3. Pooling layer
    # 4. Upsampling layer
    # 5. Decoding layer (specified by the allele)
    # 6. Output layer (a convolutional layer with a single filter to restore the original shape)
    # (An illustrative sketch of this mirrored construction follows this test.)

    # input_layer = layers.Input(shape = (32, 32, 1))
    # model = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(input_layer)
    # model = layers.MaxPooling2D(pool_size=(2, 2))(model)
    # model = layers.UpSampling2D((2,2)) (model)
    # model = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(model)
    # output_layer = layers.Conv2D(1, (3, 3), activation=None, padding='same')(model)
    # autoencoder_model = models.Model(input_layer, output_layer)
    # autoencoder_model.compile(loss='mse',optimizer="adam")

    model = generate_model_from_phenotype(phenotype, input_layer_shape=(None, 32, 32, 1))

    config = model.get_config()

    # Check input layer
    assert config["layers"][0]["class_name"] == 'InputLayer'

    # Check encoding layer
    assert config["layers"][1]["class_name"] == 'Conv2D'
    assert config["layers"][1]["config"]["kernel_size"] == (3, 3)
    assert config["layers"][1]["config"]["padding"] == "same"
    assert config["layers"][1]["config"]["filters"] == 8
    assert config["layers"][2]["class_name"] == 'MaxPooling2D'
    assert config["layers"][2]["config"]["pool_size"] == (2, 2)

    # Check decoding layer
    assert config["layers"][3]["class_name"] == 'UpSampling2D'
    assert config["layers"][3]["config"]["size"] == (2, 2)
    assert config["layers"][4]["class_name"] == 'Conv2D'
    assert config["layers"][4]["config"]["kernel_size"] == (3, 3)
    assert config["layers"][4]["config"]["padding"] == "same"
    assert config["layers"][4]["config"]["filters"] == 8

    # Check output layer
    assert config["layers"][5]["class_name"] == 'Conv2D'
    assert config["layers"][5]["config"]["kernel_size"] == (3, 3)
    assert config["layers"][5]["config"]["padding"] == "same"
    assert config["layers"][5]["config"]["filters"] == 1

    # check the output layer is actually of the same size as the input layer
    assert tuples_are_equal(model.layers[0].output_shape[0], model.layers[5].output_shape)
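

# The comments in the tests above describe how generate_model_from_phenotype is
# expected to behave: one Conv2D (plus optional MaxPooling2D) per block in the
# encoder, the blocks mirrored in reverse with UpSampling2D in the decoder, and
# a single-filter Conv2D output layer. The sketch below is only an illustration
# of that construction, not the project's actual implementation; it assumes the
# block objects expose filter_size, num_filter and has_pooling_layer attributes.
def _build_mirrored_autoencoder_sketch(blocks, input_shape=(32, 32, 1)):
    from tensorflow.keras import layers, models

    input_layer = layers.Input(shape=input_shape)
    x = input_layer
    # Encoder: one convolution per block, pooling where requested.
    for block in blocks:
        x = layers.Conv2D(block.num_filter,
                          (block.filter_size, block.filter_size),
                          activation='relu', padding='same')(x)
        if block.has_pooling_layer:
            x = layers.MaxPooling2D(pool_size=(2, 2))(x)
    # Decoder: mirror the blocks, upsampling wherever the encoder pooled.
    for block in reversed(blocks):
        if block.has_pooling_layer:
            x = layers.UpSampling2D((2, 2))(x)
        x = layers.Conv2D(block.num_filter,
                          (block.filter_size, block.filter_size),
                          activation='relu', padding='same')(x)
    # Single-filter output convolution restores the input's channel count.
    output_layer = layers.Conv2D(1, (3, 3), activation=None, padding='same')(x)
    model = models.Model(input_layer, output_layer)
    model.compile(loss='mse', optimizer='adam')
    return model

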
def test_integration_genereate_and_decode_individual():
    config_dict = {"filter_size": [1, 3, 5], "num_filter": [8, 16, 32, 64, 128], "has_pooling_layer": [False, True]}
    network_table = NetworkBlockTable(config_dict)

    for i in range(10):
        print(f"Initalize individual number {i}")
        individual = initialize_individual_default(network_table, num_rows=3, num_columns=20, levelback=5)
        individual: Individual
        phenotype = individual.phenotype
        model = generate_model_from_phenotype(phenotype, input_layer_shape=(None, 32, 32, 1))
        assert model is not None, "Error occurred during model creation."
        assert tuples_are_equal(model.layers[0].output_shape[0], model.layers[-1].output_shape)
        assert tuples_are_equal(model.layers[1].output_shape, model.layers[-2].output_shape)
        K.clear_session()
def test_generate_model_from_phenotype_dropout():
    allel1 = ConvNetworkBlock(block_type="Conv", filter_size=3, num_filter=8, has_pooling_layer=False, dropout=0.5)
    phenotype = Phenotype(coding_network_blocks=[allel1])

    # Expected model:
    input_layer = layers.Input(shape=(32, 32, 1))
    model = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(input_layer)
    model = layers.Dropout(rate=0.5)(model)
    model = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(model)
    output_layer = layers.Conv2D(1, (3, 3), activation=None, padding='same')(model)
    autoencoder_model = models.Model(input_layer, output_layer)
    autoencoder_model.compile(loss='mse',optimizer="adam")
    expected_config = autoencoder_model.get_config()

    model = generate_model_from_phenotype(phenotype, input_layer_shape=(None, 32, 32, 1))

    config = model.get_config()
    model.summary()
Example #5
def train_individual(individual,
                     X_train,
                     batch_size,
                     epochs,
                     input_layer_shape,
                     model_info_logger,
                     network_type="Conv"):
    model = generate_model_from_phenotype(individual.phenotype,
                                          input_layer_shape,
                                          model_info_logger=model_info_logger)
    model_callbacks = initalize_callbacks(model_info_logger, network_type)

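    # Autoencoder training: the input images serve as both input and reconstruction target.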
    model.fit(X_train,
              X_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=model_info_logger.keras_verbose,
              callbacks=model_callbacks)
    return model
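
# Hypothetical usage sketch of train_individual. The data below is synthetic and
# `individual` / `model_info_logger` are placeholders from the surrounding
# project, shown only to illustrate the expected call shape:
#
#     import numpy as np
#     X_train = np.random.rand(256, 32, 32, 1).astype("float32")
#     model = train_individual(individual,
#                              X_train,
#                              batch_size=32,
#                              epochs=5,
#                              input_layer_shape=(None, 32, 32, 1),
#                              model_info_logger=model_info_logger)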
Example #6
    def best_architecture(self):
        return generate_model_from_phenotype(self.best_individual.phenotype, self.input_shape)