Example #1

#Shared imports assumed by every snippet on this page: numpy and
#TensorFlow/Keras, plus the project-local modules (main, metrics, visualize)
#that define AttentionModel and its helpers.
import numpy as np
import tensorflow as tf
from tensorflow.keras import metrics as keras_metrics

import main
import metrics
import visualize

def test_validation_order(test_config):
    """Test that calls to model.read_data() produce identical data when shuffle is false"""
    test_config["train"]["shuffle"] = False

    #Create class
    mod = main.AttentionModel()

    #Replace config for testing env
    for key, value in test_config.items():
        for nested_key, nested_value in value.items():
            mod.config[key][nested_key] = nested_value

    #Create model
    mod.create()
    mod.read_data(validation_split=True)

    first_sample = []
    for _, label in mod.val_split:
        first_sample.append(label)

    first_sample = np.vstack(first_sample)

    #Second call
    mod.read_data(validation_split=True)

    second_sample = []
    for _, label in mod.val_split:
        second_sample.append(label)

    second_sample = np.vstack(second_sample)

    assert np.array_equal(first_sample, second_sample)
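
Each test on this page receives a test_config pytest fixture whose nested (section, key) pairs overwrite mod.config[key][nested_key]. The fixture itself is not shown here; a minimal sketch, assuming hypothetical entries beyond the "shuffle" and "learning_rate" keys the snippets actually touch, might look like:

import pytest

@pytest.fixture()
def test_config():
    #Nested dict mirroring mod.config; "epochs" and "batch_size" are
    #illustrative placeholders, only "shuffle" and "learning_rate" are
    #referenced by the snippets on this page
    return {
        "train": {
            "shuffle": True,
            "learning_rate": 0.001,
            "epochs": 1,
            "batch_size": 2
        }
    }
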
Example #2
def test_evaluate(test_config):
    #Create class
    mod = main.AttentionModel()

    #Replace config for testing env
    for key, value in test_config.items():
        for nested_key, nested_value in value.items():
            mod.config[key][nested_key] = nested_value

    #Create
    mod.create()
    mod.read_data(validation_split=True)

    #Method 1, class eval method
    print("Before evaluation")
    y_pred, y_true = mod.evaluate(mod.val_split)

    print("evaluated")

    test_acc = keras_metrics.CategoricalAccuracy()
    test_acc.update_state(y_true=y_true, y_pred=y_pred)
    method1_eval_accuracy = test_acc.result().numpy()

    assert y_pred.shape == y_true.shape

    #Method 2, keras eval method
    metric_list = mod.model.evaluate(mod.val_split)
    metric_dict = {}
    for index, value in enumerate(metric_list):
        metric_dict[mod.model.metrics_names[index]] = value

    assert method1_eval_accuracy == metric_dict["acc"]

    #F1 expects integer class labels, not softmax probabilities
    f1s = metrics.f1_scores(y_true, y_pred)
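
metrics.f1_scores is project-local and not shown on this page. As the comment above notes, F1 is computed on integer class labels rather than softmax scores, so the helper presumably argmaxes both arrays first. A minimal sketch of an equivalent computation with scikit-learn, assuming one-hot y_true and softmax y_pred arrays:

import numpy as np
from sklearn.metrics import f1_score

def f1_scores_sketch(y_true, y_pred):
    #Collapse one-hot truth and softmax predictions to integer class indices
    true_labels = np.argmax(y_true, axis=1)
    pred_labels = np.argmax(y_pred, axis=1)
    #Macro averages F1 over classes; micro pools all samples
    macro = f1_score(true_labels, pred_labels, average="macro")
    micro = f1_score(true_labels, pred_labels, average="micro")
    return macro, micro
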
Example #3
def test_AttentionModel(test_config, validation_split):

    #Create class
    mod = main.AttentionModel()

    #Replace config for testing env
    for key, value in test_config.items():
        for nested_key, nested_value in value.items():
            mod.config[key][nested_key] = nested_value

    #Create model
    mod.create()
    mod.read_data(validation_split=validation_split)

    #initial weights
    initial_weight = mod.model.layers[1].get_weights()

    #Count samples per split and ensure no overlap in data
    train_image_data = []
    test_image_data = []
    if validation_split:
        train_counter = 0
        for data, label in mod.train_split:
            train_image_data.append(data)
            train_counter += data.shape[0]

        test_counter = 0
        for data, label in mod.val_split:
            test_image_data.append(data)
            test_counter += data.shape[0]

        assert train_counter > test_counter

        #No test in train batches
        assert all([
            not np.array_equal(y, x) for x in train_image_data
            for y in test_image_data
        ])

        #Spatial check: the center pixel of the first validation image should
        #not appear at the center of any training image
        test_center_pixel = test_image_data[0][0, 2, 2, 0].numpy()
        for x in train_image_data:
            assert test_center_pixel not in x[:, 2, 2, 0]

    #train
    mod.train()

    final_weight = mod.model.layers[1].get_weights()

    #assert training took place (at least one weight array changed)
    assert not all(
        np.array_equal(f, i) for f, i in zip(final_weight, initial_weight))

    #assert val acc exists if split
    if validation_split:
        assert "val_acc" in list(mod.model.history.history.keys())
Example #4
def test_split_data(test_config):
    #Create class
    mod = main.AttentionModel()      
    
    #Replace config for testing env
    for key, value in test_config.items():
        for nested_key, nested_value in value.items():
            mod.config[key][nested_key] = nested_value
            
    mod.read_data(validation_split=True)
    
    #Assert tfrecords are split
    assert all([x not in mod.train_split_records for x in mod.test_split_records])
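
read_data(validation_split=True) evidently splits the list of tfrecord paths into disjoint train_split_records and test_split_records. The project's splitting code is not part of this page; one common way to produce such a disjoint file-level split, using a hypothetical list of record paths, is:

import random

def split_records(record_paths, val_frac=0.2, seed=0):
    #Shuffle a copy so the caller's list is untouched, then cut once;
    #splitting whole files guarantees no record lands in both sets
    paths = list(record_paths)
    random.Random(seed).shuffle(paths)
    n_val = int(len(paths) * val_frac)
    return paths[n_val:], paths[:n_val]

train_records, test_records = split_records(
    ["sample_{}.tfrecord".format(i) for i in range(10)])
assert all([x not in train_records for x in test_records])
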
Example #5
def test_submodel_AttentionModel(test_config, submodel):
    #Create class
    mod = main.AttentionModel()      
    
    #Replace config for testing env
    for key, value in test_config.items():
        for nested_key, nested_value in value.items():
            mod.config[key][nested_key] = nested_value
        
    #Create model
    mod.create()
    mod.read_data(mode="submodel")
    mod.train(submodel=submodel)
    
    #Iterating the dataset is the real check: it raises if the submodel
    #pipeline yields malformed batches
    for batch in mod.train_split:
        assert len(batch) > 0
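
Like validation_split above, submodel is a parametrized fixture whose values name the sub-networks to train; the actual names are project-specific and not shown here. A sketch with hypothetical values:

import pytest

@pytest.fixture(params=["spectral", "spatial"])
def submodel(request):
    #Hypothetical submodel names standing in for the project's real ones
    return request.param
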
Example #6
def test_predict_raster(test_config, predict_tfrecords):
    #Create class
    mod = main.AttentionModel()    
    
    #Replace config for testing env
    for key, value in test_config.items():
        for nested_key, nested_value in value.items():
            mod.config[key][nested_key] = nested_value
    
    #Create
    mod.create()
    mod.read_data()
    results = mod.predict_raster(predict_tfrecords, batch_size=2)
    predicted_raster = visualize.create_raster(results)
    
    #Equals the size of the input raster
    assert predicted_raster.shape == (12, 15)
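
visualize.create_raster is project-local. Given the shape assertion above, predict_raster evidently returns one prediction per pixel position; a minimal sketch, assuming results is a table-like object with integer "row", "col", and "label" columns (hypothetical names), could be:

import numpy as np

def create_raster_sketch(results):
    #Size the grid from the largest indices, then scatter each predicted
    #label into its pixel position
    raster = np.zeros((max(results["row"]) + 1, max(results["col"]) + 1))
    for row, col, label in zip(results["row"], results["col"], results["label"]):
        raster[row, col] = label
    return raster
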
Example #7
def test_evaluate(test_config):
    #Create class
    mod = main.AttentionModel()    
    
    #Replace config for testing env
    for key, value in test_config.items():
        for nested_key, nested_value in value.items():
            mod.config[key][nested_key] = nested_value
    
    #Create
    mod.create()
    mod.read_data(validation_split=True)
    
    metric_list = [
        keras_metrics.TopKCategoricalAccuracy(k=2, name="top_k"),
        keras_metrics.CategoricalAccuracy(name="acc")
    ]
    
    mod.model.compile(
        loss="categorical_crossentropy",
        optimizer=tf.keras.optimizers.Adam(
            learning_rate=float(mod.config["train"]["learning_rate"])),
        metrics=metric_list)
    
    #Method 1, class eval method
    print("Before evaluation")
    y_pred, y_true = mod.evaluate(mod.val_split)
    
    print("evaluated")
    test_acc = keras_metrics.CategoricalAccuracy()
    test_acc.update_state(y_true=y_true, y_pred=y_pred)
    method1_eval_accuracy = test_acc.result().numpy()
    
    assert y_pred.shape == y_true.shape

    #Method 2, keras eval method
    metric_list = mod.model.evaluate(mod.val_split)
    metric_dict = {}
    for index, value in enumerate(metric_list):
        metric_dict[mod.model.metrics_names[index]] = value
    
    assert method1_eval_accuracy == metric_dict["acc"]   
    
    #F1 expects integer class labels, not softmax probabilities
    f1s = metrics.f1_scores(y_true, y_pred)
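    #Note: the "acc" assertion above compares floats produced by two separate
    #evaluation passes; if those ever diverge through accumulation order,
    #a tolerant check such as pytest.approx(metric_dict["acc"]) is the
    #usual fix.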