Example #1
    def test_binary_accuracy(self):
        acc_obj = metrics.BinaryAccuracy(name='my_acc')

        # check config
        assert acc_obj.name == 'my_acc'
        assert acc_obj.stateful
        assert len(acc_obj.weights) == 2
        assert acc_obj.dtype == 'float32'

        # verify that correct value is returned
        result_t = acc_obj([[1], [0]], [[1], [0]])
        result = K.eval(result_t)
        assert result == 1  # 2/2

        # check y_pred squeeze
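        # the metric is stateful, so results accumulate across calls:
        # (2 + 1) correct / (2 + 2) seen = 3/4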
        result_t = acc_obj([[1], [1]], [[[1]], [[0]]])
        result = K.eval(result_t)
        assert np.isclose(result, 3. / 4., atol=1e-3)

        # check y_true squeeze
        result_t = acc_obj([[[1]], [[1]]], [[1], [0]])
        result = K.eval(result_t)
        assert np.isclose(result, 4. / 6., atol=1e-3)

        # check with sample_weight
        result_t = acc_obj([[1], [1]], [[1], [0]], [[0.5], [0.2]])
        result = K.eval(result_t)
        assert np.isclose(result, 4.5 / 6.7, atol=1e-3)
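BinaryAccuracy keeps two weight variables, a running total and a sample count (the two weights checked above), which is why the assertions accumulate across calls. A minimal standalone sketch of that state API, assuming TensorFlow 2.x:

import tensorflow as tf

m = tf.keras.metrics.BinaryAccuracy()
m.update_state([[1], [0]], [[0.98], [0.1]])  # both predictions cross the 0.5 threshold correctly
print(m.result().numpy())                    # 1.0 (2/2)
m.update_state([[1]], [[0.2]])               # 0.2 binarizes to 0, mismatching the label
print(m.result().numpy())                    # ~0.667 (2/3, running average)
m.reset_states()                             # clears the total and count variables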
Example #2
    def test_metrics_correctness_with_dataset(self):
        layers = [
            keras.layers.Dense(8,
                               activation='relu',
                               input_dim=4,
                               kernel_initializer='ones'),
            keras.layers.Dense(1,
                               activation='sigmoid',
                               kernel_initializer='ones')
        ]

        model = testing_utils.get_model_from_layers(layers, (4, ))

        model.compile(loss='binary_crossentropy',
                      metrics=['accuracy',
                               metrics_module.BinaryAccuracy()],
                      optimizer='rmsprop',
                      run_eagerly=testing_utils.should_run_eagerly())

        np.random.seed(123)
        x = np.random.randint(10, size=(100, 4)).astype(np.float32)
        y = np.random.randint(2, size=(100, 1)).astype(np.float32)
        dataset = tf.data.Dataset.from_tensor_slices((x, y))
        dataset = dataset.batch(10)
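        # the all-ones kernels saturate the sigmoid near 1, so accuracy
        # ~= the share of positive labels in the random targets (~0.5)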
        outs = model.evaluate(dataset, steps=10)
        self.assertEqual(np.around(outs[1], decimals=1), 0.5)
        self.assertEqual(np.around(outs[2], decimals=1), 0.5)

        y = np.zeros((100, 1), dtype=np.float32)
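        # predictions stay saturated near 1 and never match the all-zero labels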
        dataset = tf.data.Dataset.from_tensor_slices((x, y))
        dataset = dataset.repeat(100)
        dataset = dataset.batch(10)
        outs = model.evaluate(dataset, steps=10)
        self.assertEqual(outs[1], 0.)
        self.assertEqual(outs[2], 0.)
Example #3
def TrainModel(model, base_model, model_name):

    task = Task.init(project_name="Ex3ModelTrains", task_name=model_name)
    reporter = TrainsReporter()
    # Show a summary of the model. Check the number of trainable parameters
    model.summary()

    # Compile the model
    model.compile(loss=keras.losses.BinaryCrossentropy(from_logits=True),
                  optimizer=keras.optimizers.Adam(),
                  metrics=[metrics.BinaryAccuracy()])

    # Train the model
    model.fit(train_ds,
              steps_per_epoch=train_ds.samples // train_ds.batch_size,
              epochs=20,
              validation_data=valid_ds,
              validation_steps=valid_ds.samples // valid_ds.batch_size,
              callbacks=[reporter],
              verbose=1)

    # Unfreeze the base_model. Note that it keeps running in inference mode
    # since we passed `training=False` when calling it. This means that
    # the batchnorm layers will not update their batch statistics.
    # This prevents the batchnorm layers from undoing all the training
    # we've done so far.
    base_model.trainable = True
    reporter.epoch_ref = 20

    score = model.evaluate(test_ds)
    print('Test evaluation score:', score)
    print('Validation evaluation score:', model.evaluate(valid_ds))

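    # Recompile so that unfreezing `base_model` actually takes effect; Keras
    # applies `trainable` changes only when the model is (re)compiled.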
    model.compile(
        optimizer=keras.optimizers.Adam(1e-5),  # Low learning rate
        loss=keras.losses.BinaryCrossentropy(from_logits=True),
        metrics=[keras.metrics.BinaryAccuracy()],
    )

    model.fit(train_ds,
              steps_per_epoch=train_ds.samples // train_ds.batch_size,
              epochs=10,
              validation_data=valid_ds,
              validation_steps=valid_ds.samples // valid_ds.batch_size,
              callbacks=[reporter],
              verbose=1)

    score = model.evaluate(test_ds)
    print('Test evaluation score:', score)
    print('Validation evaluation score:', model.evaluate(valid_ds))
Example #4
def all_experiment_model(hp):
    PRE_TRAINED_MODEL = Xception(input_shape=(IMG_HEIGHT, IMG_WIDTH, 3),
                                 include_top=False,
                                 pooling='avg',
                                 weights='imagenet')

    for layer in PRE_TRAINED_MODEL.layers:
        layer.trainable = False

    x = layers.Flatten()(PRE_TRAINED_MODEL.output)
    for i in range(
            hp.Int('number_of_dense_dropout_blocks', min_value=0,
                   max_value=2)):
        x = layers.Dense(hp.Int(f'dense_units_{i}',
                                min_value=1024,
                                max_value=4096,
                                step=512),
                         activation='relu')(x)
        x = layers.Dropout(
            hp.Float(f'dropout_probability_{i}',
                     min_value=0,
                     max_value=0.3,
                     step=0.05))(x)
    x = layers.Dense(hp.Int('penultimate_dense_unit',
                            min_value=1024,
                            max_value=4096,
                            step=512),
                     activation='relu')(x)
    x = layers.Dense(1, activation='sigmoid')(x)

    MODEL = Model(PRE_TRAINED_MODEL.input, x)

    chosen_loss = hp.Choice(
        'loss', values=['SigmoidFocalCrossEntropy', 'BinaryCrossentropy'])
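    # NOTE: eval() assumes both loss classes are imported by name in this module
    # (BinaryCrossentropy from keras.losses, SigmoidFocalCrossEntropy typically
    # from tensorflow_addons); see the mapping-based sketch after Example #6.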
    MODEL.compile(optimizer=hp.Choice('optimiser',
                                      values=['Adam', 'RMSprop', 'SGD']),
                  loss=eval(chosen_loss)(),
                  metrics=[
                      metrics.BinaryAccuracy(name='acc'),
                      metrics.AUC(name='auc'),
                      metrics.FalsePositives(name='fp')
                  ])
    return MODEL
Example #5
def optimiser_experiment_model(hp):
    PRE_TRAINED_MODEL = Xception(input_shape=(IMG_HEIGHT, IMG_WIDTH, 3),
                                 include_top=False,
                                 pooling='avg',
                                 weights='imagenet')

    for layer in PRE_TRAINED_MODEL.layers:
        layer.trainable = False

    x = layers.Flatten()(PRE_TRAINED_MODEL.output)
    x = layers.Dense(1, activation='sigmoid')(x)

    MODEL = Model(PRE_TRAINED_MODEL.input, x)

    MODEL.compile(optimizer=hp.Choice('optimiser',
                                      values=['Adam', 'RMSprop', 'SGD']),
                  loss='binary_crossentropy',
                  metrics=[
                      metrics.BinaryAccuracy(name='acc'),
                      metrics.AUC(name='auc'),
                      metrics.FalsePositives(name='fp')
                  ])
    return MODEL
Example #6
def loss_experiment_model(hp):
    PRE_TRAINED_MODEL = Xception(input_shape=(IMG_HEIGHT, IMG_WIDTH, 3),
                                 include_top=False,
                                 pooling='avg',
                                 weights='imagenet')

    for layer in PRE_TRAINED_MODEL.layers:
        layer.trainable = False

    x = layers.Flatten()(PRE_TRAINED_MODEL.output)
    x = layers.Dense(1, activation='sigmoid')(x)

    MODEL = Model(PRE_TRAINED_MODEL.input, x)

    chosen_loss = hp.Choice(
        'loss', values=['SigmoidFocalCrossEntropy', 'BinaryCrossentropy'])
    MODEL.compile(optimizer='adam',
                  loss=eval(chosen_loss)(),
                  metrics=[
                      metrics.BinaryAccuracy(name='acc'),
                      metrics.AUC(name='auc'),
                      metrics.FalsePositives(name='fp')
                  ])
    return MODEL
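Examples #4 and #6 pick the loss class with eval(), which only works when both class names are already imported and will execute whatever string it is handed. A dictionary lookup is safer; a minimal sketch, assuming SigmoidFocalCrossEntropy comes from tensorflow_addons:

from tensorflow import keras
from tensorflow_addons.losses import SigmoidFocalCrossEntropy

# explicit name -> class mapping instead of eval()
LOSS_CLASSES = {
    'BinaryCrossentropy': keras.losses.BinaryCrossentropy,
    'SigmoidFocalCrossEntropy': SigmoidFocalCrossEntropy,
}

chosen_loss = hp.Choice('loss', values=sorted(LOSS_CLASSES))
MODEL.compile(optimizer='adam',
              loss=LOSS_CLASSES[chosen_loss](),
              metrics=[metrics.BinaryAccuracy(name='acc')])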
Example #7
model.add(Dense(units_9, activation=activation_9))
model.add(Dropout(dropout_9))

model.add(Dense(units_10, activation=activation_10))
model.add(Dropout(dropout_10))

model.add(Dense(units_11, activation=activation_11))
model.add(Dropout(dropout_11))

model.add(Dense(units_12, activation=activation_12))
model.add(Dropout(dropout_12))

model.add(Dense(nb_classes, activation='sigmoid'))

METRICS = [
            metrics.BinaryAccuracy(name='ACCURACY'),
            metrics.Precision(name='PRECISION'),
            metrics.Recall(name='RECALL'),
            metrics.AUC(name='AUC'),
            metrics.TruePositives(name='TP'),
            metrics.TrueNegatives(name='TN'),
            metrics.FalsePositives(name='FP'),
            metrics.FalseNegatives(name='FN')]

model.compile(loss='binary_crossentropy',
              optimizer=compile_optimizer,
              metrics=METRICS)

# GENERATORS
train_datagen = ImageDataGenerator(rescale=1. / 255)
Example #8
PRE_TRAINED_MODEL = Xception(input_shape=(IMG_HEIGHT, IMG_WIDTH, 3),
                             include_top=False,
                             weights='imagenet')

for layer in PRE_TRAINED_MODEL.layers:
    layer.trainable = False

x = layers.Flatten()(PRE_TRAINED_MODEL.output)
x = layers.Dense(1024, activation='relu')(x)
x = layers.Dropout(0.2)(x)
x = layers.Dense(1, activation='sigmoid')(x)

MODEL = Model(PRE_TRAINED_MODEL.input, x)

MODEL.compile(optimizer=RMSprop(learning_rate=LEARNING_RATE),
              loss='binary_crossentropy',
              metrics=[
                  metrics.BinaryAccuracy(name='acc'),
                  metrics.AUC(name='auc'),
                  metrics.FalsePositives(name='fp')
              ])

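# NOTE: 'fp' monitors the false-positive count on training batches; Keras
# prefixes validation metrics with 'val_', so 'val_fp' would monitor validation.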
EARLY_STOPPING = EarlyStopping(monitor='fp', 
                               verbose=1,
                               patience=50,
                               mode='max',
                               restore_best_weights=True)

CLASS_WEIGHT = {0: 0.001694915254237288, 
                1: 0.00017733640716439085}
# CLASS_WEIGHT = {0: 9, 
#                 1: 1}
# CLASS_WEIGHT = {0: 1, 
Example #9
def set_model_prediction_multi_label(model_path, test_set, custom_objects, with_f1=False):
    """Compute metrics of a set of models for multilabel model. Computed metrics are : Loss, Accuracy, F1-Score, Macro F1-Score.

    Args:
        model_path (string): path of folder that contains models.
        test_set (pandas.DataFrame): DataFrame with column path for images path and label.
        custom_objects (dict): Dict of custom objects to load with model.
        with_f1 (bool, optional): True if the model is train with F1-Score metrics, otherwise False. Defaults to False.
    """
    test_generator = generator(test_set['path'].to_numpy(),
                               test_set[['label_culture', 'label_coffee']].to_numpy(),
                               eurosat_params['mean'],
                               eurosat_params['std'],
                               batch_size=len(test_set))
    
    prediction_set = []
    evaluate = []
    
    X, y = next(test_generator)
    
    for path in os.listdir(model_path):
        if path.endswith('.h5'):
            restored_model = None
            if with_f1:
                restored_model = load_model(os.path.join(model_path, path), custom_objects, compile=False)
                restored_model.compile(optimizer=Adam(learning_rate=0.00001),
                                       loss='binary_crossentropy',
                                       metrics=[metrics.BinaryAccuracy(name='accuracy'),
                                                metrics.Precision(name='precision'),
                                                metrics.Recall(name='recall'),
                                                f1_score_keras])
            else:
                restored_model = load_model(os.path.join(model_path, path), custom_objects)

            evaluate.append(restored_model.evaluate(test_generator, steps=1))
            prediction_set.append(np.where(restored_model.predict(X) > 0.5, 1, 0))
            
    predictions = []
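    # majority vote across the model ensemble: bincount + argmax picks the
    # most frequent prediction for each of the two labels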
    for pred in zip(*prediction_set):
        culture_pred, coffee_pred = zip(*pred)

        predictions.append(np.array([np.argmax(np.bincount(culture_pred)),
                                    np.argmax(np.bincount(coffee_pred))]))
    
    cm = multilabel_confusion_matrix(y, predictions)
    
    plot_confusion_matrix(cm[0], ["Culture", "No-Culture"], "Confusion Matrix\nCulture vs No-Culture\nDenseNet 64x64")
    plot_confusion_matrix(cm[1], ["Coffee", "Other"], "Confusion Matrix\nCoffee vs Other\nDenseNet 64x64")
    
    if with_f1:
        losses, accs, precisions, recalls, f1 = zip(*evaluate)
    else:
        losses, accs, precisions, recalls = zip(*evaluate)

    print("Global metrics")
    print(f"Mean accuracy : {round(np.mean(accs),4)}")
    print(f"Stdev accuracy : {round(np.std(accs),4)}")
    print("\n")
    print(f"Mean loss : {round(np.mean(losses),4)}")
    print(f"Stdev loss : {round(np.std(losses),4)}")
    print("\n")
    culture_pred, coffee_pred = zip(*predictions)
    culture_true, coffee_true = zip(*y)
    print("Culture vs no-culture")
    print(f"F1-Score Culture: {round(f1_score(culture_true, culture_pred, pos_label=0),4)}")
    print(f"F1-Score No culture: {round(f1_score(culture_true, culture_pred, pos_label=1),4)}")
    print(f"Macro F1-Score : {round(f1_score(culture_true, culture_pred, average='macro'),4)}")
    print(f"\n")
    print("Coffee vs other")
    print(f"F1-Score Coffee: {round(f1_score(coffee_true, coffee_pred, pos_label=0),4)}")
    print(f"F1-Score Other: {round(f1_score(coffee_true, coffee_pred, pos_label=1),4)}")
    print(f"Macro F1-Score : {round(f1_score(coffee_true, coffee_pred, average='macro'),4)}")
Example #10
def set_model_prediction(model_path, test_set, custom_objects, with_f1=False, labels=["Coffee", "Other"], title="title"):
    """Compute metrics of a set of models. Computed metrics are : Loss, Accuracy, F1-Score, Macro F1-Score.

    Args:
        model_path (string): path of folder that contains models.
        test_set (pandas.DataFrame): DataFrame with column path for images path and label.
        custom_objects (dict): Dict of custom objects to load with model.
        with_f1 (bool, optional): True if the model is train with F1-Score metrics, otherwise False. Defaults to False.
        labels (list, optional): Label for confusion Matrix. Defaults to ["Coffee", "Other"].
        title (str, optional): Title of confusion matrix. Defaults to "title".
    """
    test_generator = generator(test_set['path'].to_numpy(),
                               test_set['label'].to_numpy(),
                               eurosat_params['mean'],
                               eurosat_params['std'],
                               batch_size=len(test_set))
    
    prediction_set = []
    evaluate = []
    
    X, y = next(test_generator)
    
    for path in os.listdir(model_path):
        if path.endswith('.h5'):
            restored_model = None
            if with_f1:
                restored_model = load_model(os.path.join(model_path, path), custom_objects, compile=False)
                restored_model.compile(optimizer=Adam(learning_rate=0.00001),
                                       loss='binary_crossentropy',
                                       metrics=[metrics.BinaryAccuracy(name='accuracy'),
                                                metrics.Precision(name='precision'),
                                                metrics.Recall(name='recall'),
                                                f1_score_keras])
            else:
                restored_model = load_model(os.path.join(model_path, path), custom_objects)

            evaluate.append(restored_model.evaluate(test_generator, steps=1))
            prediction_set.append(np.where(restored_model.predict(X) > 0.5, 1, 0).reshape(-1).tolist())
            
    predictions = []
    for pred in zip(*prediction_set):
        predictions.append(np.argmax(np.bincount(pred)))
    
    cm = confusion_matrix(y, predictions)
    plot_confusion_matrix(cm, labels, title)
    
    if with_f1:
        losses, accs, precisions, recalls, f1 = zip(*evaluate)
    else:
        losses, accs, precisions, recalls = zip(*evaluate)

    print(f"Mean accuracy : {round(np.mean(accs),4)}")
    print(f"Stdev accuracy : {round(np.std(accs),4)}")
    print("\n")
    print(f"Mean loss : {round(np.mean(losses),4)}")
    print(f"Stdev loss : {round(np.std(losses),4)}")
    print("\n")
    print(f"F1-Score {labels[0]}: {round(f1_score(y, predictions, pos_label=0),4)}")
    print(f"F1-Score {labels[1]}: {round(f1_score(y, predictions, pos_label=1),4)}")
    print(f"Macro F1-Score : {round(f1_score(y, predictions, average='macro'),4)}")
Example #11
    def test_binary_accuracy_threshold(self):
        acc_obj = metrics.BinaryAccuracy(threshold=0.7)
        result_t = acc_obj([[1], [1], [0], [0]], [[0.9], [0.6], [0.4], [0.8]])
        result = K.eval(result_t)
        assert np.isclose(result, 0.5, atol=1e-3)
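With threshold=0.7 the four predictions binarize to [1, 0, 0, 1]; compared against the labels [1, 1, 0, 0], two of the four match, giving the expected 0.5.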