Example #1
def _test_tf(avg, beta, act, pred, threshold):
    act = tf.constant(act, tf.float32)
    pred = tf.constant(pred, tf.float32)

    fbeta = FBetaScore(3, avg, beta, threshold)
    fbeta.update_state(act, pred)
    return fbeta.result().numpy()
Example #2
    def _test_tf(self, avg, beta, act, pred, threshold):
        act = tf.constant(act, tf.float32)
        pred = tf.constant(pred, tf.float32)

        fbeta = FBetaScore(3, avg, beta, threshold)
        self.evaluate(tf.compat.v1.variables_initializer(fbeta.variables))
        self.evaluate(fbeta.update_state(act, pred))
        return self.evaluate(fbeta.result())
Example #3
    def test_config(self):
        fbeta_obj = FBetaScore(num_classes=3, beta=0.5, average=None)
        self.assertEqual(fbeta_obj.beta, 0.5)
        self.assertEqual(fbeta_obj.average, None)
        self.assertEqual(fbeta_obj.num_classes, 3)
        self.assertEqual(fbeta_obj.dtype, tf.float32)

        # Check save and restore config
        fbeta_obj2 = FBetaScore.from_config(fbeta_obj.get_config())
        self.assertEqual(fbeta_obj2.beta, 0.5)
        self.assertEqual(fbeta_obj2.average, None)
        self.assertEqual(fbeta_obj2.num_classes, 3)
        self.assertEqual(fbeta_obj2.dtype, tf.float32)
Example #4
    def test_eq(self):
        f1 = F1Score(3)
        fbeta = FBetaScore(3, beta=1.0)
        self.evaluate(tf.compat.v1.variables_initializer(f1.variables))
        self.evaluate(tf.compat.v1.variables_initializer(fbeta.variables))

        preds = [[0.9, 0.1, 0], [0.2, 0.6, 0.2], [0, 0, 1], [0.4, 0.3, 0.3],
                 [0, 0.9, 0.1], [0, 0, 1]]
        actuals = [[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0], [1, 0, 0],
                   [0, 0, 1]]

        self.evaluate(fbeta.update_state(actuals, preds))
        self.evaluate(f1.update_state(actuals, preds))
        self.assertAllClose(
            self.evaluate(fbeta.result()), self.evaluate(f1.result()))
Example #5
def test_config_fbeta():
    fbeta_obj = FBetaScore(num_classes=3, beta=0.5, threshold=0.3, average=None)
    assert fbeta_obj.beta == 0.5
    assert fbeta_obj.average is None
    assert fbeta_obj.threshold == 0.3
    assert fbeta_obj.num_classes == 3
    assert fbeta_obj.dtype == tf.float32

    # Check save and restore config
    fbeta_obj2 = FBetaScore.from_config(fbeta_obj.get_config())
    assert fbeta_obj2.beta == 0.5
    assert fbeta_obj2.average is None
    assert fbeta_obj2.threshold == 0.3
    assert fbeta_obj2.num_classes == 3
    assert fbeta_obj2.dtype == tf.float32
Example #6
    def initialize_vars(self, beta_val, average):
        # initialize variables
        fbeta_obj = FBetaScore(num_classes=3, beta=beta_val, average=average)

        self.evaluate(tf.compat.v1.variables_initializer(fbeta_obj.variables))

        return fbeta_obj
Example #7
def test_eq():
    f1 = F1Score(3)
    fbeta = FBetaScore(3, beta=1.0)

    preds = [
        [0.9, 0.1, 0],
        [0.2, 0.6, 0.2],
        [0, 0, 1],
        [0.4, 0.3, 0.3],
        [0, 0.9, 0.1],
        [0, 0, 1],
    ]
    actuals = [[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0], [1, 0, 0], [0, 0, 1]]

    fbeta.update_state(actuals, preds)
    f1.update_state(actuals, preds)
    np.testing.assert_allclose(fbeta.result().numpy(), f1.result().numpy())
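
The snippets above omit their imports. Below is a minimal, self-contained sketch of the same eager-mode pattern as Examples #1 and #7, assuming FBetaScore and F1Score come from tensorflow_addons (tfa.metrics); that import and the toy data are assumptions added here, not part of the original excerpts.

import numpy as np
import tensorflow as tf
from tensorflow_addons.metrics import F1Score, FBetaScore

actuals = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=tf.float32)
preds = tf.constant(
    [[0.9, 0.1, 0.0], [0.2, 0.6, 0.2], [0.0, 0.0, 1.0]], dtype=tf.float32
)

fbeta = FBetaScore(num_classes=3, average="macro", beta=1.0)
f1 = F1Score(num_classes=3, average="macro")

fbeta.update_state(actuals, preds)
f1.update_state(actuals, preds)

# With beta=1.0 the F-beta score reduces to F1, which is the equivalence Example #7 asserts.
np.testing.assert_allclose(fbeta.result().numpy(), f1.result().numpy())
print(fbeta.result().numpy())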
Example #8
    def test_keras_model(self):
        model = tf.keras.Sequential()
        model.add(layers.Dense(64, activation='relu'))
        model.add(layers.Dense(64, activation='relu'))
        model.add(layers.Dense(1, activation='softmax'))
        fb = FBetaScore(1, 'macro')
        model.compile(optimizer='rmsprop',
                      loss='categorical_crossentropy',
                      metrics=['acc', fb])
        # data preparation
        data = np.random.random((10, 3))
        labels = np.random.random((10, 1))
        labels = np.where(labels > 0.5, 1, 0)
        model.fit(data, labels, epochs=1, batch_size=32, verbose=0)
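
For reference, here is a runnable sketch of the same compile-time pattern as Example #8, written for a 3-class one-hot problem; the layer sizes, the random data, and the tensorflow_addons import are assumptions for illustration, not the original test's setup.

import numpy as np
import tensorflow as tf
from tensorflow_addons.metrics import FBetaScore

# Small 3-class classifier; num_classes of the metric must match the output width.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(64, activation="relu", input_shape=(3,)),
    tf.keras.layers.Dense(3, activation="softmax"),
])
model.compile(
    optimizer="rmsprop",
    loss="categorical_crossentropy",
    metrics=["acc", FBetaScore(num_classes=3, average="macro", beta=2.0)],
)

data = np.random.random((10, 3)).astype("float32")
labels = tf.keras.utils.to_categorical(
    np.random.randint(3, size=(10,)), num_classes=3
)
model.fit(data, labels, epochs=1, batch_size=32, verbose=0)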
Example #9
    def test_keras_model(self):
        fbeta = FBetaScore(5, "micro", 1.0)
        utils._get_model(fbeta, 5)

# Load the model
# model = ResNet(pretrain_dataset=pretrain_dataset, pooling="max", task=task)
model = func_resnet(pretrain_dataset=pretrain_dataset,
                    pooling="max",
                    task=task)
if task == "orig_labels":
    n_outputs = 17
elif task == "deforestation":
    n_outputs = 1
else:
    raise Exception(
        f'ERROR: Unrecognized task "{task}". Please select one of "orig_labels" or "deforestation".'
    )
model_metrics = [
    "accuracy",
    FBetaScore(num_classes=n_outputs, average="macro", beta=2.0),
]
wandb.init(
    project="fsdl_deforestation_detection",
    entity="fsdl-andre-karthik",
    tags="mvp",
    reinit=True,
    config={
        **vars(args),
        **dict(current_task=task)
    },
)
# The epoch on which to start the full model training
initial_epoch = 0
if args.pretrained or count > 0:
    # Train initially the final layer