def test_FBetaMetric_macro_average_metric(self):
    """Macro-averaged F-beta should equal the mean of the per-class F-beta scores."""
    beta = 0.5
    metric = FBeta(beta=beta, average='macro')
    metric(F1MetricTest.fake_predictions, F1MetricTest.fake_targets)
    fscores = metric.get_metric()

    desired_precisions = [1.00, 0.25, 0.00, 1.00, 0.00]
    desired_recalls = [0.33, 1.00, 0.00, 1.00, 0.00]

    # Per-class F-beta; a class with p + r == 0 (no support) scores 0.
    desired_fscores = []
    for p, r in zip(desired_precisions, desired_recalls):
        if p + r == 0.0:
            desired_fscores.append(0.0)
        else:
            desired_fscores.append(((1 + beta ** 2) * p * r) / (beta ** 2 * p + r))

    numpy.testing.assert_almost_equal(fscores,
                                      numpy.mean(desired_fscores),
                                      decimal=2)
# Beispiel #2 (score: 0) — snippet separator left over from the scrape; not Python code.
    def test_fbeta_multiclass_macro_average_metric_multireturn(self):
        """Macro averaging should return scalar floats equal to the mean of the per-class values."""
        fbeta = FBeta(average='macro')
        fbeta(self.predictions, self.targets)
        fscore, precision, recall = fbeta.get_metric()

        expected_precision = numpy.mean(self.desired_precisions)
        expected_recall = numpy.mean(self.desired_recalls)
        expected_fscore = numpy.mean(self.desired_fscores)

        # Macro averaging collapses each metric to a single Python float.
        for value in (precision, recall, fscore):
            assert isinstance(value, float)

        # Each returned scalar matches the mean of the per-class fixtures.
        numpy.testing.assert_almost_equal(precision,
                                          expected_precision,
                                          decimal=2)
        numpy.testing.assert_almost_equal(recall, expected_recall, decimal=2)
        numpy.testing.assert_almost_equal(fscore, expected_fscore, decimal=2)
# Beispiel #3 (score: 0) — snippet separator left over from the scrape; not Python code.
    def test_fbeta_multiclass_with_mask(self):
        """Entries with mask == 0 must not contribute to any running count."""
        mask = torch.Tensor([1, 1, 1, 1, 1, 0])

        fbeta = FBeta()
        fbeta(self.predictions, (self.targets, mask))

        # The final (masked-out) sample is excluded from every per-class tally.
        expected_state = [
            (fbeta._pred_sum, [1, 3, 0, 1, 0]),
            (fbeta._true_sum, [2, 1, 0, 1, 1]),
            (fbeta._true_positive_sum, [1, 1, 0, 1, 0]),
        ]
        for actual, desired in expected_state:
            numpy.testing.assert_almost_equal(actual.tolist(), desired)
# Beispiel #4 (score: 0) — snippet separator left over from the scrape; not Python code.
    def test_fbeta_multiclass_state(self):
        """After one update, the internal running sums should equal the fixtures."""
        fbeta = FBeta()
        fbeta(self.predictions, self.targets)

        # Internal accumulators after a single batch must match the
        # precomputed per-class fixtures on the test case.
        state_checks = [
            (fbeta._pred_sum, self.pred_sum),
            (fbeta._true_sum, self.true_sum),
            (fbeta._true_positive_sum, self.true_positive_sum),
            (fbeta._total_sum, self.total_sum),
        ]
        for state, desired in state_checks:
            numpy.testing.assert_almost_equal(state.tolist(), desired)
def get_baseline_experiment(experiment_name):
    """Build the baseline HAN experiment and restore its best checkpoint.

    Args:
        experiment_name: Name (and checkpoint directory) of the experiment
            to restore.

    Returns:
        The Experiment, monitoring macro F-score, with its 'best'
        checkpoint loaded and the model in eval mode.
    """
    # NOTE(review): `nb_layers` is a free variable here — presumably a
    # module-level constant defined elsewhere in this file; confirm.
    model = HAN(20, 10, 300, 2, nb_layers, .25).eval()
    experiment = Experiment(
        experiment_name,
        model,
        monitor_metric="val_fscore_macro",
        monitor_mode="max",
        loss_function='cross_entropy',
        task="classification",
        epoch_metrics=[FBeta(average='macro')],
        device=0,
    )
    experiment.load_checkpoint('best')
    return experiment
# Beispiel #6 (score: 0) — snippet separator left over from the scrape; not Python code.
    def test_fbeta_handles_batch_size_of_one(self):
        """A single-sample batch should update every counter without shape errors."""
        predictions = torch.Tensor([[0.2862, 0.3479, 0.1627, 0.2033]])
        targets = torch.Tensor([1])
        mask = torch.Tensor([1])

        fbeta = FBeta()
        fbeta(predictions, (targets, mask))

        # Class 1 is both the argmax prediction and the true label;
        # the single sample counts toward every class's total.
        for state, desired in [
            (fbeta._pred_sum, [0.0, 1.0, 0.0, 0.0]),
            (fbeta._true_sum, [0.0, 1.0, 0.0, 0.0]),
            (fbeta._true_positive_sum, [0.0, 1.0, 0.0, 0.0]),
            (fbeta._total_sum, [1.0, 1.0, 1.0, 1.0]),
        ]:
            numpy.testing.assert_almost_equal(state.tolist(), desired)
# Beispiel #7 (score: 0) — snippet separator left over from the scrape; not Python code.
 def test_runtime_errors(self):
     """Requesting the metric before any update must raise RuntimeError."""
     fbeta = FBeta()
     # The metric was never called, so there is nothing to aggregate.
     with self.assertRaises(RuntimeError):
         fbeta.get_metric()
# Beispiel #8 (score: 0) — snippet separator left over from the scrape; not Python code.
 def test_names(self):
     """`__name__` should reflect the `metric`, `average`, and `names` options.

     Without `metric`, FBeta reports three names (fscore/precision/recall)
     suffixed with the average; with `metric`, a single such name.
     An explicit `names` argument always overrides the derived names.
     """
     # Multi-return mode: three derived names, suffixed with the average
     # ('macro', 'micro', or a class index such as 0).
     for average in ('macro', 'micro', 0):
         fbeta = FBeta(average=average)
         self.assertEqual(['fscore_{}'.format(average),
                           'precision_{}'.format(average),
                           'recall_{}'.format(average)],
                          fbeta.__name__)

     # Single-metric mode: one derived name per (metric, average) pair.
     for metric in ('fscore', 'precision', 'recall'):
         for average in ('macro', 'micro', 0):
             fbeta = FBeta(metric=metric, average=average)
             self.assertEqual('{}_{}'.format(metric, average),
                              fbeta.__name__)

     # Explicit `names` wins in both modes.
     fbeta = FBeta(average='micro', names=['f', 'p', 'r'])
     self.assertEqual(['f', 'p', 'r'], fbeta.__name__)
     fbeta = FBeta(metric='fscore', average='macro', names='f')
     self.assertEqual('f', fbeta.__name__)
     fbeta = FBeta(average='macro', names=['f', "p", "r"])
     self.assertEqual(["f", "p", "r"], fbeta.__name__)
# Beispiel #9 (score: 0) — snippet separator left over from the scrape; not Python code.
 def _compute(self, *args, **kwargs):
     """Build an FBeta with the given options, feed it the fixture batch, and return its metric."""
     metric = FBeta(*args, **kwargs)
     metric(self.predictions, self.targets)
     return metric.get_metric()