def test_torch_class_index_label(self):
    """Accuracy over torch inputs whose labels are plain class indices."""
    acc_trace = Accuracy(true_key="label", pred_key="pred", output_name=self.acc_key)
    data = {"label": torch.tensor([0, 1])}  # class index
    preds = {"pred": torch.tensor([[1, 2, 3], [0.2, 0.5, 0.3]])}
    runner = TraceRun(trace=acc_trace, batch=data, prediction=preds)
    runner.run_trace()
    # argmax of the rows is [2, 1]; only the second sample matches -> accuracy 0.5
    self.assertEqual(runner.data_on_epoch_end[self.acc_key], 0.5)
def test_passing_kwarg(self):
    """Recall must reject the managed `average` kwarg and forward extras to recall_score."""
    with self.subTest("illegal kwargs"):
        # `average` is handled internally by the trace, so supplying it must raise
        with self.assertRaises(ValueError):
            trace = Recall(true_key="label", pred_key="pred", output_name=self.p_key, average="binary")
    with self.subTest("check if kwargs pass to recall_score"):
        with unittest.mock.patch("fastestimator.trace.metric.recall.recall_score") as fake:
            extra_kwargs = {"e1": "extra1", "e2": "extra2"}
            recall_trace = Recall(true_key="label", pred_key="pred", output_name=self.p_key, **extra_kwargs)
            data = {"label": tf.constant([0, 1, 0, 1])}
            preds = {"pred": tf.constant([[0.2], [0.6], [0.8], [0.1]])}  # [[0], [1], [1], [0]]
            runner = TraceRun(trace=recall_trace, batch=data, prediction=preds)
            runner.run_trace()
            forwarded = fake.call_args[1]
            # every extra kwarg must reach the mocked recall_score unchanged
            for key, val in extra_kwargs.items():
                self.assertTrue(key in forwarded)
                self.assertEqual(val, forwarded[key])
def test_tf_one_hot_label(self):
    """Accuracy over tf inputs whose labels arrive one-hot encoded."""
    acc_trace = Accuracy(true_key="label", pred_key="pred", output_name=self.acc_key)
    data = {"label": tf.constant([[1, 0, 0], [0, 1, 0]])}  # one-hot
    preds = {"pred": tf.constant([[1, 2, 3], [0.2, 0.5, 0.3]])}
    runner = TraceRun(trace=acc_trace, batch=data, prediction=preds)
    runner.run_trace()
    # one-hot labels decode to [0, 1]; predictions argmax to [2, 1] -> accuracy 0.5
    self.assertEqual(runner.data_on_epoch_end[self.acc_key], 0.5)
def test_torch_binary_class(self):
    """Recall on binary torch data, with ordinal and with one-hot labels."""
    with self.subTest("ordinal label"):
        recall_trace = Recall(true_key="label", pred_key="pred", output_name=self.p_key)
        # confusion counts: tp, tn, fp, fn = 1, 1, 1, 1
        data = {"label": torch.tensor([0, 1, 0, 1])}
        preds = {"pred": torch.tensor([[0.2], [0.6], [0.8], [0.1]])}  # thresholds to [[0], [1], [1], [0]]
        runner = TraceRun(trace=recall_trace, batch=data, prediction=preds)
        runner.run_trace()
        # recall = tp / (tp + fn) = 1 / 2
        self.assertEqual(runner.data_on_epoch_end[self.p_key], 0.5)
    with self.subTest("one-hot label"):
        recall_trace = Recall(true_key="label", pred_key="pred", output_name=self.p_key)
        # confusion counts: tp, tn, fp, fn = 2, 1, 0, 1
        data = {"label": torch.tensor([[1, 0], [0, 1], [0, 1], [0, 1]])}  # decodes to [0, 1, 1, 1]
        preds = {"pred": torch.tensor([[0.2], [0.6], [0.8], [0.1]])}  # thresholds to [[0], [1], [1], [0]]
        runner = TraceRun(trace=recall_trace, batch=data, prediction=preds)
        runner.run_trace()
        # recall = tp / (tp + fn) = 2 / 3
        self.assertEqual(runner.data_on_epoch_end[self.p_key], 2 / 3)
def test_torch_binary_class(self):
    """MCC on binary torch data, with ordinal and with one-hot labels."""
    with self.subTest("ordinal label"):
        mcc_trace = MCC(true_key="label", pred_key="pred", output_name=self.mcc_key)
        data = {"label": torch.tensor([0, 1, 0, 1])}
        preds = {"pred": torch.tensor([[0.2], [0.6], [0.8], [0.1]])}  # thresholds to [[0], [1], [1], [0]]
        runner = TraceRun(trace=mcc_trace, batch=data, prediction=preds)
        runner.run_trace()
        # confusion counts for this batch
        tp, tn, fp, fn = [1, 1, 1, 1]
        expected = mcc_func(tp, tn, fp, fn)
        self.assertEqual(runner.data_on_epoch_end[self.mcc_key], expected)
    with self.subTest("one-hot label"):
        mcc_trace = MCC(true_key="label", pred_key="pred", output_name=self.mcc_key)
        data = {"label": torch.tensor([[1, 0], [0, 1], [0, 1], [0, 1]])}  # decodes to [0, 1, 1, 1]
        preds = {"pred": torch.tensor([[0.2], [0.6], [0.8], [0.1]])}  # thresholds to [[0], [1], [1], [0]]
        runner = TraceRun(trace=mcc_trace, batch=data, prediction=preds)
        runner.run_trace()
        # confusion counts for this batch
        tp, tn, fp, fn = [2, 1, 0, 1]
        expected = mcc_func(tp, tn, fp, fn)
        self.assertEqual(runner.data_on_epoch_end[self.mcc_key], expected)
def test_torch_multi_class(self):
    """Per-class Recall on 3-class torch data; one-hot labels must match ordinal."""
    # argmax of each row -> predicted classes [0, 1, 2, 0, 1, 2]
    logits = torch.tensor([[0.2, 0.1, -0.6], [0.6, 2.0, 0.1], [0.1, 0.1, 0.8],
                           [0.4, 0.1, -0.3], [0.2, 0.7, 0.1], [0.3, 0.6, 1.5]])
    with self.subTest("ordinal label"):
        recall_trace = Recall(true_key="label", pred_key="pred", output_name=self.p_key)
        data = {"label": torch.tensor([0, 0, 0, 1, 1, 2])}
        runner = TraceRun(trace=recall_trace, batch=data, prediction={"pred": logits})
        runner.run_trace()
        # class 0: [tp, tn, fp, fn] = [1, 2, 1, 2] -> recall 1/3
        self.assertEqual(runner.data_on_epoch_end[self.p_key][0], 1 / 3)
        # class 1: [tp, tn, fp, fn] = [1, 3, 1, 1] -> recall 1/2
        self.assertEqual(runner.data_on_epoch_end[self.p_key][1], 1 / 2)
        # class 2: [tp, tn, fp, fn] = [1, 4, 1, 0] -> recall 1
        self.assertEqual(runner.data_on_epoch_end[self.p_key][2], 1)
    with self.subTest("one-hot label"):
        recall_trace = Recall(true_key="label", pred_key="pred", output_name=self.p_key)
        # one-hot encoding of [0, 0, 0, 1, 1, 2]
        data = {
            "label": torch.tensor([[1, 0, 0], [1, 0, 0], [1, 0, 0],
                                   [0, 1, 0], [0, 1, 0], [0, 0, 1]])
        }
        runner = TraceRun(trace=recall_trace, batch=data, prediction={"pred": logits})
        runner.run_trace()
        # class 0: [tp, tn, fp, fn] = [1, 2, 1, 2] -> recall 1/3
        self.assertEqual(runner.data_on_epoch_end[self.p_key][0], 1 / 3)
        # class 1: [tp, tn, fp, fn] = [1, 3, 1, 1] -> recall 1/2
        self.assertEqual(runner.data_on_epoch_end[self.p_key][1], 1 / 2)
        # class 2: [tp, tn, fp, fn] = [1, 4, 1, 0] -> recall 1
        self.assertEqual(runner.data_on_epoch_end[self.p_key][2], 1)
def test_torch_binary_class(self):
    """Accuracy on binary torch data with `from_logits` toggled off and on.

    Fix: the subTest labels previously said "from_logit=..." while the actual
    Accuracy parameter is `from_logits`; the labels now match the parameter so
    failure output names the real kwarg under test.
    """
    with self.subTest("from_logits=False"):
        trace = Accuracy(true_key="label", pred_key="pred", output_name=self.acc_key, from_logits=False)
        batch = {"label": torch.tensor([0, 1])}
        # probabilities: pred > 0.5 => class 1, so [0.3, 0.6] -> [0, 1]
        prediction = {"pred": torch.tensor([[0.3], [0.6]])}
        sim = TraceRun(trace=trace, batch=batch, prediction=prediction)
        sim.run_trace()
        self.assertEqual(sim.data_on_epoch_end[self.acc_key], 1.0)
    with self.subTest("from_logits=True"):
        trace = Accuracy(true_key="label", pred_key="pred", output_name=self.acc_key, from_logits=True)
        batch = {"label": torch.tensor([0, 1])}
        # logits: sigmoid(pred) = 1 / (1 + exp(-pred)) > 0.5 => class 1, so [-1, 1] -> [0, 1]
        prediction = {"pred": torch.tensor([[-1], [1]])}
        sim = TraceRun(trace=trace, batch=batch, prediction=prediction)
        sim.run_trace()
        self.assertEqual(sim.data_on_epoch_end[self.acc_key], 1.0)
def test_tf_binary_class(self):
    """F1Score on binary tf data, with ordinal and with one-hot labels."""
    with self.subTest("ordinal label"):
        f1_trace = F1Score(true_key="label", pred_key="pred", output_name=self.f1_key)
        # confusion counts: tp, tn, fp, fn = 1, 1, 1, 1
        data = {"label": tf.constant([0, 1, 0, 1])}
        preds = {"pred": tf.constant([[0.2], [0.6], [0.8], [0.1]])}  # thresholds to [[0], [1], [1], [0]]
        runner = TraceRun(trace=f1_trace, batch=data, prediction=preds)
        runner.run_trace()
        # f1 = 2*tp / (2*tp + fp + fn) = 0.5
        self.assertEqual(runner.data_on_epoch_end[self.f1_key], 0.5)
    with self.subTest("one-hot label"):
        f1_trace = F1Score(true_key="label", pred_key="pred", output_name=self.f1_key)
        # confusion counts: tp, tn, fp, fn = 2, 1, 0, 1
        data = {"label": tf.constant([[1, 0], [0, 1], [0, 1], [0, 1]])}  # decodes to [0, 1, 1, 1]
        preds = {"pred": tf.constant([[0.2], [0.6], [0.8], [0.1]])}  # thresholds to [[0], [1], [1], [0]]
        runner = TraceRun(trace=f1_trace, batch=data, prediction=preds)
        runner.run_trace()
        # f1 = 2*tp / (2*tp + fp + fn) = 0.8
        self.assertEqual(runner.data_on_epoch_end[self.f1_key], 0.8)
def test_torch_multi_class(self):
    """MCC on 3-class torch data; one-hot labels must give the same score as ordinal."""
    # argmax of each row -> predicted classes [0, 1, 2, 0, 1, 2]
    logits = torch.tensor([[0.2, 0.1, -0.6], [0.6, 2.0, 0.1], [0.1, 0.1, 0.8],
                           [0.4, 0.1, -0.3], [0.2, 0.7, 0.1], [0.3, 0.6, 1.5]])
    with self.subTest("ordinal label"):
        mcc_trace = MCC(true_key="label", pred_key="pred", output_name=self.mcc_key)
        data = {"label": torch.tensor([0, 0, 0, 1, 1, 2])}
        runner = TraceRun(trace=mcc_trace, batch=data, prediction={"pred": logits})
        runner.run_trace()
        self.assertEqual(runner.data_on_epoch_end[self.mcc_key], 0.26111648393354675)
    with self.subTest("one-hot label"):
        mcc_trace = MCC(true_key="label", pred_key="pred", output_name=self.mcc_key)
        # one-hot encoding of [0, 0, 0, 1, 1, 2]
        data = {
            "label": torch.tensor([[1, 0, 0], [1, 0, 0], [1, 0, 0],
                                   [0, 1, 0], [0, 1, 0], [0, 0, 1]])
        }
        runner = TraceRun(trace=mcc_trace, batch=data, prediction={"pred": logits})
        runner.run_trace()
        self.assertEqual(runner.data_on_epoch_end[self.mcc_key], 0.26111648393354675)
def test_tf_multi_class(self):
    """Per-class F1Score on 3-class tf data; one-hot labels must match ordinal."""
    # argmax of each row -> predicted classes [0, 1, 2, 0, 1, 2]
    logits = tf.constant([[0.2, 0.1, -0.6], [0.6, 2.0, 0.1], [0.1, 0.1, 0.8],
                          [0.4, 0.1, -0.3], [0.2, 0.7, 0.1], [0.3, 0.6, 1.5]])
    with self.subTest("ordinal label"):
        f1_trace = F1Score(true_key="label", pred_key="pred", output_name=self.f1_key)
        data = {"label": tf.constant([0, 0, 0, 1, 1, 2])}
        runner = TraceRun(trace=f1_trace, batch=data, prediction={"pred": logits})
        runner.run_trace()
        # class 0: [tp, tn, fp, fn] = [1, 2, 1, 2] -> f1 = 2/5
        self.assertEqual(runner.data_on_epoch_end[self.f1_key][0], 2 / 5)
        # class 1: [tp, tn, fp, fn] = [1, 3, 1, 1] -> f1 = 2/4
        self.assertEqual(runner.data_on_epoch_end[self.f1_key][1], 2 / 4)
        # class 2: [tp, tn, fp, fn] = [1, 4, 1, 0] -> f1 = 2/3
        self.assertEqual(runner.data_on_epoch_end[self.f1_key][2], 2 / 3)
    with self.subTest("one-hot label"):
        f1_trace = F1Score(true_key="label", pred_key="pred", output_name=self.f1_key)
        # one-hot encoding of [0, 0, 0, 1, 1, 2]
        data = {
            "label": tf.constant([[1, 0, 0], [1, 0, 0], [1, 0, 0],
                                  [0, 1, 0], [0, 1, 0], [0, 0, 1]])
        }
        runner = TraceRun(trace=f1_trace, batch=data, prediction={"pred": logits})
        runner.run_trace()
        # class 0: [tp, tn, fp, fn] = [1, 2, 1, 2] -> f1 = 2/5
        self.assertEqual(runner.data_on_epoch_end[self.f1_key][0], 2 / 5)
        # class 1: [tp, tn, fp, fn] = [1, 3, 1, 1] -> f1 = 2/4
        self.assertEqual(runner.data_on_epoch_end[self.f1_key][1], 2 / 4)
        # class 2: [tp, tn, fp, fn] = [1, 4, 1, 0] -> f1 = 2/3
        self.assertEqual(runner.data_on_epoch_end[self.f1_key][2], 2 / 3)