def test_attack_losses_only_both_metrics(self):
  """Test individual attack function."""
  # losses only, both metrics
  loss_train, loss_test, _, _, _, _ = get_test_inputs()
  res = mia.run_attack(loss_train, loss_test, metric=('auc', 'advantage'))
  self.assertBetween(res['thresh_loss_auc'], 0.7, 0.75)
  self.assertBetween(res['thresh_loss_advantage'], 0.3, 0.35)
def test_attack_logits_only_by_percentile(self):
  # only logits, single metric, no classifiers, split by deciles
  _, _, logits_train, logits_test, _, _ = get_test_inputs()
  res = mia.run_attack(
      logits_train=logits_train,
      logits_test=logits_test,
      by_percentile=True,
      metric='auc')
  for k in res:
    self.assertStartsWith(k, 'percentile')
    self.assertBetween(res[k], 0.60, 0.75)
def test_attack_logits_and_labels_misclassified_only(self):
  # logits and labels, single metric, single classifier, misclassified only
  (_, _, logits_train, logits_test, labels_train,
   labels_test) = get_test_inputs()
  res = mia.run_attack(
      logits_train=logits_train,
      logits_test=logits_test,
      labels_train=labels_train,
      labels_test=labels_test,
      only_misclassified=True,
      attack_classifiers=('lr',),
      metric='advantage')
  self.assertBetween(res['misclassified_lr_logits_test_advantage'], 0.3, 0.8)
  self.assertEqual(res['misclassified_n_examples'], 802)
def test_attack_losses_and_logits_two_classifiers(self):
  """Test individual attack function."""
  # losses and logits, two classifiers, single metric
  loss_train, loss_test, logits_train, logits_test, _, _ = get_test_inputs()
  res = mia.run_attack(
      loss_train,
      loss_test,
      logits_train,
      logits_test,
      attack_classifiers=('rf', 'knn'),
      metric='auc')
  self.assertBetween(res['rf_logits_test_auc'], 0.7, 0.9)
  self.assertBetween(res['knn_logits_test_auc'], 0.7, 0.9)
  self.assertBetween(res['rf_logits_loss_test_auc'], 0.7, 0.9)
  self.assertBetween(res['knn_logits_loss_test_auc'], 0.7, 0.9)
def test_attack_losses_and_labels_by_class(self):
  # losses and labels, single metric, split by class
  loss_train, loss_test, _, _, labels_train, labels_test = get_test_inputs()
  n_train = loss_train.shape[0]
  n_test = loss_test.shape[0]
  res = mia.run_attack(
      loss_train,
      loss_test,
      labels_train=labels_train,
      labels_test=labels_test,
      by_class=True,
      metric='auc')
  self.assertLen(res, 10)
  for k in res:
    self.assertStartsWith(k, 'class_')
    if k.endswith('n_examples'):
      self.assertEqual(int(res[k]), (n_train + n_test) // 5)
    else:
      self.assertBetween(res[k], 0.65, 0.75)
def test_attack_losses_and_labels_single_class(self):
  # losses and labels, both metrics, single class
  loss_train, loss_test, _, _, labels_train, labels_test = get_test_inputs()
  n_train = loss_train.shape[0]
  n_test = loss_test.shape[0]
  res = mia.run_attack(
      loss_train,
      loss_test,
      labels_train=labels_train,
      labels_test=labels_test,
      by_class=2,
      metric=('auc', 'advantage'))
  self.assertLen(res, 3)
  for k in res:
    self.assertStartsWith(k, 'class_2')
    if k.endswith('n_examples'):
      self.assertEqual(int(res[k]), (n_train + n_test) // 5)
    elif k.endswith('advantage'):
      self.assertBetween(res[k], 0.3, 0.5)
    elif k.endswith('auc'):
      self.assertBetween(res[k], 0.7, 0.75)
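
# The tests above all call a module-level helper named get_test_inputs(),
# whose definition is not shown in this section. The sketch below is a
# hypothetical stand-in, written only to document the shape of what the tests
# consume: a tuple (loss_train, loss_test, logits_train, logits_test,
# labels_train, labels_test) for a synthetic 5-class problem in which members
# (train) are slightly easier to distinguish from non-members (test).
# The concrete numeric assertions above (e.g. misclassified_n_examples == 802,
# AUC between 0.7 and 0.75) depend on the real helper's exact data and seed,
# which this sketch does not reproduce.
import numpy as np  # assumed dependency of the hypothetical sketch


def get_test_inputs():
  """Hypothetical sketch: synthetic train/test losses, logits and labels."""
  rng = np.random.RandomState(33)
  n_train = n_test = 1000
  # Members (train) receive slightly lower losses than non-members (test),
  # so a simple threshold attack has signal to find.
  loss_train = rng.randn(n_train) - 0.4
  loss_test = rng.randn(n_test) + 0.4
  # Five-class logits, shifted the same way.
  logits_train = rng.randn(n_train, 5) + 0.2
  logits_test = rng.randn(n_test, 5) - 0.2
  # Balanced labels over the 5 classes.
  labels_train = np.array([i % 5 for i in range(n_train)])
  labels_test = np.array([i % 5 for i in range(n_test)])
  return (loss_train, loss_test, logits_train, logits_test,
          labels_train, labels_test)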