def test_tf1_apricot():
    """Check the TF1 MSCOCO Faster R-CNN baseline on the APRICOT dev adversarial set.

    Loads the frozen-graph detector, runs it image-by-image over the
    adversarial split, and asserts per-class AP and mAP floors.

    NOTE(review): a later, more complete definition of ``test_tf1_apricot``
    exists in this file and shadows this one at import time, so pytest will
    only collect the later version — consider renaming or removing this one.
    """
    detector_module = import_module("armory.baseline_models.tf_graph.mscoco_frcnn")
    detector_fn = getattr(detector_module, "get_art_model")
    detector = detector_fn(model_kwargs={}, wrapper_kwargs={})

    test_dataset = adversarial_datasets.apricot_dev_adversarial(
        # Fixed: was `split_type=`; the sibling test calls this loader with
        # `split=`, and an unknown kwarg would raise TypeError here.
        split="adversarial",
        epochs=1,
        batch_size=1,
        dataset_dir=DATASET_DIR,
        shuffle_files=False,
    )

    list_of_ys = []
    list_of_ypreds = []
    for x, y in test_dataset:
        # batch_size=1, so take the single prediction dict from the batch.
        y_pred = detector.predict(x)[0]
        list_of_ys.append(y)
        list_of_ypreds.append(y_pred)

    average_precision_by_class = object_detection_AP_per_class(
        list_of_ys, list_of_ypreds
    )
    mAP = np.fromiter(average_precision_by_class.values(), dtype=float).mean()
    for class_id in [13, 15, 64]:
        assert average_precision_by_class[class_id] > 0.79
    assert mAP > 0.08
def test_tf1_coco():
    """Smoke-test the TF1 MSCOCO Faster R-CNN baseline on coco2017 validation.

    Skips when the dataset is not present locally; otherwise runs ten
    batches and asserts per-class AP and mAP floors.
    """
    coco_path = os.path.join(DATASET_DIR, "coco", "2017", "1.1.0")
    if not os.path.exists(coco_path):
        pytest.skip("coco2017 dataset not downloaded.")

    detector_module = import_module("armory.baseline_models.tf_graph.mscoco_frcnn")
    get_art_model = getattr(detector_module, "get_art_model")
    detector = get_art_model(model_kwargs={}, wrapper_kwargs={})

    num_test_samples = 10
    dataset = datasets.coco2017(split="validation", shuffle_files=False)

    ground_truths, predictions = [], []
    for _ in range(num_test_samples):
        x, y = dataset.get_batch()
        ground_truths.extend(y)
        predictions.extend(detector.predict(x))

    ap_by_class = object_detection_AP_per_class(ground_truths, predictions)
    mean_ap = np.fromiter(ap_by_class.values(), dtype=float).mean()
    for class_id in (0, 2, 5, 9, 10):
        assert ap_by_class[class_id] > 0.6
    assert mean_ap > 0.1
def test_mAP():
    """Unit-test the AP metric on a hand-built example.

    One ground-truth box of class 2; predictions contain an exact match
    (class 2) plus a spurious class-9 box. AP must be ~1 for class 2 and
    exactly 0 for class 9.
    """
    ground_truth = {
        "labels": np.array([2]),
        "boxes": np.array([[0.1, 0.1, 0.7, 0.7]]),
    }
    prediction = {
        "labels": np.array([2, 9]),
        "boxes": np.array(
            [[0.1, 0.1, 0.7, 0.7], [0.5, 0.4, 0.9, 0.9]]
        ),
        "scores": np.array([0.8, 0.8]),
    }

    ap_per_class = metrics.object_detection_AP_per_class(
        [ground_truth], [prediction]
    )
    assert ap_per_class[9] == 0
    assert ap_per_class[2] >= 0.99
def test_pytorch_xview_pretrained():
    """Evaluate the pretrained PyTorch xView Faster R-CNN on 250 test batches.

    Downloads the checkpoint from S3 if needed, then asserts per-class AP
    and mAP floors on the xView test split.
    """
    detector_module = import_module("armory.baseline_models.pytorch.xview_frcnn")
    build_detector = getattr(detector_module, "get_art_model")
    weights_path = maybe_download_weights_from_s3(
        "xview_model_state_dict_epoch_99_loss_0p67"
    )
    detector = build_detector(
        model_kwargs={},
        wrapper_kwargs={},
        weights_path=weights_path,
    )

    num_test_samples = 250
    dataset_config = {
        "batch_size": 1,
        "framework": "numpy",
        "module": "armory.data.datasets",
        "name": "xview",
    }
    test_dataset = load_dataset(
        dataset_config,
        epochs=1,
        split="test",
        num_batches=num_test_samples,
        shuffle_files=False,
    )

    ground_truths, predictions = [], []
    for x, y in test_dataset:
        ground_truths.extend(y)
        predictions.extend(detector.predict(x))

    ap_by_class = object_detection_AP_per_class(ground_truths, predictions)
    mean_ap = np.fromiter(ap_by_class.values(), dtype=float).mean()
    for class_id in (4, 23, 33, 39):
        assert ap_by_class[class_id] > 0.9
    assert mean_ap > 0.25
def _collect_predictions(detector, dataset):
    """Run `detector` over every (x, y) pair in `dataset`.

    Returns (list_of_ys, list_of_ypreds), one entry per batch, matching the
    order the dataset yields them.
    """
    list_of_ys = []
    list_of_ypreds = []
    for x, y in dataset:
        list_of_ys.append(y)
        list_of_ypreds.append(detector.predict(x))
    return list_of_ys, list_of_ypreds


def test_tf1_apricot():
    """Evaluate the TF1 MSCOCO Faster R-CNN baseline on APRICOT dev and test.

    Skips when the APRICOT data is not available locally. For each split it
    asserts per-class AP and mAP floors, then checks that the patch-targeted
    AP per class is within 0.03 of recorded reference values.
    """
    if not os.path.isdir(os.path.join(DATASET_DIR, "apricot_dev", "1.0.1")):
        pytest.skip("apricot dataset not locally available.")

    detector_module = import_module("armory.baseline_models.tf_graph.mscoco_frcnn")
    detector_fn = getattr(detector_module, "get_art_model")
    detector = detector_fn(model_kwargs={}, wrapper_kwargs={})

    # --- dev adversarial split ---
    dev_dataset = adversarial_datasets.apricot_dev_adversarial(
        split="frcnn+ssd+retinanet",
        epochs=1,
        batch_size=1,
        dataset_dir=DATASET_DIR,
        shuffle_files=False,
    )
    list_of_ys, list_of_ypreds = _collect_predictions(detector, dev_dataset)

    average_precision_by_class = object_detection_AP_per_class(
        list_of_ys, list_of_ypreds
    )
    mAP = np.fromiter(average_precision_by_class.values(), dtype=float).mean()
    for class_id in [13, 15, 64]:
        assert average_precision_by_class[class_id] > 0.79
    assert mAP > 0.08

    patch_targeted_AP_by_class = apricot_patch_targeted_AP_per_class(
        list_of_ys, list_of_ypreds
    )
    # Reference values recorded from a known-good run of this baseline.
    expected_patch_targeted_AP_by_class = {
        1: 0.18,
        17: 0.18,
        27: 0.27,
        33: 0.55,
        44: 0.14,
    }
    for class_id, expected_AP in expected_patch_targeted_AP_by_class.items():
        assert np.abs(patch_targeted_AP_by_class[class_id] - expected_AP) < 0.03

    # --- test adversarial split ---
    test_dataset = adversarial_datasets.apricot_test_adversarial(
        split="frcnn",
        epochs=1,
        batch_size=1,
        dataset_dir=DATASET_DIR,
        shuffle_files=False,
    )
    list_of_ys, list_of_ypreds = _collect_predictions(detector, test_dataset)

    average_precision_by_class = object_detection_AP_per_class(
        list_of_ys, list_of_ypreds
    )
    mAP = np.fromiter(average_precision_by_class.values(), dtype=float).mean()
    for class_id in [2, 3, 4, 6, 15, 72, 76]:
        assert average_precision_by_class[class_id] > 0.3
    assert mAP > 0.08

    patch_targeted_AP_by_class = apricot_patch_targeted_AP_per_class(
        list_of_ys, list_of_ypreds
    )
    expected_patch_targeted_AP_by_class = {
        1: 0.22,
        17: 0.18,
        27: 0.4,
        44: 0.09,
        53: 0.27,
        85: 0.43,
    }
    for class_id, expected_AP in expected_patch_targeted_AP_by_class.items():
        assert np.abs(patch_targeted_AP_by_class[class_id] - expected_AP) < 0.03