    def test_clean_acc_jsons_fast(self):
        config = get_test_config()
        n_ex = 200
        x_test, y_test = load_cifar10(n_ex, config['data_dir'])

        for norm in model_dicts.keys():
            print('Test models robust wrt {}'.format(norm))
            models = list(model_dicts[norm].keys())
            models.remove(
                'Standard'
            )  # temporarily removed to avoid an error with PyTorch 1.4.0

            n_tests_passed = 0
            for model_name in models:
                model = load_model(model_name, config['model_dir'],
                                   norm).cuda().eval()

                acc = clean_accuracy(model,
                                     x_test,
                                     y_test,
                                     batch_size=config['batch_size'])

                success = round(acc * 100., 2) > 70.0
                n_tests_passed += int(success)
                print(
                    '{}: clean accuracy {:.2%} (on {} examples), test passed: {}'
                    .format(model_name, acc, n_ex, success))
                # Assert after printing so the failing model's accuracy is still reported.
                self.assertGreater(round(acc * 100., 2), 70.0)

            print('Test passed for {}/{} models.'.format(
                n_tests_passed, len(models)))
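Note that model_dicts has two layouts across these examples; the shapes sketched below are assumptions inferred from the loops, not the library's documented API:

# Older layout (Examples 1 and 3): threat-model string -> model name -> entry,
# e.g. model_dicts['Linf']['Standard'].
# Newer layout (Example 2): dataset enum -> threat-model enum -> model name ->
# entry, e.g. model_dicts[dataset][threat_model][model_name], where dataset
# and threat_model each expose a .value string used for logging.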
Example #2
def _accuracy_computation(success_criterion: Callable[[str, float, str, str],
                                                      bool],
                          n_ex: int) -> None:
    config = get_test_config()
    device = torch.device(config["device"])

    tot_models = 0
    n_tests_passed = 0

    for dataset, dataset_dict in model_dicts.items():
        print(f"Test models trained on {dataset.value}")
        x_test, y_test = load_clean_dataset(dataset, n_ex, config["data_dir"])

        for threat_model, threat_model_dict in dataset_dict.items():
            print(f"Test models robust wrt {threat_model.value}")
            models = list(threat_model_dict.keys())
            tot_models += len(models)

            for model_name in models:
                model = load_model(model_name, config["model_dir"], dataset,
                                   threat_model).to(device)
                acc = clean_accuracy(model,
                                     x_test,
                                     y_test,
                                     batch_size=config["batch_size"],
                                     device=device)

                success = success_criterion(model_name, acc, dataset.value,
                                            threat_model.value)
                n_tests_passed += int(success)
                print(
                    f"{model_name}: clean accuracy {acc:.2%} (on {n_ex} examples),"
                    f" test passed: {success}")

    print(f"Test is passed for {n_tests_passed}/{tot_models} models.")
Example #3
    def test_clean_acc_jsons_exact(self):
        config = get_test_config()
        device = torch.device(config['device'])
        n_ex = 10000
        x_test, y_test = load_cifar10(n_ex, config['data_dir'])

        for norm in model_dicts.keys():
            print('Test models robust wrt {}'.format(norm))
            models = list(model_dicts[norm].keys())
            models.remove(
                'Standard'
            )  # temporarily removed to avoid an error with PyTorch 1.4.0

            n_tests_passed = 0
            for model_name in models:
                model = load_model(model_name, config['model_dir'],
                                   norm).to(device)

                acc = clean_accuracy(model,
                                     x_test,
                                     y_test,
                                     batch_size=config['batch_size'],
                                     device=device)
                with open('./model_info/{}/{}.json'.format(norm, model_name),
                          'r') as model_info:
                    json_dict = json.load(model_info)

                diff = abs(round(acc * 100., 2) - float(json_dict['clean_acc']))
                success = diff <= 0.05
                print('{}: clean accuracy {:.2%}, test passed: {}'.format(
                    model_name, acc, success))
                self.assertLessEqual(diff, 0.05)
                n_tests_passed += int(success)

            print('Test passed for {}/{} models.'.format(
                n_tests_passed, len(models)))
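For reference, the per-model JSON files read above only need a 'clean_acc' field for this check; the file path and value below are illustrative, not taken from the repository:

import json

# Hypothetical file ./model_info/Linf/Carmon2019Unlabeled.json containing
# something like {"clean_acc": "89.69", ...}; clean_acc is stored as a string
# percentage and compared against the measured accuracy with a 0.05-point
# tolerance.
with open('./model_info/Linf/Carmon2019Unlabeled.json', 'r') as model_info:
    json_dict = json.load(model_info)
print(float(json_dict['clean_acc']))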
Example #4
    def test_load_model(self):
        config = get_test_config()
        model_name = "Standard"
        load_model(model_name, model_dir=config["model_dir"])
Example #5
    def test_load_model_norm(self):
        model_name = "Standard"
        config = get_test_config()
        # Passing norm= is deprecated in favor of threat_model=, hence the warning.
        with self.assertWarns(DeprecationWarning):
            load_model(model_name, model_dir=config["model_dir"], norm="L2")
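For completeness, a plausible set of imports these snippets rely on; the exact module paths (especially for model_dicts and get_test_config) are assumptions, not copied from the repository:

import json
from typing import Callable

import torch

from robustbench.data import load_cifar10, load_clean_dataset
from robustbench.model_zoo.models import model_dicts  # module path assumed
from robustbench.utils import clean_accuracy, load_model

# get_test_config is assumed to be a local test helper returning a dict with
# 'data_dir', 'model_dir', 'batch_size' and 'device' keys.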