def get_art_model(model_kwargs, wrapper_kwargs, weights_file):
    # wrapper_kwargs and weights_file are part of Armory's model-loading
    # signature but are not needed by this entry point.
    del wrapper_kwargs
    del weights_file
    defense_name = model_kwargs['defense']
    defense_path = get_defense_path(defense_name)
    model, _, _, dataset_name = load_defense_and_attack(
        defense_path, 'attack_linf.py')
    assert dataset_name == 'cifar10', 'Only the CIFAR10 dataset is supported for Armory.'
    # TODO: maybe add support for adversarial detector
    return DefenseWrapper(model)
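

# Example invocation (illustrative only; the defense name 'baseline' and the
# empty wrapper_kwargs / weights_file values below are assumptions, not taken
# from an actual Armory scenario config):
#
#   model = get_art_model(
#       model_kwargs={'defense': 'baseline'},
#       wrapper_kwargs={},
#       weights_file=None)
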
    def __init__(self, *args, **kwargs):
        # The base attack class requires an estimator argument; the wrapped
        # defense model is stored separately, so a placeholder string is used.
        super().__init__(estimator='placeholder')
        self._targeted = None
        defense_path = get_defense_path(kwargs['defense'])
        attack_name = kwargs['attack']
        if not attack_name.endswith('.py'):
            attack_name = attack_name + '.py'
        defense_model, attack_cls, _, _ = load_defense_and_attack(
            defense_path, attack_name)
        self._model = defense_model
        self._attack_cls = attack_cls
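
    def generate(self, x, y=None, **kwargs):
        # Hypothetical sketch, not part of the original code: an ART-style
        # attack wrapper typically exposes generate() to produce adversarial
        # examples. The attack interface assumed here (constructor taking the
        # defense model, then a call on a batch and its labels) may differ
        # from the real attack classes returned by load_defense_and_attack.
        attack = self._attack_cls(self._model)
        return attack(x, y)
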
def test_solution(defense_path, attack_name):
    use_torch = 'torch' in attack_name
    defense_model, attack_cls, task_def, dataset_name = load_defense_and_attack(
        defense_path, attack_name, use_torch)

    _, (x_test, y_test), _ = load_dataset(dataset_name, use_torch)
    x_test = x_test[:NUM_EXAMPLES]
    y_test = y_test[:NUM_EXAMPLES]

    failed_examples = evaluate_defense(x_test, y_test, BATCH_SIZE, attack_cls,
                                       defense_model, task_def)
    num_failed = len(failed_examples)
    # NOTE: if the attack succeeds on all examples then num_failed == 0.
    # Here we only expect the attack to succeed on at least one example.
    assert num_failed < len(x_test)
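

# Example invocation (illustrative; the defense directory and attack file name
# below are assumptions rather than paths shipped with the repository):
#
#   test_solution('defenses/baseline', 'attack_linf.py')
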
def main(argv):
    # Parse arguments
    success_parse_argv, defense_path, attack_name = parse_argv(argv)
    if not success_parse_argv:
        show_usage()
        return
    print('Evaluation parameters:')
    print('  Defense path: ', defense_path)
    print('  Attack name: ', attack_name)

    TORCH = 'torch' in attack_name

    defense_model, attack_cls, task_def, dataset_name = load_defense_and_attack(
        defense_path, attack_name, TORCH)

    if FLAGS.ignore_threshold:
        task_def.threshold = np.inf

    # Loading dataset
    print('  Dataset: ', dataset_name)
    _, (x_test, y_test), _ = data.load_dataset(dataset_name, TORCH)

    use_examples = np.arange(len(x_test))
    if FLAGS.example_list is not None:
        # We've got a specific set of examples to attack
        use_examples = list(map(int, FLAGS.example_list.split(",")))
    else:
        # Attack a sequential set of examples
        if FLAGS.num_examples > 0:
            use_examples = np.arange(FLAGS.num_examples)

    x_test = x_test[use_examples]
    y_test = y_test[use_examples]

    print('  Number of examples:', len(use_examples))

    # A non-positive --batch_size means evaluating all selected examples in a
    # single batch.
    batch_size = FLAGS.batch_size if FLAGS.batch_size > 0 else len(x_test)

    if FLAGS.test:
        evaluate_clean(defense_model, x_test, y_test, batch_size)
        return

    if FLAGS.tune_fpr is not None:
        tune_fpr(defense_model, x_test, y_test, batch_size, FLAGS.tune_fpr)
        return

    failed_examples = evaluate_defense(x_test, y_test, batch_size, attack_cls,
                                       defense_model, task_def, FLAGS.verbose)

    if len(failed_examples) == 0:
        print('SUCCESS!')
    else:
        print('FAIL')
        print('{0} out of {1} examples failed the task'.format(
            len(failed_examples), len(x_test)))
        print('Indices of failed examples: ',
              [use_examples[x] for x in failed_examples])
        print(
            'To re-run the attack on just these examples pass',
            '--example_list=' +
            ",".join(str(use_examples[x]) for x in failed_examples))
        if not FLAGS.verbose:
            print(
                "Run with --verbose for more information on why each adversarial example failed."
            )
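

if __name__ == '__main__':
    # Assumed entry point: the FLAGS object above suggests absl.flags, so the
    # usual driver (requires `from absl import app`) would be:
    app.run(main)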