# Example #1
def generate_universal_perturbation_examples(sess, model, x, y, X, Y, attack_params, verbose, attack_log_fpath):
    """
    Generate adversarial examples with the universal-perturbation attack.

    Untargeted attack: Y is only forwarded to `prepare_attack`; the
    perturbation itself is label-independent.

    :param sess: TF session the model runs in.
    :param model: the model under attack.
    :param x, y: input/label placeholders used by `prepare_attack`.
    :param X: benign inputs (numpy array); assumed to be in the model's
        expected input range -- TODO confirm.
    :param Y: labels; unused by the attack itself.
    :param attack_params: dict overriding the default hyperparameters below.
    :param verbose: if False, attack output is redirected to attack_log_fpath.
    :param attack_log_fpath: log file used while printing is disabled.
    :return: X + v, every input shifted by the single universal perturbation v.
    """

    # TODO: insert a uint8 filter to f.
    f, grad_fs = prepare_attack(sess, model, x, y, X, Y)

    # Defaults of the universal-perturbation algorithm; any key present in
    # attack_params overrides the corresponding default.
    params = {'delta': 0.2,
              'max_iter_uni': np.inf,
              'xi': 10,
              'p': np.inf,
              'num_classes': 10,
              'overshoot': 0.02,
              'max_iter_df': 10,
              }

    params = override_params(params, attack_params)

    if not verbose:
        disablePrint(attack_log_fpath)

    try:
        # X is randomly shuffled in place inside universal_perturbation,
        # so hand it a copy to keep the caller's X intact.
        X_copy = X.copy()
        v = universal_perturbation(X_copy, f, grad_fs, **params)
        del X_copy
    finally:
        # Restore stdout even if the attack raises; otherwise printing
        # stays disabled for the remainder of the process.
        if not verbose:
            enablePrint()

    return X + v
# Example #2
def generate_carlini_l2_examples(sess, model, x, y, X, Y, attack_params,
                                 verbose, attack_log_fpath):
    """
    Generate adversarial examples with the Carlini-Wagner L2 attack.

    :param sess: TF session the model runs in.
    :param model: the model under attack (wrapped for the Carlini code).
    :param x, y: placeholders; unused here but kept for a uniform signature.
    :param X: benign inputs in [0, 1].
    :param Y: target (or true) labels handed to the attack.
    :param attack_params: dict of CarliniL2 constructor kwargs; unknown keys
        raise NotImplementedError. The caller's dict is never mutated.
    :param verbose: if False, attack output is redirected to attack_log_fpath.
    :param attack_log_fpath: log file used while printing is disabled.
    :return: adversarial examples, same shape as X, back in [0, 1].
    """
    model_wrapper = wrap_to_tohinz_model(model, X, Y)

    accepted_params = [
        'batch_size', 'confidence', 'targeted', 'learning_rate',
        'binary_search_steps', 'max_iterations', 'abort_early', 'initial_const'
    ]
    for k in attack_params:
        if k not in accepted_params:
            raise NotImplementedError("Unsupported params in Carlini L2: %s" %
                                      k)

    # Work on a copy: the adjustments below must not leak back into the
    # caller's dict (it may be reused for other attacks / datasets).
    attack_params = dict(attack_params)

    # The attack processes X in batches; batch_size must not exceed len(X).
    if 'batch_size' in attack_params and attack_params['batch_size'] > len(X):
        attack_params['batch_size'] = len(X)

    # CarliniL2 expects an integer step count; configs may carry it as float.
    if 'binary_search_steps' in attack_params:
        attack_params['binary_search_steps'] = int(
            attack_params['binary_search_steps'])

    attack = CarliniL2(sess, model_wrapper, **attack_params)

    if not verbose:
        disablePrint(attack_log_fpath)
    try:
        # The input range is [0, 1], convert to [-0.5, 0.5] by subtracting 0.5.
        # The return range is [-0.5, 0.5]. Convert back to [0,1] by adding 0.5.
        X_adv = attack.attack(X - 0.5, Y) + 0.5
    finally:
        # Restore stdout even if the attack raises.
        if not verbose:
            enablePrint()

    return X_adv
def generate_carlini_li_examples(sess, model, x, y, X, Y, attack_params, verbose, attack_log_fpath):
    """
    Generate adversarial examples with the Carlini-Wagner L-infinity attack.

    :param sess: TF session the model runs in.
    :param model: the model under attack (wrapped for the Carlini code).
    :param x, y: placeholders; unused here but kept for a uniform signature.
    :param X: benign inputs in [0, 1].
    :param Y: target (or true) labels handed to the attack.
    :param attack_params: dict of CarliniLi constructor kwargs plus an
        optional 'batch_size' (default 10) consumed here. The caller's
        dict is never mutated.
    :param verbose: if False, attack output is redirected to attack_log_fpath.
    :param attack_log_fpath: log file used while printing is disabled.
    :return: adversarial examples stacked over all batches, back in [0, 1].
    """
    model_wrapper = wrap_to_carlini_model(model, X, Y)

    # Copy first: 'batch_size' is consumed locally and must not be deleted
    # from the caller's dict.
    attack_params = dict(attack_params)
    batch_size = attack_params.pop('batch_size', 10)

    accepted_params = ['targeted', 'learning_rate', 'max_iterations',
                       'abort_early', 'initial_const', 'largest_const',
                       'reduce_const', 'decrease_factor', 'const_factor',
                       'confidence']
    for k in attack_params:
        if k not in accepted_params:
            raise NotImplementedError("Unsupported params in Carlini Li: %s" % k)

    attack = CarliniLi(sess, model_wrapper, **attack_params)

    X_adv_list = []

    with click.progressbar(list(range(0, len(X))), file=sys.stderr, show_pos=True,
                           width=40, bar_template='  [%(bar)s] Carlini Li Attacking %(info)s',
                           fill_char='>', empty_char='-') as bar:
        for i in bar:
            # The bar iterates per-sample for display, but the attack is run
            # once per batch at each batch boundary.
            if i % batch_size == 0:
                end = min(i + batch_size, len(X))
                X_sub = X[i:end, :]
                Y_sub = Y[i:end, :]
                if not verbose:
                    disablePrint(attack_log_fpath)
                try:
                    # Carlini code works in [-0.5, 0.5]; shift in and back out.
                    X_adv_sub = attack.attack(X_sub - 0.5, Y_sub) + 0.5
                finally:
                    # Restore stdout even if the attack raises.
                    if not verbose:
                        enablePrint()
                X_adv_list.append(X_adv_sub)

    X_adv = np.vstack(X_adv_list)
    return X_adv
def generate_deepfool_examples(sess, model, x, y, X, Y, attack_params, verbose,
                               attack_log_fpath):
    """
    Generate adversarial examples with the DeepFool attack, one sample at a
    time.

    Untargeted attack: Y is only forwarded to `prepare_attack`.

    :param sess: TF session the model runs in.
    :param model: the model under attack.
    :param x, y: input/label placeholders used by `prepare_attack`.
    :param X: benign inputs, 4-D (samples indexed on axis 0).
    :param Y: labels; unused by the attack itself.
    :param attack_params: dict overriding the DeepFool defaults below.
    :param verbose: if False, attack output is redirected to attack_log_fpath.
    :param attack_log_fpath: log file used while printing is disabled.
    :return: (adv_X, aux_info) where adv_X stacks the per-sample perturbed
        images and aux_info collects per-sample 'r_tot' (total perturbation),
        'loop_i' (iterations used) and 'k_i' (final predicted class).
    """

    # TODO: insert a uint8 filter to f.
    f, grad_fs = prepare_attack(sess, model, x, y, X, Y)

    # DeepFool defaults; any key in attack_params overrides them.
    params = {'num_classes': 10, 'overshoot': 0.02, 'max_iter': 50}
    params = override_params(params, attack_params)

    adv_x_list = []
    aux_info = {'r_tot': [], 'loop_i': [], 'k_i': []}

    with click.progressbar(
            list(range(0, len(X))),
            file=sys.stderr,
            show_pos=True,
            width=40,
            bar_template='  [%(bar)s] DeepFool Attacking %(info)s',
            fill_char='>',
            empty_char='-') as bar:
        # Loop over the samples we want to perturb into adversarial examples
        for i in bar:
            # Slice keeps the leading batch dimension (shape (1, ...)).
            image = X[i:i + 1, :, :, :]

            if not verbose:
                disablePrint(attack_log_fpath)

            try:
                r_tot, loop_i, k_i, pert_image = deepfool(image, f, grad_fs,
                                                          **params)
            finally:
                # Restore stdout even if deepfool raises mid-run.
                if not verbose:
                    enablePrint()

            adv_x_list.append(pert_image)

            aux_info['r_tot'].append(r_tot)
            aux_info['loop_i'].append(loop_i)
            aux_info['k_i'].append(k_i)

    return np.vstack(adv_x_list), aux_info