Example #1
import os

import tensorflow as tf
# Assumed imports: the snippet relies on ares' FGSM attack, CrossEntropyLoss and
# model loader, and (presumably) Keras' CIFAR-10 loader for load_data().
from keras.datasets.cifar10 import load_data

from ares import FGSM, CrossEntropyLoss
from ares.model.loader import load_model_from_path

batch_size = 100  # assumed value; the snippet does not define batch_size

session = tf.Session()

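# Load the ResNet-56 CIFAR-10 model definition shipped with the examples and
# instantiate it inside the TensorFlow session.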
model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                          '../example/cifar10/resnet56.py')
rs_model = load_model_from_path(model_path)
model = rs_model.load(session)

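# Load the CIFAR-10 test set and rescale pixels from [0, 255] into the model's
# expected input range [x_min, x_max].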
_, (xs_test, ys_test) = load_data()
xs_test = (xs_test / 255.0) * (model.x_max - model.x_min) + model.x_min
ys_test = ys_test.reshape(len(ys_test))

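# Placeholder for one batch of inputs; logits_and_labels builds the prediction
# graph once so it can be reused for evaluation.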
xs_ph = tf.placeholder(model.x_dtype, shape=(batch_size, *model.x_shape))
lgs, lbs = model.logits_and_labels(xs_ph)

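# Untargeted ('ut') FGSM under an L_inf constraint, using the common 8/255
# perturbation budget for CIFAR-10.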
loss = CrossEntropyLoss(model)
attack = FGSM(model=model,
              batch_size=batch_size,
              loss=loss,
              goal='ut',
              distance_metric='l_inf',
              session=session)
attack.config(magnitude=8.0 / 255.0)

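# Attack the first few test batches and compute the model's predictions on the
# clean inputs.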
for hi in range(batch_size, 5 * batch_size, batch_size):
    xs = xs_test[hi - batch_size:hi]
    ys = ys_test[hi - batch_size:hi]

    xs_adv = attack.batch_attack(xs, ys=ys)

    lbs_pred = session.run(lbs, feed_dict={xs_ph: xs})
Example #2
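    # Excerpt from a benchmark CLI script: `args` is the parsed command line,
    # and `model`, `session`, `config_kwargs`, `dataset` and `logger` are
    # defined earlier in the script.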
    print('Loading attack...')
    attack_name, batch_size, dataset_name = args.method, args.batch_size, args.dataset
    goal, distance_metric = args.goal, args.distance_metric

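    # Forward only the optional attack hyperparameters that were actually
    # supplied on the command line.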
    kwargs = dict()
    for kwarg in ('learning_rate', 'cw_loss_c', 'samples_per_draw',
                  'init_distortion'):
        attr = getattr(args, kwarg)
        if attr is not None:
            kwargs[kwarg] = attr
    if args.dimension_reduction_height is not None and args.dimension_reduction_width is not None:
        kwargs['dimension_reduction'] = (args.dimension_reduction_height,
                                         args.dimension_reduction_width)
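    # Gradient-based attacks are paired with the cross-entropy loss, while the
    # score-based black-box attacks use the C&W margin loss.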
    if attack_name in ('fgsm', 'bim', 'pgd', 'mim'):
        kwargs['loss'] = CrossEntropyLoss(model)
    elif attack_name in ('nes', 'spsa', 'nattack'):
        kwargs['loss'] = CWLoss(model)

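    # Assemble the benchmark for the chosen attack, model, dataset, goal and
    # distance metric; the extra kwargs configure the attack itself.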
    benchmark = AttackBenchmark(attack_name, model, batch_size, dataset_name,
                                goal, distance_metric, session, **kwargs)

    print('Configuring attack...')
    benchmark.config(**config_kwargs)

    print('Running benchmark...')
    acc, acc_adv, total, succ, dist = benchmark.run(dataset, logger)
    print('n={}, acc={:.3f}, adv_acc={:.3f}, succ={:.3f}, dist_mean={:.3f}'.format(
        args.count, np.mean(acc.astype(np.float)),
        np.mean(acc_adv.astype(np.float)),
        np.sum(succ.astype(np.float)) / np.sum(total.astype(np.float)),
        np.mean(dist.astype(np.float))))  # assumed final argument: mean distortion
Example #3
import os

import tensorflow as tf

from ares import BIM, CrossEntropyLoss, EnsembleCrossEntropyLoss, EnsembleRandomnessCrossEntropyLoss
from ares.dataset import cifar10, dataset_to_iterator
from ares.model.loader import load_model_from_path

batch_size = 1000

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)

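# Load the example ResNet-56 CIFAR-10 classifier into the session.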
model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                          '../example/cifar10/resnet56.py')
model = load_model_from_path(model_path).load(session)

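# Three loss variants on the same classifier: plain cross-entropy, an equally
# weighted ensemble of two copies of the model, and a cross-entropy averaged
# over 10 draws of the model's internal randomness.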
loss_op = CrossEntropyLoss(model)
e_loss_op = EnsembleCrossEntropyLoss([model, model], [0.5, 0.5])
er_loss_op = EnsembleRandomnessCrossEntropyLoss(model, 10, session)

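# Take a single preprocessed batch of CIFAR-10 for this classifier.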
ds = cifar10.load_dataset_for_classifier(model).batch(batch_size).take(1)
_, xs, ys = next(dataset_to_iterator(ds, session))

xs_ph = tf.placeholder(model.x_dtype, shape=(batch_size, *model.x_shape))
ys_ph = tf.placeholder(model.y_dtype, shape=batch_size)

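# Evaluate each loss variant on the same placeholder batch.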
loss = loss_op(xs_ph, ys_ph)
e_loss = e_loss_op(xs_ph, ys_ph)
er_loss = er_loss_op(xs_ph, ys_ph)

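# Gradients of the losses w.r.t. the inputs, the building block of
# gradient-based attacks such as BIM.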
dloss_dxs = tf.gradients(loss, xs_ph)[0]
de_loss_dxs = tf.gradients(e_loss, xs_ph)[0]