Code Example #1
def run_attack(checkpoint, x_adv, epsilon):

  raw_data = data_input.Data(one_hot=True)

  x_input = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
  y_input = tf.placeholder(tf.int64, shape=[None, 100])

  model = Model(x_input, y_input, mode='eval')
  loss = model.xent
  grad = tf.gradients(loss, model.x_input)[0]
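  # sensitivity statistic: sum over the batch of per-example L2 norms of the input gradient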
  sense = tf.reduce_sum(tf.sqrt(tf.reduce_sum(tf.square(grad), axis=[1, 2, 3])))
  print(sense.shape)

  saver = tf.train.Saver()

  num_eval_examples = x_adv.shape[0]
  eval_batch_size = 100

  num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
  total_corr = 0

  x_nat = raw_data.eval_data.xs
  l_inf = np.amax(np.abs(x_nat - x_adv))

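  # sanity-check that x_adv stays within the allowed L-infinity ball around x_nat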
  if l_inf > epsilon + 0.0001:
    print('maximum perturbation found: {}'.format(l_inf))
    print('maximum perturbation allowed: {}'.format(epsilon))
    #return

  y_pred = [] # label accumulator

  with tf.Session() as sess:
    # Restore the checkpoint
    saver.restore(sess, checkpoint)
    sense_val = 0
    # Iterate over the samples batch-by-batch
    for ibatch in range(num_batches):
      bstart = ibatch * eval_batch_size
      bend = min(bstart + eval_batch_size, num_eval_examples)

      x_batch = x_adv[bstart:bend, :]
      y_batch = raw_data.eval_data.ys[bstart:bend]
      
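      # batch_brightness is defined elsewhere in the project; presumably it
      # shifts pixel intensities by c before evaluation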
      x_batch = batch_brightness(x_batch, c=-0.15)
      
      dict_adv = {model.x_input: x_batch,
                  model.y_input: y_batch}
      
      # accumulate the sensitivity statistic over the whole evaluation set
      sense_val += sess.run(sense, feed_dict=dict_adv)
      
      
    print(sense_val / num_eval_examples)  # mean per-example gradient norm
  
  
  #print('Accuracy: {:.2f}%'.format(100.0 * accuracy))
  #y_pred = np.concatenate(y_pred, axis=0)
  #print(y_pred.shape)
  #print(Counter(y_pred))
Code Example #2
File: run_attack_test.py  Project: littlefish12/RLFAT
def run_attack(checkpoint, x_adv, epsilon):

    raw_data = data_input.Data(one_hot=True)

    x_input = tf.placeholder(tf.float32, shape=[None, 96, 96, 3])
    y_input = tf.placeholder(tf.int64, shape=[None, 10])

    model = Model(x_input, y_input, mode='eval')

    saver = tf.train.Saver()

    num_eval_examples = x_adv.shape[0]
    eval_batch_size = 100

    num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
    total_corr = 0

    x_nat = raw_data.eval_data.xs
    l_inf = np.amax(np.abs(x_nat - x_adv))

    if l_inf > epsilon + 0.0001:
        print('maximum perturbation found: {}'.format(l_inf))
        print('maximum perturbation allowed: {}'.format(epsilon))
        #return

    y_pred = []  # label accumulator

    with tf.Session() as sess:
        # Restore the checkpoint
        saver.restore(sess, checkpoint)

        # Iterate over the samples batch-by-batch
        for ibatch in range(num_batches):
            bstart = ibatch * eval_batch_size
            bend = min(bstart + eval_batch_size, num_eval_examples)

            x_batch = x_adv[bstart:bend, :]
            y_batch = raw_data.eval_data.ys[bstart:bend]

            dict_adv = {model.x_input: x_batch, model.y_input: y_batch}
            cur_corr, y_pred_batch = sess.run(
                [model.num_correct, model.y_pred], feed_dict=dict_adv)

            total_corr += cur_corr
            y_pred.append(y_pred_batch)

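    # robust accuracy: fraction of adversarial examples still classified correctly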
    accuracy = total_corr / num_eval_examples

    print('Accuracy: {:.2f}%'.format(100.0 * accuracy))
    y_pred = np.concatenate(y_pred, axis=0)
    print(y_pred.shape)
    print(Counter(y_pred))
Code Example #3
File: train_trades.py  Project: littlefish12/RLFAT
with open('config.json') as config_file:
    config = json.load(config_file)

# Setting up training parameters
tf.set_random_seed(config['random_seed'])

max_num_training_steps = config['max_num_training_steps']
num_output_steps = config['num_output_steps']
num_summary_steps = config['num_summary_steps']
num_checkpoint_steps = config['num_checkpoint_steps']

batch_size = config['training_batch_size']

# Setting up the data and the model
raw_data = data_input.Data(one_hot=True)
global_step = tf.contrib.framework.get_or_create_global_step()

x_input = tf.placeholder(tf.float32, shape=[None, 96, 96, 3])
adv_x_input = tf.placeholder(tf.float32, shape=[None, 96, 96, 3])
y_input = tf.placeholder(tf.int64, shape=[None, 10])

model = Model(x_input, y_input, mode='train')

model_adv = Model(adv_x_input, y_input, mode='train', reuse=True)

# Setting up the optimizer
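# TRADES objective: natural cross-entropy plus beta * KL(clean || adversarial predictions), with beta = 6.0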
loss = model.mean_xent + 6.0 * tf.reduce_mean(
    kullback_leibler_divergence(model.prob, model_adv.prob))

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
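
kullback_leibler_divergence is imported elsewhere in train_trades.py and is not shown in the snippet. A minimal sketch under the usual Keras-style definition (the project's actual helper may differ):

def kullback_leibler_divergence(p, q, eps=1e-10):
    # KL(p || q), summed over the class axis, one value per example
    p = tf.clip_by_value(p, eps, 1.0)
    q = tf.clip_by_value(q, eps, 1.0)
    return tf.reduce_sum(p * tf.log(p / q), axis=-1)

With this definition, tf.reduce_mean over the per-example KL values matches the reduction applied in the loss above.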
Code Example #4
File: nattack.py  Project: littlefish12/RLFAT
def main():
    with open('config.json') as config_file:
        config = json.load(config_file)

    model_file = tf.train.latest_checkpoint(config['model_dir'])
    if model_file is None:
        print('No model found')
        sys.exit()

    totalImages = 0
    succImages = 0
    faillist = []

    input_xs = tf.placeholder(tf.float32, [None, 96, 96, 3])
    y_input = tf.placeholder(tf.int64, shape=[None, 10])
    model = Model(input_xs, y_input, mode='eval')

    real_logits_pre = model.pre_softmax
    real_logits = tf.nn.softmax(real_logits_pre)

    saver = tf.train.Saver()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    saver.restore(sess, model_file)

    start = 0
    end = 1500
    successlist = []
    printlist = []
    attack_start = time.time()

    fashion_mnist = data_input.Data(one_hot=False)

    for i in range(start, end):
        success = False
        print('evaluating %d of [%d, %d)' % (i, start, end), file=sys.stderr)

        inputs = fashion_mnist.eval_data.xs[i]
        targets = fashion_mnist.eval_data.ys[i]
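        # mean of the NES search distribution over perturbations, in NCHW layout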
        modify = np.random.randn(1, 3, 96, 96) * 0.001

        logits = sess.run(real_logits, feed_dict={input_xs: [inputs]})
        print(logits)

        if np.argmax(logits) != targets:
            print('skipping misclassified example', i)
            continue
        totalImages += 1
        for runstep in range(200):
            Nsample = np.random.randn(npop, 3, 96, 96)

            modify_try = modify.repeat(npop, 0) + sigma * Nsample

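            # re-parameterize the clean image into tanh space so perturbed
            # pixels always map back into the valid box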
            newimg = torch_arctanh(
                (inputs - boxplus) / boxmul).transpose(2, 0, 1)

            inputimg = np.tanh(newimg + modify_try) * boxmul + boxplus
            if runstep % 10 == 0:
                realinputimg = np.tanh(newimg + modify) * boxmul + boxplus
                realdist = realinputimg - (np.tanh(newimg) * boxmul + boxplus)
                realclipdist = np.clip(realdist, -epsi, epsi)
                realclipinput = realclipdist + (np.tanh(newimg) * boxmul +
                                                boxplus)
                l2real = np.sum((realclipinput -
                                 (np.tanh(newimg) * boxmul + boxplus))**2)**0.5
                #l2real =  np.abs(realclipinput - inputs.numpy())
                #print(inputs.shape)
                outputsreal = sess.run(
                    real_logits,
                    feed_dict={input_xs: realclipinput.transpose(0, 2, 3, 1)})
                #print(outputsreal)

                #print('lireal: ',np.abs(realclipdist).max())
                #print('l2real: '+str(l2real.max()))
                #print(outputsreal)
                if (np.argmax(outputsreal) !=
                        targets) and (np.abs(realclipdist).max() <= epsi):
                    succImages += 1
                    success = True
                    #print('clipimage succImages: '+str(succImages)+'  totalImages: '+str(totalImages))
                    #print('lirealsucc: '+str(realclipdist.max()))
                    successlist.append(i)
                    printlist.append(runstep)

                    steps.append(runstep)
                    #                     imsave(folder+classes[targets[0]]+'_'+str("%06d" % batch_idx)+'.jpg',inputs.transpose(1,2,0))
                    break
            dist = inputimg - (np.tanh(newimg) * boxmul + boxplus)
            clipdist = np.clip(dist, -epsi, epsi)
            clipinput = (clipdist +
                         (np.tanh(newimg) * boxmul + boxplus)).reshape(
                             npop, 3, 96, 96)
            target_onehot = np.zeros((1, 10))

            target_onehot[0][targets] = 1.

            outputs = sess.run(
                real_logits,
                feed_dict={input_xs: clipinput.transpose(0, 2, 3, 1)})

            target_onehot = target_onehot.repeat(npop, 0)

            # CW-style margin: log-prob of the true class vs. the best other class
            real = np.log((target_onehot * outputs).sum(1) + 1e-30)
            other = np.log(((1. - target_onehot) * outputs -
                            target_onehot * 10000.).max(1) + 1e-30)

            loss1 = np.clip(real - other, 0., 1000)

            # NES maximizes reward, so use the negated loss
            Reward = -0.5 * loss1

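            # fitness shaping: standardize rewards across the population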
            A = (Reward - np.mean(Reward)) / (np.std(Reward) + 1e-7)

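            # NES update: move the distribution mean along the estimated gradient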
            modify = modify + (alpha / (npop * sigma)) * (
                (np.dot(Nsample.reshape(npop, -1).T, A)).reshape((3, 96, 96)))
        if not success:
            faillist.append(i)
            print('failed:', faillist)
        else:
            print('succeeded:', successlist)
            print('runstep :', printlist)
        print('now id', i)
        print('failed num', len(faillist))
    print('failed num', len(faillist))
    success_rate = succImages / float(totalImages)
    print('attack time : ', time.time() - attack_start, flush=True)
    print('succ rate', success_rate)
    print(model_file)
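
Example #4 also relies on module-level names that the snippet does not show (npop, sigma, alpha, boxmul, boxplus, epsi, steps, torch_arctanh). A minimal sketch of plausible definitions in the NATTACK style; the numeric values are assumptions, not the project's actual settings:

npop = 300      # NES population size (assumed)
sigma = 0.1     # std-dev of the search distribution (assumed)
alpha = 0.02    # learning rate for the distribution mean (assumed)
boxmul = 0.5    # half-width of the [0, 1] pixel box
boxplus = 0.5   # center of the [0, 1] pixel box
epsi = 0.031    # L-infinity attack budget (assumed)
steps = []      # records the step at which each attack succeeded

def torch_arctanh(x, eps=1e-6):
    # inverse tanh, pulled slightly away from +/-1 for numerical stability
    x = x * (1. - eps)
    return 0.5 * np.log((1. + x) / (1. - x))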
Code Example #5
File: pgd_attack_test.py  Project: littlefish12/RLFAT
    model_file = tf.train.latest_checkpoint(config['model_dir'])
    if model_file is None:
        print('No model found')
        sys.exit()

    x_input = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    y_input = tf.placeholder(tf.float32, shape=[None, 100])

    model = Model(x_input, y_input, mode='eval')

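    # config keys, following the Madry-style LinfPGDAttack interface:
    # epsilon = L-infinity budget, k = number of PGD steps, a = step size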
    attack = LinfPGDAttack(model, config['epsilon'], config['k'], config['a'],
                           config['random_start'], config['loss_func'])

    saver = tf.train.Saver()

    fashion_mnist = data_input.Data(one_hot=True)

    with tf.Session() as sess:
        # Restore the checkpoint
        saver.restore(sess, model_file)

        # Iterate over the samples batch-by-batch
        num_eval_examples = fashion_mnist.eval_data.xs.shape[0]
        eval_batch_size = config['eval_batch_size']
        num_batches = int(math.ceil(num_eval_examples / eval_batch_size))

        x_adv = []  # adv accumulator

        print('Iterating over {} batches'.format(num_batches))

        for ibatch in range(num_batches):