def __init__(self, sess):
    self._sess = sess

    self._x = tf.placeholder(tf.float32, (1, 32, 32, 3))
    self._encode = discretize_uniform(self._x / 255.0,
                                      levels=LEVELS,
                                      thermometer=True)

    self._model = Model('../models/thermometer_advtrain/',
                        sess,
                        tiny=False,
                        mode='eval',
                        thermometer=True,
                        levels=LEVELS)

    self._dataset = robustml.dataset.CIFAR10()
    self._threat_model = robustml.threat_model.Linf(epsilon=8.0 / 255.0)
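This is the robustml model-wrapper pattern: the class exposes a dataset and a threat model so the evaluation harness can drive it. Below is a minimal sketch of how such a wrapper is typically evaluated; the class name ThermometerModel, the CIFAR-10 path, and the use of robustml's no-op NullAttack baseline are all assumptions, not part of the snippet above.

import robustml
import tensorflow as tf

# Sketch only: ThermometerModel is a hypothetical name for the class above,
# and the test-batch path is a placeholder.
sess = tf.Session()
model = ThermometerModel(sess)
provider = robustml.provider.CIFAR10('../cifar10_data/test_batch')
attack = robustml.attack.NullAttack()
success_rate = robustml.evaluate.evaluate(model, attack, provider,
                                          start=0, end=100)
print('attack success rate:', success_rate)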
Example #2
num_summary_steps = config['num_summary_steps']
num_checkpoint_steps = config['num_checkpoint_steps']
step_size_schedule = config['step_size_schedule']
weight_decay = config['weight_decay']
# data_path = config['data_path']
data_path = '../cifar10_data/'

momentum = config['momentum']
batch_size = config['training_batch_size']

levels = 16

# Setting up the data and the model
raw_cifar = cifar10_input.CIFAR10Data(data_path)
global_step = tf.contrib.framework.get_or_create_global_step()
model = Model(mode='eval', tiny=False, thermometer=False, levels=levels)

# Setting up the optimizer
boundaries = [int(sss[0]) for sss in step_size_schedule]
boundaries = boundaries[1:]
values = [sss[1] for sss in step_size_schedule]
learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32),
                                            boundaries, values)
total_loss = model.mean_xent + weight_decay * model.weight_decay_loss
train_step = tf.train.MomentumOptimizer(learning_rate, momentum).minimize(
    total_loss, global_step=global_step)

xin = tf.placeholder(tf.float32, (None, 32, 32, 3))

steps = 7
eps = 0.031
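steps = 7 and eps = 0.031 (about 8/255) point to a 7-step L-inf PGD attack on inputs scaled to [0, 1]. A minimal sketch of that loop follows; the attribute names model.x_input, model.y_input, and model.xent are assumptions borrowed from the Madry CIFAR-10 challenge code that this script resembles.

import numpy as np

# PGD sketch; model.x_input / model.y_input / model.xent are assumed
# attribute names, and inputs are assumed to be scaled to [0, 1].
grad = tf.gradients(model.xent, model.x_input)[0]

def pgd(x_nat, y, sess, steps=7, eps=0.031, step_size=0.01):
    x = np.clip(x_nat + np.random.uniform(-eps, eps, x_nat.shape), 0.0, 1.0)
    for _ in range(steps):
        g = sess.run(grad, feed_dict={model.x_input: x, model.y_input: y})
        x = x + step_size * np.sign(g)            # ascend the cross-entropy
        x = np.clip(x, x_nat - eps, x_nat + eps)  # project into the L-inf ball
        x = np.clip(x, 0.0, 1.0)                  # keep pixels valid
    return x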
Example #3
max_num_training_steps = config['max_num_training_steps']
num_output_steps = config['num_output_steps']
num_summary_steps = config['num_summary_steps']
num_checkpoint_steps = config['num_checkpoint_steps']
step_size_schedule = config['step_size_schedule']
weight_decay = config['weight_decay']
data_path = config['data_path']
momentum = config['momentum']
batch_size = config['training_batch_size']

levels = 16

# Setting up the data and the model
raw_cifar = cifar10_input.CIFAR10Data(data_path)
global_step = tf.contrib.framework.get_or_create_global_step()
model = Model(mode='train', tiny=False, thermometer=True, levels=levels)

# Setting up the optimizer
boundaries = [int(sss[0]) for sss in step_size_schedule]
boundaries = boundaries[1:]
values = [sss[1] for sss in step_size_schedule]
learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32),
                                            boundaries, values)
total_loss = model.mean_xent + weight_decay * model.weight_decay_loss
train_step = tf.train.MomentumOptimizer(learning_rate, momentum).minimize(
    total_loss, global_step=global_step)

xin = tf.placeholder(tf.float32, (None, 32, 32, 3))

steps = 7
eps = 0.031
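What typically follows in a script like this is the training loop over raw_cifar. A minimal sketch of a standard (non-adversarial, for simplicity) loop is below; the names train_data.get_next_batch, model.x_input, model.y_input, and model.accuracy follow the Madry CIFAR-10 challenge code and are assumptions, not confirmed by this snippet.

# Training-loop sketch; attribute and method names are assumptions
# following the Madry CIFAR-10 challenge code.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(max_num_training_steps):
        x_batch, y_batch = raw_cifar.train_data.get_next_batch(
            batch_size, multiple_passes=True)
        nat_dict = {model.x_input: x_batch, model.y_input: y_batch}
        sess.run(train_step, feed_dict=nat_dict)
        if step % num_output_steps == 0:
            acc = sess.run(model.accuracy, feed_dict=nat_dict)
            print('step {}: batch accuracy {:.2%}'.format(step, acc))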
Example #4
parser.add_argument('--debug', action='store_true')
args = parser.parse_args()

provider = robustml.provider.CIFAR10(args.cifar_path)
# saver = tf.train.Saver(max_to_keep=3)

start = 0
end = 100

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)

model = Model('./models/adv_train_clean/',
              sess,
              mode='eval',
              tiny=False,
              thermometer=False,
              levels=levels)
# initialize data augmentation

input_xs = tf.placeholder(tf.float32, [None, 32, 32, 3])
real_logits = tf.nn.softmax(model(input_xs))  # softmax probabilities, despite the name

# saver.restore(sess,
#             os.path.join("models/adv_train_clean", 'checkpoint-65000'))

for i in range(start, end):
    x_batch, y_batch = provider[i]

    logits = sess.run(real_logits, feed_dict={input_xs: [x_batch]})
    #     nat_dict = {model.x_input: [x_batch],model.y_input: [y_batch]}
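    # Assumed continuation (not in the original snippet): real_logits is a
    # softmax, so its argmax is the predicted class.
    pred = logits.argmax(axis=1)[0]
    print('image {}: predicted {}, true {}'.format(i, pred, y_batch))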
Example #5
            nquery += 1
            #print("size of image:",x0.shape)
            #print("size of modifier,",np.array(lbd_mid*theta).shape )
            if self.model.predict(x0 + np.array(lbd_mid * theta)) != y0:
                lbd_hi = lbd_mid
            else:
                lbd_lo = lbd_mid
        return lbd_hi, nquery

levels = 16

sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
cifar = cifar10_input.CIFAR10Data()
model = Model('../models/thermometer_advtrain/',
              sess,
              tiny=False,
              mode='eval',
              thermometer=True,
              levels=levels)
model = MyModel(model, sess, levels, [0.0, 255.0])

images = np.array(cifar.eval_data.xs[:100], dtype=np.float32)
labels = cifar.eval_data.ys[:100]

new_img = images / 255.0

count = []
for i in range(20):
    label = model.predict(new_img[i])
    if label == labels[i]:
        count.append(1)
    else:
        count.append(0)  # assumed: mirrors the branch above; the original snippet is truncated here

Example #6
        lbd_hi = lbd
        lbd_lo = 0.0

        while (lbd_hi - lbd_lo) > 1e-5:
            lbd_mid = (lbd_lo + lbd_hi) / 2.0
            nquery += 1
            if self.model.predict_label(x0 + np.array(lbd_mid * theta)) != y0:
                lbd_hi = lbd_mid
            else:
                lbd_lo = lbd_mid
        return lbd_hi, nquery
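This is the fine-grained bisection used by hard-label (decision-based) attacks such as OPT-attack: the interval [lbd_lo, lbd_hi] always brackets the scale at which x0 + lbd * theta crosses the decision boundary, and each query halves it until the width drops below 1e-5. The same idea as a self-contained function, for illustration only (`predict` stands in for model.predict_label):

def boundary_distance(predict, x0, y0, theta, lbd_hi, tol=1e-5):
    # Invariant: predict(x0 + lbd_hi * theta) != y0  (adversarial)
    #            predict(x0 + lbd_lo * theta) == y0  (still correct)
    lbd_lo, nquery = 0.0, 0
    while lbd_hi - lbd_lo > tol:
        lbd_mid = (lbd_lo + lbd_hi) / 2.0
        nquery += 1
        if predict(x0 + lbd_mid * theta) != y0:
            lbd_hi = lbd_mid
        else:
            lbd_lo = lbd_mid
    return lbd_hi, nquery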

cifar = cifar10_input.CIFAR10Data("../cifar10_data")
sess = tf.InteractiveSession()
orig_model = Model("../models/standard/", tiny=False, mode='eval', sess=sess)
model = PyModel(orig_model, sess, [0.0, 255.0])
images = cifar.eval_data.xs[20:1000] / 255
labels = cifar.eval_data.ys[20:1000]
pre_labs = []
count = 0
for i in range(20):
    pre_lab = model.predict_label(images[i])
    pre_labs.append(pre_lab)
    if labels[i] == pre_lab:
        count += 1
print("original labels:", labels[:20])
print("predicted labels:", pre_labs)
print("accuracy of 20 images :", count / 20)
#count = 0
#pre_labs = []

Example #7

import keras
import imageio
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.datasets import cifar10
from cleverhans.attacks import FastGradientMethod
from cleverhans.utils_keras import KerasModelWrapper

num_classes = 10

(x_train, y_train), (x_test, y_test) = cifar10.load_data()

x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

x_test_tensor = K.variable(x_test)

model = Model(input_shape=x_train.shape[1:], num_classes=num_classes).model
wrap = KerasModelWrapper(model)

target = keras.utils.to_categorical([0], num_classes)
target = np.repeat(target, 10000, axis=0)

fgsm_params = {
    'eps': 0.05,
    'clip_min': 0.,
    'clip_max': 1.,
    'y_target': target
}

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    fgsm = FastGradientMethod(wrap, sess=sess)
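    # Likely continuation (assumed; not in the original snippet): run the
    # targeted FGSM over the test set and check how often predictions land
    # on the target class. generate_np is the standard cleverhans entry point.
    adv_x = fgsm.generate_np(x_test, **fgsm_params)
    preds = model.predict(adv_x)
    hit_rate = (preds.argmax(axis=1) == target.argmax(axis=1)).mean()
    print('targeted success rate: {:.2%}'.format(hit_rate))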

Example #8

import keras
from keras.datasets import cifar10
from keras.callbacks import ModelCheckpoint

# The data, split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

model = Model(input_shape=x_train.shape[1:], num_classes=num_classes).model

callbacks = [
    ModelCheckpoint('models/cifar10.h5',
                    save_best_only=True,
                    save_weights_only=True)
]

model.fit(x_train,
          y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(x_test, y_test),
          callbacks=callbacks,
          shuffle=True)
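
A quick follow-up (assumed; the original snippet ends at fit) is to restore the best checkpoint saved by ModelCheckpoint and score the held-out set, assuming the wrapped Keras model was compiled with accuracy as a metric:

# Assumed follow-up: reload the best weights and evaluate on the test set.
model.load_weights('models/cifar10.h5')
loss, acc = model.evaluate(x_test, y_test, verbose=0)
print('test loss {:.4f}, test accuracy {:.2%}'.format(loss, acc))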