def cw_attack(file_name, norm, sess, num_image=10, cifar=False, tinyimagenet=False):
    # Fix all seeds for reproducibility
    np.random.seed(1215)
    tf.set_random_seed(1215)
    random.seed(1215)

    # Select the attack and the matching distortion norm
    if norm == '1':
        attack = EADL1
        norm_fn = lambda x: np.sum(np.abs(x), axis=(1, 2, 3))
    elif norm == '2':
        attack = CarliniL2
        norm_fn = lambda x: np.sum(x**2, axis=(1, 2, 3))
    elif norm == 'i':
        attack = CarliniLi
        norm_fn = lambda x: np.max(np.abs(x), axis=(1, 2, 3))

    if cifar:
        data = CIFAR()
    elif tinyimagenet:
        data = tinyImagenet()
    else:
        data = MNIST()

    model = load_model(file_name, custom_objects={'fn': loss, 'tf': tf,
                                                  'ResidualStart': ResidualStart,
                                                  'ResidualStart2': ResidualStart2})
    inputs, targets, true_labels, true_ids, img_info = generate_data(
        data, samples=num_image, targeted=True, random_and_least_likely=True,
        target_type=0b0010, predictor=model.predict, start=0)

    # Attributes expected by the attack implementations
    model.predict = model
    model.num_labels = 10
    if cifar:
        model.image_size = 32
        model.num_channels = 3
    elif tinyimagenet:
        model.image_size = 64
        model.num_channels = 3
        model.num_labels = 200
    else:
        model.image_size = 28
        model.num_channels = 1

    start_time = timer.time()
    attack = attack(sess, model, max_iterations=1000)
    perturbed_input = attack.attack(inputs, targets)
    # Average distortion of the adversarial examples = upper bound on the minimum distortion
    UB = np.average(norm_fn(perturbed_input - inputs))
    return UB, (timer.time() - start_time) / len(inputs)
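# Usage sketch (not part of the original script): how cw_attack might be invoked to get
# an average L_inf distortion upper bound. The model path and the session handling here
# are assumptions for illustration; only cw_attack's own signature is taken from above.
def cw_attack_demo(file_name="models/mnist_cnn_7layer", norm='i', num_image=10):
    with tf.Session() as sess:
        UB, avg_time = cw_attack(file_name, norm, sess, num_image=num_image)
        print("avg distortion upper bound = {:.5f}, avg runtime per image = {:.2f}s".format(UB, avg_time))
        return UB, avg_time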
def run(file_name, n_samples, p_n, q_n, activation='relu', cifar=False, tinyimagenet=False):
    # Fix all seeds for reproducibility
    np.random.seed(1215)
    tf.set_random_seed(1215)
    random.seed(1215)

    keras_model = load_model(file_name, custom_objects={'fn': fn, 'tf': tf})
    if tinyimagenet:
        model = CNNModel(keras_model, inp_shape=(64, 64, 3))
    elif cifar:
        model = CNNModel(keras_model, inp_shape=(32, 32, 3))
    else:
        model = CNNModel(keras_model)

    # Set the correct linear_bounds function for the chosen activation
    global linear_bounds
    if activation == 'relu':
        linear_bounds = relu_linear_bounds
    elif activation == 'ada':
        linear_bounds = ada_linear_bounds
    elif activation == 'sigmoid':
        linear_bounds = sigmoid_linear_bounds
    elif activation == 'tanh':
        linear_bounds = tanh_linear_bounds
    elif activation == 'arctan':
        linear_bounds = atan_linear_bounds
    upper_bound_conv.recompile()
    lower_bound_conv.recompile()
    compute_bounds.recompile()

    # target_type bit flags: 0b01111 <- all, 0b0010 <- random, 0b0001 <- top2, 0b0100 <- least
    if cifar:
        inputs, targets, true_labels, true_ids, img_info = generate_data(
            CIFAR(), samples=n_samples, targeted=True, random_and_least_likely=True,
            target_type=0b0010, predictor=model.model.predict, start=0)
    elif tinyimagenet:
        inputs, targets, true_labels, true_ids, img_info = generate_data(
            tinyImagenet(), samples=n_samples, targeted=True, random_and_least_likely=True,
            target_type=0b0010, predictor=model.model.predict, start=0)
    else:
        inputs, targets, true_labels, true_ids, img_info = generate_data(
            MNIST(), samples=n_samples, targeted=True, random_and_least_likely=True,
            target_type=0b0010, predictor=model.model.predict, start=0)

    steps = 15
    eps_0 = 0.05
    summation = 0

    # Warm up the bound computation before timing
    warmup(model, inputs[0].astype(np.float32), eps_0, p_n, find_output_bounds)

    start_time = time.time()
    for i in range(len(inputs)):
        print('--- CNN-Cert: Computing eps for input image ' + str(i) + ' ---')
        predict_label = np.argmax(true_labels[i])
        target_label = np.argmax(targets[i])

        # Fold the margin f_c - f_t into a single final layer
        weights = model.weights[:-1]
        biases = model.biases[:-1]
        shapes = model.shapes[:-1]
        W, b, s = model.weights[-1], model.biases[-1], model.shapes[-1]
        last_weight = (W[predict_label, :, :, :] - W[target_label, :, :, :]).reshape([1] + list(W.shape[1:]))
        weights.append(last_weight)
        biases.append(np.asarray([b[predict_label] - b[target_label]]))
        shapes.append((1, 1, 1))

        # Perform binary search over eps (in log space)
        log_eps = np.log(eps_0)
        log_eps_min = -np.inf
        log_eps_max = np.inf
        for j in range(steps):
            LB, UB = find_output_bounds(weights, biases, shapes, model.pads, model.strides,
                                        inputs[i].astype(np.float32), np.exp(log_eps), p_n)
            print("Step {}, eps = {:.5f}, {:.6s} <= f_c - f_t <= {:.6s}".format(
                j, np.exp(log_eps), str(np.squeeze(LB)), str(np.squeeze(UB))))
            if LB > 0:  # Still certified: increase eps
                log_eps_min = log_eps
                log_eps = np.minimum(log_eps + 1, (log_eps_max + log_eps_min) / 2)
            else:       # Not certified: decrease eps
                log_eps_max = log_eps
                log_eps = np.maximum(log_eps - 1, (log_eps_max + log_eps_min) / 2)

        str_p_n = 'i' if p_n == 105 else str(p_n)

        print("[L1] method = CNN-Cert-{}, model = {}, image no = {}, true_id = {}, target_label = {}, true_label = {}, norm = {}, robustness = {:.5f}".format(
            activation, file_name, i, true_ids[i], target_label, predict_label, str_p_n, np.exp(log_eps_min)))
        summation += np.exp(log_eps_min)
    K.clear_session()

    eps_avg = summation / len(inputs)
    total_time = (time.time() - start_time) / len(inputs)
    print("[L0] method = CNN-Cert-{}, model = {}, total images = {}, norm = {}, avg robustness = {:.5f}, avg runtime = {:.2f}".format(
        activation, file_name, len(inputs), str_p_n, eps_avg, total_time))
    return eps_avg, total_time
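# Illustrative sketch (not from the original file): the certified-radius computation in
# run() above is a binary search over eps in log space. The lower_bound argument below
# is a hypothetical stand-in for find_output_bounds; the demo only isolates how
# log_eps_min / log_eps_max bracket the largest certified eps.
def _binary_search_eps_demo(lower_bound, eps_0=0.05, steps=15):
    log_eps = np.log(eps_0)
    log_eps_min, log_eps_max = -np.inf, np.inf
    for _ in range(steps):
        if lower_bound(np.exp(log_eps)) > 0:  # still certified -> try a larger eps
            log_eps_min = log_eps
            log_eps = np.minimum(log_eps + 1, (log_eps_max + log_eps_min) / 2)
        else:                                 # certification fails -> shrink eps
            log_eps_max = log_eps
            log_eps = np.maximum(log_eps - 1, (log_eps_max + log_eps_min) / 2)
    return np.exp(log_eps_min)  # largest eps that was certified
# Example: _binary_search_eps_demo(lambda eps: 0.3 - eps) converges to ~0.3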
elif args.model == "cifar": data = CIFAR() if args.cnnmodel: model = nl.CNNModel(modelfile) elif args.filename: model = nl.NLayerModel(args.layers, modelfile, image_size=32, image_channel=3) else: model = nl.NLayerModel([nhidden] * (args.numlayer - 1), modelfile, image_size=32, image_channel=3) elif args.model == "tiny": data = tinyImagenet() if args.cnnmodel: model = nl.CNNModel(modelfile, inp_shape=(64, 64, 3)) elif args.filename: model = nl.NLayerModel(args.layers, modelfile, image_size=64, image_channel=3) else: model = nl.NLayerModel([nhidden] * (args.numlayer - 1), modelfile, image_size=64, image_channel=3) else: raise (RuntimeError("unknown model: " + args.model))
#train_cnn_7layer(tinyImagenet(), file_name="models/tiny_cnn_7layer_1", params=[32,32,64,64,200,200], num_epochs=30, lr=0.0001, decay=0, activation="relu", optimizer_name="adam")
# train tiny imagenet: 2
#train_cnn_7layer(tinyImagenet(), file_name="models/tiny_cnn_7layer_2", params=[32,32,64,64,200,200], num_epochs=30, lr=0.0005, decay=0, activation="relu", optimizer_name="adam")
# train tiny imagenet: 3
#train_cnn_7layer(tinyImagenet(), file_name="models/tiny_cnn_7layer_3", params=[64,64,64,64,200,200], num_epochs=30, lr=0.0001, decay=0, activation="relu", optimizer_name="adam")
# train tiny imagenet: 4
#train_cnn_7layer(tinyImagenet(), file_name="models/tiny_cnn_7layer_4", params=[100,100,50,50,200,200], num_epochs=30, lr=0.0001, decay=0, activation="relu", optimizer_name="adam")
# train tiny imagenet: 5
#train_cnn_7layer(tinyImagenet(), file_name="models/tiny_cnn_7layer_5", params=[32,32,64,64,200,200], num_epochs=50, lr=0.001, decay=11e-5, activation="relu", optimizer_name="adam")
# train tiny imagenet: 6
train_cnn_7layer(tinyImagenet(), file_name="models/tiny_cnn_7layer_6", params=[32, 32, 64, 64, 200, 200], num_epochs=60, lr=0.0001, decay=1e-5, activation="relu", optimizer_name="adam")
# train tiny imagenet: 7
train_cnn_7layer(tinyImagenet(), file_name="models/tiny_cnn_7layer_7", params=[32, 32, 64, 64, 200, 200], num_epochs=100, lr=0.00005, decay=0, activation="relu", optimizer_name="adam")
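# Optional refactor sketch (not part of the original script): the runs above differ only
# in hyperparameters, so a small driver like this could launch them from a config list.
# The helper name and config format are assumptions; train_cnn_7layer's signature is
# taken from the calls above. Defined only, not invoked, to avoid re-running training.
def train_tiny_configs(configs):
    """configs: list of (file_name, params, num_epochs, lr, decay) tuples."""
    for file_name, params, num_epochs, lr, decay in configs:
        train_cnn_7layer(tinyImagenet(), file_name=file_name, params=params,
                         num_epochs=num_epochs, lr=lr, decay=decay,
                         activation="relu", optimizer_name="adam")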