def MIM(model, sess, epsilon, num_steps, data=MNIST()):
    """Generate MIM (momentum iterative method) adversarial examples for data's test set."""

    image_size = data.test_data.shape[1]
    num_channels = data.test_data.shape[3]
    num_labels = data.test_labels.shape[1]

    shape = (None, image_size, image_size, num_channels)
    model.x_input = tf.placeholder(tf.float32, shape)
    model.y_input = tf.placeholder(tf.float32, [None, num_labels])

    pre_softmax = model(model.x_input)
    y_loss = tf.nn.softmax_cross_entropy_with_logits(labels=model.y_input,
                                                     logits=pre_softmax)
    model.xent = tf.reduce_sum(y_loss)

    attack = Mim.MimAttack(model, epsilon, num_steps)
    if data.dataset == "cifar":
        # run the attack on CIFAR in two halves to keep memory usage down
        sets = np.split(data.test_data, 2)
        label_sets = np.split(data.test_labels, 2)

        p1 = attack.perturb(sets[0], label_sets[0], sess)
        p2 = attack.perturb(sets[1], label_sets[1], sess)
        return np.concatenate((p1, p2), axis=0)

    return attack.perturb(np.array(data.test_data), np.array(data.test_labels),
                          sess)
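
# Usage sketch: attacking a saved MNIST classifier with MIM. The checkpoint
# path is hypothetical; `loss` is the custom training objective also used by
# get_accuracy below. Both are illustrative assumptions, not part of this file.
import tensorflow as tf
from keras.models import load_model

with tf.Session() as sess:
    keras_model = load_model("models/mnist_cnn",  # hypothetical path
                             custom_objects={'fn': loss, 'tf': tf})
    adv = MIM(keras_model, sess, epsilon=0.3, num_steps=40)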
Code example #2
def get_accuracy(file_name, sess, epsilon, num_steps, step_size, data=MNIST()):
    """Load a saved model and report its accuracy on PGD adversarial examples."""
    model = load_model(file_name, custom_objects={'fn': loss, 'tf': tf, 'atan': tf.math.atan})
    start_time = time.time()
    adversaries = PGD(model, sess, epsilon, num_steps, step_size, data)
    predictions = model.predict(adversaries)
    accuracy = np.mean(np.equal(np.argmax(predictions, 1), np.argmax(data.test_labels, 1)))
    print(f"The accuracy was {accuracy}", flush=True)
    time_used = time.time() - start_time

    return accuracy, time_used
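
# Usage sketch: PGD robust accuracy of a saved model; the file name and the
# epsilon/step settings are illustrative assumptions.
with tf.Session() as sess:
    acc, secs = get_accuracy("models/mnist_cnn", sess,
                             epsilon=0.3, num_steps=40, step_size=0.01)
    print(f"robust accuracy {acc:.3f} in {secs:.1f}s")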
def get_data(dataset):
    """Return the dataset wrapper matching the given name."""
    if dataset == "mnist":
        data = MNIST()
    elif dataset == "cifar":
        data = CIFAR()
    elif dataset == "tinyImagenet":
        data = TinyImagenet()
    elif dataset == "caltechSilhouettes":
        data = CaltechSiluettes()
    elif dataset == "GTSRB":
        data = GTSRB()
    elif dataset == "sign-language":
        data = SignLanguage()
    elif dataset == "rockpaperscissors":
        data = RockPaperScissors()
    else:
        raise NameError(f"{dataset} is not a valid dataset")
    return data
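
# Usage sketch: the loader is keyed on the exact strings matched above;
# anything else raises NameError.
data = get_data("cifar")     # returns a CIFAR() wrapper
print(data.test_data.shape)  # e.g. (10000, 32, 32, 3) for CIFAR-10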
Code example #4
def convert(file_name, new_name, cifar=False):
    """Rebuild a saved CNN as an equivalent MLP, save it under new_name, and return its accuracy."""
    if not cifar:
        eq_weights, new_params = get_weights(file_name)
        data = MNIST()
    else:
        eq_weights, new_params = get_weights(file_name, inp_shape=(32, 32, 3))
        data = CIFAR()
    model = Sequential()
    model.add(Flatten(input_shape=data.train_data.shape[1:]))
    for param in new_params:
        model.add(Dense(param))
        model.add(Lambda(lambda x: tf.nn.relu(x)))
    model.add(Dense(10))

    # copy the equivalent weights into the new model layer by layer
    for i in range(len(eq_weights)):
        try:
            print(eq_weights[i][0].shape)
        except Exception:  # parameter-free layers (e.g. Flatten) have nothing to print
            pass
        model.layers[i].set_weights(eq_weights[i])

    sgd = SGD(lr=0.01, decay=1e-5, momentum=0.9, nesterov=True)

    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    model.save(new_name)
    acc = model.evaluate(data.validation_data, data.validation_labels)[1]
    printlog("Converting CNN to MLP")
    nlayer = file_name.split('_')[-3][0]
    filters = file_name.split('_')[-2]
    kernel_size = file_name.split('_')[-1]
    printlog(
        "model name = {0}, numlayer = {1}, filters = {2}, kernel size = {3}".
        format(file_name, nlayer, filters, kernel_size))
    printlog("Model accuracy: {:.3f}".format(acc))
    printlog("-----------------------------------")
    return acc
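
# Usage sketch: the file names are hypothetical, but convert() parses the
# trailing "<n>layer_<filters>_<kernel>" fields of the source name for
# logging, so the name must follow that pattern.
convert("models/mnist_cnn_3layer_5_3", "models/mnist_mlp_3layer_5_3")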
Code example #5
            # first try models/mnist_3layer_relu
            modelfile = "models/" + args.model + "_" + str(
                args.numlayer) + "layer_relu" + suffix
            # if still not found, try models/mnist_3layer_relu_1024_best
            if not os.path.isfile(modelfile):
                modelfile = "models/" + args.model + "_" + str(
                    args.numlayer) + "layer_relu_" + str(
                        nhidden) + "_best" + suffix
                if not os.path.isfile(modelfile):
                    modelfile = args.filename
                    if not os.path.isfile(modelfile):
                        raise (RuntimeError("cannot find model file"))

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        if args.model == "mnist":
            data = MNIST()
            if args.cnnmodel:
                model = nl.CNNModel(modelfile)
            elif args.filename:
                model = nl.NLayerModel(args.layers, modelfile)
            else:
                model = nl.NLayerModel([nhidden] * (args.numlayer - 1),
                                       modelfile)
        elif args.model == "cifar":
            data = CIFAR()
            if args.cnnmodel:
                model = nl.CNNModel(modelfile)
            elif args.filename:
                model = nl.NLayerModel(args.layers,
                                       modelfile,
                                       image_size=32)  # closing paren assumed; the source snippet is cut off here

def run(file_name,
        n_samples,
        p_n,
        q_n,
        activation='relu',
        cifar=False,
        tinyimagenet=False):
    """Compute the average certified robustness bound of a saved model with CNN-Cert."""
    # fix all seeds so image sampling and results are reproducible
    np.random.seed(1215)
    tf.set_random_seed(1215)
    random.seed(1215)
    keras_model = load_model(file_name,
                             custom_objects={
                                 'fn': fn,
                                 'tf': tf,
                                 'atan': tf.math.atan
                             })
    if tinyimagenet:
        model = CNNModel(keras_model, inp_shape=(64, 64, 3))
    elif cifar:
        model = CNNModel(keras_model, inp_shape=(32, 32, 3))
    else:
        model = CNNModel(keras_model)

    # Set the correct linear_bounds function for the chosen activation
    global linear_bounds
    if activation == 'relu':
        linear_bounds = relu_linear_bounds
    elif activation == 'ada':
        linear_bounds = ada_linear_bounds
    elif activation == 'sigmoid':
        linear_bounds = sigmoid_linear_bounds
    elif activation == 'tanh':
        linear_bounds = tanh_linear_bounds
    elif activation == 'arctan':
        linear_bounds = atan_linear_bounds

    start_time = time.time()
    upper_bound_conv.recompile()
    lower_bound_conv.recompile()
    compute_bounds.recompile()
    total_time = time.time() - start_time

    print("spent {} sec compiling bounds".format(total_time))

    if cifar:
        inputs, targets, true_labels, true_ids, img_info = generate_data(
            CIFAR(),
            samples=n_samples,
            targeted=True,
            random_and_least_likely=True,
            target_type=0b0001,
            predictor=model.model.predict,
            start=0)
    elif tinyimagenet:
        inputs, targets, true_labels, true_ids, img_info = generate_data(
            tinyImagenet(),
            samples=n_samples,
            targeted=True,
            random_and_least_likely=True,
            target_type=0b0001,
            predictor=model.model.predict,
            start=0)
    else:
        inputs, targets, true_labels, true_ids, img_info = generate_data(
            MNIST(),
            samples=n_samples,
            targeted=True,
            random_and_least_likely=True,
            target_type=0b0001,
            predictor=model.model.predict,
            start=0)
    #0b01111 <- all
    #0b0010 <- random
    #0b0001 <- top2
    #0b0100 <- least

    steps = 15
    eps_0 = 0.05
    summation = 0
    start_time = time.time()
    warmup(model, inputs[0].astype(np.float32), eps_0, p_n, find_output_bounds)
    total_time = time.time() - start_time

    print("spent {} sec warming up".format(total_time))

    start_time = time.time()
    for i in range(len(inputs)):
        #print('--- CNN-Cert: Computing eps for input image ' + str(i)+ '---')
        predict_label = np.argmax(true_labels[i])
        target_label = np.argmax(targets[i])
        weights = model.weights[:-1]
        biases = model.biases[:-1]
        shapes = model.shapes[:-1]
        W, b, s = model.weights[-1], model.biases[-1], model.shapes[-1]
        last_weight = (W[predict_label, :, :, :] -
                       W[target_label, :, :, :]).reshape([1] +
                                                         list(W.shape[1:]))
        weights.append(last_weight)
        biases.append(np.asarray([b[predict_label] - b[target_label]]))
        shapes.append((1, 1, 1))

        #Perform binary search
        log_eps = np.log(eps_0)
        log_eps_min = -np.inf
        log_eps_max = np.inf
        for j in range(steps):
            LB, UB = find_output_bounds(weights, biases, shapes, model.pads,
                                        model.strides,
                                        inputs[i].astype(np.float32),
                                        np.exp(log_eps), p_n)
            #print("Step {}, eps = {:.5f}, {:.6s} <= f_c - f_t <= {:.6s}".format(j,np.exp(log_eps),str(np.squeeze(LB)),str(np.squeeze(UB))))
            if LB > 0:  #Increase eps
                log_eps_min = log_eps
                log_eps = np.minimum(log_eps + 1,
                                     (log_eps_max + log_eps_min) / 2)
            else:  #Decrease eps
                log_eps_max = log_eps
                log_eps = np.maximum(log_eps - 1,
                                     (log_eps_max + log_eps_min) / 2)

        if p_n == 105:
            str_p_n = 'i'
        else:
            str_p_n = str(p_n)

        #print("[L1] method = CNN-Cert-{}, model = {}, image no = {}, true_id = {}, target_label = {}, true_label = {}, norm = {}, robustness = {:.5f}".format(activation,file_name, i, true_ids[i],target_label,predict_label,str_p_n,np.exp(log_eps_min)))
        summation += np.exp(log_eps_min)
    K.clear_session()

    eps_avg = summation / len(inputs)
    total_time = (time.time() - start_time) / len(inputs)
    print(
        "[L0] method = CNN-Cert-{}, model = {}, total images = {}, norm = {}, avg robustness = {:.5f}, avg runtime = {:.2f}"
        .format(activation, file_name, len(inputs), str_p_n, eps_avg,
                total_time))
    return eps_avg, total_time
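
# Usage sketch: average certified L_inf robustness (p_n == 105 encodes the
# infinity norm, per the branch above) over 10 MNIST images; the model file
# and the q_n value are illustrative assumptions.
eps_avg, avg_time = run("models/mnist_cnn_relu", n_samples=10,
                        p_n=105, q_n=1, activation='relu')
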
    history = model.fit(data.train_data,
                        data.train_labels,
                        batch_size=batch_size,
                        validation_data=(data.validation_data,
                                         data.validation_labels),
                        epochs=num_epochs,
                        shuffle=True)
    

    # save the trained model to a file if a name was given
    if file_name is not None:
        model.save(file_name)

    return {'model': model, 'history': history}



if __name__ == '__main__':
    if not os.path.isdir('models'):
        os.makedirs('models')

    train(MNIST(), file_name="models/mnist_resnet_2", nlayer=2, activation=tf.nn.relu)
    train(MNIST(), file_name="models/mnist_resnet_3", nlayer=3, activation=tf.nn.relu)
    train(MNIST(), file_name="models/mnist_resnet_4", nlayer=4, activation=tf.nn.relu)
    train(MNIST(), file_name="models/mnist_resnet_5", nlayer=5, activation=tf.nn.relu)

    train(MNIST(), file_name="models/mnist_resnet_2_sigmoid", nlayer=2, activation=tf.sigmoid)
    train(MNIST(), file_name="models/mnist_resnet_3_sigmoid", nlayer=3, activation=tf.sigmoid)
    train(MNIST(), file_name="models/mnist_resnet_4_sigmoid", nlayer=4, activation=tf.sigmoid)
    train(MNIST(), file_name="models/mnist_resnet_5_sigmoid", nlayer=5, activation=tf.sigmoid)

    train(MNIST(), file_name="models/mnist_resnet_2_tanh", nlayer=2, activation=tf.tanh)
    train(MNIST(), file_name="models/mnist_resnet_3_tanh", nlayer=3, activation=tf.tanh)
    train(MNIST(), file_name="models/mnist_resnet_4_tanh", nlayer=4, activation=tf.tanh)
    train(MNIST(), file_name="models/mnist_resnet_5_tanh", nlayer=5, activation=tf.tanh)

    train(MNIST(), file_name="models/mnist_resnet_2_atan", nlayer=2, activation=tf.atan)

    def fn(correct, predicted):
        # temperature-scaled softmax cross-entropy (train_temp is a
        # parameter of the enclosing train function)
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted /
                                                       train_temp)

    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    model.fit(data.train_data,
              data.train_labels,
              batch_size=batch_size,
              validation_data=(data.validation_data, data.validation_labels),
              epochs=num_epochs,
              shuffle=True)

    if file_name is not None:
        model.save(file_name)

    return model


if not os.path.isdir('models'):
    os.makedirs('models')

if __name__ == '__main__':
    train(MNIST(),
          "models/mnist_cnn_lenet_nopool", [6, 16, 100],
          num_epochs=10,
          pool=False)
    train(MNIST(), "models/mnist_cnn_lenet", [6, 16, 100], num_epochs=10)