def test_lenet_mnist_coverage_ascend():
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    # load network
    net = Net()
    model = Model(net)

    # initialize fuzz test with training dataset
    training_data = (np.random.random((10000, 10))*20).astype(np.float32)
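    # 10 = neuron_num (number of neurons to monitor) and 1000 = segmented_num
    # (sections each neuron's output interval is split into for KMNC)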
    model_fuzz_test = ModelCoverageMetrics(model, 10, 1000, training_data)

    # fuzz test with original test data
    # get test data
    test_data = (np.random.random((2000, 10))*20).astype(np.float32)
    test_labels = np.random.randint(0, 10, 2000)
    test_labels = (np.eye(10)[test_labels]).astype(np.float32)
    model_fuzz_test.calculate_coverage(test_data)
    LOGGER.info(TAG, 'KMNC of this test is : %s', model_fuzz_test.get_kmnc())
    LOGGER.info(TAG, 'NBC of this test is : %s', model_fuzz_test.get_nbc())
    LOGGER.info(TAG, 'SNAC of this test is : %s', model_fuzz_test.get_snac())

    # generate adv_data
    attack = FastGradientSignMethod(net, eps=0.3)
    adv_data = attack.batch_generate(test_data, test_labels, batch_size=32)
    model_fuzz_test.calculate_coverage(adv_data, bias_coefficient=0.5)
    LOGGER.info(TAG, 'KMNC of this test is : %s', model_fuzz_test.get_kmnc())
    LOGGER.info(TAG, 'NBC of this test is : %s', model_fuzz_test.get_nbc())
    LOGGER.info(TAG, 'SNAC of this test is : %s', model_fuzz_test.get_snac())

def test_lenet_mnist_coverage_cpu():
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    # load network
    net = Net()
    model = Model(net)

    # initialize fuzz test with training dataset
    neuron_num = 10
    segmented_num = 1000
    training_data = (np.random.random((10000, 10)) * 20).astype(np.float32)
    model_fuzz_test = ModelCoverageMetrics(model, neuron_num, segmented_num,
                                           training_data)

    # fuzz test with original test data
    # get test data
    test_data = (np.random.random((2000, 10)) * 20).astype(np.float32)
    test_labels = np.random.randint(0, 10, 2000).astype(np.int32)
    model_fuzz_test.calculate_coverage(test_data)
    LOGGER.info(TAG, 'KMNC of this test is : %s', model_fuzz_test.get_kmnc())
    LOGGER.info(TAG, 'NBC of this test is : %s', model_fuzz_test.get_nbc())
    LOGGER.info(TAG, 'SNAC of this test is : %s', model_fuzz_test.get_snac())

    # generate adv_data
    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
    attack = FastGradientSignMethod(net, eps=0.3, loss_fn=loss)
    adv_data = attack.batch_generate(test_data, test_labels, batch_size=32)
    model_fuzz_test.calculate_coverage(adv_data, bias_coefficient=0.5)
    LOGGER.info(TAG, 'KMNC of this test is : %s', model_fuzz_test.get_kmnc())
    LOGGER.info(TAG, 'NBC of this test is : %s', model_fuzz_test.get_nbc())
    LOGGER.info(TAG, 'SNAC of this test is : %s', model_fuzz_test.get_snac())
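
The snippets on this page are excerpted from larger test files and omit their imports. A minimal preamble the two coverage tests above assume (a sketch only; MindArmour module paths changed between releases, so adjust to the installed version):

import numpy as np
from mindspore import context
from mindspore.nn import SoftmaxCrossEntropyWithLogits
from mindspore.train.model import Model
# older releases: from mindarmour.attacks import FastGradientSignMethod
from mindarmour.adv_robustness.attacks import FastGradientSignMethod
# older releases: from mindarmour.fuzzing import ModelCoverageMetrics
from mindarmour.fuzz_testing import ModelCoverageMetrics
from mindarmour.utils.logger import LogUtil

LOGGER = LogUtil.get_instance()
TAG = 'Neuron coverage test'
# Net is the small test network defined elsewhere in the same test file.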
Example #3
def test_ead():
    """UT for ensemble adversarial defense."""
    num_classes = 10
    batch_size = 64

    sparse = False
    context.set_context(mode=context.GRAPH_MODE)
    context.set_context(device_target='Ascend')

    # create test data
    inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
    labels = np.random.randint(num_classes, size=batch_size).astype(np.int32)
    if not sparse:
        labels = np.eye(num_classes)[labels].astype(np.float32)

    net = Net()
    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=sparse)
    optimizer = Momentum(net.trainable_params(), 0.001, 0.9)

    fgsm = FastGradientSignMethod(net, loss_fn=loss_fn)
    pgd = ProjectedGradientDescent(net, loss_fn=loss_fn)
    ead = EnsembleAdversarialDefense(net, [fgsm, pgd],
                                     loss_fn=loss_fn,
                                     optimizer=optimizer)
    LOGGER.set_level(logging.DEBUG)
    LOGGER.debug(TAG, '---start ensemble adversarial defense--')
    loss = ead.defense(inputs, labels)
    LOGGER.debug(TAG, '---end ensemble adversarial defense--')
    assert np.any(loss >= 0.0)
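
ead.defense runs one adversarially-augmented training step on a single batch and returns the loss. A minimal multi-epoch driver (a hypothetical helper, assuming the numpy input/label format used above):

def run_ead(ead, inputs, labels, batch_size=64, epochs=5):
    """Hypothetical helper: drive EnsembleAdversarialDefense over mini-batches."""
    loss = None
    num_batches = inputs.shape[0] // batch_size
    for _ in range(epochs):
        for i in range(num_batches):
            x = inputs[i * batch_size:(i + 1) * batch_size]
            y = labels[i * batch_size:(i + 1) * batch_size]
            loss = ead.defense(x, y)  # one defense step on this batch
    return loss
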
def test_fast_gradient_sign_method():
    """
    Fast gradient sign method unit test.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32)
    label = np.asarray([2], np.int32)
    label = np.eye(3)[label].astype(np.float32)

    attack = FastGradientSignMethod(Net())
    ms_adv_x = attack.generate(input_np, label)

    assert np.any(ms_adv_x != input_np), 'Fast gradient sign method: generate' \
                                         ' value must not be equal to' \
                                         ' original value.'
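
The assert holds because FGSM shifts each input coordinate by eps in the direction of the sign of the loss gradient. A framework-agnostic numpy sketch of the update (illustrative only; the real attack also handles batching and clipping bounds):

import numpy as np

def fgsm_step(x, grad, eps=0.3, bounds=(0.0, 1.0)):
    # x_adv = clip(x + eps * sign(dLoss/dx), lo, hi)
    return np.clip(x + eps * np.sign(grad), bounds[0], bounds[1])
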
def test_lenet_mnist_coverage():
    # load trained network
    ckpt_path = '../common/networks/lenet5/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()
    load_dict = load_checkpoint(ckpt_path)
    load_param_into_net(net, load_dict)
    model = Model(net)

    # get training data
    data_list = "../common/dataset/MNIST/train"
    batch_size = 32
    ds = generate_mnist_dataset(data_list, batch_size, sparse=True)
    train_images = []
    for data in ds.create_tuple_iterator(output_numpy=True):
        images = data[0].astype(np.float32)
        train_images.append(images)
    train_images = np.concatenate(train_images, axis=0)

    # initialize fuzz test with training dataset
    model_fuzz_test = ModelCoverageMetrics(model, 10, 1000, train_images)

    # fuzz test with original test data
    # get test data
    data_list = "../common/dataset/MNIST/test"
    batch_size = 32
    ds = generate_mnist_dataset(data_list, batch_size, sparse=True)
    test_images = []
    test_labels = []
    for data in ds.create_tuple_iterator(output_numpy=True):
        images = data[0].astype(np.float32)
        labels = data[1]
        test_images.append(images)
        test_labels.append(labels)
    test_images = np.concatenate(test_images, axis=0)
    test_labels = np.concatenate(test_labels, axis=0)
    model_fuzz_test.calculate_coverage(test_images)
    LOGGER.info(TAG, 'KMNC of this test is : %s', model_fuzz_test.get_kmnc())
    LOGGER.info(TAG, 'NBC of this test is : %s', model_fuzz_test.get_nbc())
    LOGGER.info(TAG, 'SNAC of this test is : %s', model_fuzz_test.get_snac())

    # generate adv_data
    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
    attack = FastGradientSignMethod(net, eps=0.3, loss_fn=loss)
    adv_data = attack.batch_generate(test_images, test_labels, batch_size=32)
    model_fuzz_test.calculate_coverage(adv_data, bias_coefficient=0.5)
    LOGGER.info(TAG, 'KMNC of this adv data is : %s', model_fuzz_test.get_kmnc())
    LOGGER.info(TAG, 'NBC of this adv data is : %s', model_fuzz_test.get_nbc())
    LOGGER.info(TAG, 'SNAC of this adv data is : %s', model_fuzz_test.get_snac())

def test_fast_gradient_sign_method():
    """
    FGSM-Attack test
    """
    context.set_context(mode=context.GRAPH_MODE)
    # get network
    net = resnet50_cifar10(10)

    # create test data
    test_images = np.random.rand(64, 3, 224, 224).astype(np.float32)
    test_labels = np.random.randint(10, size=64).astype(np.int32)
    # attacking
    loss_fn = CrossEntropyLoss()
    attack = FastGradientSignMethod(net, eps=0.1, loss_fn=loss_fn)
    adv_data = attack.batch_generate(test_images, test_labels, batch_size=32)
    assert np.any(adv_data != test_images)
Example #7
def test_nad_method():
    """
    NAD-Defense test.
    """
    mnist_path = "../../common/dataset/MNIST"
    batch_size = 32
    # 1. train original model
    ds_train = generate_mnist_dataset(os.path.join(mnist_path, "train"),
                                      batch_size=batch_size,
                                      repeat_size=1)
    net = LeNet5()
    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
    opt = nn.Momentum(net.trainable_params(), 0.01, 0.09)
    model = Model(net, loss, opt, metrics=None)
    model.train(10,
                ds_train,
                callbacks=[LossMonitor()],
                dataset_sink_mode=False)

    # 2. get test data
    ds_test = generate_mnist_dataset(os.path.join(mnist_path, "test"),
                                     batch_size=batch_size,
                                     repeat_size=1)
    inputs = []
    labels = []
    for data in ds_test.create_tuple_iterator():
        inputs.append(data[0].asnumpy().astype(np.float32))
        labels.append(data[1].asnumpy())
    inputs = np.concatenate(inputs)
    labels = np.concatenate(labels)

    # 3. get accuracy of test data on original model
    net.set_train(False)
    acc_list = []
    batches = inputs.shape[0] // batch_size
    for i in range(batches):
        batch_inputs = inputs[i * batch_size:(i + 1) * batch_size]
        batch_labels = labels[i * batch_size:(i + 1) * batch_size]
        logits = net(Tensor(batch_inputs)).asnumpy()
        label_pred = np.argmax(logits, axis=1)
        acc_list.append(np.mean(batch_labels == label_pred))

    LOGGER.info(TAG, 'accuracy of TEST data on original model is : %s',
                np.mean(acc_list))

    # 4. get adv of test data
    attack = FastGradientSignMethod(net, eps=0.3, loss_fn=loss)
    adv_data = attack.batch_generate(inputs, labels)
    LOGGER.info(TAG, 'adv_data.shape is : %s', adv_data.shape)

    # 5. get accuracy of adv data on original model
    acc_list = []
    batches = adv_data.shape[0] // batch_size
    for i in range(batches):
        batch_inputs = adv_data[i * batch_size:(i + 1) * batch_size]
        batch_labels = labels[i * batch_size:(i + 1) * batch_size]
        logits = net(Tensor(batch_inputs)).asnumpy()
        label_pred = np.argmax(logits, axis=1)
        acc_list.append(np.mean(batch_labels == label_pred))

    LOGGER.info(TAG, 'accuracy of adv data on original model is : %s',
                np.mean(acc_list))

    # 6. defense
    ds_train = generate_mnist_dataset(os.path.join(mnist_path, "train"),
                                      batch_size=batch_size,
                                      repeat_size=1)
    inputs_train = []
    labels_train = []
    for data in ds_train.create_tuple_iterator():
        inputs_train.append(data[0].asnumpy().astype(np.float32))
        labels_train.append(data[1].asnumpy())
    inputs_train = np.concatenate(inputs_train)
    labels_train = np.concatenate(labels_train)
    net.set_train()
    nad = NaturalAdversarialDefense(net,
                                    loss_fn=loss,
                                    optimizer=opt,
                                    bounds=(0.0, 1.0),
                                    eps=0.3)
    nad.batch_defense(inputs_train, labels_train, batch_size=32, epochs=10)

    # 7. get accuracy of test data on defensed model
    net.set_train(False)
    acc_list = []
    batches = inputs.shape[0] // batch_size
    for i in range(batches):
        batch_inputs = inputs[i * batch_size:(i + 1) * batch_size]
        batch_labels = labels[i * batch_size:(i + 1) * batch_size]
        logits = net(Tensor(batch_inputs)).asnumpy()
        label_pred = np.argmax(logits, axis=1)
        acc_list.append(np.mean(batch_labels == label_pred))

    LOGGER.info(TAG, 'accuracy of TEST data on defensed model is : %s',
                np.mean(acc_list))

    # 8. get accuracy of adv data on defensed model
    acc_list = []
    batches = adv_data.shape[0] // batch_size
    for i in range(batches):
        batch_inputs = adv_data[i * batch_size:(i + 1) * batch_size]
        batch_labels = labels[i * batch_size:(i + 1) * batch_size]
        logits = net(Tensor(batch_inputs)).asnumpy()
        label_pred = np.argmax(logits, axis=1)
        acc_list.append(np.mean(batch_labels == label_pred))

    LOGGER.info(TAG, 'accuracy of adv data on defensed model is : %s',
                np.mean(acc_list))
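
Steps 3, 5, 7 and 8 above run the same batched-accuracy loop four times; a small helper with identical logic (hypothetical, not part of the original test) would remove the duplication:

def batch_accuracy(net, data, labels, batch_size=32):
    """Mean top-1 accuracy of net over data, evaluated batch by batch."""
    acc_list = []
    for i in range(data.shape[0] // batch_size):
        batch = Tensor(data[i * batch_size:(i + 1) * batch_size])
        pred = np.argmax(net(batch).asnumpy(), axis=1)
        acc_list.append(np.mean(pred == labels[i * batch_size:(i + 1) * batch_size]))
    return np.mean(acc_list)
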
def test_fast_gradient_sign_method():
    """
    FGSM-Attack test for CPU device.
    """
    # load trained network
    ckpt_path = '../../../common/networks/lenet5/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()
    load_dict = load_checkpoint(ckpt_path)
    load_param_into_net(net, load_dict)

    # get test data
    data_list = "../../../common/dataset/MNIST/test"
    batch_size = 32
    ds = generate_mnist_dataset(data_list, batch_size)

    # prediction accuracy before attack
    model = Model(net)
    batch_num = 3  # the number of batches of attacking samples
    test_images = []
    test_labels = []
    predict_labels = []
    i = 0
    for data in ds.create_tuple_iterator(output_numpy=True):
        i += 1
        images = data[0].astype(np.float32)
        labels = data[1]
        test_images.append(images)
        test_labels.append(labels)
        pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(),
                                axis=1)
        predict_labels.append(pred_labels)
        if i >= batch_num:
            break
    predict_labels = np.concatenate(predict_labels)
    true_labels = np.concatenate(test_labels)
    accuracy = np.mean(np.equal(predict_labels, true_labels))
    LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)

    # attacking
    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
    attack = FastGradientSignMethod(net, eps=0.3, loss_fn=loss)
    start_time = time.time()
    adv_data = attack.batch_generate(np.concatenate(test_images),
                                     true_labels, batch_size=32)
    stop_time = time.time()
    np.save('./adv_data', adv_data)
    pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy()
    # rescale predict confidences into (0, 1).
    pred_logits_adv = softmax(pred_logits_adv, axis=1)
    pred_labels_adv = np.argmax(pred_logits_adv, axis=1)
    accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels))
    LOGGER.info(TAG, "prediction accuracy after attacking is : %s", accuracy_adv)
    attack_evaluate = AttackEvaluate(np.concatenate(test_images).transpose(0, 2, 3, 1),
                                     np.eye(10)[true_labels],
                                     adv_data.transpose(0, 2, 3, 1),
                                     pred_logits_adv)
    LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s',
                attack_evaluate.mis_classification_rate())
    LOGGER.info(TAG, 'The average confidence of adversarial class is : %s',
                attack_evaluate.avg_conf_adv_class())
    LOGGER.info(TAG, 'The average confidence of true class is : %s',
                attack_evaluate.avg_conf_true_class())
    LOGGER.info(TAG, 'The average distances (l0, l2, linf) between original '
                     'samples and adversarial samples are: %s',
                attack_evaluate.avg_lp_distance())
    LOGGER.info(TAG, 'The average structural similarity between original '
                     'samples and adversarial samples is: %s',
                attack_evaluate.avg_ssim())
    LOGGER.info(TAG, 'The average time cost per sample is %s',
                (stop_time - start_time)/(batch_num*batch_size))
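
The softmax used above is not defined in the snippet; presumably it is SciPy's, e.g.:

from scipy.special import softmax  # assumed source of softmax() used above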
Example #9
def test_defense_evaluation():
    # load trained network
    current_dir = os.path.dirname(os.path.abspath(__file__))
    ckpt_path = os.path.abspath(
        os.path.join(
            current_dir,
            '../../common/networks/lenet5/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
        ))
    wb_net = LeNet5()
    load_dict = load_checkpoint(ckpt_path)
    load_param_into_net(wb_net, load_dict)

    # get test data
    data_list = "../../common/dataset/MNIST/test"
    batch_size = 32
    ds_test = generate_mnist_dataset(data_list, batch_size=batch_size)
    inputs = []
    labels = []
    for data in ds_test.create_tuple_iterator(output_numpy=True):
        inputs.append(data[0].astype(np.float32))
        labels.append(data[1])
    inputs = np.concatenate(inputs).astype(np.float32)
    labels = np.concatenate(labels).astype(np.int32)

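    # choose a random target class per sample, re-drawing until it differs
    # from the true label (targets for the targeted black-box attack below)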
    target_label = np.random.randint(0, 10, size=labels.shape[0])
    for idx in range(labels.shape[0]):
        while target_label[idx] == labels[idx]:
            target_label[idx] = np.random.randint(0, 10)
    target_label = np.eye(10)[target_label].astype(np.float32)

    attacked_size = 50
    benign_size = 500

    attacked_sample = inputs[:attacked_size]
    attacked_true_label = labels[:attacked_size]
    benign_sample = inputs[attacked_size:attacked_size + benign_size]

    wb_model = ModelToBeAttacked(wb_net)

    # gen white-box adversarial examples of test data
    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
    wb_attack = FastGradientSignMethod(wb_net, eps=0.3, loss_fn=loss)
    wb_adv_sample = wb_attack.generate(attacked_sample, attacked_true_label)

    wb_raw_preds = softmax(wb_model.predict(wb_adv_sample), axis=1)
    accuracy_test = np.mean(
        np.equal(np.argmax(wb_model.predict(attacked_sample), axis=1),
                 attacked_true_label))
    LOGGER.info(TAG, "prediction accuracy before white-box attack is : %s",
                accuracy_test)
    accuracy_adv = np.mean(
        np.equal(np.argmax(wb_raw_preds, axis=1), attacked_true_label))
    LOGGER.info(TAG, "prediction accuracy after white-box attack is : %s",
                accuracy_adv)

    # improve the robustness of model with white-box adversarial examples
    opt = nn.Momentum(wb_net.trainable_params(), 0.01, 0.09)

    nad = NaturalAdversarialDefense(wb_net,
                                    loss_fn=loss,
                                    optimizer=opt,
                                    bounds=(0.0, 1.0),
                                    eps=0.3)
    wb_net.set_train(False)
    nad.batch_defense(inputs[:5000], labels[:5000], batch_size=32, epochs=10)

    wb_def_preds = wb_net(Tensor(wb_adv_sample)).asnumpy()
    wb_def_preds = softmax(wb_def_preds, axis=1)
    accuracy_def = np.mean(
        np.equal(np.argmax(wb_def_preds, axis=1), attacked_true_label))
    LOGGER.info(TAG, "prediction accuracy after defense is : %s", accuracy_def)

    # calculate defense evaluation metrics for defense against white-box attack
    wb_def_evaluate = DefenseEvaluate(wb_raw_preds, wb_def_preds,
                                      attacked_true_label)
    LOGGER.info(TAG, 'defense evaluation for white-box adversarial attack')
    LOGGER.info(
        TAG, 'classification accuracy variance (CAV) is : {:.2f}'.format(
            wb_def_evaluate.cav()))
    LOGGER.info(
        TAG, 'classification rectify ratio (CRR) is : {:.2f}'.format(
            wb_def_evaluate.crr()))
    LOGGER.info(
        TAG, 'classification sacrifice ratio (CSR) is : {:.2f}'.format(
            wb_def_evaluate.csr()))
    LOGGER.info(
        TAG, 'classification confidence variance (CCV) is : {:.2f}'.format(
            wb_def_evaluate.ccv()))
    LOGGER.info(
        TAG, 'classification output stability is : {:.2f}'.format(
            wb_def_evaluate.cos()))

    # calculate defense evaluation metrics for defense against black-box attack
    LOGGER.info(TAG, 'defense evaluation for black-box adversarial attack')
    bb_raw_preds = []
    bb_def_preds = []
    raw_query_counts = []
    raw_query_time = []
    def_query_counts = []
    def_query_time = []
    def_detection_counts = []

    # gen black-box adversarial examples of test data
    bb_net = LeNet5()
    load_param_into_net(bb_net, load_dict)
    bb_model = ModelToBeAttacked(bb_net, defense=False)
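    # black-box genetic attack; sparse=False because the target labels passed
    # to generate() below are one-hot encoded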
    attack_rm = GeneticAttack(model=bb_model,
                              pop_size=6,
                              mutation_rate=0.05,
                              per_bounds=0.5,
                              step_size=0.25,
                              temp=0.1,
                              sparse=False)
    attack_target_label = target_label[:attacked_size]
    true_label = labels[:attacked_size + benign_size]
    # evaluate robustness of original model
    # gen black-box adversarial examples of test data
    for idx in range(attacked_size):
        raw_st = time.time()
        _, raw_a, raw_qc = attack_rm.generate(
            np.expand_dims(attacked_sample[idx], axis=0),
            np.expand_dims(attack_target_label[idx], axis=0))
        raw_t = time.time() - raw_st
        bb_raw_preds.extend(softmax(bb_model.predict(raw_a), axis=1))
        raw_query_counts.extend(raw_qc)
        raw_query_time.append(raw_t)

    for idx in range(benign_size):
        raw_st = time.time()
        bb_raw_pred = softmax(bb_model.predict(
            np.expand_dims(benign_sample[idx], axis=0)),
                              axis=1)
        raw_t = time.time() - raw_st
        bb_raw_preds.extend(bb_raw_pred)
        raw_query_counts.extend([0])
        raw_query_time.append(raw_t)

    accuracy_test = np.mean(
        np.equal(np.argmax(bb_raw_preds[0:len(attack_target_label)], axis=1),
                 np.argmax(attack_target_label, axis=1)))
    LOGGER.info(TAG, "attack success rate before adv defense is : %s",
                accuracy_test)

    # improve the robustness of model with similarity-based detector
    bb_def_model = ModelToBeAttacked(bb_net,
                                     defense=True,
                                     train_images=inputs[0:6000])
    # attack defensed model
    attack_dm = GeneticAttack(model=bb_def_model,
                              pop_size=6,
                              mutation_rate=0.05,
                              per_bounds=0.5,
                              step_size=0.25,
                              temp=0.1,
                              sparse=False)
    for idx in range(attacked_size):
        def_st = time.time()
        _, def_a, def_qc = attack_dm.generate(
            np.expand_dims(attacked_sample[idx], axis=0),
            np.expand_dims(attack_target_label[idx], axis=0))
        def_t = time.time() - def_st
        det_res = bb_def_model.get_detected_result()
        def_detection_counts.append(np.sum(det_res[-def_qc[0]:]))
        bb_def_preds.extend(softmax(bb_def_model.predict(def_a), axis=1))
        def_query_counts.extend(def_qc)
        def_query_time.append(def_t)

    for idx in range(benign_size):
        def_st = time.time()
        bb_def_pred = softmax(bb_def_model.predict(
            np.expand_dims(benign_sample[idx], axis=0)),
                              axis=1)
        def_t = time.time() - def_st
        det_res = bb_def_model.get_detected_result()
        def_detection_counts.append(np.sum(det_res[-1]))
        bb_def_preds.extend(bb_def_pred)
        def_query_counts.extend([0])
        def_query_time.append(def_t)

    accuracy_adv = np.mean(
        np.equal(np.argmax(bb_def_preds[0:len(attack_target_label)], axis=1),
                 np.argmax(attack_target_label, axis=1)))
    LOGGER.info(TAG, "attack success rate after adv defense is : %s",
                accuracy_adv)

    bb_raw_preds = np.array(bb_raw_preds).astype(np.float32)
    bb_def_preds = np.array(bb_def_preds).astype(np.float32)
    # assemble evaluation data; max_queries is the attack query budget
    max_queries = 6000

    def_evaluate = BlackDefenseEvaluate(bb_raw_preds, bb_def_preds,
                                        np.array(raw_query_counts),
                                        np.array(def_query_counts),
                                        np.array(raw_query_time),
                                        np.array(def_query_time),
                                        np.array(def_detection_counts),
                                        true_label, max_queries)

    LOGGER.info(
        TAG, 'query count variance of adversaries is : {:.2f}'.format(
            def_evaluate.qcv()))
    LOGGER.info(
        TAG, 'attack success rate variance of adversaries '
        'is : {:.2f}'.format(def_evaluate.asv()))
    LOGGER.info(
        TAG, 'false positive rate (FPR) of the query-based detector '
        'is : {:.2f}'.format(def_evaluate.fpr()))
    LOGGER.info(
        TAG, 'the benign query response time variance (QRV) '
        'is : {:.2f}'.format(def_evaluate.qrv()))