Code Example #1
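A MindArmour unit test for ProjectedGradientDescent: it runs the attack on a single three-class input for one to five iterations and asserts that each generated adversarial example differs from the original input.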
def test_projected_gradient_descent_method():
    """
    Projected gradient descent method unit test.
    """
    input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32)
    label = np.asarray([2], np.int32)
    label = np.eye(3)[label].astype(np.float32)

    for i in range(5):
        attack = ProjectedGradientDescent(Net(), nb_iter=i + 1)
        ms_adv_x = attack.generate(input_np, label)

        assert np.any(ms_adv_x != input_np), \
            'Projected gradient descent method: generated value must not ' \
            'be equal to the original value.'
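The snippets on this page are excerpts and omit their fixtures. Below is a minimal sketch of the imports and the Net stand-in this test appears to assume; the real fixture lives in the MindArmour test suite and may differ, and the import path shown is for recent MindArmour releases.

import numpy as np
import mindspore.nn as nn
import mindspore.ops as ops
# Assumed import path; older MindArmour releases exposed `mindarmour.attacks`.
from mindarmour.adv_robustness.attacks import ProjectedGradientDescent


class Net(nn.Cell):
    """Hypothetical stand-in: a three-class 'model' that only applies softmax,
    which is enough to give the attack a differentiable output."""
    def __init__(self):
        super(Net, self).__init__()
        self._softmax = ops.Softmax()

    def construct(self, inputs):
        return self._softmax(inputs)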
Code Example #2
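A unit test for EnsembleAdversarialDefense on Ascend: it builds FGSM and PGD attackers over the same network, hands them to the ensemble defense together with a loss function and a Momentum optimizer, runs one defense step on random data, and asserts the returned loss is non-negative.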
def test_ead():
    """UT for ensemble adversarial defense."""
    num_classes = 10
    batch_size = 64

    sparse = False
    context.set_context(mode=context.GRAPH_MODE, device_target='Ascend')

    # create test data
    inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
    labels = np.random.randint(num_classes, size=batch_size).astype(np.int32)
    if not sparse:
        labels = np.eye(num_classes)[labels].astype(np.float32)

    net = Net()
    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=sparse)
    optimizer = Momentum(net.trainable_params(), 0.001, 0.9)

    fgsm = FastGradientSignMethod(net, loss_fn=loss_fn)
    pgd = ProjectedGradientDescent(net, loss_fn=loss_fn)
    ead = EnsembleAdversarialDefense(net, [fgsm, pgd],
                                     loss_fn=loss_fn,
                                     optimizer=optimizer)
    LOGGER.set_level(logging.DEBUG)
    LOGGER.debug(TAG, '---start ensemble adversarial defense--')
    loss = ead.defense(inputs, labels)
    LOGGER.debug(TAG, '---end ensemble adversarial defense--')
    assert np.any(loss >= 0.0)
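As documented in MindArmour, EnsembleAdversarialDefense replaces a fraction of each training batch (its replace_ratio parameter) with adversarial examples drawn from the listed attacks before the optimizer step, so a single defense call already trains on both FGSM and PGD samples.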
Code Example #3
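An excerpt from a script that attacks a Faster R-CNN (ResNet-50) object detector: it loads a pretrained checkpoint, wraps the network and its loss in GradWrapWithLoss so the attack can differentiate through the detection loss, and iterates over a COCO-based MindRecord dataset, generating adversarial images batch by batch. The excerpt begins and ends mid-function.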
            print("coco_root not exits.")

    print('Start generate adversarial samples.')

    # build network and dataset
    ds = create_fasterrcnn_dataset(mindrecord_file, batch_size=config.test_batch_size,
                                   repeat_num=1, is_training=True)
    net = Faster_Rcnn_Resnet50(config)
    param_dict = load_checkpoint(pre_trained)
    load_param_into_net(net, param_dict)
    net = net.set_train()

    # build attacker
    with_loss_cell = WithLossCell(net, LossNet())
    grad_with_loss_net = GradWrapWithLoss(with_loss_cell)
    attack = ProjectedGradientDescent(grad_with_loss_net, bounds=None, eps=0.1)

    # generate adversarial samples
    num = args.num
    num_batches = num // config.test_batch_size
    channel = 3
    adv_samples = [0] * (num_batches * config.test_batch_size)
    adv_id = 0
    for data in ds.create_dict_iterator(num_epochs=num_batches):
        img_data = data['image']
        img_metas = data['image_shape']
        gt_bboxes = data['box']
        gt_labels = data['label']
        gt_num = data['valid_num']

        # Assumed completion of the truncated call: the remaining dataset
        # fields form the multi-input tuple, with the ground-truth labels
        # as the attack targets.
        adv_img = attack.generate((img_data.asnumpy(), img_metas.asnumpy(),
                                   gt_bboxes.asnumpy(), gt_labels.asnumpy(),
                                   gt_num.asnumpy()), gt_labels.asnumpy())
Code Example #4
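A CPU-targeted PGD test against a trained LeNet-5 MNIST classifier: it measures clean accuracy over 32 batches, generates adversarial examples with attack.batch_generate at eps=0.3, then reports post-attack accuracy and AttackEvaluate metrics (misclassification rate, average confidences, Lp distances, SSIM, and per-sample attack time).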
def test_projected_gradient_descent_method():
    """
    PGD-Attack test for CPU device.
    """
    # upload trained network
    ckpt_path = '../../../common/networks/lenet5/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
    net = LeNet5()
    load_dict = load_checkpoint(ckpt_path)
    load_param_into_net(net, load_dict)

    # get test data
    data_list = "../../../common/dataset/MNIST/test"
    batch_size = 32
    ds = generate_mnist_dataset(data_list, batch_size)

    # prediction accuracy before attack
    model = Model(net)
    batch_num = 32  # the number of batches of attacking samples
    test_images = []
    test_labels = []
    predict_labels = []
    i = 0
    for data in ds.create_tuple_iterator(output_numpy=True):
        i += 1
        images = data[0].astype(np.float32)
        labels = data[1]
        test_images.append(images)
        test_labels.append(labels)
        pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(),
                                axis=1)
        predict_labels.append(pred_labels)
        if i >= batch_num:
            break
    predict_labels = np.concatenate(predict_labels)
    true_labels = np.concatenate(test_labels)
    accuracy = np.mean(np.equal(predict_labels, true_labels))
    LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)

    # attacking
    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
    attack = ProjectedGradientDescent(net, eps=0.3, loss_fn=loss)
    start_time = time.process_time()
    adv_data = attack.batch_generate(np.concatenate(test_images),
                                     true_labels,
                                     batch_size=32)
    stop_time = time.process_time()
    np.save('./adv_data', adv_data)
    pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy()
    # rescale predicted logits into (0, 1) confidences with softmax
    pred_logits_adv = softmax(pred_logits_adv, axis=1)
    pred_labels_adv = np.argmax(pred_logits_adv, axis=1)
    accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels))
    LOGGER.info(TAG, "prediction accuracy after attacking is : %s",
                accuracy_adv)
    attack_evaluate = AttackEvaluate(
        np.concatenate(test_images).transpose(0, 2, 3, 1),
        np.eye(10)[true_labels], adv_data.transpose(0, 2, 3, 1),
        pred_logits_adv)
    LOGGER.info(TAG, 'misclassification rate of adversarial samples is: %s',
                attack_evaluate.mis_classification_rate())
    LOGGER.info(TAG, 'The average confidence of the adversarial class is: %s',
                attack_evaluate.avg_conf_adv_class())
    LOGGER.info(TAG, 'The average confidence of the true class is: %s',
                attack_evaluate.avg_conf_true_class())
    LOGGER.info(
        TAG, 'The average distances (l0, l2, linf) between original '
        'samples and adversarial samples are: %s',
        attack_evaluate.avg_lp_distance())
    LOGGER.info(
        TAG, 'The average structural similarity between original '
        'samples and adversarial samples is: %s', attack_evaluate.avg_ssim())
    LOGGER.info(TAG, 'The average attack time per sample is %s s',
                (stop_time - start_time) / (batch_num * batch_size))
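For reference, a hedged sketch of the imports and module-level names this test appears to rely on; exact paths can differ across MindSpore and MindArmour releases, and LeNet5 and generate_mnist_dataset are fixtures from the surrounding repository rather than library APIs.

import time
import numpy as np
from scipy.special import softmax  # used to rescale logits into confidences
from mindspore import Tensor, Model, load_checkpoint, load_param_into_net
from mindspore.nn import SoftmaxCrossEntropyWithLogits
from mindarmour.adv_robustness.attacks import ProjectedGradientDescent
from mindarmour.adv_robustness.evaluations import AttackEvaluate
from mindarmour.utils.logger import LogUtil

LOGGER = LogUtil.get_instance()
TAG = 'PGD_Test'  # hypothetical tag; the original module defines its own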