# Example #1
0
def test_ead():
    """UT for ensemble adversarial defense.

    Runs one step of EnsembleAdversarialDefense (FGSM + PGD attacks) on a
    random batch and asserts the reported loss is non-negative.
    """
    num_classes = 10
    batch_size = 64

    sparse = False
    context.set_context(mode=context.GRAPH_MODE)
    context.set_context(device_target='Ascend')

    # create test data; one-hot encode labels when the loss is not sparse
    inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
    labels = np.random.randint(num_classes, size=batch_size).astype(np.int32)
    if not sparse:
        labels = np.eye(num_classes)[labels].astype(np.float32)

    # Build the network ONCE: the optimizer, the attacks and the defense must
    # all share the same instance.  (The original code created a second Net()
    # after the optimizer, so the optimizer updated the parameters of a
    # network that was never attacked or defended.)
    net = Net()
    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=sparse)
    optimizer = Momentum(net.trainable_params(), 0.001, 0.9)

    fgsm = FastGradientSignMethod(net, loss_fn=loss_fn)
    pgd = ProjectedGradientDescent(net, loss_fn=loss_fn)
    ead = EnsembleAdversarialDefense(net, [fgsm, pgd],
                                     loss_fn=loss_fn,
                                     optimizer=optimizer)
    LOGGER.set_level(logging.DEBUG)
    LOGGER.debug(TAG, '---start ensemble adversarial defense--')
    loss = ead.defense(inputs, labels)
    LOGGER.debug(TAG, '---end ensemble adversarial defense--')
    assert np.any(loss >= 0.0)
# Example #2
0
def test_pad():
    """UT for projected adversarial defense.

    Runs one defense step on a random batch and checks the loss is
    non-negative.
    """
    num_classes = 10
    batch_size = 32
    sparse = False

    context.set_context(mode=context.GRAPH_MODE)
    context.set_context(device_target='Ascend')

    # Random test batch; labels are one-hot encoded for the non-sparse loss.
    inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
    raw_labels = np.random.randint(num_classes, size=batch_size).astype(np.int32)
    labels = raw_labels if sparse else np.eye(num_classes)[raw_labels].astype(np.float32)

    # network, loss and optimizer under test
    net = Net()
    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=sparse)
    optimizer = Momentum(net.trainable_params(), 0.001, 0.9)

    # run the defense once and validate the reported loss
    pad = ProjectedAdversarialDefense(net, loss_fn=loss_fn, optimizer=optimizer)
    LOGGER.set_level(logging.DEBUG)
    LOGGER.debug(TAG, '---start projected adversarial defense--')
    loss = pad.defense(inputs, labels)
    LOGGER.debug(TAG, '---end projected adversarial defense--')
    assert np.any(loss >= 0.0)
# Example #3
0
def test_dp_model_with_graph_mode():
    """Train a DPModel for one epoch in graph mode with noise and clip mechanisms."""
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

    # differential-privacy hyper-parameters
    norm_bound = 1.0
    initial_noise_multiplier = 0.01
    batch_size = 32
    batches = 128
    epochs = 1

    network = Net()
    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)

    # Gaussian noise mechanism for gradient perturbation.
    noise_mech = NoiseMechanismsFactory().create(
        'Gaussian',
        norm_bound=norm_bound,
        initial_noise_multiplier=initial_noise_multiplier)
    # Adaptive gradient-clipping mechanism.
    clip_mech = ClipMechanismsFactory().create('Gaussian',
                                               decay_policy='Linear',
                                               learning_rate=0.01,
                                               target_unclipped_quantile=0.9,
                                               fraction_stddev=0.01)
    net_opt = nn.Momentum(network.trainable_params(),
                          learning_rate=0.1,
                          momentum=0.9)

    model = DPModel(micro_batches=2,
                    clip_mech=clip_mech,
                    norm_bound=norm_bound,
                    noise_mech=noise_mech,
                    network=network,
                    loss_fn=loss,
                    optimizer=net_opt,
                    metrics=None)
    ms_ds = ds.GeneratorDataset(dataset_generator(batch_size, batches),
                                ['data', 'label'])
    model.train(epochs, ms_ds, dataset_sink_mode=False)
# Example #4
0
def test_ad():
    """UT for adversarial defense.

    Performs one AdversarialDefense step on random data and checks the loss
    is non-negative.
    """
    num_classes = 10
    batch_size = 32
    sparse = False

    context.set_context(mode=context.GRAPH_MODE)
    context.set_context(device_target='Ascend')

    # random inputs; one-hot labels because the loss is non-sparse
    inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
    class_ids = np.random.randint(num_classes, size=batch_size).astype(np.int32)
    labels = class_ids if sparse else np.eye(num_classes)[class_ids].astype(np.float32)

    net = Net()
    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=sparse)
    # learning rate supplied as a Tensor rather than a scalar
    optimizer = Momentum(learning_rate=Tensor(np.array([0.001], np.float32)),
                         momentum=0.9,
                         params=net.trainable_params())

    defense_step = AdversarialDefense(net, loss_fn=loss_fn, optimizer=optimizer)
    LOGGER.set_level(logging.DEBUG)
    LOGGER.debug(TAG, '--start adversarial defense--')
    loss = defense_step.defense(inputs, labels)
    LOGGER.debug(TAG, '--end adversarial defense--')
    assert np.any(loss >= 0.0)
# Example #5
0
def test_get_membership_inference_object():
    """MembershipInference should be constructible from a compiled Model."""
    network = Net()
    criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
    momentum_opt = nn.Momentum(params=network.trainable_params(),
                               learning_rate=0.1,
                               momentum=0.9)
    wrapped_model = Model(network=network, loss_fn=criterion, optimizer=momentum_opt)
    attacker = MembershipInference(wrapped_model, -1)
    assert isinstance(attacker, MembershipInference)
# Example #6
0
def test_optimizer_cpu():
    """A DP-wrapped SGD optimizer should compile into a Model on CPU."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
    network = Net()
    micro_batches = 2
    loss = nn.SoftmaxCrossEntropyWithLogits()

    # Build a differentially-private SGD optimizer with a Gaussian mechanism.
    factory = DPOptimizerClassFactory(micro_batches)
    factory.set_mechanisms('Gaussian', norm_bound=1.5, initial_noise_multiplier=5.0)
    dp_sgd_cls = factory.create('SGD')
    net_opt = dp_sgd_cls(params=network.trainable_params(),
                         learning_rate=0.01,
                         momentum=0.9)

    # Construction alone is the assertion: it must not raise.
    _ = Model(network, loss_fn=loss, optimizer=net_opt, metrics=None)
# Example #7
0
def test_membership_inference_eval():
    """MembershipInference.eval should run on small train/test datasets."""
    network = Net()
    criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
    momentum_opt = nn.Momentum(params=network.trainable_params(),
                               learning_rate=0.1,
                               momentum=0.9)
    wrapped_model = Model(network=network, loss_fn=criterion, optimizer=momentum_opt)
    attacker = MembershipInference(wrapped_model, -1)
    assert isinstance(attacker, MembershipInference)

    # one tiny batch each for the "train" and "test" sides of the attack
    batch_size, batches = 16, 1
    eval_train = ds.GeneratorDataset(dataset_generator(batch_size, batches),
                                     ["image", "label"])
    eval_test = ds.GeneratorDataset(dataset_generator(batch_size, batches),
                                    ["image", "label"])

    attacker.eval(eval_train, eval_test, ["precision", "accuracy", "recall"])
# Example #8
0
def test_dp_monitor_gpu():
    """Train one epoch on GPU with an RDP privacy monitor callback attached."""
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    batch_size = 16
    batches = 128
    epochs = 1

    # RDP accountant that tracks the privacy budget during training.
    rdp = PrivacyMonitorFactory.create(policy='rdp',
                                       num_samples=60000,
                                       batch_size=batch_size,
                                       initial_noise_multiplier=0.4,
                                       noise_decay_rate=6e-3)
    suggest_epoch = rdp.max_epoch_suggest()
    LOGGER.info(TAG, 'The recommended maximum training epochs is: %s',
                suggest_epoch)

    network = Net()
    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
    net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
    model = Model(network, net_loss, net_opt)

    LOGGER.info(TAG, "============== Starting Training ==============")
    train_ds = ds.GeneratorDataset(dataset_generator(batch_size, batches),
                                   ["data", "label"])
    model.train(epochs, train_ds, callbacks=[rdp], dataset_sink_mode=False)
# Example #9
0
def test_membership_inference_object_train():
    """MembershipInference.train should accept a KNN attacker config."""
    network = Net()
    criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
    momentum_opt = nn.Momentum(params=network.trainable_params(),
                               learning_rate=0.1,
                               momentum=0.9)
    wrapped_model = Model(network=network, loss_fn=criterion, optimizer=momentum_opt)
    attacker = MembershipInference(wrapped_model, -1)
    assert isinstance(attacker, MembershipInference)

    # single attack method: KNN with a small hyper-parameter grid
    config = [{
        "method": "KNN",
        "params": {
            "n_neighbors": [3, 5, 7],
        }
    }]
    batch_size, batches = 16, 1
    ds_train = ds.GeneratorDataset(dataset_generator(batch_size, batches),
                                   ["image", "label"])
    ds_test = ds.GeneratorDataset(dataset_generator(batch_size, batches),
                                  ["image", "label"])
    attacker.train(ds_train, ds_test, config)