def test_ad():
    """UT for adversarial defense."""
    num_classes = 10
    batch_size = 32
    sparse = False
    context.set_context(mode=context.GRAPH_MODE)
    context.set_context(device_target='Ascend')

    # create test data
    inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
    labels = np.random.randint(num_classes, size=batch_size).astype(np.int32)
    if not sparse:
        labels = np.eye(num_classes)[labels].astype(np.float32)

    net = Net()
    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=sparse)
    optimizer = Momentum(learning_rate=Tensor(np.array([0.001], np.float32)),
                         momentum=0.9,
                         params=net.trainable_params())

    ad_defense = AdversarialDefense(net, loss_fn=loss_fn, optimizer=optimizer)
    LOGGER.set_level(logging.DEBUG)
    LOGGER.debug(TAG, '--start adversarial defense--')
    loss = ad_defense.defense(inputs, labels)
    LOGGER.debug(TAG, '--end adversarial defense--')
    assert np.any(loss >= 0.0)
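# The tests in this section construct `Net()` without defining it here. A
# minimal sketch of what such a network is assumed to look like: a LeNet5-style
# classifier for 1x32x32 inputs and 10 classes, matching the shapes used in the
# tests; the real definition may differ.
class Net(nn.Cell):
    """LeNet5-style network used as the model under test (sketch)."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5, pad_mode='valid')
        self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid')
        self.fc1 = nn.Dense(16 * 5 * 5, 120)
        self.fc2 = nn.Dense(120, 84)
        self.fc3 = nn.Dense(84, 10)
        self.relu = nn.ReLU()
        self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
        self.flatten = nn.Flatten()

    def construct(self, x):
        x = self.max_pool2d(self.relu(self.conv1(x)))
        x = self.max_pool2d(self.relu(self.conv2(x)))
        x = self.flatten(x)
        x = self.relu(self.fc1(x))
        x = self.relu(self.fc2(x))
        return self.fc3(x)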
def test_pad():
    """UT for projected adversarial defense."""
    num_classes = 10
    batch_size = 32
    sparse = False
    context.set_context(mode=context.GRAPH_MODE)
    context.set_context(device_target='Ascend')

    # create test data
    inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
    labels = np.random.randint(num_classes, size=batch_size).astype(np.int32)
    if not sparse:
        labels = np.eye(num_classes)[labels].astype(np.float32)

    # construct network
    net = Net()
    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=sparse)
    optimizer = Momentum(net.trainable_params(), 0.001, 0.9)

    # defense
    pad = ProjectedAdversarialDefense(net, loss_fn=loss_fn, optimizer=optimizer)
    LOGGER.set_level(logging.DEBUG)
    LOGGER.debug(TAG, '--start projected adversarial defense--')
    loss = pad.defense(inputs, labels)
    LOGGER.debug(TAG, '--end projected adversarial defense--')
    assert np.any(loss >= 0.0)
def test_ead():
    """UT for ensemble adversarial defense."""
    num_classes = 10
    batch_size = 64
    sparse = False
    context.set_context(mode=context.GRAPH_MODE)
    context.set_context(device_target='Ascend')

    # create test data
    inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
    labels = np.random.randint(num_classes, size=batch_size).astype(np.int32)
    if not sparse:
        labels = np.eye(num_classes)[labels].astype(np.float32)

    # construct the network, the optimizer and the attacks used for ensemble training
    net = Net()
    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=sparse)
    optimizer = Momentum(net.trainable_params(), 0.001, 0.9)

    fgsm = FastGradientSignMethod(net, loss_fn=loss_fn)
    pgd = ProjectedGradientDescent(net, loss_fn=loss_fn)
    ead = EnsembleAdversarialDefense(net, [fgsm, pgd], loss_fn=loss_fn,
                                     optimizer=optimizer)
    LOGGER.set_level(logging.DEBUG)
    LOGGER.debug(TAG, '--start ensemble adversarial defense--')
    loss = ead.defense(inputs, labels)
    LOGGER.debug(TAG, '--end ensemble adversarial defense--')
    assert np.any(loss >= 0.0)
def test_dp_model_with_graph_mode():
    """UT for DPModel training in graph mode."""
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    norm_bound = 1.0
    initial_noise_multiplier = 0.01
    network = Net()
    batch_size = 32
    batches = 128
    epochs = 1
    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
    noise_mech = NoiseMechanismsFactory().create(
        'Gaussian',
        norm_bound=norm_bound,
        initial_noise_multiplier=initial_noise_multiplier)
    clip_mech = ClipMechanismsFactory().create(
        'Gaussian',
        decay_policy='Linear',
        learning_rate=0.01,
        target_unclipped_quantile=0.9,
        fraction_stddev=0.01)
    net_opt = nn.Momentum(network.trainable_params(),
                          learning_rate=0.1,
                          momentum=0.9)
    model = DPModel(micro_batches=2,
                    clip_mech=clip_mech,
                    norm_bound=norm_bound,
                    noise_mech=noise_mech,
                    network=network,
                    loss_fn=loss,
                    optimizer=net_opt,
                    metrics=None)
    ms_ds = ds.GeneratorDataset(dataset_generator(batch_size, batches),
                                ['data', 'label'])
    model.train(epochs, ms_ds, dataset_sink_mode=False)
def test_get_membership_inference_object():
    """UT for creating a MembershipInference object."""
    net = Net()
    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
    opt = nn.Momentum(params=net.trainable_params(), learning_rate=0.1,
                      momentum=0.9)
    model = Model(network=net, loss_fn=loss, optimizer=opt)
    inference_model = MembershipInference(model, -1)
    assert isinstance(inference_model, MembershipInference)
def get_model(current_dir):
    """Load the trained LeNet checkpoint and wrap it as the model to be attacked."""
    ckpt_path = os.path.join(
        current_dir,
        '../../../dataset/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt')
    net = Net()
    load_dict = load_checkpoint(ckpt_path)
    load_param_into_net(net, load_dict)
    net.set_train(False)
    model = ModelToBeAttacked(net)
    return model
def get_model():
    """Load the trained LeNet checkpoint and wrap it as the model to be attacked."""
    # load trained network
    current_dir = os.path.dirname(os.path.abspath(__file__))
    ckpt_path = os.path.join(
        current_dir,
        '../../../dataset/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt')
    net = Net()
    load_dict = load_checkpoint(ckpt_path)
    load_param_into_net(net, load_dict)
    net.set_train(False)
    model = ModelToBeAttacked(net)
    return model
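# `ModelToBeAttacked` is used by the helpers above but is not defined in this
# section. A minimal sketch of the wrapper as it is assumed to look, exposing
# the trained network through MindArmour's BlackModel interface (assumes
# `from mindarmour import BlackModel` and `from mindspore import Tensor` are
# imported); the real class may differ.
class ModelToBeAttacked(BlackModel):
    """Black-box wrapper around the trained network (sketch)."""

    def __init__(self, network):
        super(ModelToBeAttacked, self).__init__()
        self._network = network

    def predict(self, inputs):
        """Run the network on numpy inputs and return numpy predictions."""
        result = self._network(Tensor(inputs.astype(np.float32)))
        return result.asnumpy()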
def test_optimizer_cpu():
    """UT for creating a differentially private optimizer on CPU."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
    network = Net()
    lr = 0.01
    momentum = 0.9
    micro_batches = 2
    loss = nn.SoftmaxCrossEntropyWithLogits()
    factory = DPOptimizerClassFactory(micro_batches)
    factory.set_mechanisms('Gaussian',
                           norm_bound=1.5,
                           initial_noise_multiplier=5.0)
    net_opt = factory.create('SGD')(params=network.trainable_params(),
                                    learning_rate=lr,
                                    momentum=momentum)
    _ = Model(network, loss_fn=loss, optimizer=net_opt, metrics=None)
def test_lbfgs_attack():
    """
    LBFGS-Attack test.
    """
    np.random.seed(123)
    # load trained network
    current_dir = os.path.dirname(os.path.abspath(__file__))
    ckpt_path = os.path.join(
        current_dir,
        '../../dataset/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt')
    net = Net()
    load_dict = load_checkpoint(ckpt_path)
    load_param_into_net(net, load_dict)

    # get one mnist image
    input_np = np.load(
        os.path.join(current_dir, '../../dataset/test_images.npy'))[:1]
    label_np = np.load(
        os.path.join(current_dir, '../../dataset/test_labels.npy'))[:1]
    LOGGER.debug(TAG, 'true label is :{}'.format(label_np[0]))

    # pick a random target class different from the true label
    classes = 10
    target_np = np.random.randint(0, classes, 1)
    while target_np == label_np[0]:
        target_np = np.random.randint(0, classes, 1)
    target_np = np.eye(classes)[target_np].astype(np.float32)

    attack = LBFGS(net, is_targeted=True)
    LOGGER.debug(TAG, 'target_np is :{}'.format(target_np[0]))
    _ = attack.generate(input_np, target_np)
def test_pointwise_attack_method():
    """
    Pointwise attack method unit test.
    """
    np.random.seed(123)
    # load trained network
    current_dir = os.path.dirname(os.path.abspath(__file__))
    ckpt_path = os.path.join(
        current_dir,
        '../../../dataset/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt')
    net = Net()
    load_dict = load_checkpoint(ckpt_path)
    load_param_into_net(net, load_dict)

    # get three mnist images
    input_np = np.load(
        os.path.join(current_dir, '../../../dataset/test_images.npy'))[:3]
    labels = np.load(
        os.path.join(current_dir, '../../../dataset/test_labels.npy'))[:3]

    model = ModelToBeAttacked(net)
    pre_label = np.argmax(model.predict(input_np), axis=1)
    LOGGER.info(TAG, 'original sample predict labels are :{}'.format(pre_label))
    LOGGER.info(TAG, 'true labels are: {}'.format(labels))
    attack = PointWiseAttack(model, sparse=True, is_targeted=False)
    is_adv, adv_data, _ = attack.generate(input_np, pre_label)
    LOGGER.info(TAG, 'adv sample predict labels are: {}'.format(
        np.argmax(model.predict(adv_data), axis=1)))

    assert np.any(adv_data[is_adv][0] != input_np[is_adv][0]), \
        'Pointwise attack method: generated value must not be equal ' \
        'to original value.'
def test_membership_inference_eval():
    """UT for evaluating a MembershipInference attacker."""
    net = Net()
    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
    opt = nn.Momentum(params=net.trainable_params(), learning_rate=0.1,
                      momentum=0.9)
    model = Model(network=net, loss_fn=loss, optimizer=opt)
    inference_model = MembershipInference(model, -1)
    assert isinstance(inference_model, MembershipInference)

    batch_size = 16
    batches = 1
    eval_train = ds.GeneratorDataset(dataset_generator(batch_size, batches),
                                     ["image", "label"])
    eval_test = ds.GeneratorDataset(dataset_generator(batch_size, batches),
                                    ["image", "label"])
    metrics = ["precision", "accuracy", "recall"]
    inference_model.eval(eval_train, eval_test, metrics)
def test_dp_monitor_gpu():
    """UT for the RDP privacy monitor on GPU."""
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    batch_size = 16
    batches = 128
    epochs = 1
    rdp = PrivacyMonitorFactory.create(policy='rdp',
                                       num_samples=60000,
                                       batch_size=batch_size,
                                       initial_noise_multiplier=0.4,
                                       noise_decay_rate=6e-3)
    suggest_epoch = rdp.max_epoch_suggest()
    LOGGER.info(TAG, 'The recommended maximum number of training epochs is: %s',
                suggest_epoch)
    network = Net()
    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
    net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
    model = Model(network, net_loss, net_opt)

    LOGGER.info(TAG, "============== Starting Training ==============")
    ds1 = ds.GeneratorDataset(dataset_generator(batch_size, batches),
                              ["data", "label"])
    model.train(epochs, ds1, callbacks=[rdp], dataset_sink_mode=False)
def test_membership_inference_object_train():
    """UT for training a MembershipInference attacker."""
    net = Net()
    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
    opt = nn.Momentum(params=net.trainable_params(), learning_rate=0.1,
                      momentum=0.9)
    model = Model(network=net, loss_fn=loss, optimizer=opt)
    inference_model = MembershipInference(model, -1)
    assert isinstance(inference_model, MembershipInference)

    config = [{
        "method": "KNN",
        "params": {
            "n_neighbors": [3, 5, 7],
        }
    }]
    batch_size = 16
    batches = 1
    ds_train = ds.GeneratorDataset(dataset_generator(batch_size, batches),
                                   ["image", "label"])
    ds_test = ds.GeneratorDataset(dataset_generator(batch_size, batches),
                                  ["image", "label"])
    inference_model.train(ds_train, ds_test, config)
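# The DP-model, privacy-monitor and membership-inference tests above iterate
# over a `dataset_generator(batch_size, batches)` helper that is not defined in
# this section. A minimal sketch of what such a generator is assumed to look
# like, based on the input shapes the tests feed to the network (random 1x32x32
# float32 images and int32 labels in [0, 10)); the real helper may differ.
def dataset_generator(batch_size, batches):
    """Yield `batches` random (data, label) pairs shaped for the network under test."""
    for _ in range(batches):
        data = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
        label = np.random.randint(0, 10, batch_size).astype(np.int32)
        yield data, label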