import numpy as np

from mindspore import context
from mindspore.nn import SoftmaxCrossEntropyWithLogits
from mindarmour.adv_robustness.attacks import FastGradientMethod
from mindarmour.utils.util import GradWrapWithLoss, WithLossCell

# Net, Net2 and LossNet are small test networks defined elsewhere in this module.


def test_batch_generate_attack():
    """
    Attack with batch-generate.
    """
    input_np = np.random.random((128, 10)).astype(np.float32)
    label = np.random.randint(0, 10, 128).astype(np.int32)
    label = np.eye(10)[label].astype(np.float32)

    attack = FastGradientMethod(Net(), loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
    ms_adv_x = attack.batch_generate(input_np, label, batch_size=32)

    assert np.any(ms_adv_x != input_np), 'Fast gradient method: generated value' \
                                         ' must not be equal to original value.'
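# `Net` is not shown here; it is defined elsewhere in the test module. The tests
# in this file only require a Cell whose output width matches the one-hot
# labels, so a stand-in as simple as a softmax layer would satisfy them.
# `_ExampleNet` below is a hypothetical sketch of such a stand-in, not the
# actual definition.
from mindspore.nn import Cell
from mindspore.ops import operations as P


class _ExampleNet(Cell):
    """Hypothetical stand-in for the target network used in these tests."""
    def __init__(self):
        super(_ExampleNet, self).__init__()
        self._softmax = P.Softmax()

    def construct(self, inputs):
        # Logits in, probabilities out; output shape equals input shape,
        # which matches the (N, 10) and (N, 3) one-hot labels used above.
        return self._softmax(inputs)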
def test_batch_generate():
    """
    Fast gradient method unit test.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    input_np = np.random.random([10, 3]).astype(np.float32)
    label = np.random.randint(0, 3, [10])
    label = np.eye(3)[label].astype(np.float32)

    loss_fn = SoftmaxCrossEntropyWithLogits(sparse=False)
    attack = FastGradientMethod(Net(), loss_fn=loss_fn)
    ms_adv_x = attack.batch_generate(input_np, label, batch_size=4)

    assert np.any(ms_adv_x != input_np), 'Fast gradient method: generated value' \
                                         ' must not be equal to original value.'
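# For contrast with `batch_generate`, mindarmour attacks also expose a plain
# `generate(inputs, labels)` that perturbs the whole array in a single pass.
# A minimal sketch under the same setup as the test above; the function name
# and shapes are illustrative only.
def example_generate_without_batching():
    input_np = np.random.random([10, 3]).astype(np.float32)
    label = np.eye(3)[np.random.randint(0, 3, [10])].astype(np.float32)

    attack = FastGradientMethod(Net(), loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
    # Equivalent to batch_generate with batch_size >= len(input_np).
    adv_x = attack.generate(input_np, label)

    assert adv_x.shape == input_np.shape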
def test_batch_generate_attack_multi_inputs():
    """
    Attack with batch-generate on multiple inputs.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    inputs1 = np.random.random((128, 10)).astype(np.float32)
    inputs2 = np.random.random((128, 10)).astype(np.float32)
    labels1 = np.random.randint(0, 10, 128).astype(np.int32)
    labels2 = np.random.randint(0, 10, 128).astype(np.int32)
    labels1 = np.eye(10)[labels1].astype(np.float32)
    labels2 = np.eye(10)[labels2].astype(np.float32)

    # For a multi-input model, the gradient-with-loss wrapper is built
    # explicitly and handed to the attack instead of a (network, loss_fn) pair.
    with_loss_cell = WithLossCell(Net2(), LossNet())
    grad_with_loss_net = GradWrapWithLoss(with_loss_cell)
    attack = FastGradientMethod(grad_with_loss_net)
    ms_adv_x = attack.batch_generate((inputs1, inputs2), (labels1, labels2), batch_size=32)

    assert np.any(ms_adv_x != inputs1), 'Fast gradient method: generated value' \
                                        ' must not be equal to original value.'
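# `Net2` and `LossNet` are likewise defined elsewhere in the test module. The
# sketch below only illustrates the contract the multi-input test relies on:
# the wrapped cell must accept all input tensors followed by all label tensors,
# so that GradWrapWithLoss can differentiate the loss w.r.t. every input.
# `_ExampleNet2` and `_ExampleLossNet` are assumptions, not the real classes.
class _ExampleNet2(Cell):
    """Hypothetical two-input network: one softmax branch per input."""
    def __init__(self):
        super(_ExampleNet2, self).__init__()
        self._softmax = P.Softmax()

    def construct(self, inputs1, inputs2):
        return self._softmax(inputs1), self._softmax(inputs2)


class _ExampleLossNet(Cell):
    """Hypothetical loss cell: network outputs first, then both labels."""
    def __init__(self):
        super(_ExampleLossNet, self).__init__()
        # reduction='mean' keeps the combined loss a scalar for the grad op.
        self._loss = SoftmaxCrossEntropyWithLogits(sparse=False, reduction='mean')

    def construct(self, out1, out2, labels1, labels2):
        # Sum of per-branch cross-entropy losses.
        return self._loss(out1, labels1) + self._loss(out2, labels2)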