# Example 1
def test_dp_model():
    """Train LeNet5 for one epoch with Gaussian differential-privacy noise on Ascend.

    Builds a Gaussian noise mechanism via DPOptimizerClassFactory and passes it
    to DPModel as ``dp_mech``; the optimizer itself is a plain SGD.
    """
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")

    # Differential-privacy hyper-parameters.
    l2_norm_bound = 1.0
    initial_noise_multiplier = 0.01

    # Training configuration.
    batch_size = 32
    batches = 128
    epochs = 1

    net = LeNet5()
    loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
    optim = SGD(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)

    # Gaussian mechanism that perturbs gradients during training.
    gaussian_mech = DPOptimizerClassFactory()
    gaussian_mech.set_mechanisms('Gaussian',
                                 norm_bound=l2_norm_bound,
                                 initial_noise_multiplier=initial_noise_multiplier)

    model = DPModel(micro_batches=2,
                    norm_clip=l2_norm_bound,
                    dp_mech=gaussian_mech.mech,
                    network=net,
                    loss_fn=loss,
                    optimizer=optim,
                    metrics=None)

    # Synthetic dataset sized to batch_size * batches samples.
    ms_ds = ds.GeneratorDataset(dataset_generator(batch_size, batches),
                                ['data', 'label'])
    ms_ds.set_dataset_size(batch_size * batches)
    model.train(epochs, ms_ds)
# Example 2
def test_dp_model_pynative_mode():
    """Train LeNet5 in PyNative mode using a factory-built DP Momentum optimizer.

    Here the noise mechanism lives inside the optimizer created by
    ``DPOptimizerClassFactory.create('Momentum')``, so ``mech=None`` is
    passed to DPModel and dataset sinking is disabled.
    """
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")

    # Differential-privacy hyper-parameters.
    norm_clip = 1.0
    initial_noise_multiplier = 0.01
    micro_batches = 2

    # Training configuration.
    batch_size = 32
    batches = 128
    epochs = 1

    network = LeNet5()
    loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)

    # Factory wires the Gaussian mechanism directly into a Momentum optimizer.
    factory_opt = DPOptimizerClassFactory(micro_batches=micro_batches)
    factory_opt.set_mechanisms('Gaussian',
                               norm_bound=norm_clip,
                               initial_noise_multiplier=initial_noise_multiplier)
    net_opt = factory_opt.create('Momentum')(network.trainable_params(),
                                             learning_rate=0.1,
                                             momentum=0.9)

    model = DPModel(micro_batches=micro_batches,
                    norm_clip=norm_clip,
                    mech=None,  # noise is applied inside net_opt
                    network=network,
                    loss_fn=loss,
                    optimizer=net_opt,
                    metrics=None)

    ms_ds = ds.GeneratorDataset(dataset_generator(batch_size, batches),
                                ['data', 'label'])
    ms_ds.set_dataset_size(batch_size * batches)
    model.train(epochs, ms_ds, dataset_sink_mode=False)
# Example 3
def test_optimizer_cpu():
    """Smoke-test constructing a DP SGD optimizer and wrapping it in a Model on CPU."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")

    lr = 0.01
    momentum = 0.9
    micro_batches = 2

    network = LeNet5()
    loss = nn.SoftmaxCrossEntropyWithLogits()

    # Build a Gaussian-noise SGD optimizer from the DP factory.
    gaussian_mech = DPOptimizerClassFactory(micro_batches)
    gaussian_mech.set_mechanisms('Gaussian',
                                 norm_bound=1.5,
                                 initial_noise_multiplier=5.0)
    net_opt = gaussian_mech.create('SGD')(params=network.trainable_params(),
                                          learning_rate=lr,
                                          momentum=momentum)

    # Construction only; the Model instance itself is not used further.
    _ = Model(network, loss_fn=loss, optimizer=net_opt, metrics=None)
# Example 4
    ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet",
                                 directory='./trained_ckpt_file/',
                                 config=config_ck)

    # get training dataset
    ds_train = generate_mnist_dataset(os.path.join(cfg.data_path, "train"),
                                      cfg.batch_size)

    if cfg.micro_batches and cfg.batch_size % cfg.micro_batches != 0:
        raise ValueError(
            "Number of micro_batches should divide evenly batch_size")
    # Create a factory class of DP mechanisms, this method is adding noise in gradients while training.
    # Initial_noise_multiplier is suggested to be greater than 1.0, otherwise the privacy budget would be huge, which
    # means that the privacy protection effect is weak. Mechanisms can be 'Gaussian' or 'AdaGaussian', in which noise
    # would be decayed with 'AdaGaussian' mechanism while be constant with 'Gaussian' mechanism.
    dp_opt = DPOptimizerClassFactory(micro_batches=cfg.micro_batches)
    dp_opt.set_mechanisms(
        cfg.noise_mechanisms,
        norm_bound=cfg.norm_bound,
        initial_noise_multiplier=cfg.initial_noise_multiplier,
        decay_policy=None)
    # Create a factory class of clip mechanisms, this method is to adaptive clip
    # gradients while training, decay_policy support 'Linear' and 'Geometric',
    # learning_rate is the learning rate to update clip_norm,
    # target_unclipped_quantile is the target quantile of norm clip,
    # fraction_stddev is the stddev of Gaussian normal which used in
    # empirical_fraction, the formula is
    # $empirical_fraction + N(0, fraction_stddev)$.
    clip_mech = ClipMechanismsFactory().create(
        cfg.clip_mechanisms,
        decay_policy=cfg.clip_decay_policy,
# Example 5
    network = LeNet5()
    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
    config_ck = CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_steps,
                                 keep_checkpoint_max=cfg.keep_checkpoint_max)
    ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet",
                                 directory='./trained_ckpt_file/',
                                 config=config_ck)

    ds_train = generate_mnist_dataset(os.path.join(args.data_path, "train"),
                                      cfg.batch_size,
                                      cfg.epoch_size)

    if args.micro_batches and cfg.batch_size % args.micro_batches != 0:
        raise ValueError("Number of micro_batches should divide evenly batch_size")
    gaussian_mech = DPOptimizerClassFactory(args.micro_batches)
    gaussian_mech.set_mechanisms('Gaussian',
                                 norm_bound=args.l2_norm_bound,
                                 initial_noise_multiplier=args.initial_noise_multiplier)
    net_opt = gaussian_mech.create('SGD')(params=network.trainable_params(),
                                          learning_rate=cfg.lr,
                                          momentum=cfg.momentum)
    micro_size = int(cfg.batch_size // args.micro_batches)
    rdp_monitor = PrivacyMonitorFactory.create('rdp',
                                               num_samples=60000,
                                               batch_size=micro_size,
                                               initial_noise_multiplier=args.initial_noise_multiplier,
                                               per_print_times=10)
    model = DPModel(micro_batches=args.micro_batches,
                    norm_clip=args.l2_norm_bound,
                    dp_mech=gaussian_mech.mech,