def test_dp_monitor_gpu():
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    batch_size = 16
    batches = 128
    epochs = 1
    # Create an RDP-based privacy monitor that tracks the privacy budget
    # spent on a dataset of 60000 samples.
    rdp = PrivacyMonitorFactory.create(policy='rdp',
                                       num_samples=60000,
                                       batch_size=batch_size,
                                       initial_noise_multiplier=0.4,
                                       noise_decay_rate=6e-3)
    # Ask the monitor how many epochs can be trained before the budget is exhausted.
    suggest_epoch = rdp.max_epoch_suggest()
    LOGGER.info(TAG, 'The recommended maximum training epochs is: %s', suggest_epoch)
    network = Net()
    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
    net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
    model = Model(network, net_loss, net_opt)
    LOGGER.info(TAG, "============== Starting Training ==============")
    ds1 = ds.GeneratorDataset(dataset_generator(batch_size, batches), ["data", "label"])
    # Passing the monitor as a callback makes it report eps/delta during training.
    model.train(epochs, ds1, callbacks=[rdp], dataset_sink_mode=False)
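# The test above assumes a `dataset_generator` helper and a LeNet-style `Net`
# defined elsewhere in the test module. A minimal sketch of the generator
# (shapes, dtypes and the synthetic random data are illustrative assumptions):
import numpy as np

def dataset_generator(batch_size, batches):
    """Yield synthetic 1x32x32 image batches with random 10-class labels."""
    data = np.random.random((batches * batch_size, 1, 32, 32)).astype(np.float32)
    label = np.random.randint(0, 10, batches * batch_size).astype(np.int32)
    for i in range(batches):
        yield (data[i * batch_size:(i + 1) * batch_size],
               label[i * batch_size:(i + 1) * batch_size])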
# Create the noise mechanism. cfg.noise_mechanisms can be 'Gaussian'
# or 'AdaGaussian', in which noise would be decayed with the 'AdaGaussian'
# mechanism while remaining constant with the 'Gaussian' mechanism.
noise_mech = NoiseMechanismsFactory().create(cfg.noise_mechanisms,
                                             norm_bound=cfg.norm_bound,
                                             initial_noise_multiplier=cfg.initial_noise_multiplier,
                                             decay_policy='Exp')
net_opt = nn.Momentum(params=network.trainable_params(),
                      learning_rate=cfg.lr,
                      momentum=cfg.momentum)
# Create a monitor for DP training. The function of the monitor is to
# compute and print the privacy budget (eps and delta) during training.
rdp_monitor = PrivacyMonitorFactory.create('rdp',
                                           num_samples=60000,
                                           batch_size=cfg.batch_size,
                                           initial_noise_multiplier=cfg.initial_noise_multiplier,
                                           per_print_times=234)
# Create the DP model for training.
model = DPModel(micro_batches=cfg.micro_batches,
                norm_bound=cfg.norm_bound,
                noise_mech=noise_mech,
                network=network,
                loss_fn=net_loss,
                optimizer=net_opt,
                metrics={"Accuracy": Accuracy()})
LOGGER.info(TAG, "============== Starting Training ==============")
model.train(cfg['epoch_size'], ds_train,
            callbacks=[ckpoint_cb, LossMonitor(), rdp_monitor],
            dataset_sink_mode=False)  # assumed; the original call was truncated here
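# The snippet above assumes a configuration object `cfg` that supports both
# attribute access (cfg.norm_bound) and item access (cfg['epoch_size']), plus a
# `network`, `net_loss`, `ds_train` and `ckpoint_cb` defined earlier in the script.
# A minimal sketch of such a config using easydict; the values are illustrative
# assumptions, not tuned hyper-parameters:
from easydict import EasyDict as edict

cfg = edict({
    'noise_mechanisms': 'Gaussian',    # or 'AdaGaussian' for decaying noise
    'norm_bound': 1.0,                 # per-sample gradient clipping bound
    'initial_noise_multiplier': 1.5,   # noise stddev = multiplier * norm_bound
    'lr': 0.01,
    'momentum': 0.9,
    'batch_size': 256,                 # 60000 / 256 ~= 234 steps, matching per_print_times
    'micro_batches': 32,               # must divide batch_size evenly
    'epoch_size': 10,
})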