def test_dp_model():
    """Train LeNet5 through DPModel with a Gaussian noise mechanism (Ascend, PyNative mode)."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
    l2_norm_bound = 1.0
    initial_noise_multiplier = 0.01
    net = LeNet5()
    batch_size = 32
    batches = 128
    epochs = 1
    loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
    optim = SGD(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
    gaussian_mech = DPOptimizerClassFactory()
    gaussian_mech.set_mechanisms('Gaussian',
                                 norm_bound=l2_norm_bound,
                                 initial_noise_multiplier=initial_noise_multiplier)
    model = DPModel(micro_batches=2,
                    norm_clip=l2_norm_bound,
                    dp_mech=gaussian_mech.mech,
                    network=net,
                    loss_fn=loss,
                    optimizer=optim,
                    metrics=None)
    ms_ds = ds.GeneratorDataset(dataset_generator(batch_size, batches),
                                ['data', 'label'])
    ms_ds.set_dataset_size(batch_size * batches)
    model.train(epochs, ms_ds)
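# NOTE: the tests in this excerpt reference a `dataset_generator` helper that is
# not shown here. The definition below is only a minimal sketch, assuming the
# helper yields random batches shaped for LeNet5 (1x32x32 inputs, 10 classes);
# the real implementation may differ.
import numpy as np

def dataset_generator(batch_size, batches):
    """Yield `batches` random (data, label) batches sized for LeNet5."""
    data = np.random.random((batches * batch_size, 1, 32, 32)).astype(np.float32)
    label = np.random.randint(0, 10, batches * batch_size).astype(np.int32)
    for i in range(batches):
        yield (data[i * batch_size:(i + 1) * batch_size],
               label[i * batch_size:(i + 1) * batch_size])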
def test_dp_monitor_gpu():
    """Track the privacy budget with an RDP monitor callback while training LeNet5 on GPU."""
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    batch_size = 16
    batches = 128
    epochs = 1
    rdp = PrivacyMonitorFactory.create(policy='rdp',
                                       num_samples=60000,
                                       batch_size=batch_size,
                                       initial_noise_multiplier=0.4,
                                       noise_decay_rate=6e-5)
    suggest_epoch = rdp.max_epoch_suggest()
    LOGGER.info(TAG, 'The recommended maximum number of training epochs is: %s',
                suggest_epoch)
    network = LeNet5()
    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True,
                                                reduction="mean")
    net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
    model = Model(network, net_loss, net_opt)
    LOGGER.info(TAG, "============== Starting Training ==============")
    ds1 = ds.GeneratorDataset(dataset_generator(batch_size, batches),
                              ["data", "label"])
    ds1.set_dataset_size(batch_size * batches)
    model.train(epochs, ds1, callbacks=[rdp], dataset_sink_mode=False)
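# NOTE: LOGGER and TAG are used above but not defined in this excerpt. A
# plausible setup is sketched below, assuming MindArmour's LogUtil is available;
# the tag string is an arbitrary placeholder.
from mindarmour.utils.logger import LogUtil

LOGGER = LogUtil.get_instance()
LOGGER.set_level('INFO')
TAG = 'dp_test'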
def test_optimizer_cpu():
    """Build a differentially private SGD optimizer via DPOptimizerClassFactory and attach it to a Model (CPU, PyNative mode)."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
    network = LeNet5()
    lr = 0.01
    momentum = 0.9
    micro_batches = 2
    loss = nn.SoftmaxCrossEntropyWithLogits()
    gaussian_mech = DPOptimizerClassFactory(micro_batches)
    gaussian_mech.set_mechanisms('Gaussian',
                                 norm_bound=1.5,
                                 initial_noise_multiplier=5.0)
    net_opt = gaussian_mech.create('SGD')(params=network.trainable_params(),
                                          learning_rate=lr,
                                          momentum=momentum)
    _ = Model(network, loss_fn=loss, optimizer=net_opt, metrics=None)
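# NOTE: the test above only constructs the Model. The sketch below is not part
# of the original tests; it shows how the factory-created DP optimizer drops
# into the standard Model training loop, reusing the hypothetical
# `dataset_generator` helper defined earlier.
def train_with_dp_optimizer_sketch():
    """Train LeNet5 with a DP-SGD optimizer through the plain Model API."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
    network = LeNet5()
    loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
    factory = DPOptimizerClassFactory(2)
    factory.set_mechanisms('Gaussian', norm_bound=1.5, initial_noise_multiplier=5.0)
    net_opt = factory.create('SGD')(params=network.trainable_params(),
                                    learning_rate=0.01, momentum=0.9)
    model = Model(network, loss_fn=loss, optimizer=net_opt, metrics=None)
    ms_ds = ds.GeneratorDataset(dataset_generator(16, 8), ['data', 'label'])
    ms_ds.set_dataset_size(16 * 8)
    model.train(1, ms_ds, dataset_sink_mode=False)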
def test_train_and_eval_lenet():
    """Train and evaluate LeNet5 on MNIST (GPU, graph mode)."""
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    network = LeNet5(10)
    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True,
                                                reduction="mean")
    net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
    model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})

    print("============== Starting Training ==============")
    ds_train = create_dataset(os.path.join('/home/workspace/mindspore_dataset/mnist',
                                           "train"), 32, 1)
    model.train(1, ds_train, callbacks=[LossMonitor()], dataset_sink_mode=True)

    print("============== Starting Testing ==============")
    ds_eval = create_dataset(os.path.join('/home/workspace/mindspore_dataset/mnist',
                                          "test"), 32, 1)
    acc = model.eval(ds_eval, dataset_sink_mode=True)
    print("============== Accuracy:{} ==============".format(acc))
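# NOTE: `create_dataset` is not defined in this excerpt. The sketch below
# follows the usual LeNet/MNIST preprocessing pipeline and is only an
# assumption; the transform module paths match older MindSpore releases and may
# need adjusting for newer versions.
import mindspore.dataset.transforms.c_transforms as C
import mindspore.dataset.transforms.vision.c_transforms as CV
from mindspore.common import dtype as mstype

def create_dataset(data_path, batch_size=32, repeat_size=1):
    """Load MNIST, resize to 32x32, rescale, batch and repeat for LeNet5."""
    mnist_ds = ds.MnistDataset(data_path)
    mnist_ds = mnist_ds.map(input_columns="label",
                            operations=C.TypeCast(mstype.int32))
    mnist_ds = mnist_ds.map(input_columns="image",
                            operations=[CV.Resize((32, 32)),
                                        CV.Rescale(1.0 / 255.0, 0.0),
                                        CV.HWC2CHW()])
    mnist_ds = mnist_ds.shuffle(buffer_size=10000)
    mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)
    mnist_ds = mnist_ds.repeat(repeat_size)
    return mnist_ds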
type=str, default="", help='if mode is test, must provide\ path where the trained ckpt file') parser.add_argument('--dataset_sink_mode', type=bool, default=False, help='dataset_sink_mode is False or True') args = parser.parse_args() context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target, enable_mem_reuse=False) network = LeNet5(cfg.num_classes) net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean") repeat_size = cfg.epoch_size net_opt = nn.Momentum(network.trainable_params(), cfg.lr, cfg.momentum) config_ck = CheckpointConfig( save_checkpoint_steps=cfg.save_checkpoint_steps, keep_checkpoint_max=cfg.keep_checkpoint_max) ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet", config=config_ck) model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()}) print("============== Starting Testing ==============") param_dict = load_checkpoint(args.ckpt_path) load_param_into_net(network, param_dict) ds_eval = create_dataset(os.path.join(args.data_path, "test"),