Example #1
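    # Optional LR schedulers (left disabled): StepLR with step_size=1 and
    # gamma=0.98 would decay each optimizer's learning rate by 2% per step.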
    # g_scheduler = torch.optim.lr_scheduler.StepLR(
    #     g_optim, step_size=1, gamma=0.98)
    # d_scheduler = torch.optim.lr_scheduler.StepLR(
    #     d_optim, step_size=1, gamma=0.98)

    epoch = 0
    idx_iter = 0

    # Record the generator's initial eta so its trajectory can be tracked over training.
    lst_eta = [generator.get_numpy_eta()]
    while True:
        idx_iter += 1

        # args.kappa may be a per-epoch schedule ({epoch: kappa}); apply and
        # consume each entry the first time its epoch is reached.
        if isinstance(args.kappa, dict) and epoch in args.kappa:
            discriminator.kappa = args.kappa.pop(epoch)
            print("Set kappa to {}".format(discriminator.kappa))

        # XXX: note that training does not stop exactly at the end of the epoch

        lst_d_loss, lst_g_loss = train_one_round(
            loss_obj,
            discriminator,
            generator,
            d_optim,
            g_optim,
            data_loader_iter,
            noise_generator,
            fake_batch_size=args.fake_batch_size,
            device=None,