Code Example #1
def run_target(args):
    for i in range(len(args.config["domains"])):
        if i == args.s_da_i:
            continue
        args.t_da_i = i
        args.loaders = factory.LoaderFactory(args)
        add_loaders(args)

        args.output_dir_src = osp.join(
            args.output, args.d_name,
            args.config["domains"][args.s_da_i][0].upper())
        args.name = (args.config["domains"][args.s_da_i][0].upper() +
                     args.config["domains"][args.t_da_i][0].upper())
        args.output_dir = osp.join(args.output, args.d_name, args.name)
        args.writer = SummaryWriter(args.output_dir + "/runs")
        print("Task: {}".format(args.name))

        # Create the output directory (and any parents) if it does not exist.
        os.makedirs(args.output_dir, exist_ok=True)

        args.savename = 'par_' + str(args.cls_par)
        args.out_file = open(
            osp.join(args.output_dir, 'log_' + args.savename + '.txt'), 'w')
        args.out_file.write(print_args(args) + '\n')
        args.out_file.flush()

        train_target(args)
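
A quick check of the task naming above (a sketch; it assumes args.config["domains"] holds lower-case domain names, e.g. hypothetical Office-31 splits):

# Source "amazon" + target "caltech" -> task name "AC".
config = {"domains": ["amazon", "caltech", "dslr", "webcam"]}
s_da_i, t_da_i = 0, 1
name = config["domains"][s_da_i][0].upper() + config["domains"][t_da_i][0].upper()
print(name)  # AC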
Code Example #2
def run_source(args):
    args.name_src = args.config["domains"][args.s_da_i][0].upper()
    if not osp.exists(args.output_dir_src):
        os.makedirs(args.output_dir_src)

    args.out_file = open(osp.join(args.output_dir_src, 'log.txt'), 'w')
    args.out_file.write(print_args(args) + '\n')
    args.out_file.flush()

    args.t_da_i = 1
    args.loaders = factory.LoaderFactory(args)
    netF, netB, netC = train_source(args)

    args.out_file = open(osp.join(args.output_dir_src, 'log_test.txt'), 'w')
    netF.eval()
    netB.eval()
    netC.eval()
    for i in range(len(args.config["domains"])):
        if i == args.s_da_i:
            continue
        args.t_da_i = i
        args.loaders = factory.LoaderFactory(args)
        args.name = (args.config["domains"][args.s_da_i][0].upper() +
                     args.config["domains"][args.t_da_i][0].upper())
        if args.d_name != "visdac":
            acc = object_cal_acc(args.loaders["test"],
                                 netF,
                                 netB,
                                 netC,
                                 flag=False)
            log_str = '\nTraining: Task: {}, Accuracy = {:.2f}%'.format(
                args.name, acc)
        else:
            mean_acc, all_acc = object_cal_acc(args.loaders["test"],
                                               netF,
                                               netB,
                                               netC,
                                               flag=True)
            log_str = '\nTraining: Task: {}, Accuracy = {:.2f}% // {}'.format(
                args.name, mean_acc, all_acc)
        args.out_file.write(log_str)
        args.out_file.flush()
        print(log_str)
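
A minimal sketch of the VisDA-C log line above, assuming object_cal_acc(..., flag=True) returns the mean accuracy plus a per-class summary string (the values below are made up):

mean_acc, all_acc = 83.1, 'plane:95.2 bcycl:80.4 bus:77.8'
log_str = '\nTraining: Task: {}, Accuracy = {:.2f}% // {}'.format('SV', mean_acc, all_acc)
print(log_str)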
Code Example #3
File: test.py Project: PeterZhouSZ/MASA-SR
def main():
    parser = argparse.ArgumentParser(description='referenceSR Testing')
    parser.add_argument('--random_seed', default=0, type=int)
    parser.add_argument('--name',
                        default='test_masa_rec_TestSet_multi',
                        type=str)
    parser.add_argument('--phase', default='test', type=str)

    ## device setting
    parser.add_argument('--gpu_ids',
                        type=str,
                        default='0',
                        help='gpu ids, e.g. "0", "0,1,2", or "0,2"; use -1 for CPU')
    parser.add_argument('--launcher',
                        choices=['none', 'pytorch'],
                        default='none',
                        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)

    ## network setting
    parser.add_argument('--net_name', default='MASA', type=str, help='')
    parser.add_argument('--sr_scale', default=4, type=int)
    parser.add_argument('--input_nc', default=3, type=int)
    parser.add_argument('--output_nc', default=3, type=int)
    parser.add_argument('--nf', default=64, type=int)
    parser.add_argument('--n_blks', default='4, 4, 4', type=str)
    parser.add_argument('--nf_ctt', default=32, type=int)
    parser.add_argument('--n_blks_ctt', default='2, 2, 2', type=str)
    parser.add_argument('--num_nbr', default=1, type=int)
    parser.add_argument('--n_blks_dec', default=10, type=int)
    parser.add_argument('--ref_level', default=1, type=int)

    ## dataloader setting
    parser.add_argument('--data_root',
                        default='/home/liyinglu/newData/datasets/SR/',
                        type=str)
    parser.add_argument('--dataset', default='CUFED', type=str, help='CUFED')
    parser.add_argument('--crop_size', default=256, type=int)
    parser.add_argument('--batch_size', default=1, type=int)
    parser.add_argument('--num_workers', default=4, type=int)
    # argparse's type=bool treats any non-empty string as True, so use a flag.
    parser.add_argument('--data_augmentation', action='store_true')

    parser.add_argument('--resume',
                        default='./pretrained_weights/masa_rec.pth',
                        type=str)
    parser.add_argument('--testset',
                        default='TestSet_multi',
                        type=str,
                        help='Sun80 | Urban100 | TestSet_multi')
    parser.add_argument('--save_folder', default='./test_results/', type=str)

    ## setup training environment
    args = parser.parse_args()

    ## setup training device
    str_ids = args.gpu_ids.split(',')
    args.gpu_ids = []
    for str_id in str_ids:
        gpu_id = int(str_id)  # avoid shadowing the id() builtin
        if gpu_id >= 0:
            args.gpu_ids.append(gpu_id)
    if len(args.gpu_ids) > 0:
        torch.cuda.set_device(args.gpu_ids[0])

    #### distributed training settings
    if args.launcher == 'none':  # disabled distributed training
        args.dist = False
        args.rank = -1
        print('Disabled distributed training.')
    else:
        args.dist = True
        init_dist()
        args.world_size = torch.distributed.get_world_size()
        args.rank = torch.distributed.get_rank()

    args.save_folder = os.path.join(args.save_folder, args.testset, args.name)
    if not os.path.exists(args.save_folder):
        os.makedirs(args.save_folder)
    log_file_path = os.path.join(args.save_folder,
                                 time.strftime('%Y%m%d_%H%M%S') + '.log')
    setup_logger(log_file_path)

    print_args(args)
    cudnn.benchmark = True

    ## test model
    trainer = Trainer(args)
    trainer.test()
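
The gpu_ids parsing above can be condensed; an equivalent sketch (the is_available() guard is an extra safety check, not in the original):

import torch

gpu_ids = [int(s) for s in '0,1'.split(',') if int(s) >= 0]
if gpu_ids and torch.cuda.is_available():
    torch.cuda.set_device(gpu_ids[0])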
Code Example #4

if __name__ == '__main__':
    args = get_args()
    set_random_seed(args.seed)

    loss_list = alg_loss_dict(args)
    train_loaders, eval_loaders = get_img_dataloader(args)
    eval_name_dict = train_valid_target_eval_names(args)
    algorithm_class = alg.get_algorithm_class(args.algorithm)
    algorithm = algorithm_class(args).cuda()
    algorithm.train()
    opt = get_optimizer(algorithm, args)
    sch = get_scheduler(opt, args)

    s = print_args(args, [])
    print('=======hyper-parameter used========')
    print(s)
    acc_record = {}
    acc_type_list = ['train', 'valid', 'target']
    train_minibatches_iterator = zip(*train_loaders)
    best_valid_acc, target_acc = 0, 0
    print('===========start training===========')
    sss = time.time()
    for epoch in range(args.max_epoch):
        for iter_num in range(args.steps_per_epoch):
            minibatches_device = list(next(train_minibatches_iterator))
            if args.algorithm == 'VREx' and algorithm.update_count == args.anneal_iters:
                opt = get_optimizer(algorithm, args)
                sch = get_scheduler(opt, args)
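
A sketch of the zip(*train_loaders) pattern above: each next() call yields one minibatch per training loader. Plain lists stand in for DataLoaders here; note that zip stops at the shortest iterator, so the real loaders are typically infinite samplers:

loader_a = [('a0', 0), ('a1', 1)]
loader_b = [('b0', 0), ('b1', 1)]
train_minibatches_iterator = zip(loader_a, loader_b)
minibatches_device = list(next(train_minibatches_iterator))
print(minibatches_device)  # [('a0', 0), ('b0', 0)] -- one batch per loader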
Code Example #5
File: train.py Project: PeterZhouSZ/MASA-SR
def main():
    warnings.filterwarnings("ignore")
    parser = argparse.ArgumentParser(description='referenceSR Training')
    parser.add_argument('--random_seed', default=0, type=int)
    parser.add_argument('--name', default='train_masa', type=str)
    parser.add_argument('--phase', default='train', type=str)

    ## device setting
    parser.add_argument('--gpu_ids',
                        type=str,
                        default='0',
                        help='gpu ids, e.g. "0", "0,1,2", or "0,2"; use -1 for CPU')
    parser.add_argument('--launcher',
                        choices=['none', 'pytorch'],
                        default='none',
                        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)

    ## network setting
    parser.add_argument('--net_name',
                        default='MASA',
                        type=str,
                        help='RefNet | Baseline')
    parser.add_argument('--sr_scale', default=4, type=int)
    parser.add_argument('--input_nc', default=3, type=int)
    parser.add_argument('--output_nc', default=3, type=int)
    parser.add_argument('--nf', default=64, type=int)
    parser.add_argument('--n_blks', default='4, 4, 4', type=str)
    parser.add_argument('--nf_ctt', default=32, type=int)
    parser.add_argument('--n_blks_ctt', default='2, 2, 2', type=str)
    parser.add_argument('--num_nbr', default=1, type=int)
    parser.add_argument('--n_blks_dec', default=10, type=int)

    ## dataloader setting
    parser.add_argument('--data_root',
                        default='/home/liyinglu/newData/datasets/SR/',
                        type=str)
    parser.add_argument('--dataset', default='CUFED', type=str, help='CUFED')
    parser.add_argument('--testset',
                        default='TestSet',
                        type=str,
                        help='TestSet')
    parser.add_argument('--save_test_root', default='generated', type=str)
    parser.add_argument('--crop_size', default=256, type=int)
    parser.add_argument('--batch_size', default=9, type=int)
    parser.add_argument('--num_workers', default=4, type=int)
    parser.add_argument('--multi_scale', action='store_true')
    parser.add_argument('--data_augmentation', action='store_true')

    ## optim setting
    parser.add_argument('--lr', default=1e-4, type=float)
    parser.add_argument('--lr_D', default=1e-4, type=float)
    parser.add_argument('--weight_decay', default=0, type=float)
    parser.add_argument('--start_iter', default=0, type=int)
    parser.add_argument('--max_iter', default=500, type=int)

    parser.add_argument('--loss_l1', action='store_true')
    parser.add_argument('--loss_mse', action='store_true')
    parser.add_argument('--loss_perceptual', action='store_true')
    parser.add_argument('--loss_adv', action='store_true')
    parser.add_argument('--gan_type', default='WGAN_GP', type=str)

    parser.add_argument('--lambda_l1', default=1, type=float)
    parser.add_argument('--lambda_mse', default=1, type=float)
    parser.add_argument('--lambda_perceptual', default=1, type=float)
    parser.add_argument('--lambda_adv', default=5e-3, type=float)

    parser.add_argument('--resume', default='', type=str)
    parser.add_argument('--resume_optim', default='', type=str)
    parser.add_argument('--resume_scheduler', default='', type=str)

    ## log setting
    parser.add_argument('--log_freq', default=10, type=int)
    parser.add_argument('--vis_freq', default=50000, type=int)  #50000
    parser.add_argument('--save_epoch_freq', default=10, type=int)  #100
    parser.add_argument('--test_freq', default=100, type=int)  #100
    parser.add_argument('--save_folder', default='./weights', type=str)
    parser.add_argument('--vis_step_freq', default=100, type=int)
    parser.add_argument('--use_tb_logger', action='store_true')
    parser.add_argument('--save_test_results', action='store_true')

    ## for evaluate
    parser.add_argument('--ref_level', default=1, type=int)

    ## setup training environment
    args = parser.parse_args()
    set_random_seed(args.random_seed)

    ## setup training device
    str_ids = args.gpu_ids.split(',')
    args.gpu_ids = []
    for str_id in str_ids:
        gpu_id = int(str_id)  # avoid shadowing the id() builtin
        if gpu_id >= 0:
            args.gpu_ids.append(gpu_id)
    if len(args.gpu_ids) > 0:
        torch.cuda.set_device(args.gpu_ids[0])

    #### distributed training settings
    if args.launcher == 'none':  # disabled distributed training
        args.dist = False
        args.rank = -1
        print('Disabled distributed training.')
    else:
        args.dist = True
        init_dist()
        args.world_size = torch.distributed.get_world_size()
        args.rank = torch.distributed.get_rank()

    args.save_folder = os.path.join(args.save_folder, args.name)
    args.vis_save_dir = os.path.join(args.save_folder, 'vis')
    args.snapshot_save_dir = os.path.join(args.save_folder, 'snapshot')
    log_file_path = os.path.join(args.save_folder,
                                 time.strftime('%Y%m%d_%H%M%S') + '.log')

    if args.rank <= 0:
        if not os.path.exists(args.vis_save_dir):
            os.makedirs(args.vis_save_dir)
        if not os.path.exists(args.snapshot_save_dir):
            os.makedirs(args.snapshot_save_dir)
        setup_logger(log_file_path)

    print_args(args)

    cudnn.benchmark = True

    ## train model
    trainer = Trainer(args)
    trainer.train()
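
In the distributed branch above, filesystem setup is guarded by args.rank <= 0, so only one process (rank 0, or rank -1 when distributed training is disabled) creates directories and the logger. The same guard in isolation (paths are hypothetical):

import os

rank = -1  # -1: non-distributed; 0: first process of a DDP job
for d in ['./weights/train_masa/vis', './weights/train_masa/snapshot']:
    if rank <= 0:
        os.makedirs(d, exist_ok=True)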
Code Example #6
def main():
    # For fast training
    cudnn.benchmark = True

    parser = argparse.ArgumentParser()
    # GPU option
    parser.add_argument('--gpu_id', type=int, default=0)
    # Dataset options
    parser.add_argument('--dataset', type=str, default='cifar10to5')
    parser.add_argument('--dataroot', type=str, default='data')
    parser.add_argument('--data_seed', type=int, default=12345)
    parser.add_argument('--num_workers', type=int, default=4)
    # Model options
    parser.add_argument('--latent_dim', type=int, default=128)
    parser.add_argument('--image_size', type=int, default=32)
    parser.add_argument('--g_channels', type=int, default=128)
    parser.add_argument('--g_spectral_norm', type=int, default=0)
    parser.add_argument('--d_channels', type=int, default=128)
    parser.add_argument('--d_dropout', type=int, default=1)
    parser.add_argument('--d_spectral_norm', type=int, default=0)
    parser.add_argument('--d_pooling', type=str, default='mean')
    # Training options
    parser.add_argument('--trainer', type=str, default='cpgan')
    parser.add_argument('--gan_loss', type=str, default='wgan')
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--g_bs_multiple', type=int, default=2)
    parser.add_argument('--g_lr', type=float, default=2e-4)
    parser.add_argument('--d_lr', type=float, default=2e-4)
    parser.add_argument('--beta1', type=float, default=0.)
    parser.add_argument('--beta2', type=float, default=0.9)
    parser.add_argument('--num_critic', type=int, default=5)
    parser.add_argument('--lambda_gp', type=float, default=10.)
    parser.add_argument('--lambda_ct', type=float, default=0.)
    parser.add_argument('--lambda_cls_g', type=float, default=0.4)
    parser.add_argument('--lambda_cls_d', type=float, default=1.)
    parser.add_argument('--factor_m', type=float, default=0.)
    parser.add_argument('--num_iterations', type=int, default=100000)
    parser.add_argument('--num_iterations_decay', type=int, default=100000)
    # Output options
    parser.add_argument('--out', type=str, default='outputs')
    parser.add_argument('--display_interval', type=int, default=100)
    parser.add_argument('--snapshot_interval', type=int, default=5000)
    parser.add_argument('--visualize_interval', type=int, default=5000)
    parser.add_argument('--num_samples', type=int, default=10)
    parser.add_argument('--eval_batch_size', type=int, default=128)
    args = parser.parse_args()
    args.g_spectral_norm = bool(args.g_spectral_norm)
    args.d_dropout = bool(args.d_dropout)
    args.d_spectral_norm = bool(args.d_spectral_norm)

    # Set up GPU
    if torch.cuda.is_available() and args.gpu_id >= 0:
        device = torch.device('cuda:%d' % args.gpu_id)
    else:
        device = torch.device('cpu')

    # Set up dataset
    if args.dataset == 'cifar10':
        args.num_classes = 10
        Dataset = torchvision.datasets.CIFAR10
        args.vis_label_list = [[0], [1], [2], [3], [4], [5], [6], [7], [8],
                               [9]]
    elif args.dataset == 'cifar10to5':
        args.num_classes = 5
        Dataset = functools.partial(datasets.CIFAR10to5, seed=args.data_seed)
        args.vis_label_list = [[0], [0, 1], [1], [1, 2], [2], [2, 3], [3],
                               [3, 4], [4], [4, 0]]
    elif args.dataset == 'cifar7to3':
        args.num_classes = 3
        Dataset = functools.partial(datasets.CIFAR7to3, seed=args.data_seed)
        args.vis_label_list = [[0], [0, 1], [1], [1, 2], [2], [2, 0],
                               [0, 1, 2]]
    else:
        raise ValueError('Unknown dataset: {}'.format(args.dataset))

    def normalize(x):
        # Map [0, 1] pixel values to roughly [-1, 1] and add uniform
        # dequantization noise of one 256-level bin (width 1/128 here).
        x = 2 * ((x * 255. / 256.) - .5)
        x += torch.zeros_like(x).uniform_(0, 1. / 128)
        return x

    dataset = Dataset(root=args.dataroot,
                      train=True,
                      download=True,
                      transform=transforms.Compose([
                          transforms.ToTensor(),
                          transforms.Lambda(normalize)
                      ]))

    iterator = util.InfDataLoader(dataset,
                                  batch_size=args.batch_size,
                                  num_workers=args.num_workers,
                                  shuffle=True,
                                  drop_last=True)

    # Set up output
    if not os.path.exists(args.out):
        os.makedirs(args.out)
    util.print_args(args, os.path.join(args.out, 'args.txt'))

    # Set up models
    g_params = {
        'latent_dim': args.latent_dim,
        'num_classes': args.num_classes,
        'channels': args.g_channels,
        'image_size': args.image_size,
        'spectral_norm': args.g_spectral_norm
    }
    d_params = {
        'num_classes': args.num_classes,
        'channels': args.d_channels,
        'dropout': args.d_dropout,
        'spectral_norm': args.d_spectral_norm,
        'pooling': args.d_pooling
    }
    netG = resnet.Generator(**g_params)
    netD = resnet.ACGANDiscriminator(**d_params)

    util.save_params(g_params, os.path.join(args.out, 'netG_params.pkl'))
    util.save_params(d_params, os.path.join(args.out, 'netD_params.pkl'))
    netG.to(device)
    netD.to(device)
    netG.apply(common.weights_init)
    netD.apply(common.weights_init)
    util.print_network(netG, 'G', os.path.join(args.out, 'netG_arch.txt'))
    util.print_network(netD, 'D', os.path.join(args.out, 'netD_arch.txt'))

    # Set up optimizers
    optimizerG = optim.Adam(netG.parameters(),
                            lr=args.g_lr,
                            betas=(args.beta1, args.beta2))
    optimizerD = optim.Adam(netD.parameters(),
                            lr=args.d_lr,
                            betas=(args.beta1, args.beta2))

    # Set up learning rate schedulers
    def lr_lambda(iteration):
        if args.num_iterations_decay > 0:
            decay_start = args.num_iterations - args.num_iterations_decay
            lr = 1.0 - max(0, iteration + 1 - decay_start) / float(
                args.num_iterations_decay)
        else:
            lr = 1.0
        return lr

    lr_schedulerG = optim.lr_scheduler.LambdaLR(optimizerG,
                                                lr_lambda=lr_lambda)
    lr_schedulerD = optim.lr_scheduler.LambdaLR(optimizerD,
                                                lr_lambda=lr_lambda)

    # Set up trainer
    trainer_params = {
        'iterator': iterator,
        'models': (netG, netD),
        'optimizers': (optimizerG, optimizerD),
        'gan_loss': args.gan_loss,
        'lr_schedulers': (lr_schedulerG, lr_schedulerD),
        'batch_size': args.batch_size,
        'g_bs_multiple': args.g_bs_multiple,
        'num_critic': args.num_critic,
        'factor_m': args.factor_m,
        'device': device
    }
    if args.trainer == 'acgan':
        Trainer = trainers.ACGANTrainer
        trainer_params.update({
            'lambdas': (args.lambda_gp, args.lambda_ct, args.lambda_cls_g,
                        args.lambda_cls_d)
        })
    elif args.trainer == 'cpgan':
        Trainer = trainers.CPGANTrainer
        trainer_params.update({
            'lambdas': (args.lambda_gp, args.lambda_ct, args.lambda_cls_g,
                        args.lambda_cls_d)
        })
    else:
        raise ValueError('Unknown trainer: {}'.format(args.trainer))
    trainer = Trainer(**trainer_params)

    # Set up visualizer and logger
    visualizer = Visualizer(netG, args.vis_label_list, device, args.out,
                            args.num_samples, args.eval_batch_size)
    logger = Logger(args.out, 'loss')

    # Train
    while trainer.iteration < args.num_iterations:
        iter_start_time = time.time()
        trainer.update()

        if (args.display_interval > 0
                and trainer.iteration % args.display_interval == 0):
            t = (time.time() - iter_start_time) / args.batch_size
            logger.log(trainer.iteration, trainer.get_current_loss(), t)

        if (args.snapshot_interval > 0
                and trainer.iteration % args.snapshot_interval == 0):
            torch.save(
                netG.state_dict(),
                os.path.join(args.out, 'netG_iter_%d.pth' % trainer.iteration))
            torch.save(
                netD.state_dict(),
                os.path.join(args.out, 'netD_iter_%d.pth' % trainer.iteration))

        if (args.visualize_interval > 0
                and trainer.iteration % args.visualize_interval == 0):
            visualizer.visualize(trainer.iteration)
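
A worked check of the lr_lambda schedule used above, with small hypothetical numbers: the multiplier stays at 1.0 until the decay window opens, then falls linearly to 0.

num_iterations, num_iterations_decay = 10, 5

def lr_lambda(iteration):
    if num_iterations_decay > 0:
        decay_start = num_iterations - num_iterations_decay
        return 1.0 - max(0, iteration + 1 - decay_start) / float(num_iterations_decay)
    return 1.0

print([round(lr_lambda(i), 1) for i in range(10)])
# [1.0, 1.0, 1.0, 1.0, 1.0, 0.8, 0.6, 0.4, 0.2, 0.0]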
Code Example #7
File: continuer.py Project: ChiragCD/NR-GAN
def main():
    parser = argparse.ArgumentParser()
    # GPU option
    parser.add_argument('--gpu_id', type=int, default=0)
    # argparse's type=bool treats any non-empty string as True, so use a flag.
    parser.add_argument('--semi_trained', action='store_true')
    parser.add_argument('--semi_trained_image_gen', type=str, default=None)
    parser.add_argument('--semi_trained_noise_gen', type=str, default=None)
    parser.add_argument('--semi_trained_discrim', type=str, default=None)
    parser.add_argument('--semi_trained_current_iters', type=int, default=None)
    # Dataset options
    parser.add_argument('--dataset', type=str, default='CIFAR10AG')
    parser.add_argument('--dataroot', type=str, default='data')
    parser.add_argument('--noise_scale', type=float, default=25.)
    parser.add_argument('--noise_scale_high', type=float, default=None)
    parser.add_argument('--patch_size', type=int, default=16)
    parser.add_argument('--patch_max_size', type=int, default=None)
    parser.add_argument('--noise_scale_list',
                        type=float,
                        nargs='*',
                        default=[15, 25, 50])
    parser.add_argument('--mixture_rate_list',
                        type=float,
                        nargs='*',
                        default=[0.7, 0.2, 0.1])
    parser.add_argument('--kernel_size', type=int, default=5)
    parser.add_argument('--noise_lam', type=float, default=30.)
    parser.add_argument('--noise_lam_high', type=float, default=None)
    parser.add_argument('--multi_noise_scale', type=float, default=25.)
    parser.add_argument('--multi_noise_scale_high', type=float, default=None)
    parser.add_argument('--no_clip', action='store_false', dest='clip')
    parser.add_argument('--data_seed', type=int, default=0)
    parser.add_argument('--num_workers', type=int, default=4)
    # Model options
    parser.add_argument('--model_seed', type=int, default=0)
    parser.add_argument('--gn_train', action='store_true')
    parser.add_argument('--g_latent_dim', type=int, default=128)
    parser.add_argument('--g_image_size', type=int, default=32)
    parser.add_argument('--g_image_channels', type=int, default=3)
    parser.add_argument('--g_channels', type=int, default=128)
    parser.add_argument('--g_residual_factor', type=float, default=0.1)
    parser.add_argument('--d_channels', type=int, default=128)
    parser.add_argument('--d_residual_factor', type=float, default=0.1)
    parser.add_argument('--d_pooling', type=str, default='mean')
    # Measure option
    parser.add_argument('--noise_measure', action='store_true')
    # Training options
    parser.add_argument('--deterministic', action='store_true')
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--g_bs_multiple', type=int, default=2)
    parser.add_argument('--g_lr', type=float, default=2e-4)
    parser.add_argument('--d_lr', type=float, default=2e-4)
    parser.add_argument('--beta1', type=float, default=0.)
    parser.add_argument('--beta2', type=float, default=0.99)
    parser.add_argument('--num_critic', type=int, default=1)
    parser.add_argument('--lambda_r1', type=float, default=10.)
    parser.add_argument('--lambda_ds', type=float, default=0.02)
    parser.add_argument('--g_no_model_average',
                        action='store_false',
                        dest='g_model_average')
    parser.add_argument('--model_average_beta', type=float, default=0.999)
    parser.add_argument('--implicit', action='store_true')
    parser.add_argument('--prior', type=str, default=None)
    parser.add_argument('--rotation', action='store_true')
    parser.add_argument('--channel_shuffle', action='store_true')
    parser.add_argument('--color_inversion', action='store_true')
    parser.add_argument('--blurvh', action='store_true')
    parser.add_argument('--num_iterations', type=int, default=200000)
    parser.add_argument('--num_iterations_decay', type=int, default=0)
    # Output options
    parser.add_argument('--out', type=str, default='outputs')
    parser.add_argument('--display_interval', type=int, default=100)
    parser.add_argument('--snapshot_interval', type=int, default=5000)
    parser.add_argument('--visualize_interval', type=int, default=5000)
    parser.add_argument('--num_columns', type=int, default=10)
    args = parser.parse_args()

    # Set up options
    args.image_range = (-1, 1)
    if args.clip:
        args.clip_range = args.image_range
    else:
        args.clip_range = None

    def normalize(x):
        # Map [0, 1] pixel values to roughly [-1, 1] and add uniform
        # dequantization noise of one 256-level bin (width 1/128 here).
        x = 2 * ((x * 255. / 256.) - .5)
        x += torch.zeros_like(x).uniform_(0, 1. / 128)
        return x

    # Set up GPU
    if torch.cuda.is_available() and args.gpu_id >= 0:
        device = torch.device('cuda:%d' % args.gpu_id)
    else:
        device = torch.device('cpu')

    # Set up dataset
    if args.dataset == 'CIFAR10':
        Dataset = torchvision.datasets.CIFAR10
    elif args.dataset == 'CIFAR10AG':
        Dataset = functools.partial(datasets.CIFAR10AdditiveGaussianNoise,
                                    noise_scale=args.noise_scale,
                                    noise_scale_high=args.noise_scale_high,
                                    clip=args.clip,
                                    seed=args.data_seed)
    elif args.dataset == 'CIFAR10LG':
        Dataset = functools.partial(datasets.CIFAR10LocalGaussianNoise,
                                    noise_scale=args.noise_scale,
                                    patch_size=args.patch_size,
                                    noise_scale_high=args.noise_scale_high,
                                    patch_max_size=args.patch_max_size,
                                    clip=args.clip,
                                    seed=args.data_seed)
    elif args.dataset == 'CIFAR10U':
        Dataset = functools.partial(datasets.CIFAR10UniformNoise,
                                    noise_scale=args.noise_scale,
                                    noise_scale_high=args.noise_scale_high,
                                    clip=args.clip,
                                    seed=args.data_seed)
    elif args.dataset == 'CIFAR10MIX':
        Dataset = functools.partial(datasets.CIFAR10MixtureNoise,
                                    noise_scale_list=args.noise_scale_list,
                                    mixture_rate_list=args.mixture_rate_list,
                                    clip=args.clip,
                                    seed=args.data_seed)
    elif args.dataset == 'CIFAR10BG':
        Dataset = functools.partial(datasets.CIFAR10BrownGaussianNoise,
                                    noise_scale=args.noise_scale,
                                    noise_scale_high=args.noise_scale_high,
                                    kernel_size=args.kernel_size,
                                    clip=args.clip,
                                    seed=args.data_seed)
    elif args.dataset == 'CIFAR10ABG':
        Dataset = functools.partial(datasets.CIFAR10AdditiveBrownGaussianNoise,
                                    noise_scale=args.noise_scale,
                                    noise_scale_high=args.noise_scale_high,
                                    kernel_size=args.kernel_size,
                                    clip=args.clip,
                                    seed=args.data_seed)
    elif args.dataset == 'CIFAR10MG':
        Dataset = functools.partial(
            datasets.CIFAR10MultiplicativeGaussianNoise,
            multi_noise_scale=args.multi_noise_scale,
            multi_noise_scale_high=args.multi_noise_scale_high,
            clip=args.clip,
            seed=args.data_seed)
    elif args.dataset == 'CIFAR10AMG':
        Dataset = functools.partial(
            datasets.CIFAR10AdditiveMultiplicativeGaussianNoise,
            noise_scale=args.noise_scale,
            multi_noise_scale=args.multi_noise_scale,
            noise_scale_high=args.noise_scale_high,
            multi_noise_scale_high=args.multi_noise_scale_high,
            clip=args.clip,
            seed=args.data_seed)
    elif args.dataset == 'CIFAR10P':
        Dataset = functools.partial(datasets.CIFAR10PoissonNoise,
                                    noise_lam=args.noise_lam,
                                    noise_lam_high=args.noise_lam_high,
                                    clip=args.clip,
                                    seed=args.data_seed)
    elif args.dataset == 'CIFAR10PG':
        Dataset = functools.partial(datasets.CIFAR10PoissonGaussianNoise,
                                    noise_lam=args.noise_lam,
                                    noise_scale=args.noise_scale,
                                    noise_lam_high=args.noise_lam_high,
                                    noise_scale_high=args.noise_scale_high,
                                    clip=args.clip,
                                    seed=args.data_seed)
    else:
        raise ValueError('Unknown dataset: {}'.format(args.dataset))

    dataset = Dataset(root=args.dataroot,
                      train=True,
                      download=True,
                      transform=transforms.Compose([
                          transforms.ToTensor(),
                          transforms.Lambda(normalize)
                      ]))

    iterator = util.InfDataLoader(dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.num_workers,
                                  pin_memory=True,
                                  drop_last=True)

    # Set up output
    if not os.path.exists(args.out):
        os.makedirs(args.out)

    # Set up models
    if args.model_seed >= 0:
        torch.manual_seed(args.model_seed)
    if args.deterministic:
        cudnn.deterministic = True
        cudnn.benchmark = False
    else:
        cudnn.benchmark = True

    g_params = {
        'latent_dim': args.g_latent_dim,
        'image_size': args.g_image_size,
        'image_channels': args.g_image_channels,
        'channels': args.g_channels,
        'residual_factor': args.g_residual_factor
    }
    netG = net.Generator(**g_params)
    util.save_params(g_params, os.path.join(args.out, 'netG_params.pkl'))
    netG.to(device)
    netG.apply(common.weights_init)
    util.print_network(netG, 'G', os.path.join(args.out, 'netG_arch.txt'))
    if args.g_model_average:
        netG_test = copy.deepcopy(netG)
    else:
        netG_test = netG

    if args.blurvh:
        args.d_image_channels = args.g_image_channels * 2
    else:
        args.d_image_channels = args.g_image_channels
    d_params = {
        'image_channels': args.d_image_channels,
        'channels': args.d_channels,
        'residual_factor': args.d_residual_factor,
        'pooling': args.d_pooling
    }
    netD = net.Discriminator(**d_params)
    util.save_params(d_params, os.path.join(args.out, 'netD_params.pkl'))
    netD.to(device)
    netD.apply(common.weights_init)
    util.print_network(netD, 'D', os.path.join(args.out, 'netD_arch.txt'))

    if args.gn_train:
        if args.implicit:
            args.gn_latent_dim = args.g_latent_dim * 2
        else:
            args.gn_latent_dim = args.g_latent_dim
        gn_params = {
            'latent_dim': args.gn_latent_dim,
            'image_size': args.g_image_size,
            'image_channels': args.g_image_channels,
            'channels': args.g_channels,
            'residual_factor': args.g_residual_factor
        }
        netGn = net.Generator(**gn_params)
        util.save_params(gn_params, os.path.join(args.out, 'netGn_params.pkl'))
        netGn.to(device)
        netGn.apply(common.weights_init)
        util.print_network(netGn, 'Gn', os.path.join(args.out,
                                                     'netGn_arch.txt'))
        if args.g_model_average:
            netGn_test = copy.deepcopy(netGn)
        else:
            netGn_test = netGn

        if args.semi_trained:
            netG_test.load_state_dict(torch.load(args.semi_trained_image_gen))
            netGn_test.load_state_dict(torch.load(args.semi_trained_noise_gen))
            netD.load_state_dict(torch.load(args.semi_trained_discrim))

    else:
        netGn, netGn_test = None, None

    # Set up measure
    if args.noise_measure:
        if args.dataset == 'CIFAR10':
            noise_measure = None
        elif args.dataset == 'CIFAR10AG':
            noise_measure = functools.partial(
                measure.additive_gaussian_noise_measure,
                noise_scale=args.noise_scale,
                noise_scale_high=args.noise_scale_high,
                image_range=args.image_range)
        elif args.dataset == 'CIFAR10LG':
            noise_measure = functools.partial(
                measure.local_gaussian_noise_measure,
                noise_scale=args.noise_scale,
                patch_size=args.patch_size,
                noise_scale_high=args.noise_scale_high,
                patch_max_size=args.patch_max_size,
                image_range=args.image_range)
        elif args.dataset == 'CIFAR10U':
            noise_measure = functools.partial(
                measure.uniform_noise_measure,
                noise_scale=args.noise_scale,
                noise_scale_high=args.noise_scale_high,
                image_range=args.image_range)
        elif args.dataset == 'CIFAR10MIX':
            noise_measure = functools.partial(
                measure.mixture_noise_measure,
                noise_scale_list=args.noise_scale_list,
                mixture_rate_list=args.mixture_rate_list,
                image_range=args.image_range)
        elif args.dataset == 'CIFAR10BG':
            noise_measure = functools.partial(
                measure.brown_gaussian_noise_measure,
                noise_scale=args.noise_scale,
                noise_scale_high=args.noise_scale_high,
                kernel_size=args.kernel_size,
                image_range=args.image_range)
        elif args.dataset == 'CIFAR10ABG':
            noise_measure = functools.partial(
                measure.additive_brown_gaussian_noise_measure,
                noise_scale=args.noise_scale,
                noise_scale_high=args.noise_scale_high,
                kernel_size=args.kernel_size,
                image_range=args.image_range)
        elif args.dataset == 'CIFAR10MG':
            noise_measure = functools.partial(
                measure.multiplicative_gaussian_noise_measure,
                multi_noise_scale=args.multi_noise_scale,
                multi_noise_scale_high=args.multi_noise_scale_high,
                image_range=args.image_range)
        elif args.dataset == 'CIFAR10AMG':
            noise_measure = functools.partial(
                measure.additive_multiplicative_gaussian_noise_measure,
                noise_scale=args.noise_scale,
                multi_noise_scale=args.multi_noise_scale,
                noise_scale_high=args.noise_scale_high,
                multi_noise_scale_high=args.multi_noise_scale_high,
                image_range=args.image_range)
        elif args.dataset == 'CIFAR10P':
            noise_measure = functools.partial(
                measure.poisson_noise_measure,
                noise_lam=args.noise_lam,
                noise_lam_high=args.noise_lam_high,
                image_range=args.image_range)
        elif args.dataset == 'CIFAR10PG':
            noise_measure = functools.partial(
                measure.poisson_gaussian_noise_measure,
                noise_lam=args.noise_lam,
                noise_scale=args.noise_scale,
                noise_lam_high=args.noise_lam_high,
                noise_scale_high=args.noise_scale_high,
                image_range=args.image_range)
    else:
        noise_measure = None

    # Set up optimizers
    optimizerG = optim.Adam(netG.parameters(),
                            lr=args.g_lr,
                            betas=(args.beta1, args.beta2))
    optimizerD = optim.Adam(netD.parameters(),
                            lr=args.d_lr,
                            betas=(args.beta1, args.beta2))
    if args.gn_train:
        optimizerGn = optim.Adam(netGn.parameters(),
                                 lr=args.g_lr,
                                 betas=(args.beta1, args.beta2))
    else:
        optimizerGn = None

    # Set up learning rate schedulers
    def lr_lambda(iteration):
        if args.num_iterations_decay > 0:
            decay_start = args.num_iterations - args.num_iterations_decay
            lr = 1.0 - max(0, iteration + 1 - decay_start) / float(
                args.num_iterations_decay)
        else:
            lr = 1.0
        return lr

    lr_schedulers = []
    lr_schedulers.append(
        optim.lr_scheduler.LambdaLR(optimizerG, lr_lambda=lr_lambda))
    lr_schedulers.append(
        optim.lr_scheduler.LambdaLR(optimizerD, lr_lambda=lr_lambda))
    if args.gn_train:
        lr_schedulers.append(
            optim.lr_scheduler.LambdaLR(optimizerGn, lr_lambda=lr_lambda))

    # Set up trainer
    trainer_params = {
        'iterator': iterator,
        'models': (netG, netD, netGn),
        'models_test': (netG_test, netGn_test),
        'measures': (noise_measure, ),
        'optimizers': (optimizerG, optimizerD, optimizerGn),
        'lr_schedulers': lr_schedulers,
        'batch_size': args.batch_size,
        'g_bs_multiple': args.g_bs_multiple,
        'num_critic': args.num_critic,
        'lambdas': (args.lambda_r1, args.lambda_ds),
        'model_averages': (args.g_model_average, args.g_model_average),
        'model_average_beta': args.model_average_beta,
        'image_range': args.image_range,
        'implicit': args.implicit,
        'prior': args.prior,
        'rotation': args.rotation,
        'channel_shuffle': args.channel_shuffle,
        'color_inversion': args.color_inversion,
        'blurvh': args.blurvh,
        'clip_range': args.clip_range,
        'device': device
    }
    trainer = Trainer(**trainer_params)

    # Set up visualizer and logger
    visualizer = Visualizer(netG_test,
                            netGn_test,
                            device,
                            args.out,
                            args.implicit,
                            args.prior,
                            args.rotation,
                            args.channel_shuffle,
                            args.color_inversion,
                            args.num_columns,
                            image_range=args.image_range)
    logger = Logger(args.out, 'loss')

    # Print args
    util.print_args(args, os.path.join(args.out, 'args.txt'))

    if args.semi_trained:
        trainer.iteration = args.semi_trained_current_iters

    # Train
    while trainer.iteration < args.num_iterations:
        iter_start_time = time.time()
        trainer.update()

        if (args.display_interval > 0
                and trainer.iteration % args.display_interval == 0):
            t = (time.time() - iter_start_time) / args.batch_size
            logger.log(trainer.iteration, trainer.get_current_loss(), t)

        if (args.snapshot_interval > 0
                and trainer.iteration % args.snapshot_interval == 0):
            torch.save(
                netG_test.state_dict(),
                os.path.join(args.out, 'netG_iter_%d.pth' % trainer.iteration))
            torch.save(
                netD.state_dict(),
                os.path.join(args.out, 'netD_iter_%d.pth' % trainer.iteration))
            if args.gn_train:
                torch.save(
                    netGn_test.state_dict(),
                    os.path.join(args.out,
                                 'netGn_iter_%d.pth' % trainer.iteration))

        if (args.visualize_interval > 0
                and trainer.iteration % args.visualize_interval == 0):
            visualizer.visualize(trainer.iteration)
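
The g_model_average / model_average_beta options suggest the trainer keeps an exponential moving average of the generator weights in netG_test. The exact update lives inside Trainer (not shown); a minimal sketch of the usual EMA step, as an assumption rather than the repository's code:

import torch

def ema_update(net_test, net, beta=0.999):
    # net_test accumulates an exponential moving average of net's parameters.
    with torch.no_grad():
        for p_avg, p in zip(net_test.parameters(), net.parameters()):
            p_avg.mul_(beta).add_(p, alpha=1.0 - beta)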