grad_image.reshape(-1, cfg.iDimsC, 32, 32).transpose([0, 2, 3, 1]),
            [16, 16], sTestCaseDir + 'grad_image16x16_%d.png' % iter)

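    # Checkpointing block: every 30 minutes, at the final iteration, or when an
    # explicit save is requested (bSave), log the network state, flush the logger,
    # and write a model checkpoint.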
    if time.time() - last_save_time > 60 * 30 or iter == cfg.iMaxIter or bSave:
        stime = time.time()
        log_netstate()
        logger.save()
        save_model(saver, sess, sCheckpointDir, step=iter)
        last_save_time = time.time()
        logger.log('Model saved')
        logger.log('Time: %.2f' % (time.time() - stime))
        bSave = False

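    # Plotting block: every 30 minutes, at the final iteration, or on request
    # (bPlot), regenerate the logged curves plus a combined plot of the
    # real/fake discriminator logits.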
    if time.time() - last_plot_time > 60 * 30 or iter == cfg.iMaxIter or bPlot:
        stime = time.time()
        logger.plot()
        logger.plot_together(['logit_real', 'logit_fake'], [
            r'$\mathbb{E}_{x \sim P_r} f(x)$',
            r'$\mathbb{E}_{x \sim P_g} f(x)$'
        ], ['olive', 'skyblue'], 'logits.pdf')
        last_plot_time = time.time()
        logger.log('Plotted')
        logger.log('Time: %.2f' % (time.time() - stime))
        bPlot = False

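    # Sample dump: every 10 minutes (or at the final iteration) generate images
    # from the fixed noise vectors and save them as a 16x16 grid for visual
    # inspection.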
    if time.time() - last_imshave_time > 60 * 10 or iter == cfg.iMaxIter:
        stime = time.time()
        f0 = gen_images_with_noise(fixed_noise[:256])
        save_images(
            f0.reshape(-1, cfg.iDimsC, 32, 32).transpose([0, 2, 3, 1]),
            [16, 16], sSampleDir + 'gen_image16x16_%d.png' % iter)
Example #2
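# PyTorch training entry point for a paired image-to-image GAN. The helpers used
# below (ImageFolder, ResUNetG, NetD, init_weights, train, validate,
# save_checkpoint, Logger, savefig) are assumed to be defined elsewhere in the
# same repository.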
def main(args):
    print("==> using settings {}".format(args))

    num_workers = 8
    num_epochs = args.num_epochs
    img_dir_path = args.img_dir_path

    cudnn.benchmark = True
    device = torch.device("cuda")

    h_dim = args.h_dim
    img_size = args.img_size
    batch_size = args.batch_size
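    # Adam hyperparameters: lr = 1e-4 with betas = (0.0, 0.9), a setting commonly
    # used for stable GAN training (e.g. WGAN-GP / spectral-norm setups).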
    lr = 0.0001
    betas = (0.0, 0.9)

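    # Resize to img_size, convert to a tensor in [0, 1], then normalize to
    # [-1, 1] (the usual range for GANs with a tanh generator output).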
    transform = transforms.Compose([
        transforms.Resize(img_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    train_loader = torch.utils.data.DataLoader(
        ImageFolder(img_dir_path, 'train.txt', transform),
        batch_size=batch_size, num_workers=num_workers,
        shuffle=True, pin_memory=True, drop_last=True)

    val_loader = torch.utils.data.DataLoader(
        ImageFolder(img_dir_path, 'test.txt', transform, step=32),
        batch_size=batch_size, num_workers=num_workers,
        shuffle=False, pin_memory=True, drop_last=True)

    model_gen = ResUNetG(img_size, h_dim, img_dim=3, norm_dim=3)
    model_dis = NetD(img_size, input_dim=6)
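    # NetD takes 6 input channels, which suggests the discriminator scores the
    # concatenation of a 3-channel condition image with a 3-channel real or
    # generated image (pix2pix-style pairing); ResUNetG maps 3 channels to 3.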

    model_gen = torch.nn.DataParallel(model_gen).to(device)
    model_dis = torch.nn.DataParallel(model_dis).to(device)

    model_gen.apply(init_weights)
    model_dis.apply(init_weights)

    optim_gen = optim.Adam(model_gen.parameters(), lr=lr, betas=betas)
    optim_dis = optim.Adam(model_dis.parameters(), lr=lr, betas=betas)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            start_epoch = checkpoint['epoch']
            model_gen.load_state_dict(checkpoint['gen_state_dict'])
            model_dis.load_state_dict(checkpoint['dis_state_dict'])
            optim_gen.load_state_dict(checkpoint['gen_optim'])
            optim_dis.load_state_dict(checkpoint['dis_optim'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
            out_dir_path = os.path.dirname(args.resume)
            logger = Logger(os.path.join(out_dir_path, 'log.txt'), resume=True)
        else:
            raise RuntimeError("=> no checkpoint found at '{}'".format(args.resume))
    else:
        start_epoch = 0
        out_dir_path = os.path.join('checkpoints', datetime.datetime.now().isoformat())

        if not os.path.exists(out_dir_path):
            os.makedirs(out_dir_path)
            print('Make output dir: {}'.format(out_dir_path))

        logger = Logger(os.path.join(out_dir_path, 'log.txt'))
        logger.set_names(['Epoch', 'Train Loss G', 'Train Loss D'])

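    # Main loop: train() runs one epoch of GAN training and returns the generator
    # and discriminator losses, which are appended to the log; every args.snapshot
    # epochs the generator is validated and a checkpoint is written.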
    for epoch in range(start_epoch, start_epoch + num_epochs):
        print('\nEpoch: %d | LR: %.8f' % (epoch + 1, lr))

        # train for one epoch
        loss_gen, loss_dis = train(train_loader, model_gen, model_dis, optim_gen, optim_dis, device)

        # append logger file
        logger.append([epoch + 1, loss_gen, loss_dis])

        if (epoch + 1) % args.snapshot == 0:
            # validate
            validate(val_loader, model_gen, device, os.path.join(out_dir_path, 'epoch_{:04d}'.format(epoch + 1)))

            # save checkpoint
            save_checkpoint({
                'epoch': epoch + 1,
                'gen_state_dict': model_gen.state_dict(),
                'dis_state_dict': model_dis.state_dict(),
                'gen_optim': optim_gen.state_dict(),
                'dis_optim': optim_dis.state_dict()
            }, checkpoint=out_dir_path)

    logger.close()
    logger.plot(['Train Loss G', 'Train Loss D'])
    savefig(os.path.join(out_dir_path, 'log.eps'))
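
# A minimal __main__ sketch (not part of the original example) showing how main()
# could be wired up with argparse. The argument names mirror the attributes read
# above (num_epochs, img_dir_path, h_dim, img_size, batch_size, resume, snapshot);
# the defaults here are assumptions, not values from the source.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Paired image-to-image GAN training')
    parser.add_argument('--num_epochs', type=int, default=100)
    parser.add_argument('--img_dir_path', type=str, required=True)
    parser.add_argument('--h_dim', type=int, default=64)
    parser.add_argument('--img_size', type=int, default=256)
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--resume', type=str, default='')
    parser.add_argument('--snapshot', type=int, default=10)
    main(parser.parse_args())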