Example #1
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


resume_epoch = 0

netG = _netG(opt)
netG.apply(weights_init)
if opt.netG != '':
    netG.load_state_dict(
        torch.load(
            opt.netG,
            map_location=lambda storage, location: storage)['state_dict'])
    resume_epoch = torch.load(opt.netG)['epoch']
print(netG)

netD = _netlocalD(opt)
netD.apply(weights_init)
if opt.netD != '':
    netD.load_state_dict(
        torch.load(
            opt.netD,
            map_location=lambda storage, location: storage)['state_dict'])
    resume_epoch = torch.load(opt.netD)['epoch']
print(netD)

criterion = nn.BCELoss()
criterionMSE = nn.MSELoss()

input_real = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
input_cropped = torch.FloatTensor(opt.batchSize, 3, opt.imageSize,
                                  opt.imageSize)
Example #2
# epoch to resume training from
resume_epoch = 0

netG = _netG(opt)  # generator: encoder-decoder network
netG.apply(weights_init)  # initialize weights

if opt.netG != '':  # optionally load a saved model
    netG.load_state_dict(
        torch.load(
            opt.netG,
            map_location=lambda storage, location: storage)['state_dict'])
    resume_epoch = torch.load(opt.netG)['epoch']

print(netG)

netD = _netlocalD(opt)  # local discriminator network
netD.apply(weights_init)  # initialize the discriminator

if opt.netD != '':  # optionally load a saved model
    netD.load_state_dict(
        torch.load(
            opt.netD,
            map_location=lambda storage, location: storage)['state_dict'])
    resume_epoch = torch.load(opt.netD)['epoch']

print(netD)

criterion = nn.BCELoss()  # Binary Cross Entropy
criterionMSE = nn.MSELoss()  # squared L2 norm

input_real = torch.Tensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
Example #3
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


resume_epoch = 0

netG = _netG(opt)
netG.apply(weights_init)
if opt.netG != '':
    netG.load_state_dict(
        torch.load(
            opt.netG,
            map_location=lambda storage, location: storage)['state_dict'])
    resume_epoch = torch.load(opt.netG)['epoch']
print(netG)


netD = _netlocalD(opt)
netD.apply(weights_init)
if opt.netD != '':
    netD.load_state_dict(
        torch.load(
            opt.netD,
            map_location=lambda storage, location: storage)['state_dict'])
    resume_epoch = torch.load(opt.netD)['epoch']
print(netD)

criterion = nn.BCELoss()
criterionMSE = nn.MSELoss()

input_real = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
input_cropped = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
label = torch.FloatTensor(opt.batchSize)
real_label = 1
fake_label = 0
Example #4
def main():
    try:
        os.makedirs("result/train/cropped")
        os.makedirs("result/train/real")
        os.makedirs("result/train/recon")
        os.makedirs("model")
    except OSError:
        pass

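    # seed Python's and PyTorch's RNGs for reproducibility (and CUDA, when --cuda is set)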
    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.manualSeed)

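    # let cuDNN benchmark and pick the fastest convolution algorithms for fixed-size inputs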
    cudnn.benchmark = True

    if torch.cuda.is_available() and not opt.cuda:
        print(
            "WARNING: You have a CUDA device, so you should probably run with --cuda"
        )

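    # build the dataset selected by opt.dataset; every pipeline resizes images to opt.imageSize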
    if opt.dataset in ['imagenet', 'folder', 'lfw']:
        # folder dataset
        dataset = dset.ImageFolder(root=opt.dataroot,
                                   transform=transforms.Compose([
                                       transforms.Resize(opt.imageSize),
                                       transforms.CenterCrop(opt.imageSize),
                                       transforms.ToTensor(),
                                       transforms.Normalize((0.5, 0.5, 0.5),
                                                            (0.5, 0.5, 0.5)),
                                   ]))
    elif opt.dataset == 'lsun':
        dataset = dset.LSUN(db_path=opt.dataroot,
                            classes=['bedroom_train'],
                            transform=transforms.Compose([
                                transforms.Resize(opt.imageSize),
                                transforms.CenterCrop(opt.imageSize),
                                transforms.ToTensor(),
                                transforms.Normalize((0.5, 0.5, 0.5),
                                                     (0.5, 0.5, 0.5)),
                            ]))
    elif opt.dataset == 'cifar10':
        dataset = dset.CIFAR10(root=opt.dataroot,
                               download=True,
                               transform=transforms.Compose([
                                   transforms.Resize(opt.imageSize),
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.5, 0.5, 0.5),
                                                        (0.5, 0.5, 0.5)),
                               ]))
    elif opt.dataset == 'streetview':
        transform = transforms.Compose([
            transforms.Resize(opt.imageSize),
            transforms.CenterCrop(opt.imageSize),
            transforms.ToTensor(),
            transforms.Normalize((0.0, 0.0, 0.0), (1.0, 1.0, 1.0))
        ])  # identity Normalize, i.e. no normalization
        dataset = dset.ImageFolder(root=opt.dataroot, transform=transform)
    assert dataset
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=opt.batchSize,
                                             shuffle=True,
                                             num_workers=int(opt.workers))

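    # network sizes and loss weights; wtl2 balances the adversarial and reconstruction terms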
    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)
    nc = 3
    nef = int(opt.nef)
    nBottleneck = int(opt.nBottleneck)
    wtl2 = float(opt.wtl2)
    overlapL2Weight = 10

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    resume_epoch = 0

    netG = _netG(opt)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(
            torch.load(
                opt.netG,
                map_location=lambda storage, location: storage)['state_dict'])
        resume_epoch = torch.load(opt.netG)['epoch']
    print(netG)

    netD = _netlocalD(opt)
    netD.apply(weights_init)
    if opt.netD != '':
        netD.load_state_dict(
            torch.load(
                opt.netD,
                map_location=lambda storage, location: storage)['state_dict'])
        resume_epoch = torch.load(opt.netD)['epoch']
    print(netD)

    criterion = nn.BCELoss()
    criterionMSE = nn.MSELoss()

    input_real = torch.FloatTensor(opt.batchSize, 3, opt.imageSize,
                                   opt.imageSize)
    input_cropped = torch.FloatTensor(opt.batchSize, 3, opt.imageSize,
                                      opt.imageSize)
    label = torch.FloatTensor(opt.batchSize)
    real_label = 1
    fake_label = 0

    print(opt.batchSize)
    print(opt.imageSize)

    real_center = torch.FloatTensor(int(opt.batchSize), 3,
                                    int(opt.imageSize / 2),
                                    int(opt.imageSize / 2))
    #real_center = torch.FloatTensor(64, 3, 64,64)

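    # move networks, criteria, and tensor buffers to the GPU when --cuda is set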
    if opt.cuda:
        netD.cuda()
        netG.cuda()
        criterion.cuda()
        criterionMSE.cuda()
        input_real, input_cropped, label = input_real.cuda(
        ), input_cropped.cuda(), label.cuda()
        real_center = real_center.cuda()

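    # wrap the buffers in Variables (legacy pre-0.4 PyTorch autograd API)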
    input_real = Variable(input_real)
    input_cropped = Variable(input_cropped)
    label = Variable(label)

    real_center = Variable(real_center)

    # setup optimizer
    optimizerD = optim.Adam(netD.parameters(),
                            lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(),
                            lr=opt.lr,
                            betas=(opt.beta1, 0.999))

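    # main training loop; resumes from resume_epoch when a checkpoint was loaded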
    for epoch in range(resume_epoch, opt.niter):
        # add random jitter to the position of the center crop
        randwf = random.uniform(-1.0, 1.0)
        randhf = random.uniform(-1.0, 1.0)
        if opt.jittering:
            jitterSizeW = int(opt.imageSize / 5 * randwf)
            jitterSizeH = int(opt.imageSize / 5 * randhf)
            print("jittering : W > ", jitterSizeW, " H >", jitterSizeH)
        else:
            jitterSizeW = 0
            jitterSizeH = 0
        for i, data in enumerate(dataloader, 0):
            real_cpu, _ = data
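            # extract the (possibly jittered) center patch that the generator must reconstruct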
            real_center_cpu = real_cpu[:, :,
                                       int(opt.imageSize / 4 +
                                           jitterSizeW):int(opt.imageSize / 4 +
                                                            opt.imageSize / 2 +
                                                            jitterSizeW),
                                       int(opt.imageSize / 4 +
                                           jitterSizeH):int(opt.imageSize / 4 +
                                                            opt.imageSize / 2 +
                                                            jitterSizeH)]
            batch_size = real_cpu.size(0)
            input_real.data.resize_(real_cpu.size()).copy_(real_cpu)
            input_cropped.data.resize_(real_cpu.size()).copy_(real_cpu)
            real_center.data.resize_(
                real_center_cpu.size()).copy_(real_center_cpu)
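            # mask the center of the input (keeping an overlapPred-wide border) with constant per-channel fill values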
            input_cropped.data[:, 0,
                               int(opt.imageSize / 4 + opt.overlapPred +
                                   jitterSizeW):int(opt.imageSize / 4 +
                                                    opt.imageSize / 2 -
                                                    opt.overlapPred +
                                                    jitterSizeW),
                               int(opt.imageSize / 4 + opt.overlapPred +
                                   jitterSizeH):
                               int(opt.imageSize / 4 + opt.imageSize / 2 -
                                   opt.overlapPred +
                                   jitterSizeH)] = 2 * 117.0 / 255.0 - 1.0
            input_cropped.data[:, 1,
                               int(opt.imageSize / 4 + opt.overlapPred +
                                   jitterSizeW):int(opt.imageSize / 4 +
                                                    opt.imageSize / 2 -
                                                    opt.overlapPred +
                                                    jitterSizeW),
                               int(opt.imageSize / 4 + opt.overlapPred +
                                   jitterSizeH):
                               int(opt.imageSize / 4 + opt.imageSize / 2 -
                                   opt.overlapPred +
                                   jitterSizeH)] = 2 * 104.0 / 255.0 - 1.0
            input_cropped.data[:, 2,
                               int(opt.imageSize / 4 + opt.overlapPred +
                                   jitterSizeW):int(opt.imageSize / 4 +
                                                    opt.imageSize / 2 -
                                                    opt.overlapPred +
                                                    jitterSizeW),
                               int(opt.imageSize / 4 + opt.overlapPred +
                                   jitterSizeH):
                               int(opt.imageSize / 4 + opt.imageSize / 2 -
                                   opt.overlapPred +
                                   jitterSizeH)] = 2 * 104.0 / 255.0 - 1.0

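            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################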
            # train with real
            netD.zero_grad()
            label.data.resize_(batch_size).fill_(real_label)

            output = netD(real_center)
            errD_real = criterion(output, label)
            errD_real.backward()
            D_x = output.data.mean()

            # train with fake
            # noise.data.resize_(batch_size, nz, 1, 1)
            # noise.data.normal_(0, 1)
            fake = netG(input_cropped)
            label.data.fill_(fake_label)
            output = netD(fake.detach())
            errD_fake = criterion(output, label)
            errD_fake.backward()
            D_G_z1 = output.data.mean()
            errD = errD_real + errD_fake
            optimizerD.step()

            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            netG.zero_grad()
            label.data.fill_(
                real_label)  # fake labels are real for generator cost
            output = netD(fake)
            errG_D = criterion(output, label)
            # errG_D.backward(retain_variables=True)

            # errG_l2 = criterionMSE(fake,real_center)
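            # per-pixel weights for the L2 loss: the overlapPred border is weighted
            # overlapL2Weight times more strongly than the interior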
            wtl2Matrix = real_center.clone()
            wtl2Matrix.data.fill_(wtl2 * overlapL2Weight)
            wtl2Matrix.data[:, :,
                            int(opt.overlapPred):int(opt.imageSize / 2 -
                                                     opt.overlapPred),
                            int(opt.overlapPred):int(opt.imageSize / 2 -
                                                     opt.overlapPred)] = wtl2

            errG_l2 = (fake - real_center).pow(2)
            errG_l2 = errG_l2 * wtl2Matrix
            errG_l2 = errG_l2.mean()

            errG = (1 - wtl2) * errG_D + wtl2 * errG_l2

            errG.backward()

            D_G_z2 = output.data.mean()
            optimizerG.step()

            print(
                '[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f / %.4f l_D(x): %.4f l_D(G(z)): %.4f'
                % (
                    epoch,
                    opt.niter,
                    i,
                    len(dataloader),
                    errD.data[0],
                    errG_D.data[0],
                    errG_l2.data[0],
                    D_x,
                    D_G_z1,
                ))
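            # every 100 batches, save grids of real, cropped, and reconstructed images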
            if i % 100 == 0:
                vutils.save_image(
                    real_cpu,
                    'result/train/real/real_samples_epoch_%03d.png' % (epoch))
                vutils.save_image(
                    input_cropped.data,
                    'result/train/cropped/cropped_samples_epoch_%03d.png' %
                    (epoch))
                recon_image = input_cropped.clone()
                recon_image.data[:, :,
                                 int(opt.imageSize / 4 +
                                     jitterSizeW):int(opt.imageSize / 4 +
                                                      opt.imageSize / 2 +
                                                      jitterSizeW),
                                 int(opt.imageSize / 4 +
                                     jitterSizeH):int(opt.imageSize / 4 +
                                                      opt.imageSize / 2 +
                                                      jitterSizeH)] = fake.data
                vutils.save_image(
                    recon_image.data,
                    'result/train/recon/recon_center_samples_epoch_%03d.png' %
                    (epoch))

        # do checkpointing
        torch.save({
            'epoch': epoch + 1,
            'state_dict': netG.state_dict()
        }, 'model/netG_streetview.pth')
        torch.save({
            'epoch': epoch + 1,
            'state_dict': netD.state_dict()
        }, 'model/netlocalD.pth')