def main():
    # parse options
    parser = TrainSketchOptions()
    opts = parser.parse()

    # create model
    print('--- create model ---')
    netSketch = SketchModule(opts.GB_nlayers, opts.DB_nlayers, opts.GB_nf, opts.DB_nf, opts.gpu)
    if opts.gpu:
        netSketch.cuda()
    netSketch.init_networks(weights_init)
    netSketch.train()

    print('--- training ---')
    for epoch in range(opts.epochs):
        itr = 0
        fnames = load_train_batchfnames(opts.text_path, opts.batchsize, 
                                        opts.text_datasize, trainnum=opts.Btraining_num)
        fnames2 = load_train_batchfnames(opts.augment_text_path, opts.batchsize, 
                                        opts.augment_text_datasize, trainnum=opts.Btraining_num)
        for ii in range(len(fnames)):
            fnames[ii][0:opts.batchsize//2-1] = fnames2[ii][0:opts.batchsize//2-1]
        for fname in fnames:
            itr += 1
            t = prepare_text_batch(fname, anglejitter=True)
            t = to_var(t) if opts.gpu else t
            losses = netSketch.one_pass(t, [l/4.-1. for l in range(0,9)])      
            print('Epoch [%d/%d][%03d/%03d]' %(epoch+1, opts.epochs,itr,len(fnames)), end=': ')
            print('LDadv: %+.3f, LGadv: %+.3f, Lrec: %+.3f'%(losses[0], losses[1], losses[2]))

    print('--- save ---')
    # directory
    torch.save(netSketch.state_dict(), opts.save_GB_name)    
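The checkpoint written by torch.save above is what the structure-transfer examples below reload via opts.load_GB_name. A minimal reload sketch, assuming opts.save_GB_name points at that checkpoint file (the same load_state_dict/eval pattern the next example uses):

netSketch = SketchModule(opts.GB_nlayers, opts.DB_nlayers, opts.GB_nf, opts.DB_nf, opts.gpu)
if opts.gpu:
    netSketch.cuda()
# restore the weights trained above and freeze the module for inference
netSketch.load_state_dict(torch.load(opts.save_GB_name))
netSketch.eval()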
Example #2
def main():
    # parse options
    parser = TrainShapeMatchingOptions()
    opts = parser.parse()

    # create model
    print('--- create model ---')
    netShapeM = ShapeMatchingGAN(opts.GS_nlayers, opts.DS_nlayers, opts.GS_nf, opts.DS_nf,
                     opts.GT_nlayers, opts.DT_nlayers, opts.GT_nf, opts.DT_nf, opts.gpu)
    netSketch = SketchModule(opts.GB_nlayers, opts.DB_nlayers, opts.GB_nf, opts.DB_nf, opts.gpu)

    if opts.gpu:
        netShapeM.cuda()
        netSketch.cuda()
    netShapeM.init_networks(weights_init)
    netShapeM.train()

    netSketch.load_state_dict(torch.load(opts.load_GB_name))
    netSketch.eval()

    print('--- training ---')
    # load image pair
    scales = [l*2.0/(opts.scale_num-1)-1 for l in range(opts.scale_num)]
    Xl, X, _, Noise = load_style_image_pair(opts.style_name, scales, netSketch, opts.gpu)
    Xl = [to_var(a) for a in Xl] if opts.gpu else Xl
    X = to_var(X) if opts.gpu else X
    Noise = to_var(Noise) if opts.gpu else Noise
    for epoch in range(opts.step1_epochs):
        for i in range(opts.Straining_num//opts.batchsize):
            idx = opts.scale_num-1
            xl, x = cropping_training_batches(Xl[idx], X, Noise, opts.batchsize, 
                                      opts.Sanglejitter, opts.subimg_size, opts.subimg_size)
            losses = netShapeM.structure_one_pass(x, xl, scales[idx])
            print('Step1, Epoch [%02d/%02d][%03d/%03d]' %(epoch+1, opts.step1_epochs, i+1, 
                                                          opts.Straining_num//opts.batchsize), end=': ')
            print('LDadv: %+.3f, LGadv: %+.3f, Lrec: %+.3f, Lgly: %+.3f'%(losses[0], losses[1], losses[2], losses[3]))
    netShapeM.G_S.myCopy()
    for epoch in range(opts.step2_epochs):
        for i in range(opts.Straining_num//opts.batchsize):
            idx = random.choice([0, opts.scale_num-1])
            xl, x = cropping_training_batches(Xl[idx], X, Noise, opts.batchsize, 
                                      opts.Sanglejitter, opts.subimg_size, opts.subimg_size)
            losses = netShapeM.structure_one_pass(x, xl, scales[idx])
            print('Step2, Epoch [%02d/%02d][%03d/%03d]' %(epoch+1, opts.step2_epochs, i+1, 
                                                          opts.Straining_num//opts.batchsize), end=': ')
            print('LDadv: %+.3f, LGadv: %+.3f, Lrec: %+.3f, Lgly: %+.3f'%(losses[0], losses[1], losses[2], losses[3]))
    for epoch in range(opts.step3_epochs):
        for i in range(opts.Straining_num//opts.batchsize):
            idx = random.choice(range(opts.scale_num))
            xl, x = cropping_training_batches(Xl[idx], X, Noise, opts.batchsize, 
                                      opts.Sanglejitter, opts.subimg_size, opts.subimg_size)
            losses = netShapeM.structure_one_pass(x, xl, scales[idx])  
            print('Step3, Epoch [%02d/%02d][%03d/%03d]' %(epoch+1, opts.step3_epochs, i+1, 
                                                          opts.Straining_num//opts.batchsize), end=': ')
            print('LDadv: %+.3f, LGadv: %+.3f, Lrec: %+.3f, Lgly: %+.3f'%(losses[0], losses[1], losses[2], losses[3]))
    if opts.glyph_preserve:
        fnames = load_train_batchfnames(opts.text_path, opts.batchsize, 
                                        opts.text_datasize, opts.Straining_num)
        for epoch in range(opts.step4_epochs):
            itr = 0
            for fname in fnames:
                itr += 1
                t = prepare_text_batch(fname, anglejitter=False)
                idx = random.choice(range(opts.scale_num))
                xl, x = cropping_training_batches(Xl[idx], X, Noise, opts.batchsize, 
                                          opts.Sanglejitter, opts.subimg_size, opts.subimg_size)
                t = to_var(t) if opts.gpu else t
                losses = netShapeM.structure_one_pass(x, xl, scales[idx], t)  
                print('Step4, Epoch [%02d/%02d][%03d/%03d]' %(epoch+1, opts.step4_epochs, itr, 
                                                          len(fnames)), end=': ')
                print('LDadv: %+.3f, LGadv: %+.3f, Lrec: %+.3f, Lgly: %+.3f'%(losses[0], losses[1], losses[2], losses[3])) 

    print('--- save ---')
    # directory
    netShapeM.save_structure_model(opts.save_path, opts.save_name)    
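A worked example of the deformation-scale schedule driving the three training steps above, assuming the default opts.scale_num = 4 (the default noted in the annotated copy of this script later in this listing):

scale_num = 4
scales = [l * 2.0 / (scale_num - 1) - 1 for l in range(scale_num)]
print(scales)  # [-1.0, -0.33333333333333337, 0.33333333333333326, 1.0]
# Step 1 always uses idx = scale_num - 1, i.e. scales[3] = 1.0 (maximum deformation);
# Step 2 alternates randomly between scales[0] = -1.0 and scales[3] = 1.0;
# Step 3 samples idx uniformly from all scale_num levels.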
Example #3

opts.Btraining_num = 25600

ep_320 = 2
ep_256 = 1
ep_192 = 0
ep_128 = 3
ep_64 = 1
ep_32 = 2
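
# Assumption: the ep_* values above are per-resolution epoch counts for the custom
# multi-size schedule below (ep_320 = epochs at size 320, ..., ep_32 = epochs at size 32).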

# fnames, fnames2 = custom_load_train_batchfnames(opts.text_path, opts.augment_text_path, opts.batchsize, opts.text_datasize, opts.augment_text_datasize, trainnum = opts.Btraining_num)

pil2tensor = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.5, 0.5, 0.5],std=[0.5,0.5,0.5])])
tensor2pil = transforms.ToPILImage()
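# The two transforms above are near-inverses: pil2tensor maps a PIL image to a CxHxW float
# tensor normalized channel-wise to [-1, 1] via (x - 0.5) / 0.5, and tensor2pil converts a
# (de-normalized) tensor back to a PIL image for saving or inspection.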

print('--- create model ---')
netSketch = SketchModule(opts.GB_nlayers, opts.DB_nlayers, opts.GB_nf, opts.DB_nf, opts.gpu)
if opts.gpu:
    netSketch.cuda()
netSketch.init_networks(weights_init)
netSketch.train()

print('--- training ---')

##########################################################################################################

for epoch in range(ep_320):

    opts.batchsize = 16
    opts.Btraining_num = 12800

    curr_time = time.time()
Example #4

def main():
    # parse options
    parser = TrainShapeMatchingOptions()
    opts = parser.parse()

    # create model
    print('--- create model ---')
    # 6,4,32,32,6,4,32,32,False
    netShape_M_GAN = ShapeMatchingGAN(opts.GS_nlayers, opts.DS_nlayers,
                                      opts.GS_nf, opts.DS_nf, opts.GT_nlayers,
                                      opts.DT_nlayers, opts.GT_nf, opts.DT_nf,
                                      opts.gpu)
    # 6, 5, 32, 32
    netSketch = SketchModule(opts.GB_nlayers, opts.DB_nlayers, opts.GB_nf,
                             opts.DB_nf, opts.gpu)

    if opts.gpu:
        netShape_M_GAN.cuda()
        netSketch.cuda()
    netShape_M_GAN.init_networks(weights_init)
    netShape_M_GAN.train()

    netSketch.load_state_dict(torch.load(opts.load_GB_name))
    netSketch.eval()  # the SketchModule itself is not trained here

    print('--- training ---')
    # load image pair
    scales = [
        l * 2.0 / (opts.scale_num - 1) - 1 for l in range(opts.scale_num)
    ]  # opts.scale_num defaults to 4
    # [-1.0, -0.33333333333333337, 0.33333333333333326, 1.0]

    # use the already-trained netSketch
    Xl, X, _, Noise = load_style_image_pair(opts.style_name, scales, netSketch,
                                            opts.gpu)
    """
    Xl: 经过SketchModule的不同模糊程度的 4 个(scale_num默认值 4)距离图像 X
    Xl[0] -- scales[0] -- -1.0
    Xl[3] -- scales[3] -- 1.0
    X:风格图像的距离图像  shape [1, 3, 740图像高度, 1000图像宽度]
    Noise: 噪声?均值.0,方差.2,shape [1, 3, 740图像高度, 1000图像宽度]
    """
    Xl = [to_var(a) for a in Xl] if opts.gpu else Xl
    X = to_var(X) if opts.gpu else X
    Noise = to_var(Noise) if opts.gpu else Noise

    for epoch in range(opts.step1_epochs):  # default 30
        for i in range(opts.Straining_num //
                       opts.batchsize):  # 2560 // 32 = 80
            # Paper Sec. 5.1: first train G_S with l fixed at 1 to learn the maximum deformation
            idx = opts.scale_num - 1  # 3
            xl, x = cropping_training_batches(Xl[idx], X, Noise,
                                              opts.batchsize,
                                              opts.Sanglejitter,
                                              opts.subimg_size,
                                              opts.subimg_size)
            # xl and x are cropped at identical coordinates.
            # xl: 32 random 256x256 crops taken from Xl[idx] with some noise added, shape [32, 3, 256, 256]
            # x: the matching random crops of the original distance image, same shape as xl, [32, 3, 256, 256]

            losses = netShape_M_GAN.structure_one_pass(x, xl, scales[idx])
            print('Step1, Epoch [%02d/%02d][%03d/%03d]' %
                  (epoch + 1, opts.step1_epochs, i + 1,
                   opts.Straining_num // opts.batchsize),
                  end=': ')
            print('LDadv: %+.3f, LGadv: %+.3f, Lrec: %+.3f, Lgly: %+.3f' %
                  (losses[0], losses[1], losses[2], losses[3]))

    netShape_M_GAN.G_S.myCopy()

    for epoch in range(opts.step2_epochs):  # 40
        for i in range(opts.Straining_num //
                       opts.batchsize):  # 2560 // 32 = 80
            idx = random.choice([0, opts.scale_num - 1])  # 0 or 3
            xl, x = cropping_training_batches(Xl[idx], X, Noise,
                                              opts.batchsize,
                                              opts.Sanglejitter,
                                              opts.subimg_size,
                                              opts.subimg_size)
            losses = netShape_M_GAN.structure_one_pass(x, xl, scales[idx])
            print('Step2, Epoch [%02d/%02d][%03d/%03d]' %
                  (epoch + 1, opts.step2_epochs, i + 1,
                   opts.Straining_num // opts.batchsize),
                  end=': ')
            print('LDadv: %+.3f, LGadv: %+.3f, Lrec: %+.3f, Lgly: %+.3f' %
                  (losses[0], losses[1], losses[2], losses[3]))

    for epoch in range(opts.step3_epochs):
        for i in range(opts.Straining_num // opts.batchsize):
            idx = random.choice(range(opts.scale_num))  # 0,1,2,3
            xl, x = cropping_training_batches(Xl[idx], X, Noise,
                                              opts.batchsize,
                                              opts.Sanglejitter,
                                              opts.subimg_size,
                                              opts.subimg_size)
            losses = netShape_M_GAN.structure_one_pass(x, xl, scales[idx])
            print('Step3, Epoch [%02d/%02d][%03d/%03d]' %
                  (epoch + 1, opts.step3_epochs, i + 1,
                   opts.Straining_num // opts.batchsize),
                  end=': ')
            print('LDadv: %+.3f, LGadv: %+.3f, Lrec: %+.3f, Lgly: %+.3f' %
                  (losses[0], losses[1], losses[2], losses[3]))

    # glyph_preserve defaults to False; if set to True, would the paper's results on characters with complex structure be better than without it?
    if opts.glyph_preserve:
        fnames = load_train_batchfnames(opts.text_path, opts.batchsize,
                                        opts.text_datasize, opts.Straining_num)
        for epoch in range(opts.step4_epochs):
            itr = 0
            for fname in fnames:
                itr += 1
                t = prepare_text_batch(fname, anglejitter=False)
                idx = random.choice(range(opts.scale_num))
                xl, x = cropping_training_batches(Xl[idx], X, Noise,
                                                  opts.batchsize,
                                                  opts.Sanglejitter,
                                                  opts.subimg_size,
                                                  opts.subimg_size)
                t = to_var(t) if opts.gpu else t
                losses = netShape_M_GAN.structure_one_pass(
                    x, xl, scales[idx], t)
                print('Step4, Epoch [%02d/%02d][%03d/%03d]' %
                      (epoch + 1, opts.step4_epochs, itr, len(fnames)),
                      end=': ')
                print('LDadv: %+.3f, LGadv: %+.3f, Lrec: %+.3f, Lgly: %+.3f' %
                      (losses[0], losses[1], losses[2], losses[3]))

    print('--- save ---')
    # directory
    netShape_M_GAN.save_structure_model(opts.save_path, opts.save_name)
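The cropping comments in the example above note that xl and x are cut at identical coordinates. A purely illustrative sketch of such same-coordinate paired cropping, using a hypothetical helper rather than the repository's cropping_training_batches (which additionally mixes in Noise and applies the Sanglejitter rotation option):

import random
import torch

def random_paired_crop(xl_full, x_full, size=256):
    # crop both tensors at the same (top, left) so the pair stays pixel-aligned
    _, _, h, w = x_full.shape
    top = random.randint(0, h - size)
    left = random.randint(0, w - size)
    return (xl_full[:, :, top:top + size, left:left + size],
            x_full[:, :, top:top + size, left:left + size])

xl_crop, x_crop = random_paired_crop(torch.zeros(1, 3, 740, 1000), torch.zeros(1, 3, 740, 1000))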
Example #5
def main():
    # parse options
    parser = TrainSketchOptions()
    opts = parser.parse()
    """
    opts.GB_nlayers = 6   number of generator layers
    opts.DB_nlayers = 5   number of discriminator layers
    opts.GB_nf = 32       number of features in the first generator layer
    opts.DB_nf = 32       number of features in the first discriminator layer
    opts.epochs = 3
    """
    # create model
    print('--- create model ---')
    netSketch = SketchModule(opts.GB_nlayers, opts.DB_nlayers, opts.GB_nf,
                             opts.DB_nf, opts.gpu)
    if opts.gpu:
        netSketch.cuda()
    netSketch.init_networks(weights_init)
    netSketch.train()
    # print(netSketch)
    print('--- training ---')
    for epoch in range(opts.epochs):
        itr = 0
        """
        text_path = ../data/rawtext/yaheiB/train
        batchsize = 16
        text_datasize = 708
        Btraining_num = 12800
        """
        # filenames grouped into batches; missing files do not cause an error here; a (trainnum // batch_size) x batch_size matrix
        f_names = load_train_batchfnames(opts.text_path,
                                         opts.batchsize,
                                         opts.text_datasize,
                                         trainnum=opts.Btraining_num)
        """
        augment_text_path = ../data/rawtext/augment/
        batchsize = 16
        augment_text_datasize = 5
        """
        f_names2 = load_train_batchfnames(opts.augment_text_path,
                                          opts.batchsize,
                                          opts.augment_text_datasize,
                                          trainnum=opts.Btraining_num)

        # replace the first (opts.batchsize // 2 - 1) entries of each batch in f_names with augmented data
        for ii in range(len(f_names)):
            f_names[ii][0:opts.batchsize // 2 -
                        1] = f_names2[ii][0:opts.batchsize // 2 - 1]
        # print(f'f_names2[0]: {f_names2[0]}')  # OK
        for fname in f_names:  # each iteration handles one batch, i.e. fname holds batch_size filenames
            itr += 1
            t = prepare_text_batch(fname, anglejitter=True)
            t = to_var(t) if opts.gpu else t
            control_l = [e / 4. - 1. for e in range(0, 9)]
            # t has shape [batch_size, 3, 256, 256]
            # control_l is [-1.0, -0.75, -0.5, -0.25, 0.0, 0.25, 0.5, 0.75, 1.0]
            losses = netSketch.one_pass(t, control_l)
            print('Epoch [%d/%d][%03d/%03d]' %
                  (epoch + 1, opts.epochs, itr, len(f_names)),
                  end=': ')
            print('LDadv: %+.3f, LGadv: %+.3f, Lrec: %+.3f' %
                  (losses[0], losses[1], losses[2]))

    print('--- save ---')
    # directory
    torch.save(netSketch.state_dict(), opts.save_GB_name)
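A worked example of the batch-mixing rule annotated above, using hypothetical filename lists; the slice replaces the first batchsize // 2 - 1 entries of each regular batch with augmented ones:

batchsize = 16
batch = ['text_%03d.png' % i for i in range(batchsize)]     # regular glyph filenames (hypothetical)
augmented = ['aug_%03d.png' % i for i in range(batchsize)]  # augmented glyph filenames (hypothetical)
batch[0:batchsize // 2 - 1] = augmented[0:batchsize // 2 - 1]
# the first 7 entries of batch are now augmented filenames; the remaining 9 stay regular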
Example #6

def main():
    # `opts` used below is assumed to come from option parsing, as in the examples above
    if len(sys.argv) != 4:
        raise Exception('Please provide 3 commandline arguments. ckpt testing_dir destination_dir')


    testing_directory = sys.argv[2] 
    destination_directory = sys.argv[3] 
    ckpt_file = sys.argv[1]


    if os.path.exists(destination_directory):
        print('REMOVING DIRECTORY')
        shutil.rmtree(destination_directory)

    print('CREATING DIRECTORY')
    os.mkdir(destination_directory)

    netSketch = SketchModule(opts.GB_nlayers, opts.DB_nlayers, opts.GB_nf, opts.DB_nf, opts.gpu)
    if opts.gpu:
        netSketch.cuda()
    netSketch.init_networks(weights_init)
    netSketch.train()

    netSketch.load_state_dict(torch.load(ckpt_file))


    netSketch.eval()

    list_of_files = [os.path.join(testing_directory, i) for i in os.listdir(testing_directory)]

    for file in list_of_files:
        print('PROCESSING == ', file)
        I = load_image(file)