    # ------------- delete previous model checkpoints -------------
    checkpoint_path = join(opt.checkpoints_dir, opt.name)
    if os.path.exists(checkpoint_path):
        shutil.rmtree(checkpoint_path)

    # ----------------------- data preparation -------------------
    # datadir_syn = join(datadir, 'VOCdevkit/VOC2012/PNGImages')
    # datadir_real = join(datadir, 'real_train')
    datadir_unaligned = join(datadir, 'unaligned', 'unaligned_train400')

    # train_dataset = datasets.CEILDataset(datadir_syn, read_fns('VOC2012_224_train_png.txt'), size=opt.max_dataset_size)
    # train_dataset_real = datasets.CEILTestDataset(datadir_real, enable_transforms=True)
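    # only the 400 unaligned real-world pairs are used for training here; the
    # commented lines show the optional synthetic / aligned-real / fusion setup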
    train_dataset_unaligned = datasets.CEILTestDataset(datadir_unaligned, enable_transforms=True, flag={'unaligned':True}, size=None)
    # train_dataset_fusion = datasets.FusionDataset([train_dataset, train_dataset_unaligned, train_dataset_real], [0.25,0.5,0.25])

    train_dataloader_fusion = datasets.DataLoader(
        train_dataset_unaligned, batch_size=opt.batchSize, shuffle=not opt.serial_batches,  # train_dataset_fusion
        num_workers=opt.nThreads, pin_memory=True)

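    # evaluation sets: synthetic CEILNet table-2 data and the 20 real-world images (real20)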
    eval_dataset_ceilnet = datasets.CEILTestDataset(join(datadir, 'testdata_CEILNET_table2'))

    eval_dataset_real = datasets.CEILTestDataset(
        join(datadir, 'real20'),
        fns=read_fns('real_test.txt'))

    eval_dataloader_ceilnet = datasets.DataLoader(
        eval_dataset_ceilnet, batch_size=1, shuffle=False,
        num_workers=opt.nThreads, pin_memory=True)

    eval_dataloader_real = datasets.DataLoader(
        eval_dataset_real, batch_size=1, shuffle=False,
        num_workers=opt.nThreads, pin_memory=True)
Example #2
datadir = '/media/kaixuan/DATA/Papers/Code/Data/Reflection/'

datadir_syn = join(datadir, 'VOCdevkit/VOC2012/PNGImages')
datadir_real = join(datadir, 'real_train')
datadir_unaligned = join(datadir, 'unaligned', 'unaligned_train250')

train_dataset = datasets.CEILDataset(datadir_syn, read_fns('VOC2012_224_train_png.txt'), size=opt.max_dataset_size)
train_dataset_real = datasets.CEILTestDataset(datadir_real, enable_transforms=True)

train_dataset_unaligned = datasets.CEILTestDataset(datadir_unaligned, enable_transforms=True, flag={'unaligned':True}, size=None)

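# sample synthetic, unaligned-real, and aligned-real pairs at a 0.25 / 0.5 / 0.25 ratio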
train_dataset_fusion = datasets.FusionDataset([train_dataset, train_dataset_unaligned, train_dataset_real], [0.25,0.5,0.25])


train_dataloader_fusion = datasets.DataLoader(
    train_dataset_fusion, batch_size=opt.batchSize, shuffle=not opt.serial_batches, 
    num_workers=opt.nThreads, pin_memory=True)


engine = Engine(opt)
"""Main Loop"""
def set_learning_rate(lr):
    for optimizer in engine.model.optimizers:
        util.set_opt_param(optimizer, 'lr', lr)


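# piecewise-constant schedule: start at 1e-4, reduce at epoch 65 and again at epoch 70, training up to epoch 80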
set_learning_rate(1e-4)
while engine.epoch < 80:
    if engine.epoch == 65:
        set_learning_rate(5e-5)
    if engine.epoch == 70:
Example #3
eval_dataset_real = datasets.CEILTestDataset(join(datadir, 'real20'),
                                             fns=read_fns('real_test.txt'),
                                             size=20)

eval_dataset_postcard = datasets.CEILTestDataset(join(datadir, 'postcard'))
eval_dataset_solidobject = datasets.CEILTestDataset(
    join(datadir, 'solidobject'))

# test_dataset_internet = datasets.RealDataset(join(datadir, 'internet'))
# test_dataset_unaligned300 = datasets.RealDataset(join(datadir, 'refined_unaligned_data/unaligned300/blended'))
# test_dataset_unaligned150 = datasets.RealDataset(join(datadir, 'refined_unaligned_data/unaligned150/blended'))
# test_dataset_unaligned_dynamic = datasets.RealDataset(join(datadir, 'refined_unaligned_data/unaligned_dynamic/blended'))
# test_dataset_sir2 = datasets.RealDataset(join(datadir, 'sir2_wogt/blended'))

eval_dataloader_ceilnet = datasets.DataLoader(eval_dataset_ceilnet,
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=opt.nThreads,
                                              pin_memory=True)

eval_dataloader_real = datasets.DataLoader(eval_dataset_real,
                                           batch_size=1,
                                           shuffle=False,
                                           num_workers=opt.nThreads,
                                           pin_memory=True)

eval_dataloader_sir2 = datasets.DataLoader(eval_dataset_sir2,
                                           batch_size=1,
                                           shuffle=False,
                                           num_workers=opt.nThreads,
                                           pin_memory=True)
if __name__ == "__main__":
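    # use the 'spawn' start method so DataLoader worker processes play nicely with CUDA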
    mp.set_start_method('spawn')

    opt = TrainOptions().parse()
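    # force evaluation settings: no training, no log files, no display window, quiet output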
    opt.isTrain = False
    cudnn.benchmark = True
    opt.no_log = True
    opt.display_id = 0
    opt.verbose = False

    datadir = opt.root_dir  # datadir = '/media/kaixuan/DATA/Papers/Code/Data/Reflection/'

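    # blended images from the 50-image unaligned test split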
    test_dataset_unaligned = datasets.RealDataset(
        join(datadir, 'unaligned/unaligned_test50/blended'))
    test_dataloader_unaligned = datasets.DataLoader(test_dataset_unaligned,
                                                    batch_size=1,
                                                    shuffle=False,
                                                    num_workers=opt.nThreads,
                                                    pin_memory=True)

    engine = Engine(opt)
    """Main Loop"""
    result_dir = join(datadir, 'results/' + opt.name)
    if os.path.exists(result_dir):  # delete previous model results
        shutil.rmtree(result_dir)
        print(result_dir, 'removed')
    os.mkdir(result_dir)

    res = engine.test(test_dataloader_unaligned,
                      savedir=join(result_dir, 'unaligned_test50'))
Example #5
    opt.no_log = True
    opt.display_id = 0
    opt.verbose = False

    datadir = opt.root_dir

    # ----------------------- data preparation -------------------
    eval_dataset_ceilnet = datasets.CEILTestDataset(
        join(datadir, 'testdata_CEILNET_table2'))

    eval_dataset_real = datasets.CEILTestDataset(join(datadir, 'real20'),
                                                 fns=read_fns('real_test.txt'))

    eval_dataloader_ceilnet = datasets.DataLoader(eval_dataset_ceilnet,
                                                  batch_size=1,
                                                  shuffle=False,
                                                  num_workers=opt.nThreads,
                                                  pin_memory=True)

    eval_dataloader_real = datasets.DataLoader(eval_dataset_real,
                                               batch_size=1,
                                               shuffle=False,
                                               num_workers=opt.nThreads,
                                               pin_memory=True)

    # -------------------- engine start ---------------------------
    best_val_loss = 1e-6
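    # iterate over every saved checkpoint (*.pt) for this run, skipping the 'best' snapshot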
    for root, _, fnames in sorted(os.walk(join(opt.checkpoints_dir,
                                               opt.name))):
        for fname in fnames:
            if fname.endswith('.pt') and 'best' not in fname:
Example #6
eval_dataset_real = datasets.CEILTestDataset(
    join(datadir, 'real20'),
    fns=read_fns('real_test.txt'),
    size=20)

eval_dataset_postcard = datasets.CEILTestDataset(join(datadir, 'postcard'))
eval_dataset_solidobject = datasets.CEILTestDataset(join(datadir, 'solidobject'))

test_dataset_internet = datasets.RealDataset(join(datadir, 'internet'))
test_dataset_unaligned300 = datasets.RealDataset(join(datadir, 'refined_unaligned_data/unaligned300/blended'))
test_dataset_unaligned150 = datasets.RealDataset(join(datadir, 'refined_unaligned_data/unaligned150/blended'))
test_dataset_unaligned_dynamic = datasets.RealDataset(join(datadir, 'refined_unaligned_data/unaligned_dynamic/blended'))
test_dataset_sir2 = datasets.RealDataset(join(datadir, 'sir2_wogt/blended'))


eval_dataloader_ceilnet = datasets.DataLoader(
    eval_dataset_ceilnet, batch_size=1, shuffle=False,
    num_workers=opt.nThreads, pin_memory=True)

eval_dataloader_real = datasets.DataLoader(
    eval_dataset_real, batch_size=1, shuffle=False,
    num_workers=opt.nThreads, pin_memory=True)

eval_dataloader_sir2 = datasets.DataLoader(
    eval_dataset_sir2, batch_size=1, shuffle=False,
    num_workers=opt.nThreads, pin_memory=True)

eval_dataloader_solidobject = datasets.DataLoader(
    eval_dataset_solidobject, batch_size=1, shuffle=False,
    num_workers=opt.nThreads, pin_memory=True)

eval_dataloader_postcard = datasets.DataLoader(
    eval_dataset_postcard, batch_size=1, shuffle=False,
    num_workers=opt.nThreads, pin_memory=True)