# Example 1 (score: 0)
def test_V3Model():
    """Smoke-test MultimodalDesignerGAN_V3.

    Builds the model from debug options, runs one forward pass on a single
    batch, prints the size of every entry in ``model.output``, and visualizes
    the input batch. No return value; output goes to stdout / the visualizer.
    """
    from data.data_loader import CreateDataLoader
    from options.multimodal_gan_options_v3 import TrainMMGANOptions_V3 as Option
    from models.multimodal_designer_gan_model_v3 import MultimodalDesignerGAN_V3 as Model
    from misc.visualizer import GANVisualizer_V3

    opt = Option().parse(
        '--debug --batch_size 1 --nThreads 1 --shape_encode flx_seg --shape_with_face 1'
    )
    # next(iter(...)) works on both Python 2 and 3; iterator.next() is Py2-only.
    data = next(iter(CreateDataLoader(opt)))

    visualizer = GANVisualizer_V3(opt)

    model = Model()
    model.initialize(opt)
    model.set_input(data)
    model.forward()

    # .items() instead of Py2-only .iteritems(). Catch only the expected
    # failure mode (entries without a .size() method) instead of a bare except,
    # which would also swallow KeyboardInterrupt and real bugs.
    for k, v in model.output.items():
        try:
            print('%s: %s' % (k, v.size()))
        except AttributeError:
            print('%s' % k)

    visuals = model.get_current_visuals(mode='input')
    visualizer.visualize_image(1, subset='input', visuals=visuals)
############################################
# Load the options saved at training time with this checkpoint and overwrite
# the current (test-time) options with them, except for run-specific fields
# that must keep their test-time values.
opt = TestPoseParsingOptions().parse()
train_opt = io.load_json(os.path.join('checkpoints', opt.id, 'train_opt.json'))
preserved_opt = {'gpu_ids', 'is_train'}
# .items() instead of Py2-only .iteritems().
# NOTE(review): `k in opt` relies on the options object supporting
# __contains__ — confirm against the Options base class.
for k, v in train_opt.items():
    if k in opt and (k not in preserved_opt):
        setattr(opt, k, v)
# create model
model = PoseParsingModel()
model.initialize(opt)
# save terminal order line (the exact command line used to launch this run)
io.save_str_list([' '.join(sys.argv)], os.path.join(model.save_dir, 'order_line.txt'))
# create data loader
val_loader = CreateDataLoader(opt, split='test')
# create visualizer
visualizer = GANVisualizer_V3(opt)

############################################
# Visualize
############################################
# Visualize the first opt.nvis samples of the test split as image grids.
if opt.nvis > 0:
    print('visualizing first %d samples' % opt.nvis)
    # Number of batches needed to cover opt.nvis samples (ceiling division;
    # the 1.0* keeps the division float under Python 2).
    num_vis_batch = int(np.ceil(1.0*opt.nvis/opt.batch_size))
    visuals = None
    for i, data in enumerate(val_loader):
        if i == num_vis_batch:
            break
        model.set_input(data)
        model.test(compute_loss=False)  # inference-only forward pass
        v = model.get_current_visuals()
        # NOTE(review): this excerpt is truncated here — the accumulation of
        # `v` into `visuals` continues beyond the visible lines.
        if visuals is None:
# Example 3 (score: 0)
def compare_segment():
    '''
    Compare segment result of two methods:
    1. segment model used in FashionGAN (Be Your Own Prada: Fashion Synthesis with Structural Coherence, ICCV 2017)
    2. segment model used in VITON (VITON: An Image-based Virtual Try-on Network, CVPR 2018)
        - Look into Person: Joint Body Parsing & Pose Estimation Network and A new Benchmark, T-PAMI 2018

    Loads the first 64 test pairs from the DF_Pose split, stacks each pair's
    image together with its three segmentation variants (ATR, LIP, revised
    LIP), and writes a side-by-side comparison grid to
    temp/df_seg/compare_segment_atr-and-lip.jpg. No return value.
    '''
    import torch
    import torchvision
    from misc.visualizer import GANVisualizer_V3

    pair_list = io.load_json(
        'datasets/DF_Pose/Label/pair_split.json')['test'][0:64]
    img_dir = 'datasets/DF_Pose/Img/img_df/'
    seg_atr_dir = 'datasets/DF_Pose/Img/seg_df/'
    seg_lip_dir = 'datasets/DF_Pose/Img/seg-lip_df/'
    seg_lip_rv_dir = 'datasets/DF_Pose/Img/seg-lip_df_revised/'

    # Collect per-pair images and segmentations, keyed by visual name.
    visuals = defaultdict(lambda: [])
    for (id_1, id_2) in pair_list:
        visuals['img_1'].append(imageio.imread(img_dir + id_1 + '.jpg'))
        visuals['seg_atr_1'].append(
            imageio.imread(seg_atr_dir + id_1 + '.bmp'))
        visuals['seg_lip_1'].append(
            imageio.imread(seg_lip_dir + id_1 + '.bmp'))
        visuals['seg_lip_rv_1'].append(
            imageio.imread(seg_lip_rv_dir + id_1 + '.bmp'))

        visuals['img_2'].append(imageio.imread(img_dir + id_2 + '.jpg'))
        visuals['seg_atr_2'].append(
            imageio.imread(seg_atr_dir + id_2 + '.bmp'))
        visuals['seg_lip_2'].append(
            imageio.imread(seg_lip_dir + id_2 + '.bmp'))
        visuals['seg_lip_rv_2'].append(
            imageio.imread(seg_lip_rv_dir + id_2 + '.bmp'))

    # .items() instead of Py2-only .iteritems() (works on both Py2 and Py3).
    visuals = {k: np.stack(v, axis=0) for k, v in visuals.items()}
    # RGB images: NHWC uint8 -> NCHW float scaled to [-1, 1], tagged 'rgb'.
    for name in ['img_1', 'img_2']:
        visuals[name] = (torch.Tensor(visuals[name].transpose(
            0, 3, 1, 2)).div_(127.5).sub_(1), 'rgb')
    # Segmentation maps: insert a singleton channel axis, tagged 'seg'.
    for name in [
            'seg_atr_1', 'seg_atr_2', 'seg_lip_1', 'seg_lip_2', 'seg_lip_rv_1',
            'seg_lip_rv_2'
    ]:
        visuals[name] = (torch.Tensor(visuals[name][:,
                                                    np.newaxis, :, :]), 'seg')

    # Fix the column order of the merged comparison grid.
    visuals_ordered = OrderedDict()
    for k in [
            'img_1', 'seg_atr_1', 'seg_lip_1', 'seg_lip_rv_1', 'img_2',
            'seg_atr_2', 'seg_lip_2', 'seg_lip_rv_2'
    ]:
        visuals_ordered[k] = visuals[k]
    imgs, vis_list = GANVisualizer_V3.merge_visual(visuals_ordered)
    print(vis_list)
    output_dir = 'temp/df_seg/'
    io.mkdir_if_missing(output_dir)
    torchvision.utils.save_image(imgs,
                                 output_dir +
                                 'compare_segment_atr-and-lip.jpg',
                                 nrow=8,
                                 normalize=True)