Example #1
def create_split():
    '''
    Create split following the original partition
    '''

    split_list = io.load_str_list(ca_root + 'Eval/list_eval_partition.txt')[2:]
    split = {'train': [], 'val': [], 'test': []}
    samples = io.load_json(design_root + 'Label/ca_samples.json')
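    # map each image path (from the 'img' component onward) to its sample id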
    img2id = {
        s['img_path_org'][s['img_path_org'].find('img'):]: s_id
        for s_id, s in samples.items()
    }

    for s in split_list:
        img_path, status = s.split()
        s_id = img2id[img_path]
        split[status].append(s_id)

    io.mkdir_if_missing(design_root + 'Split')
    io.save_json(split, design_root + 'Split/ca_split.json')

    print('create split')
    for status in ['train', 'val', 'test']:
        print('%s: %d' % (status, len(split[status])))

    split_trainval = {
        'train': split['train'] + split['val'],
        'test': split['test']
    }
    io.save_json(split_trainval, design_root + 'Split/ca_split_trainval.json')
Example #2
def create_cloth_edge_map():
    '''
    Create an edge map that only contains cloth edges (inside the cloth mask).
    '''
    # config
    mask_dilate = 5

    seg_dir = design_root + 'Img/seg_ca_syn_256/'
    edge_dir = design_root + 'Img/edge_ca_256/'
    output_dir = design_root + 'Img/edge_ca_256_cloth/'
    io.mkdir_if_missing(output_dir)

    split = io.load_json(design_root +
                         'Split/ca_gan_split_trainval_upper.json')
    id_list = split['train'] + split['test']

    for i, s_id in enumerate(id_list):
        print('%d/%d' % (i, len(id_list)))
        seg_map = image.imread(seg_dir + s_id + '.bmp', 'grayscale')
        edge_map = image.imread(edge_dir + s_id + '.jpg', 'grayscale')
        assert seg_map.shape == edge_map.shape
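        # segmentation labels 3 (upper-body clothes) and 4 (lower-body clothes) form the cloth mask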
        mask = ((seg_map == 3) | (seg_map == 4)).astype(np.uint8)
        mask = cv2.dilate(mask, kernel=np.ones((mask_dilate, mask_dilate)))
        edge_map_cloth = edge_map * mask
        image.imwrite(edge_map_cloth, output_dir + s_id + '.jpg')
Example #3
def visualize_samples():

    num_sample = 10
    dir_out = 'temp/attr_example'

    io.mkdir_if_missing(dir_out)
    samples = io.load_json(design_root + 'Label/ca_samples.json')
    attr_label = io.load_data(design_root + 'Label/ca_attr_label.pkl')
    attr_entry = io.load_json(design_root + 'Label/attr_entry.json')

    id_set = set(samples.keys())

    for i, att in enumerate(attr_entry):
        print('attribute %d / %d: %s' % (i, len(attr_entry), att['entry']))
        dir_att = os.path.join(dir_out, att['entry'])
        io.mkdir_if_missing(dir_att)
        pos_id_list = [
            s_id for s_id, label in attr_label.items() if label[i] == 1
        ]
        np.random.shuffle(pos_id_list)
        for s_id in pos_id_list[0:num_sample]:
            fn_src = samples[s_id]['img_path']
            fn_tar = os.path.join(dir_att, 'pos_' + s_id + '.jpg')
            shutil.copyfile(fn_src, fn_tar)

        neg_id_list = list(id_set - set(pos_id_list))
        np.random.shuffle(neg_id_list)
        for s_id in neg_id_list[0:num_sample]:
            fn_src = samples[s_id]['img_path']
            fn_tar = os.path.join(dir_att, 'neg_' + s_id + '.jpg')
            shutil.copyfile(fn_src, fn_tar)
Example #4
def create_silhouette():
    # DeepFashion
    #smpl_pred_dir = 'datasets/DF_Pose/3d/hmr_dfm_v2/pred/'
    #output_dir = 'datasets/DF_Pose/Img/silhouette24/'
    #image_split = io.load_json('datasets/DF_Pose/Label/image_split_dfm.json')

    # Market-1501
    smpl_pred_dir = 'datasets/market1501/3d/hmr/pred/'
    output_dir = 'datasets/market1501/Images/silhouette24/'
    image_split = io.load_json('datasets/market1501/Label/image_split.json')

    faces = np.load('scripts/3d/smpl_faces.npy')
    vert2part = io.load_json('scripts/3d/smpl_vert_to_bodypart.json')

    def _func(face_id):
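        # 4294967295 (0xFFFFFFFF) marks pixels covered by no SMPL face (background)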
        if face_id == 4294967295:
            return 0
        else:
            verts = faces[face_id]
            part_id = vert2part[verts[0]] + 1
            return part_id

    _vfunc = np.vectorize(_func)

    io.mkdir_if_missing(output_dir)
    id_list = image_split['train'] + image_split['test']
    for sid in tqdm.tqdm(id_list):
        pred = io.load_data(smpl_pred_dir + '%s.pkl' % sid)
        vis = pred['visibility']
        silh = _vfunc(vis).astype(np.uint8)
        silh = cv2.medianBlur(silh, 5)
        imageio.imwrite(output_dir + '%s.bmp' % sid, silh)
Example #5
    def visualize_image_matrix(self,
                               imgs,
                               imgs_title=None,
                               label='default',
                               vis_dir='vis'):
        '''
        Input:
            imgs (tensor): image matrix, tensor of size n_row*n_col*C*H*W
            imgs_title (tensor): title images, tensor of size n*C*H*W (must have n==n_row==n_col)
            label (str): output filename

        '''
        vis_dir = os.path.join('checkpoints', self.opt.id, vis_dir)
        io.mkdir_if_missing(vis_dir)

        n_row, n_col, c, h, w = imgs.size()
        if imgs_title is not None:
            assert imgs_title.size(0) == n_row == n_col
            # insert title images at the head of each row
            imgs = torch.cat((imgs_title.view(n_row, 1, c, h, w), imgs), 1)
            # add a title row
            img_blank = torch.zeros([1] + list(imgs_title.size()[1::]))
            imgs_title = torch.cat((img_blank, imgs_title), 0)
            imgs = torch.cat((imgs_title.view(1, n_col + 1, c, h, w), imgs), 0)

            n_col += 1
            n_row += 1

        imgs = imgs.view(n_row * n_col, c, h, w)
        fn_img = os.path.join(vis_dir, label + '.jpg')
        torchvision.utils.save_image(imgs, fn_img, nrow=n_col, normalize=True)
Example #6
def keypoint_guided_tps():

    num_sample = 64
    pair_list = io.load_json(
        'datasets/DF_Pose/Label/pair_split.json')['test'][0:num_sample]
    pose_label = io.load_data('datasets/DF_Pose/Label/pose_label.pkl')
    image_dir = 'datasets/DF_Pose/Img/img_df/'
    seg_dir = 'datasets/DF_Pose/Img/seg-lip_df_revised/'
    output_dir = 'temp/patch_matching/output/tps_keypoint/'
    io.mkdir_if_missing(output_dir)
    tps = cv2.createThinPlateSplineShapeTransformer()

    for i, (id_1, id_2) in enumerate(tqdm.tqdm(pair_list)):
        kp_1 = np.array(pose_label[id_1][1:14],
                        dtype=np.float64).reshape(1, -1, 2)
        kp_2 = np.array(pose_label[id_2][1:14],
                        dtype=np.float64).reshape(1, -1, 2)
        kp_matches = []
        for j in range(kp_1.shape[1]):
            if (kp_1[0, j] >= 0).all() and (kp_2[0, j] >= 0).all():
                kp_matches.append(cv2.DMatch(j, j, 0))
        if len(kp_matches) == 0:
            continue

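        # fit a thin-plate-spline to the matched keypoints, then warp img_1 toward the pose of id_2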
        tps.estimateTransformation(kp_2, kp_1, kp_matches)
        img_1 = cv2.imread(image_dir + id_1 + '.jpg')
        img_2 = cv2.imread(image_dir + id_2 + '.jpg')

        img_w = tps.warpImage(img_1)
        seg = cv2.imread(seg_dir + id_2 + '.bmp', cv2.IMREAD_GRAYSCALE)
        mask = ((seg == 3) | (seg == 7)).astype(img_w.dtype)[:, :, np.newaxis]
        img_out = img_w * mask + img_2 * (1 - mask)

        cv2.imwrite(output_dir + '%d_%s_%s.jpg' % (i, id_1, id_2), img_out)
        cv2.imwrite(output_dir + 'w%d_%s_%s.jpg' % (i, id_1, id_2), img_w)
Example #7
def test_affine_augmentation():
    img = image.imread(
        'datasets/DeepFashion/Fashion_design/Img/img_ca_256/ca_9.jpg')
    assert img is not None

    output_dir = 'temp/affine_augmentation'
    io.mkdir_if_missing(output_dir)

    # config
    scale = [0.05, 0.1, 0.15]
    num_per_scale = 10

    w, h = img.shape[1], img.shape[0]
    keypoint_src = np.array([[0, 0], [w, 0], [0, h]], dtype=np.float32)
    for s in scale:
        for i in range(num_per_scale):
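            # jitter the three anchor points by up to ±s of the image size to define a random affine warp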
            offset = (np.random.rand(3, 2) * 2 - 1) * np.array([w, h]) * s
            offset = offset.astype(np.float32)
            keypoint_dst = keypoint_src + offset
            m = cv2.getAffineTransform(keypoint_src, keypoint_dst)
            img_trans = cv2.warpAffine(img,
                                       m,
                                       dsize=(w, h),
                                       flags=cv2.INTER_LINEAR,
                                       borderMode=cv2.BORDER_REPLICATE)
            img_trans = img_trans * 0.8 + img * 0.2
            image.imwrite(
                img_trans, os.path.join(output_dir,
                                        'affine_%f_%d.jpg' % (s, i)))
Example #8
def create_color_map():
    #### test
    img = image.imread(
        'datasets/DeepFashion/Fashion_design/Img/img_ca_256/ca_9.jpg')
    assert img is not None

    output_dir = 'temp/color_map_generation_test'
    io.mkdir_if_missing(output_dir)
    # color map by gaussian blur
    kernel_size = 21
    for sigma in [1, 2, 5, 10, 20]:
        img_blur = cv2.GaussianBlur(img, (kernel_size, kernel_size), sigma)
        image.imwrite(
            img_blur,
            os.path.join(output_dir,
                         'gaussian_%d_%f.jpg' % (kernel_size, sigma)))
    # color map by downsampling
    for scale in [2, 4, 8, 16, 32]:
        w, h = img.shape[1], img.shape[0]
        dw, dh = w // scale, h // scale
        img_blur = cv2.resize(cv2.resize(img, (dw, dh),
                                         interpolation=cv2.INTER_LINEAR),
                              (w, h),
                              interpolation=cv2.INTER_LINEAR)
        image.imwrite(img_blur,
                      os.path.join(output_dir, 'downsample_%d.jpg' % scale))
Example #9
def visualize_seg_map():

    num_sample = 1000
    output_dir = 'temp/seg_map'
    io.mkdir_if_missing(output_dir)

    samples = io.load_json(design_root + 'Label/ca_samples.json')
    split = io.load_json(design_root + 'Split/ca_gan_split_trainval.json')
    id_list = split['train'] + split['test']
    id_list = [s_id for s_id in id_list if samples[s_id]['cloth_type'] == 3]

    seg_dir_list = [
        design_root + 'Img/seg_ca_256/', design_root + 'Img/seg_ca_syn_256/'
    ]

    for i, s_id in enumerate(id_list[0:num_sample]):
        s = samples[s_id]
        img = image.imread(s['img_path'])
        imgs = [img]
        for seg_dir in seg_dir_list:
            seg = image.imread(seg_dir + s_id + '.bmp') * 20
            mask = img * (seg > 0).astype(np.float32)
            imgs += [seg, mask]

        img = image.stitch(imgs, 0)
        image.imwrite(img, os.path.join(output_dir, s_id + '.jpg'))
        print(i)
Example #10
def revise_coat_label():
    '''
    Revise the segmentation labels of coat (7) and upper-body (3).
    '''
    import cv2

    img_dir = 'datasets/DF_Pose/Img/img_df/'
    seg_dir = 'datasets/DF_Pose/Img/seg-lip_df/'
    output_dir = 'datasets/DF_Pose/Img/seg-lip_df_revised/'
    split = io.load_json('datasets/DF_Pose/Label/split.json')
    id_list = split['train'] + split['test']
    pid_to_sids = defaultdict(list)
    for sid in id_list:
        pid = sid[0:5]
        pid_to_sids[pid].append(sid)
    print('find %d person ids' % len(pid_to_sids))

    n_revised = 0
    io.mkdir_if_missing(output_dir)
    for i, (pid, sids) in enumerate(pid_to_sids.items()):
        seg_0 = cv2.imread(seg_dir + pid + '_1.bmp',
                           cv2.IMREAD_GRAYSCALE)  # try to load frontal image
        if (seg_0 is not None) and (7 in seg_0) and (3 in seg_0):
            n_revised += 1
            img_0 = cv2.imread(img_dir + pid + '_1.jpg')
            mask_u = (seg_0 == 3).astype(np.uint8)
            mask_c = (seg_0 == 7).astype(np.uint8)
            hist_u = cv2.calcHist([img_0], [0, 1, 2], mask_u, [8] * 3,
                                  [0, 256] * 3)
            hist_c = cv2.calcHist([img_0], [0, 1, 2], mask_c, [8] * 3,
                                  [0, 256] * 3)
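            # 3D color histograms (8 bins per channel) of the upper-body (3) and coat (7) regions in the frontal image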
            for sid in sids:
                if sid == pid + '_1':
                    shutil.copyfile(seg_dir + sid + '.bmp',
                                    output_dir + sid + '.bmp')
                else:
                    seg_i = cv2.imread(seg_dir + sid + '.bmp',
                                       cv2.IMREAD_GRAYSCALE)
                    img_i = cv2.imread(img_dir + sid + '.jpg')
                    mask_u_i = (seg_i == 3).astype(np.uint8)
                    mask_c_i = (seg_i == 7).astype(np.uint8)
                    for mask_i in [mask_u_i, mask_c_i]:
                        if mask_i.any():
                            hist_i = cv2.calcHist([img_i], [0, 1, 2], mask_i,
                                                  [8] * 3, [0, 256] * 3)
                            corr_u = cv2.compareHist(hist_i, hist_u,
                                                     cv2.HISTCMP_CORREL)
                            corr_c = cv2.compareHist(hist_i, hist_c,
                                                     cv2.HISTCMP_CORREL)
                            # relabel the region as whichever reference its
                            # colors match better (use a boolean mask, not
                            # fancy integer indexing)
                            if corr_u >= corr_c:
                                seg_i[mask_i.astype(bool)] = 3
                            else:
                                seg_i[mask_i.astype(bool)] = 7
                    cv2.imwrite(output_dir + sid + '.bmp', seg_i)
        else:
            for sid in sids:
                shutil.copyfile(seg_dir + sid + '.bmp',
                                output_dir + sid + '.bmp')
        print('%d / %d (%d revised)' % (i, len(pid_to_sids), n_revised))
Example #11
    def visualize_results(self, visuals, filename):
        io.mkdir_if_missing(os.path.dirname(filename))
        imgs, vis_item_list = merge_visual(visuals)
        torchvision.utils.save_image(imgs,
                                     filename,
                                     nrow=len(visuals),
                                     normalize=True)
        fn_list = os.path.join(os.path.dirname(filename), 'vis_item_list.txt')
        io.save_str_list(vis_item_list, fn_list)
Example #12
    def visualize_image(self, epoch, subset, visuals):
        opt = self.opt
        vis_dir = os.path.join('checkpoints', opt.id, 'vis')
        io.mkdir_if_missing(vis_dir)
        print('[%s] visualizing %s images' % (opt.id, subset))

        # post-process
        if 'seg_map' in visuals:
            visuals['seg_map'] = self._seg_map_to_img(visuals['seg_map'])
        if 'landmark_heatmap' in visuals:
            visuals['landmark_heatmap'] = visuals['landmark_heatmap'].max(
                dim=1, keepdim=True)[0].expand_as(visuals['img_real'])
        if 'edge_map' in visuals:
            visuals['edge_map'] = visuals['edge_map'].expand_as(
                visuals['img_real'])
        if 'seg_mask_aug' in visuals:
            visuals['seg_mask_aug'] = visuals['seg_mask_aug'][:, 1::].sum(
                dim=1, keepdim=True).expand_as(visuals['img_real'])
        if 'edge_map_aug' in visuals:
            visuals['edge_map_aug'] = visuals['edge_map_aug'].expand_as(
                visuals['img_real'])
        if 'color_map' in visuals and visuals['color_map'].size(1) == 6:
            visuals['color_map'] = visuals['color_map'][:, 0:3] + visuals[
                'color_map'][:, 3:6]
        if 'color_map_aug' in visuals and visuals['color_map_aug'].size(
                1) == 6:
            visuals['color_map_aug'] = visuals[
                'color_map_aug'][:, 0:3] + visuals['color_map_aug'][:, 3:6]

        # display
        num_vis = min(opt.max_n_vis, visuals['img_real'].size(0))
        item_list = [
            'img_real',
            'img_real_raw',
            'img_fake',
            'img_fake_raw',
            'seg_map',
            'edge_map',
            'color_map',
            'landmark_heatmap',
            'seg_mask_aug',
            'edge_map_aug',
            'color_map_aug',
        ]

        imgs = [
            visuals[item_name] for item_name in item_list
            if item_name in visuals
        ]
        imgs = torch.stack(imgs, dim=1)[0:num_vis]
        imgs = imgs.view(
            imgs.size(0) * imgs.size(1), imgs.size(2), imgs.size(3),
            imgs.size(4))
        nrow = int(imgs.size(0) / num_vis)
        fn_img = os.path.join(vis_dir, '%s_epoch%d.jpg' % (subset, epoch))
        torchvision.utils.save_image(imgs, fn_img, nrow=nrow, normalize=True)
Example #13
def resize_and_pad():
    '''
    Resize the image so that its longer side equals new_size, then pad it to
    the size [new_size, new_size]. Create the new pose label at the same time.
    '''

    # config
    new_size = 256

    img_root = zalando_root + 'Img/img_zalando/'
    output_dir = zalando_root + 'Img/img_zalando_%d/' % new_size
    split = io.load_json(zalando_root + 'Split/zalando_split.json')
    pose_label = io.load_data(zalando_root + 'Label/zalando_pose_label.pkl')

    io.mkdir_if_missing(output_dir)
    id_list = split['train'] + split['test']
    # id_list = id_list[0:10]
    new_pose_label = {}

    for i, s_id in enumerate(id_list):
        print('%d / %d' % (i, len(id_list)))
        # resize image
        img = cv2.imread(img_root + s_id + '_0.jpg')
        w, h = img.shape[1], img.shape[0]
        if w < h:
            top = 0
            bottom = 0
            left = (h-w)//2
            right = h-w-left
            ratio = new_size/h
        else:
            top = (w-h)//2
            bottom = w-h-top
            left = 0
            right = 0
            ratio = new_size/w

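        # pad to a square with replicated borders, then scale uniformly; keypoints transform as (p + [left, top]) * ratio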
        img = cv2.copyMakeBorder(img, top, bottom, left, right, borderType=cv2.BORDER_REPLICATE)
        img = cv2.resize(img, dsize=(new_size, new_size), interpolation=cv2.INTER_LINEAR)
        cv2.imwrite(output_dir + s_id + '_0.jpg', img)

        # resize clothing image
        img1 = cv2.imread(img_root + s_id + '_1.jpg')
        if not (img1.shape[0] == h and img1.shape[1] == w):
            img1 = cv2.resize(img1, dsize=(w,h))
        img1 = cv2.copyMakeBorder(img1, top, bottom, left, right, borderType=cv2.BORDER_REPLICATE)
        img1 = cv2.resize(img1, dsize=(new_size, new_size), interpolation=cv2.INTER_LINEAR)
        cv2.imwrite(output_dir + s_id + '_1.jpg', img1)

        # modify pose label
        pose = pose_label[s_id]
        new_pose = [[(p[0]+left)*ratio, (p[1]+top)*ratio] if p != [-1,-1] else [-1,-1] for p in pose]
        new_pose_label[s_id] = new_pose

    io.save_data(new_pose_label, zalando_root + 'Label/zalando_pose_label_%d.pkl' % new_size)
Example #14
    def save(self, fn=None):
        if self.opt is None:
            raise Exception("parse options before saving!")
        if fn is None:
            expr_dir = os.path.join('checkpoints', self.opt.id)
            io.mkdir_if_missing(expr_dir)
            if self.opt.is_train:
                fn = os.path.join(expr_dir, 'train_opt.json')
            else:
                fn = os.path.join(expr_dir, 'test_opt.json')
        io.save_json(vars(self.opt), fn)
Example #15
def show_patch():
    from options.pose_transfer_options import TrainPoseTransferOptions
    from data.data_loader import CreateDataLoader
    import imageio
    import cv2

    opt = TrainPoseTransferOptions().parse()

    # output_dir = 'temp/visualize_patch/%d/' % opt.patch_size
    output_dir = 'temp/visualize_patch/test/'
    io.mkdir_if_missing(output_dir)

    loader = iter(CreateDataLoader(opt, 'test'))
    model = TwoStagePoseTransferModel()
    model.initialize(opt)
    data = next(loader)
    model.set_input(data)

    patch_1 = model.get_patch(model.input['img_1'], model.input['joint_c_1'],
                              opt.patch_size, opt.patch_indices)
    patch_2 = model.get_patch(model.input['img_2'], model.input['joint_c_2'],
                              opt.patch_size, opt.patch_indices)
    images = [model.input['img_1'], model.input['img_2']]
    patches = [patch_1, patch_2]
    joints = [model.input['joint_c_1'], model.input['joint_c_2']]
    id_list = model.input['id']

    def _tensor_to_numpy(img_t):
        img = img_t.cpu().detach().numpy().transpose(1, 2, 0)
        img = (img * 127.5 + 127.5).clip(0, 255).astype(np.uint8)
        return img

    for idx in range(opt.batch_size):
        for i in range(2):
            fn_i = output_dir + '%d_%d.jpg' % (idx, i)
            img = _tensor_to_numpy(images[i][idx])
            for j, joint_idx in enumerate(opt.patch_indices):
                fn_p = output_dir + '%d_%d_%d.jpg' % (idx, i, joint_idx)
                p = _tensor_to_numpy(patches[i][idx, j])
                imageio.imwrite(fn_p, p)

                x_c, y_c = [int(v) for v in joints[i][idx, joint_idx]]
                if x_c > 0 and y_c > 0:
                    x_1, y_1 = x_c - opt.patch_size // 2, y_c - opt.patch_size // 2
                    x_2, y_2 = x_c + opt.patch_size // 2, y_c + opt.patch_size // 2
                    cv2.rectangle(img, (x_1, y_1), (x_2, y_2),
                                  color=(0, 255, 0))
                    cv2.putText(img, str(joint_idx), (x_c, y_c),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 255, 0))

            imageio.imwrite(fn_i, img)
Example #16
def create_inner_edge_map():
    '''
    extract the edges inside the clothing regions
    '''

    # config
    kernel_size = 7
    threshold = 0

    split = io.load_json(design_root + 'Split/ca_gan_split_trainval.json')
    id_list = split['train'] + split['test']
    edge_root = design_root + 'Img/edge_ca_256'
    seg_root = design_root + 'Img/seg_ca_256'
    output_dir = design_root + 'Img/edge_ca_256_inner'
    io.mkdir_if_missing(output_dir)

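    # build a diamond-shaped (L1-ball) structuring element of radius (kernel_size - 1) // 2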
    kernel = np.zeros((kernel_size, kernel_size), np.uint8)
    k = (kernel_size - 1) // 2
    for i in range(kernel_size):
        for j in range(kernel_size):
            if np.abs(i - k) + np.abs(j - k) <= k:
                kernel[i, j] = 1

    for i, s_id in enumerate(id_list):
        edge = image.imread(os.path.join(edge_root, s_id + '.jpg'),
                            'grayscale')
        seg = image.imread(os.path.join(seg_root, s_id + '.bmp'), 'grayscale')
        mask_upper = cv2.erode((seg == 3).astype(np.uint8), kernel)
        mask_lower = cv2.erode((seg == 4).astype(np.uint8), kernel)
        mask = mask_upper | mask_lower
        edge_inner = edge * mask
        edge_inner = (edge_inner >= threshold).astype(np.uint8) * edge_inner
        image.imwrite(edge_inner, os.path.join(output_dir, s_id + '.jpg'))
        print('extracting inner edge %d / %d' % (i, len(id_list)))

    # create labels
    edge_paths = {
        s_id: os.path.join(output_dir, s_id + '.jpg')
        for s_id in id_list
    }
    split_debug = io.load_json(design_root + 'Split/debugca_gan_split.json')
    edge_paths_debug = {
        s_id: p
        for s_id, p in edge_paths.items()
        if s_id in split_debug['train'] + split_debug['test']
    }

    io.save_json(edge_paths, design_root + 'Label/ca_edge_inner_paths.json')
    io.save_json(edge_paths_debug,
                 design_root + 'Label/debugca_edge_inner_paths.json')
Example #17
def extract_feature(opt, save_feat=True):

    fn_feat = os.path.join('checkpoints', opt.id, 'feat',
                           '%s.pkl' % opt.which_epoch)

    if os.path.isfile(fn_feat):
        print('loading feature from %s' % fn_feat)
        feat_data = io.load_data(fn_feat)
        # feat_data['feat_train']
        # feat_data['feat_test']
        # feat_data['id_list_train']
        # feat_data['id_list_test']

    else:
        # create model
        model = AttributeEncoder()
        model.initialize(opt)
        model.eval()
        feat_data = {
            'feat_train': [],
            'feat_test': [],
            'id_list_train': [],
            'id_list_test': []
        }

        for split in ['train', 'test']:
            loader = CreateDataLoader(opt, split=split)

            for i, data in enumerate(loader):
                model.set_input(data)
                model.extract_feat()
                feat_data['feat_%s' %
                          split].append(model.output['feat'].data.cpu().numpy(
                          ))  # size: batch_size * feat_size
                feat_data['id_list_%s' %
                          split] += data['id']  # list of length batch_size
                print('\r[%s] extract feature from %s samples %d/%d' %
                      (opt.id, split, i, len(loader)),
                      end='')
            print('\n')
            feat_data['feat_%s' % split] = np.vstack(feat_data['feat_%s' %
                                                               split])

        if save_feat:
            io.mkdir_if_missing(os.path.join('checkpoints', opt.id, 'feat'))
            io.save_data(feat_data, fn_feat)

    return feat_data
Example #18
def create_split():
    '''
    Create split file, following the partition used in the VITON paper.
    '''
    train_pairs = io.load_str_list(zalando_root + 'Source/viton_train_pairs.txt')
    test_pairs = io.load_str_list(zalando_root + 'Source/viton_test_pairs.txt')

    split = {}

    for subset, pairs in [('train', train_pairs), ('test', test_pairs)]:
        id_list = [p.split()[0][0:6] for p in pairs]
        split[subset] = id_list

    split['debug'] = split['train'][0:32]

    io.mkdir_if_missing(zalando_root + 'Split')
    io.save_json(split, zalando_root + 'Split/zalando_split.json')
Example #19
def search_image(query_fn_list, gallery_fn_list, output_dir, method='cos'):
    num_cand = 20
    cache_image = True

    if method == 'cos':
        func_similarity = similarity_cos
    if cache_image:
        img_g_dict = dict()

    io.mkdir_if_missing(output_dir)
    result = []
    for idx,fn_q in enumerate(query_fn_list):
        print('searching sample %d/%d' % (idx, len(query_fn_list)))
        io.mkdir_if_missing(output_dir+'/%d/'%idx)
        img_q = imageio.imread(fn_q)
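        # cand_list is kept sorted by score (highest first); the sentinel entry guarantees the insertion scan finds a slot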
        cand_list = [(None, None, -1)]
        for fn_g in tqdm.tqdm(gallery_fn_list):
            if cache_image:
                if fn_g in img_g_dict:
                    img_g = img_g_dict[fn_g]
                else:
                    img_g = imageio.imread(fn_g)
                    img_g_dict[fn_g] = img_g
            else:
                img_g = imageio.imread(fn_g)
            score = func_similarity(img_q, img_g)
            i_insert = -1
            for i in range(len(cand_list)):
                if score > cand_list[i][2]:
                    i_insert = i
                    break
            if i_insert >= 0:
                cand_list.insert(i_insert, (fn_g, img_g, score))
                if len(cand_list) > num_cand:
                    cand_list = cand_list[0:num_cand]

        imageio.imwrite(output_dir+'/%d/query.jpg'%idx, img_q)
        for i, (fn_g, img_g, score) in enumerate(cand_list):
            if fn_g:
                imageio.imwrite(output_dir + '/%d/cand_%d.jpg'%(idx, i), img_g)
        output_info = [fn_q]
        output_info += ['%d %f %s' % (i, score, fn) for i, (fn, _, score) in enumerate(cand_list)]
        io.save_str_list(output_info, output_dir + '/%d/result.txt'%idx)
        result.append('%d %s %s' % (idx, fn_q, cand_list[0][0]))

    io.save_str_list(result, output_dir + '/result.txt')
Example #20
def calc_correspondence_from_smpl():
    '''
    Compute pixel-wise correspondence between an image pair using the SMPL model (http://smpl.is.tue.mpg.de/).
    The SMPL fit is predicted by HMR (https://github.com/akanazawa/hmr), in the following format:
        pred = {
            'id': sid,
            'theta': theta,
            'proc_param': proc_param,
            'verts2d': verts2d,
            'verts_z': verts_z,
            'visibility': visibility, # a map with the same size as img; each pixel stores the index of its corresponding SMPL face (or 4294967295 if it corresponds to no face)
        }
    See '/data2/ynli/human3d/hmr/run_hmr.py' for more details.
    '''

    # num_pair = 64
    # pair_split_fn = 'datasets/DF_Pose/Label/pair_split.json'
    # hmr_pred_dir = 'temp/3d_hmr/hmr_df_openpose/pred/'
    # output_dir = 'temp/3d_hmr/corr/'

    num_pair = -1
    pair_split_fn = 'datasets/DF_Pose/Label/pair_split_dfm.json'
    hmr_pred_dir = 'datasets/DF_Pose/3d/hmr_dfm/pred/'
    output_dir = 'datasets/DF_Pose/3d/hmr_dfm/corr/'

    io.mkdir_if_missing(output_dir)

    # load pair ids
    pairs = io.load_json(pair_split_fn)
    pair_list = pairs['test'] + pairs['train']
    if num_pair > 0:
        pair_list = pair_list[:num_pair]

    for id_1, id_2 in tqdm.tqdm(pair_list):
        pred_1 = io.load_data(hmr_pred_dir + id_1 + '.pkl')
        pred_2 = io.load_data(hmr_pred_dir + id_2 + '.pkl')

        corr_2to1, vis_mask_2 = calc_correspondence_from_smpl_internal(
            pred_2, pred_1)
        flow_util.write_corr(output_dir + '%s_%s.corr' % (id_2, id_1),
                             corr_2to1, vis_mask_2)

        corr_1to2, vis_mask_1 = calc_correspondence_from_smpl_internal(
            pred_1, pred_2)
        flow_util.write_corr(output_dir + '%s_%s.corr' % (id_1, id_2),
                             corr_1to2, vis_mask_1)
Example #21
def pad_image_for_segmentation():
    '''
    Resize and pad images for segmentation (using fashionGAN code).
    Todo: add in-shop version
    '''

    sz_tar = 256
    output_dir = 'datasets/DeepFashion/Fashion_design/Img/img_ca_pad'

    io.mkdir_if_missing(output_dir)
    samples = io.load_json(design_root + 'Label/ca_samples.json')
    split = io.load_json(design_root + 'Split/ca_gan_split_trainval.json')
    id_list = split['train'] + split['test']

    # update landmark and bbox
    lm_label = io.load_data(design_root + 'Label/ca_landmark_label.pkl')
    bbox_label = io.load_data(design_root + 'Label/ca_bbox_label.pkl')
    lm_label_pad = {}
    bbox_label_pad = {}

    io.save_str_list(id_list, os.path.join(output_dir, 'img_ca_pad.txt'))
    for i, s_id in enumerate(id_list):
        img_org = image.imread(samples[s_id]['img_path_org'])
        h, w = img_org.shape[0:2]

        if h > w:
            img = image.resize(img_org, (-1, sz_tar))
            scale = 1. * sz_tar / h
        else:
            img = image.resize(img_org, (sz_tar, -1))
            scale = 1. * sz_tar / w

        # img = image.pad_square(img, sz_tar, padding_value = 255, mode = 'lefttop')
        # image.imwrite(img, os.path.join(output_dir, s_id + '.jpg'))

        bbox_label_pad[s_id] = [c * scale for c in bbox_label[s_id]]
        lm_label_pad[s_id] = []
        for x, y, v in lm_label[s_id]:
            lm_label_pad[s_id].append([x * scale, y * scale, v])

        print('padding image %d / %d' % (i, len(id_list)))

    io.save_data(lm_label_pad,
                 design_root + 'Label/ca_landmark_label_pad_%d.pkl' % sz_tar)
    io.save_data(bbox_label_pad,
                 design_root + 'Label/ca_bbox_label_pad_%d.pkl' % sz_tar)
Example #22
    def visualize_image(self, epoch, subset, visuals):
        opt = self.opt
        vis_dir = os.path.join('checkpoints', opt.id, 'vis')
        io.mkdir_if_missing(vis_dir)
        print('[%s] visualizing %s images' % (opt.id, subset))

        imgs, vis_list = self.merge_visual(visuals,
                                           kword_params={
                                               'shape_with_face':
                                               'shape_with_face' in opt
                                               and opt.shape_with_face
                                           })
        fn_img = os.path.join(vis_dir, '%s_epoch%s.jpg' % (subset, epoch))
        torchvision.utils.save_image(imgs,
                                     fn_img,
                                     nrow=len(visuals),
                                     normalize=True)
        io.save_str_list(vis_list, os.path.join(vis_dir, 'vis_name_list.txt'))
Example #23
def prepro_image(img_dir, output_dir):

    h_out, w_out = [256, 256]
    io.mkdir_if_missing(output_dir)
    for fn in tqdm.tqdm(os.listdir(img_dir)):
        img_org = cv2.imread(img_dir+fn)
        h, w = img_org.shape[0:2]
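        # letterbox: scale so the relatively longer side fits, then center-pad the other side with white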
        if h/h_out > w/w_out:
            w1 = int(w/h*h_out)
            img_rsz = cv2.resize(img_org, (w1, h_out))
            img_out = np.ones((h_out, w_out, 3), dtype=np.uint8)*255
            img_out[:,((w_out-w1)//2):((w_out-w1)//2+w1),:] = img_rsz
        else:
            h1 = int(h/w*w_out)
            img_rsz = cv2.resize(img_org, (w_out, h1))
            img_out = np.ones((h_out, w_out, 3), dtype=np.uint8)*255
            img_out[((h_out-h1)//2):((h_out-h1)//2+h1),:,:] = img_rsz
        cv2.imwrite(output_dir+fn, img_out)
Example #24
def merge_silhouette():
    # for DeepFashion
    #image_split = io.load_json('datasets/DF_Pose/Label/image_split_dfm.json')
    #input_dir = 'datasets/DF_Pose/Img/silhouette24/'
    #output_dir = 'datasets/DF_Pose/Img/silhouette6/'

    # for Market-1501
    image_split = io.load_json('datasets/market1501/Label/image_split.json')
    input_dir = 'datasets/market1501/Images/silhouette24/'
    output_dir = 'datasets/market1501/Images/silhouette6/'

    id_list = image_split['train'] + image_split['test']
    io.mkdir_if_missing(output_dir)

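    # merge the 24 SMPL body-part labels into 6 coarse groups (background stays 0)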
    head = {13, 16}
    torso = {1, 4, 7, 10}
    larm = {14, 17, 19, 21, 23}
    rarm = {15, 18, 20, 22, 24}
    lleg = {2, 5, 8, 11}
    rleg = {3, 6, 9, 12}

    def _func_merge(x):
        if x == 0:
            return 0
        elif x in head:
            return 1
        elif x in torso:
            return 2
        elif x in larm:
            return 3
        elif x in rarm:
            return 4
        elif x in lleg:
            return 5
        elif x in rleg:
            return 6

    _vfunc_merge = np.vectorize(_func_merge, otypes=[np.uint8])

    for sid in tqdm.tqdm(id_list):
        silh = imageio.imread(input_dir + '%s.bmp' % sid)
        silhm = _vfunc_merge(silh)
        imageio.imwrite(output_dir + '%s.bmp' % sid, silhm)
Example #25
def create_seg_input():

    # samples = io.load_json(root + 'Anno/samples_highres.json')
    # in_dir = root + 'Img/img1_highres/'

    samples = io.load_json(root + 'Label/samples.json')
    in_dir = root + 'Img/img1/'

    out_dir = root + 'Img/input/'

    io.mkdir_if_missing(out_dir)

    for img_id, s in samples.items():

        fn_src = in_dir + s['img_path']
        fn_tar = out_dir + '%s.jpg' % img_id

        shutil.copyfile(fn_src, fn_tar)

        print(img_id)
Example #26
def visualize_seg():
    '''
    visualize segmentation
    '''

    num_vis = 100
    img_root = zalando_root + 'Img/img_zalando_256/'
    seg_root = zalando_root + 'Img/seg_zalando_256/'
    output_dir = 'temp/zalando_seg_vis/'
    io.mkdir_if_missing(output_dir)

    split = io.load_json(zalando_root + 'Split/zalando_split.json')
    id_list = split['train'] + split['test']

    for s_id in id_list[0:num_vis]:
        img = cv2.imread(img_root + s_id + '_0.jpg')
        seg = cv2.imread(seg_root + s_id + '_0.bmp')        
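        # scale label indices into distinct gray levels so the segments are visible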
        seg = seg * 20
        img = np.concatenate((img, seg), axis=1)
        cv2.imwrite(output_dir + s_id + '.jpg', img)
Example #27
    def parse(self,
              ord_str=None,
              save_to_file=True,
              display=True,
              set_gpu=True):

        if not self.initialized:
            self.initialize()

        if ord_str is None:
            self.opt = self.parser.parse_args()
        else:
            ord_list = ord_str.split()
            self.opt = self.parser.parse_args(ord_list)

        self.auto_set()

        if len(self.opt.gpu_ids) > 0 and set_gpu:
            os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
                [str(i) for i in self.opt.gpu_ids])
            self.opt.gpu_ids = list(range(len(self.opt.gpu_ids)))
            torch.cuda.set_device(0)
        args = vars(self.opt)
        # display options
        if display:
            print('------------ Options -------------')
            for k, v in sorted(args.items()):
                print('%s: %s' % (str(k), str(v)))
            print('-------------- End ----------------')

        # save to disk
        if save_to_file:
            expr_dir = os.path.join('checkpoints', self.opt.id)
            io.mkdir_if_missing(expr_dir)
            if self.opt.is_train:
                fn_out = os.path.join(expr_dir, 'train_opt.json')
            else:
                fn_out = os.path.join(expr_dir, 'test_opt.json')
            io.save_json(args, fn_out)

        return self.opt
Example #28
def create_cloth_edge_map():
    '''
    extract edges inside the cloth region
    '''
    mask_dilate = 5

    split = io.load_json(zalando_root + 'Split/zalando_split.json')
    id_list = split['train'] + split['test']
    edge_dir = zalando_root + 'Img/edge_zalando_256/'
    seg_dir = zalando_root + 'Img/seg_zalando_256/'
    output_dir = zalando_root + 'Img/edge_zalando_256_cloth/'
    io.mkdir_if_missing(output_dir)

    for i, s_id in enumerate(id_list):
        print('%d/%d' % (i, len(id_list)))
        edge = cv2.imread(edge_dir + s_id + '.jpg', cv2.IMREAD_GRAYSCALE)
        seg = cv2.imread(seg_dir + s_id + '.bmp', cv2.IMREAD_GRAYSCALE)
        assert edge.shape == seg.shape
        mask = ((seg == 3) | (seg == 4)).astype(np.uint8)
        mask = cv2.dilate(mask, kernel=np.ones((mask_dilate, mask_dilate)))
        edge_cloth = edge * mask
        cv2.imwrite(output_dir + s_id + '.jpg', edge_cloth)
Example #29
    def visualize_image_matrix(self,
                               imgs,
                               imgs_title_top=None,
                               imgs_title_left=None,
                               label='default',
                               vis_dir='vis'):
        '''
        Input:
            imgs (tensor): image matrix, tensor of size n_row*n_col*C*H*W
            imgs_title_top (tensor): top title images, tensor of size n_col*C*H*W
            imgs_title_left (tensor): left title images, tensor of size n_row*C*H*W
            label (str): output filename

        '''
        vis_dir = os.path.join('checkpoints', self.opt.id, vis_dir)
        io.mkdir_if_missing(vis_dir)

        n_row, n_col, c, h, w = imgs.size()

        if imgs_title_top is not None:
            assert imgs_title_top.size(0) == n_col
            imgs = torch.cat((imgs_title_top.view(1, n_col, c, h, w), imgs), 0)
            n_row += 1
        if imgs_title_left is not None:
            assert imgs_title_left.size(0) in {n_row, n_row - 1}
            if imgs_title_left.size(0) == n_row - 1:
                img_blank = torch.zeros([1] +
                                        list(imgs_title_left.size()[1::]))
                imgs_title_left = torch.cat((img_blank, imgs_title_left), 0)
            imgs = torch.cat((imgs_title_left.view(n_row, 1, c, h, w), imgs),
                             1)
            n_col += 1

        imgs = imgs.view(n_row * n_col, c, h, w)
        fn_img = os.path.join(vis_dir, label + '.jpg')
        torchvision.utils.save_image(imgs, fn_img, nrow=n_col, normalize=True)
Example #30
def visualize_pose():
    '''
    visualize pose label
    '''
    num_vis = 10

    # img_root = zalando_root + 'Img/img_zalando/'
    # pose_label = io.load_data(zalando_root + 'Label/zalando_pose_label.pkl')
    
    img_root = zalando_root + 'Img/img_zalando_256/'
    pose_label = io.load_data(zalando_root + 'Label/zalando_pose_label_256.pkl')

    output_dir = 'temp/zalando_pose_vis/'
    io.mkdir_if_missing(output_dir)

    for i, (s_id, pose) in enumerate(list(pose_label.items())[0:num_vis]):
        print('%d / %d' % (i, num_vis))
        img = cv2.imread(img_root + s_id + '_0.jpg')
        assert img is not None
        for p in pose:
            if p[0] != -1:
                c = (int(p[0]), int(p[1]))
                cv2.circle(img, center=c, radius=5, color=(0, 255, 0), thickness=-1)
        cv2.imwrite(output_dir + s_id + '.jpg', img)