Esempio n. 1
0
 def __init__(self, opt, split='train'):
     '''
     Build the dataset for one split.

     Args:
         opt: options namespace (fn_label, fn_split, img_dir, test_dir,
             crop_size, rescale_size, debug).
         split: one of 'train', 'val', 'test'.
     '''
     assert split in {'train', 'val', 'test'}
     super(Dataset, self).__init__()
     self.opt = opt
     self.split = split

     if split == 'test':
         # test ids are discovered from the image files; no labels available
         self.label = None
         self.image_dir = opt.test_dir
         self.id_list = [fn[0:-4] for fn in os.listdir(self.image_dir)]
     else:
         self.label = io.load_json(opt.fn_label)['label']
         # 'val' reads the 'test' portion of the split file
         split_key = 'train' if split == 'train' else 'test'
         self.id_list = io.load_json(opt.fn_split)[split_key]
         self.image_dir = opt.img_dir

     normalize = transforms.Normalize(mean=[0.5,0.5,0.5], std=[0.5,0.5,0.5])
     if split == 'train':
         # random augmentation for training
         steps = [
             transforms.RandomResizedCrop(opt.crop_size),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             normalize,
         ]
     else:
         # deterministic resize + center crop for evaluation
         steps = [
             transforms.Resize(opt.rescale_size),
             transforms.CenterCrop(opt.crop_size),
             transforms.ToTensor(),
             normalize,
         ]
     self.transform = transforms.Compose(steps)

     if opt.debug:
         # small subset for quick debugging runs
         self.id_list = self.id_list[0:64]
Esempio n. 2
0
def visualize_seg_map():
    '''
    Stitch each image with its segmentation maps and masked images
    (real and synthetic seg) and write the panels to temp/seg_map.
    '''
    num_sample = 1000
    output_dir = 'temp/seg_map'
    io.mkdir_if_missing(output_dir)

    samples = io.load_json(design_root + 'Label/ca_samples.json')
    split = io.load_json(design_root + 'Split/ca_gan_split_trainval.json')
    # keep only samples with cloth_type == 3
    id_list = [
        s_id for s_id in split['train'] + split['test']
        if samples[s_id]['cloth_type'] == 3
    ]

    seg_dir_list = [
        design_root + 'Img/seg_ca_256/', design_root + 'Img/seg_ca_syn_256/'
    ]

    for i, s_id in enumerate(id_list[0:num_sample]):
        sample = samples[s_id]
        img = image.imread(sample['img_path'])
        panels = [img]
        for seg_dir in seg_dir_list:
            # scale segment label ids so regions are visible in the output
            seg = image.imread(seg_dir + s_id + '.bmp') * 20
            mask = img * (seg > 0).astype(np.float)
            panels.extend([seg, mask])

        stitched = image.stitch(panels, 0)
        image.imwrite(stitched, os.path.join(output_dir, s_id + '.jpg'))
        print(i)
Esempio n. 3
0
def visualize_samples():
    '''
    For every attribute entry, copy a handful of positive and negative
    example images into temp/attr_example/<entry>/ for eyeballing.
    '''
    num_sample = 10
    dir_out = 'temp/attr_example'

    io.mkdir_if_missing(dir_out)
    samples = io.load_json(design_root + 'Label/ca_samples.json')
    attr_label = io.load_data(design_root + 'Label/ca_attr_label.pkl')
    attr_entry = io.load_json(design_root + 'Label/attr_entry.json')

    id_set = set(samples.keys())

    for i, att in enumerate(attr_entry):
        print('attribute %d / %d: %s' % (i, len(attr_entry), att['entry']))
        dir_att = os.path.join(dir_out, att['entry'])
        io.mkdir_if_missing(dir_att)

        # ids whose i-th attribute is positive
        pos_id_list = [
            s_id for s_id, label in attr_label.iteritems() if label[i] == 1
        ]
        np.random.shuffle(pos_id_list)
        for s_id in pos_id_list[0:num_sample]:
            shutil.copyfile(samples[s_id]['img_path'],
                            os.path.join(dir_att, 'pos_' + s_id + '.jpg'))

        # everything not positive counts as a negative example
        neg_id_list = list(id_set - set(pos_id_list))
        np.random.shuffle(neg_id_list)
        for s_id in neg_id_list[0:num_sample]:
            shutil.copyfile(samples[s_id]['img_path'],
                            os.path.join(dir_att, 'neg_' + s_id + '.jpg'))
Esempio n. 4
0
def create_test_pair():
    '''
    Build (target, source) test pairs for TPS warping by randomly pairing
    samples that share the same category label.

    Outputs (under design_root + 'Temp/'):
        ca_test_tps_pair.json : {target_id: source_id}
        ca_test_tps_tar.txt   : target id list
        ca_test_tps_src.txt   : source id list (aligned with the tar list)
    '''
    # Bug fix: the original called np.random.rand(0), which draws an empty
    # array and does NOT seed the RNG. Seed for reproducible pairing.
    np.random.seed(0)
    split = io.load_json(design_root +
                         'Split/ca_gan_split_trainval_upper.json')
    cat_label = io.load_data(design_root + 'Label/ca_cat_label.pkl')
    cat_entry = io.load_json(design_root + 'Label/cat_entry.json')

    # group test samples by category label
    cat_to_ids = defaultdict(list)
    for s_id in split['test']:
        cat_to_ids[cat_label[s_id]].append(s_id)

    pair_list = []
    for n, (c, s_list) in enumerate(cat_to_ids.items()):
        print('[%d/%d] %s: %d samples...' %
              (n, len(cat_to_ids), cat_entry[c]['entry'], len(s_list)))
        s_list_org = list(s_list)
        # partial Fisher-Yates shuffle: position i receives a random element
        # from positions (i, len), so each sample is paired with a random
        # sample of the same category
        for i in range(len(s_list) - 1):
            j = np.random.randint(i + 1, len(s_list))
            s_list[i], s_list[j] = s_list[j], s_list[i]
        pair_list += zip(s_list_org, s_list)

    pair_dict = {s_tar: s_src for s_tar, s_src in pair_list}
    io.save_json(pair_dict, design_root + 'Temp/ca_test_tps_pair.json')

    # keys() and values() iterate in matching order on an unmodified dict
    io.save_str_list(list(pair_dict.keys()),
                     design_root + 'Temp/ca_test_tps_tar.txt')
    io.save_str_list(list(pair_dict.values()),
                     design_root + 'Temp/ca_test_tps_src.txt')
Esempio n. 5
0
def create_silhouette():
    '''
    Render 24-part body silhouette maps (labels 0..24, 0 = background) from
    HMR SMPL predictions and save one .bmp per image id.
    '''
    # DeepFashion
    #smpl_pred_dir = 'datasets/DF_Pose/3d/hmr_dfm_v2/pred/'
    #output_dir = 'datasets/DF_Pose/Img/silhouette24/'
    #image_split = io.load_json('datasets/DF_Pose/Label/image_split_dfm.json')

    # Market-1501
    smpl_pred_dir = 'datasets/market1501/3d/hmr/pred/'
    output_dir = 'datasets/market1501/Images/silhouette24/'
    image_split = io.load_json('datasets/market1501/Label/image_split.json')

    # SMPL mesh faces: rows of vertex indices; per-vertex body-part lookup
    faces = np.load('scripts/3d/smpl_faces.npy')
    vert2part = io.load_json('scripts/3d/smpl_vert_to_bodypart.json')

    def _func(face_id):
        # 4294967295 (0xFFFFFFFF) marks pixels where no face is visible
        if face_id == 4294967295:
            return 0
        else:
            verts = faces[face_id]
            # body part of the face's first vertex; +1 reserves 0 for background
            # NOTE(review): vert2part is loaded from json -- if it is a dict its
            # keys are strings, so integer indexing assumes a list; confirm.
            part_id = vert2part[verts[0]] + 1
            return part_id

    # vectorized so it can be applied to the whole visibility map at once
    _vfunc = np.vectorize(_func)

    io.mkdir_if_missing(output_dir)
    id_list = image_split['train'] + image_split['test']
    for sid in tqdm.tqdm(id_list):
        pred = io.load_data(smpl_pred_dir + '%s.pkl' % sid)
        # per-pixel id of the visible SMPL face
        vis = pred['visibility']
        silh = _vfunc(vis).astype(np.uint8)
        # median filter removes isolated mislabeled pixels
        silh = cv2.medianBlur(silh, 5)
        imageio.imwrite(output_dir + '%s.bmp' % sid, silh)
Esempio n. 6
0
    def initialize(self, opt, split):
        '''
        Load attribute-dataset annotations for one split and set up transforms.

        Args:
            opt: options namespace; must provide data_root, fn_sample, fn_label,
                fn_entry, fn_split, fn_landmark, max_dataset_size, n_attr and
                image_normalize.
            split: split name used as a key into the split json.
        '''
        self.opt = opt
        self.root = opt.data_root
        self.split = split

        print('loading data ...')
        samples = io.load_json(os.path.join(opt.data_root, opt.fn_sample))
        attr_label = io.load_data(os.path.join(opt.data_root, opt.fn_label))
        attr_entry = io.load_json(os.path.join(opt.data_root, opt.fn_entry))
        attr_split = io.load_json(os.path.join(opt.data_root, opt.fn_split))
        lm_label = io.load_data(os.path.join(opt.data_root, opt.fn_landmark))

        self.id_list = attr_split[split]
        # optionally truncate the dataset to max_dataset_size ids
        if opt.max_dataset_size != float('inf'):
            self.id_list = self.id_list[0:opt.max_dataset_size]
        # re-index all annotations by position in id_list
        self.sample_list = [samples[s_id] for s_id in self.id_list]
        self.attr_label_list = [attr_label[s_id] for s_id in self.id_list]
        self.lm_list = [lm_label[s_id] for s_id in self.id_list]
        self.attr_entry = attr_entry

        # check data
        assert len(self.attr_entry) == len(
            self.attr_label_list[0]
        ) == opt.n_attr, 'Attribute number not match!'
        print('dataset created (%d samples)' % len(self))

        # get transform
        self.to_tensor = transforms.ToTensor()

        # normalize with ImageNet statistics, or to [-1, 1] otherwise
        if opt.image_normalize == 'imagenet':
            self.tensor_normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                                         [0.229, 0.224, 0.225])
        else:
            self.tensor_normalize = transforms.Normalize([0.5, 0.5, 0.5],
                                                         [0.5, 0.5, 0.5])
Esempio n. 7
0
 def load_attr_data(self):
     '''Load sample index, attribute labels and attribute entries from disk.'''
     opt = self.opt
     data_root = opt.data_root
     self.samples = io.load_json(os.path.join(data_root, opt.fn_sample))
     self.attr_label = io.load_data(os.path.join(data_root, opt.fn_label))
     self.attr_entry = io.load_json(os.path.join(data_root, opt.fn_entry))
     # flag consumers can check to avoid reloading
     self.data_loaded = True
Esempio n. 8
0
    def initialize(self, opt, split):
        '''
        Load attribute annotations and build the image transform pipeline.

        Args:
            opt: options namespace; must provide data_root, resize_or_crop,
                fine_size, load_size, image_normalize, fn_sample, fn_label,
                fn_entry, fn_split, max_dataset_size, n_attr, joint_cat,
                fn_cat and unmatch.
            split: split name used as a key into the split json; also selects
                train-time augmentation.
        '''
        self.opt = opt
        self.root = opt.data_root

        # get transform
        transform_list = []

        if opt.resize_or_crop == 'resize':
            # only resize image
            transform_list.append(transforms.Resize(opt.fine_size, Image.BICUBIC))

        elif opt.resize_or_crop == 'resize_and_crop':
            # scale and crop
            transform_list.append(transforms.Resize(opt.load_size, Image.BICUBIC))
            if split == 'train':
                # random crop + flip for training augmentation
                transform_list.append(transforms.RandomCrop(opt.fine_size))
                transform_list.append(transforms.RandomHorizontalFlip())
            else:
                transform_list.append(transforms.CenterCrop(opt.fine_size))

        transform_list.append(transforms.ToTensor())

        # normalize with ImageNet statistics, or to [-1, 1] otherwise
        if opt.image_normalize == 'imagenet':
            transform_list.append(transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]))
        else:
            transform_list.append(transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]))

        self.transform = transforms.Compose(transform_list)


        # load sample list
        print('loading data ...')
        samples = io.load_json(os.path.join(opt.data_root, opt.fn_sample))
        attr_label = io.load_data(os.path.join(opt.data_root, opt.fn_label))
        attr_entry = io.load_json(os.path.join(opt.data_root, opt.fn_entry))
        attr_split = io.load_json(os.path.join(opt.data_root, opt.fn_split))

        self.id_list = attr_split[split]
        # optionally truncate the dataset to max_dataset_size ids
        if opt.max_dataset_size != float('inf'):
            self.id_list = self.id_list[0:opt.max_dataset_size]

        self.sample_list = [samples[s_id] for s_id in self.id_list]
        self.attr_label_list = [attr_label[s_id] for s_id in self.id_list]
        self.attr_entry = attr_entry

        # optionally load category labels for joint category training
        if opt.joint_cat:
            cat_label = io.load_data(os.path.join(opt.data_root, opt.fn_cat))
            self.cat_list = [cat_label[s_id] for s_id in self.id_list]

        # shuffle samples so images no longer match their labels (ablation)
        if opt.unmatch:
            np.random.shuffle(self.sample_list)

        # check data
        assert len(self.attr_entry) == len(self.attr_label_list[0]) == opt.n_attr, 'Attribute number not match!'
        print('dataset created (%d samples)' % len(self))
Esempio n. 9
0
def create_inner_edge_map():
    '''
    Extract the edges strictly inside the clothing regions.

    For each sample, erodes the upper/lower cloth masks (so edges at the
    garment boundary are removed), masks the edge map with them, and writes
    the result plus path-index jsons (full and debug subsets).
    '''

    # config
    kernel_size = 7
    threshold = 0

    split = io.load_json(design_root + 'Split/ca_gan_split_trainval.json')
    id_list = split['train'] + split['test']
    edge_root = design_root + 'Img/edge_ca_256'
    seg_root = design_root + 'Img/seg_ca_256'
    output_dir = design_root + 'Img/edge_ca_256_inner'
    io.mkdir_if_missing(output_dir)

    # diamond-shaped (L1-ball) structuring element of radius k
    # NOTE(review): under Python 3 this '/' yields a float k; the <= test
    # below still builds the same kernel, but confirm the intended runtime.
    kernel = np.zeros((kernel_size, kernel_size), np.uint8)
    k = (kernel_size - 1) / 2
    for i in range(kernel_size):
        for j in range(kernel_size):
            if np.abs(i - k) + np.abs(j - k) <= k:
                kernel[i, j] = 1

    for i, s_id in enumerate(id_list):
        edge = image.imread(os.path.join(edge_root, s_id + '.jpg'),
                            'grayscale')
        seg = image.imread(os.path.join(seg_root, s_id + '.bmp'), 'grayscale')
        # erode cloth masks (label 3 = upper, 4 = lower) away from boundaries
        mask_upper = cv2.erode((seg == 3).astype(np.uint8), kernel)
        mask_lower = cv2.erode((seg == 4).astype(np.uint8), kernel)
        mask = mask_upper | mask_lower
        edge_inner = edge * mask
        # with threshold == 0 this keeps all non-negative edge values (no-op);
        # raise threshold to suppress weak edges
        edge_inner = (edge_inner >= threshold).astype(np.uint8) * edge_inner
        image.imwrite(edge_inner, os.path.join(output_dir, s_id + '.jpg'))
        print('extracting inner edge %d / %d' % (i, len(id_list)))

    # create labels
    edge_paths = {
        s_id: os.path.join(output_dir, s_id + '.jpg')
        for s_id in id_list
    }
    split_debug = io.load_json(design_root + 'Split/debugca_gan_split.json')
    edge_paths_debug = {
        s_id: p
        for s_id, p in edge_paths.iteritems()
        if s_id in split_debug['train'] + split_debug['test']
    }

    io.save_json(edge_paths, design_root + 'Label/ca_edge_inner_paths.json')
    io.save_json(edge_paths_debug,
                 design_root + 'Label/debugca_edge_inner_paths.json')
Esempio n. 10
0
    def initialize(self, opt, split):
        '''
        Load GAN-dataset annotations (samples, landmarks, seg/edge paths)
        for one split and set up tensor transforms.

        Args:
            opt: options namespace; must provide data_root, fn_sample,
                fn_split, fn_landmark, fn_seg_path, fn_edge_path,
                fn_flx_seg_path, max_dataset_size and color_jitter.
            split: split name used as a key into the split json.
        '''
        self.opt = opt
        self.root = opt.data_root
        self.split = split

        print('loading data ...')
        samples = io.load_json(os.path.join(opt.data_root, opt.fn_sample))
        # attr_label = io.load_data(os.path.join(opt.data_root, opt.fn_label))
        # attr_entry = io.load_json(os.path.join(opt.data_root, opt.fn_entry))
        data_split = io.load_json(os.path.join(opt.data_root, opt.fn_split))
        lm_label = io.load_data(os.path.join(opt.data_root, opt.fn_landmark))
        seg_paths = io.load_json(os.path.join(opt.data_root, opt.fn_seg_path))
        edge_paths = io.load_json(os.path.join(opt.data_root,
                                               opt.fn_edge_path))
        # color_paths = io.load_json(os.path.join(opt.data_root, opt.fn_color_path))
        flx_seg_paths = io.load_json(
            os.path.join(opt.data_root, opt.fn_flx_seg_path))

        self.id_list = data_split[split]
        # self.attr_entry = attr_entry
        # optionally truncate the dataset to max_dataset_size ids
        if opt.max_dataset_size != float('inf'):
            self.id_list = self.id_list[0:opt.max_dataset_size]
        # re-index all annotations by position in id_list
        self.sample_list = [samples[s_id] for s_id in self.id_list]
        # self.attr_label_list = [attr_label[s_id] for s_id in self.id_list]
        self.lm_list = [lm_label[s_id] for s_id in self.id_list]
        self.seg_path_list = [seg_paths[s_id] for s_id in self.id_list]
        self.edge_path_list = [edge_paths[s_id] for s_id in self.id_list]
        # self.color_path_list = [color_paths[s_id] for s_id in self.id_list]
        self.flx_seg_path_list = [flx_seg_paths[s_id] for s_id in self.id_list]

        # check data
        # assert len(self.attr_entry) == len(self.attr_label_list[0]) == opt.n_attr, 'Attribute number not match!'
        print('dataset created (%d samples)' % len(self))

        # get transform
        self.to_tensor = transforms.ToTensor()

        # use standard normalization, which is different from attribute dataset
        # image will be normalized again (under imagenet distribution) before fed into attribute encoder in GAN model
        self.tensor_normalize_std = transforms.Normalize([0.5, 0.5, 0.5],
                                                         [0.5, 0.5, 0.5])
        self.tensor_normalize_imagenet = transforms.Normalize(
            [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        # optional color-jitter augmentation (applied on PIL images)
        if self.opt.color_jitter:
            self.color_jitter = transforms.ColorJitter(brightness=0.3,
                                                       contrast=0.3,
                                                       saturation=0.3,
                                                       hue=0.3)
            self.to_pil_image = transforms.ToPILImage()
Esempio n. 11
0
def create_cloth_edge_map():
    '''
    Create edge maps that keep only the edges inside the (dilated) cloth
    regions of the segmentation maps.
    '''
    mask_dilate = 5  # size of the square dilation kernel for the cloth mask

    seg_dir = design_root + 'Img/seg_ca_syn_256/'
    edge_dir = design_root + 'Img/edge_ca_256/'
    output_dir = design_root + 'Img/edge_ca_256_cloth/'
    io.mkdir_if_missing(output_dir)

    split = io.load_json(design_root +
                         'Split/ca_gan_split_trainval_upper.json')
    id_list = split['train'] + split['test']
    n_total = len(id_list)
    dilate_kernel = np.ones((mask_dilate, mask_dilate))

    for idx, s_id in enumerate(id_list):
        print('%d/%d' % (idx, n_total))
        seg_map = image.imread(seg_dir + s_id + '.bmp', 'grayscale')
        edge_map = image.imread(edge_dir + s_id + '.jpg', 'grayscale')
        assert seg_map.shape == edge_map.shape
        # cloth region: upper-body (3) and lower-body (4) labels
        cloth = np.logical_or(seg_map == 3, seg_map == 4).astype(np.uint8)
        cloth = cv2.dilate(cloth, kernel=dilate_kernel)
        image.imwrite(edge_map * cloth, output_dir + s_id + '.jpg')
Esempio n. 12
0
def create_debug_ca_dataset():
    '''
    Create a mini subset of Category_and_Attribute data. Assume standard CA index file and label files already exist.
    '''

    # subset sizes; when same_train_test is True the same ids are used for
    # both splits (useful for overfitting sanity checks)
    num_train = 10
    num_test = 10
    same_train_test = True

    samples = io.load_json(design_root + 'Label/ca_samples.json')
    attr_label = io.load_data(design_root + 'Label/ca_attr_label.pkl')
    bbox_label = io.load_data(design_root + 'Label/ca_bbox_label_256.pkl')
    lm_label = io.load_data(design_root + 'Label/ca_landmark_label_256.pkl')


    # NOTE(review): slicing samples.keys() requires Python 2 (keys() is a
    # list there); under Python 3 this raises TypeError -- confirm runtime.
    if same_train_test:
        id_list = samples.keys()[0:num_train]
        split = {'train': id_list, 'test': id_list}
    else:
        id_list = samples.keys()[0:(num_train + num_test)]
        split = {'train': id_list[0:num_train], 'test': id_list[num_train::]}


    # restrict every label dict to the selected ids
    samples = {s_id:samples[s_id] for s_id in id_list}
    attr_label = {s_id:attr_label[s_id] for s_id in id_list}
    bbox_label = {s_id:bbox_label[s_id] for s_id in id_list}
    lm_label = {s_id:lm_label[s_id] for s_id in id_list}
   

    io.save_json(samples, design_root + 'Label/debugca_samples.json')
    io.save_data(attr_label, design_root + 'Label/debugca_attr_label.pkl')
    io.save_data(bbox_label, design_root + 'Label/debugca_bbox_label.pkl')
    io.save_data(lm_label, design_root + 'Label/debugca_landmark_label.pkl')
    io.save_json(split, design_root + 'Split/debugca_split.json')
Esempio n. 13
0
 def initialize(self, opt, split):
     '''
     Load pose labels and set data directories for one split.

     Args:
         opt: options namespace; must provide data_root, fn_split, fn_pose,
             img_dir, seg_dir, edge_dir and debug.
         split: split name; overridden to 'debug' when opt.debug is set.
     '''
     self.opt = opt
     self.root = opt.data_root
     # debug mode swaps in the dedicated debug split
     if opt.debug:
         split = 'debug'
     self.split = split
     #############################
     # load data
     #############################
     print('loading data ...')
     # data split
     data_split = io.load_json(os.path.join(opt.data_root, opt.fn_split))
     self.pose_label = io.load_data(os.path.join(opt.data_root,
                                                 opt.fn_pose))
     self.img_dir = os.path.join(opt.data_root, opt.img_dir)
     self.seg_dir = os.path.join(opt.data_root, opt.seg_dir)
     self.edge_dir = os.path.join(opt.data_root, opt.edge_dir)
     #############################
     # create index list
     #############################
     self.id_list = data_split[split]
     #############################
     # other
     #############################
     # normalize image tensors to [-1, 1]
     self.tensor_normalize_std = transforms.Normalize([0.5, 0.5, 0.5],
                                                      [0.5, 0.5, 0.5])
     self.color_jitter = transforms.ColorJitter(brightness=0.3,
                                                contrast=0.3,
                                                saturation=0.3,
                                                hue=0.3)
     self.to_pil_image = transforms.ToPILImage()
def create_attribute_label():
    '''
    Build per-image attribute labels for DF_Pose images from the In-shop
    item-level attribute annotations, keeping only attribute entries that
    have at least one positive sample.
    '''
    img_split = io.load_json('datasets/DF_Pose/Label/image_split_dfm_new.json')
    id_list = img_split['train'] + img_split['test']
    attr_entry = io.load_str_list(
        'datasets/DeepFashion/In-shop/Anno/list_attr_cloth.txt')[2:]
    attr_anno = io.load_str_list(
        'datasets/DeepFashion/In-shop/Anno/list_attr_items.txt')
    attr_anno = attr_anno[2:]
    # map -1 labels to 0, then parse "item_id l1 l2 ..." lines into arrays
    attr_anno = [l.replace('-1', '0').split() for l in attr_anno]
    attr_anno = {l[0]: np.array(l[1:], dtype=np.int) for l in attr_anno}

    label = {}
    for sid in id_list:
        # recover the 8-digit item id embedded in the image id
        # (presumably '...idXXXXXXXX...' -> 'id_XXXXXXXX'; verify against data)
        s = sid.index('id') + 2
        e = s + 8
        sid_ori = 'id_' + sid[s:e]
        label[sid] = attr_anno[sid_ori]

    # remove attribute entries with no positive sample
    # NOTE(review): values()/iteritems() are Python 2 dict APIs; this also
    # relies on both passes seeing the same key order -- true for a dict that
    # is not modified in between.
    label_mat = np.array(label.values())
    valid_idx = label_mat.sum(axis=0) > 0
    print('%d valid attribute entries' % (valid_idx.sum()))
    label = {k: v[valid_idx] for k, v in label.iteritems()}
    attr_entry = [e for i, e in enumerate(attr_entry) if valid_idx[i]]
    attr_label = {'label': label, 'entry': attr_entry}

    io.save_data(attr_label, 'datasets/DF_Pose/Label/attr_label.pkl')
Esempio n. 15
0
def create_attr_label():
    '''
    Parse Category_and_Attribute image-level attribute annotations into a
    json label file (cached on disk), then print the top-n most frequent
    attributes of each attribute type.
    '''

    # attr_list = io.load_str_list(root + 'Anno/list_attr_cloth.txt')[2::]
    # attr_anno = io.load_str_list(root + 'Anno/list_attr_items.txt')[2::]
    # fn_out = root + 'Label/attribute_inshop.json'
    # num_attr = 463
    # n_top = 5

    attr_list = io.load_str_list('/data2/ynli/datasets/DeepFashion/Category_and_Attribute/Anno/list_attr_cloth.txt')[2::]
    # attribute name = all tokens except the trailing type id
    attr_list = [' '.join(a.split()[0:-1]) for a in attr_list]
    attr_anno = io.load_str_list('/data2/ynli/datasets/DeepFashion/Category_and_Attribute/Anno/list_attr_img.txt')[2::]
    # NOTE(review): 'root' is not defined in this function -- presumably a
    # module-level dataset path; confirm.
    fn_out = root + 'Label/attribute_ca.json'
    num_attr = 1000
    n_top = 5

    # create label data (cached: reuse fn_out if it already exists)
    if not os.path.isfile(fn_out):
        attr_data = {}
        for line in attr_anno:
            line = line.split()
            item_id = line[0]
            label = [int(c) for c in line[1::]]
            assert len(label) == num_attr

            attr_data[item_id] = label

        io.save_json(attr_data, fn_out)
    else:
        attr_data = io.load_json(fn_out)

    num_sample = len(attr_data)

    # most frequent attribute in each attribute type
    attr_list_ref = io.load_str_list('/data2/ynli/datasets/DeepFashion/Category_and_Attribute/Anno/list_attr_cloth.txt')[2::]
    # map attribute name -> type id (last token of each line)
    attr_type = {' '.join(a.split()[0:-1]): a.split()[-1] for a in attr_list_ref}
    
    
    # count positive labels per attribute column
    # NOTE(review): dict .values() order must match attr_list order for the
    # counts to line up -- holds only if attr_data preserves insertion order.
    attr_mat = np.array(attr_data.values(), dtype = float)
    attr_count = np.where(attr_mat > 0, 1, 0).sum(axis = 0)
    

    # bucket (name, count) pairs by attribute type; '-1' = unknown type
    attr_count_type = {}

    for i, attr_name in enumerate(attr_list):
        t = attr_type[attr_name] if attr_name in attr_type else '-1'
        if t not in attr_count_type:
            attr_count_type[t] = []
        attr_count_type[t].append((attr_name, attr_count[i]))

    for t in {'1', '2', '3', '4', '5', '-1'}:
        if t not in attr_count_type:
            continue
        attr_count_list = attr_count_type[t]
        attr_count_list.sort(key = lambda x: x[1], reverse = True)
        
        print('attribute type: %s' % t)

        for attr_name, count in attr_count_list[0:n_top]:
            print('%s: %d (%.1f %%)' % (attr_name, count, 100. * count / num_sample))
        print('\n')
 def _create_stage_1_net(self, opt):
     '''
     Create the stage-1 network: a pretrained pose transfer model.
     Assumed to be a VUnet for now; its options are restored from the
     checkpoint's train_opt.json.

     Args:
         opt: current options; which_model_stage_1 names the checkpoint
             directory, gpu_ids selects CUDA placement.

     Raises:
         NotImplementedError: if the stage-1 model is not a vunet.
     '''
     # load options
     opt_s1 = argparse.Namespace()
     dict_opt_s1 = io.load_json(
         os.path.join('checkpoints', opt.which_model_stage_1,
                      'train_opt.json'))
     opt_s1.__dict__.update(dict_opt_s1)
     self.opt_s1 = opt_s1
     # create model
     if opt_s1.which_model_T == 'vunet':
         self.netT_s1 = networks.VariationalUnet(
             input_nc_dec=self.get_pose_dim(opt_s1.pose_type),
             input_nc_enc=self.get_appearance_dim(opt_s1.appearance_type),
             output_nc=self.get_output_dim(opt_s1.output_type),
             nf=opt_s1.vunet_nf,
             max_nf=opt_s1.vunet_max_nf,
             input_size=opt_s1.fine_size,
             n_latent_scales=opt_s1.vunet_n_latent_scales,
             bottleneck_factor=opt_s1.vunet_bottleneck_factor,
             box_factor=opt_s1.vunet_box_factor,
             n_residual_blocks=2,
             norm_layer=networks.get_norm_layer(opt_s1.norm),
             activation=nn.ReLU(False),
             use_dropout=False,
             gpu_ids=opt.gpu_ids,
             output_tanh=False,
         )
         # move to GPU when gpu ids are configured
         if opt.gpu_ids:
             self.netT_s1.cuda()
     else:
         raise NotImplementedError()
Esempio n. 17
0
def gather_pose_estimation_result():
    '''
    Gather per-image keypoint estimation results into a single label file.

    Keypoints come from the pose estimator at
    https://github.com/tensorboy/pytorch_Realtime_Multi-Person_Pose_Estimation
    Images with no result file get 18 dummy keypoints [-1, -1].
    '''
    num_key_p = 18
    rst_dir = 'datasets/DeepFashion/Fashion_design/Temp/pose_pkl/'
    split = io.load_json(
        'datasets/DeepFashion/Fashion_design/Split/ca_gan_split_trainval_upper.json'
    )
    id_list = split['train'] + split['test']

    pose_label = {}
    n_fail = 0
    for idx, s_id in enumerate(id_list):
        print('%d/%d : %s' % (idx, len(id_list), s_id))
        fn_pose = rst_dir + s_id + '.pkl'
        if os.path.isfile(fn_pose):
            pose = io.load_data(fn_pose)
            assert len(pose) == num_key_p
            # p[i][j] = (x, y, score, id) is the j-th detection of keypoint
            # type i; keep only the first since images contain one person
            label = []
            for p in pose:
                label.append([p[0][0], p[0][1]] if len(p) > 0 else [-1, -1])
            pose_label[s_id] = label
        else:
            pose_label[s_id] = [[-1, -1] for _ in range(num_key_p)]
            n_fail += 1

    io.save_data(
        pose_label,
        'datasets/DeepFashion/Fashion_design/Label/ca_gan_pose_label_256.pkl')
    print('%d (out of %d) samples failed' % (n_fail, len(id_list)))
Esempio n. 18
0
def create_descriptor_seg():
    '''
    Build segmentation-based descriptors for patch matching and save them as
    .mat files: one for ground-truth seg pairs, one pairing ground-truth with
    generated seg.
    '''
    model_id = 'PoseTransfer_7.5'
    seg_dir = 'datasets/DF_Pose/Img/seg-lip_df/'
    seg_dir_gen = '/data2/ynli/Fashion/fashionHD/checkpoints/%s/test_seg/' % model_id

    image_info = io.load_json('temp/patch_matching/label/image_info.json')
    id_1 = image_info['id_1']
    id_2 = image_info['id_2']

    desc_1 = []
    desc_2 = []
    desc_gen = []
    # Bug fix: the original looped over range(num_sample) with num_sample
    # never defined in this function (NameError unless a module global
    # happened to exist); iterate the full paired id lists instead.
    for sid_1, sid_2 in zip(id_1, id_2):
        desc_1.append(_read_seg(seg_dir + sid_1 + '.bmp'))
        desc_2.append(_read_seg(seg_dir + sid_2 + '.bmp'))
        desc_gen.append(_read_seg(seg_dir_gen + '%s_%s.bmp' % (sid_1, sid_2)))

    data_dict_gt = {
        'desc_1': np.stack(desc_1),
        'desc_2': np.stack(desc_2),
        'name': 'gt_seg'
    }
    data_dict_gen = {
        'desc_1': np.stack(desc_1),
        'desc_2': np.stack(desc_gen),
        'name': 'gen_seg'
    }

    scipy.io.matlab.savemat('temp/patch_matching/descriptor/desc_gt_seg.mat',
                            data_dict_gt)
    scipy.io.matlab.savemat('temp/patch_matching/descriptor/desc_gen_seg.mat',
                            data_dict_gen)
Esempio n. 19
0
 def initialize(self, opt, split):
     '''
     Load pose labels and set data/correspondence directories for one split.

     Args:
         opt: options namespace; must provide data_root, fn_split, img_dir,
             seg_dir, corr_dir, fn_pose and debug.
         split: split name used as a key into the split json.
     '''
     self.opt = opt
     self.data_root = opt.data_root
     self.split = split
     #############################
     # set path / load label
     #############################
     data_split = io.load_json(os.path.join(opt.data_root, opt.fn_split))
     self.img_dir = os.path.join(opt.data_root, opt.img_dir)
     self.seg_dir = os.path.join(opt.data_root, opt.seg_dir)
     self.corr_dir = os.path.join(opt.data_root, opt.corr_dir)
     self.pose_label = io.load_data(os.path.join(opt.data_root,
                                                 opt.fn_pose))
     #############################
     # create index list
     #############################
     self.id_list = data_split[split]
     #############################
     # other
     #############################
     # keep a small subset when debugging
     if opt.debug:
         self.id_list = self.id_list[0:32]
     # normalize image tensors to [-1, 1]
     self.tensor_normalize_std = transforms.Normalize([0.5, 0.5, 0.5],
                                                      [0.5, 0.5, 0.5])
     self.to_pil_image = transforms.ToPILImage()
     self.pil_to_tensor = transforms.ToTensor()
     # hue-only jitter (brightness/contrast/saturation disabled)
     self.color_jitter = transforms.ColorJitter(brightness=0.0,
                                                contrast=0.0,
                                                saturation=0.0,
                                                hue=0.2)
Esempio n. 20
0
def keypoint_guided_tps():
    '''
    Warp source images toward target poses with a thin-plate-spline
    transform estimated from matching body keypoints, then paste the warped
    pixels into the target image inside its cloth mask.
    '''

    num_sample = 64
    pair_list = io.load_json(
        'datasets/DF_Pose/Label/pair_split.json')['test'][0:num_sample]
    pose_label = io.load_data('datasets/DF_Pose/Label/pose_label.pkl')
    image_dir = 'datasets/DF_Pose/Img/img_df/'
    seg_dir = 'datasets/DF_Pose/Img/seg-lip_df_revised/'
    output_dir = 'temp/patch_matching/output/tps_keypoint/'
    io.mkdir_if_missing(output_dir)
    tps = cv2.createThinPlateSplineShapeTransformer()

    for i, (id_1, id_2) in enumerate(tqdm.tqdm(pair_list)):
        # keypoints 1..13 of each pose, as (1, n, 2) float arrays as
        # required by the OpenCV shape transformer API
        kp_1 = np.array(pose_label[id_1][1:14],
                        dtype=np.float64).reshape(1, -1, 2)
        kp_2 = np.array(pose_label[id_2][1:14],
                        dtype=np.float64).reshape(1, -1, 2)
        # match keypoint j in one pose to keypoint j in the other, but only
        # when both are detected (negative coordinates mark missing points)
        kp_matches = []
        for j in range(kp_1.shape[1]):
            if (kp_1[0, j] >= 0).all() and (kp_2[0, j] >= 0).all():
                kp_matches.append(cv2.DMatch(j, j, 0))
        if len(kp_matches) == 0:
            continue

        # NOTE(review): arguments are (kp_2, kp_1) because warpImage applies
        # the inverse mapping in OpenCV's TPS implementation -- confirm.
        tps.estimateTransformation(kp_2, kp_1, kp_matches)
        img_1 = cv2.imread(image_dir + id_1 + '.jpg')
        img_2 = cv2.imread(image_dir + id_2 + '.jpg')

        img_w = tps.warpImage(img_1)
        seg = cv2.imread(seg_dir + id_2 + '.bmp', cv2.IMREAD_GRAYSCALE)
        # composite: warped pixels inside the cloth mask (labels 3 and 7),
        # original target pixels elsewhere
        mask = ((seg == 3) | (seg == 7)).astype(img_w.dtype)[:, :, np.newaxis]
        img_out = img_w * mask + img_2 * (1 - mask)

        cv2.imwrite(output_dir + '%d_%s_%s.jpg' % (i, id_1, id_2), img_out)
        cv2.imwrite(output_dir + 'w%d_%s_%s.jpg' % (i, id_1, id_2), img_w)
Esempio n. 21
0
def create_synthesis_to_CA_index():
    '''
    create an index map A: img_syn[i] = img_ca[A[i]]

    Also records the original (pre-resize) width/height of each CA image and
    saves everything as a .mat file for the synthesis benchmark.
    '''
    import scipy.io

    syn_name_list = io.load_str_list(
        'datasets/DeepFashion/Fashion_synthesis/data_release/benchmark/name_list.txt'
    )

    samples = io.load_json(
        'datasets/DeepFashion/Fashion_design/Label/ca_samples.json')
    # map the 'img/...' suffix of the original path to the numeric sample id
    # (sample ids look like 'xxxNNN...'; the first 3 chars are stripped)
    ca_name2idx = {
        s['img_path_org'][s['img_path_org'].find('img/')::]: int(s_id[3::])
        for s_id, s in samples.iteritems()
    }
    # original (width, height) of each CA image, read from the image files
    ca_name2sz = {}
    for i, s in enumerate(samples.values()):
        img = image.imread(s['img_path_org'])
        h, w = img.shape[0:2]
        ca_name2sz[s['img_path_org'][s['img_path_org'].find('img/')::]] = (w,
                                                                           h)
        print('load ca image size: %d/%d' % (i, len(samples)))

    syn_idx_list = [ca_name2idx[name] for name in syn_name_list]
    syn_org_size_list = [ca_name2sz[name] for name in syn_name_list]

    data_out = {
        'syn2ca_index': syn_idx_list,
        'syn2ca_width': [w for w, _ in syn_org_size_list],
        'syn2ca_height': [h for _, h in syn_org_size_list]
    }
    fn_out = 'datasets/DeepFashion/Fashion_synthesis/data_release/benchmark/index_to_Category_and_Attribute.mat'
    scipy.io.savemat(fn_out, data_out)
Esempio n. 22
0
def create_attr_entry():
    '''
    Build the attribute entry list (name, type, positive rate on the train
    split) for the original 1000 Category_and_Attribute benchmark attributes.
    '''
    print('loading data...')
    attr_entry_list = io.load_str_list(ca_root +
                                       'Anno/list_attr_cloth.txt')[2::]
    attr_label = io.load_data(design_root + 'Label/ca_attr_label.pkl')
    split = io.load_json(design_root + 'Split/ca_split.json')
    train_ids = set(split['train'])
    # label matrix restricted to training samples
    attr_mat = np.array(
        [v for k, v in attr_label.iteritems() if k in train_ids],
        dtype=np.float32)

    print('computing positive rates')
    num_sample = len(train_ids)
    pos_rate = attr_mat.sum(axis=0) / num_sample

    attr_entry = []
    for idx, line in enumerate(attr_entry_list):
        tokens = line.split()
        attr_entry.append({
            'entry': ' '.join(tokens[0:-1]),  # attribute name (may contain spaces)
            'type': int(tokens[-1]),          # attribute type id
            'pos_rate': pos_rate[idx]
        })

    io.save_json(attr_entry, design_root + 'Label/attr_entry.json')
Esempio n. 23
0
def create_split():
    '''
    Create train/val/test splits of sample ids following the original
    CA partition file, plus a merged train+val variant.
    '''
    partition_lines = io.load_str_list(ca_root + 'Eval/list_eval_partition.txt')[2:]
    samples = io.load_json(design_root + 'Label/ca_samples.json')
    # map the original image path (starting at 'img') back to its sample id
    img2id = {
        s['img_path_org'][s['img_path_org'].find('img')::]: s_id
        for s_id, s in samples.iteritems()
    }

    split = {'train': [], 'val': [], 'test': []}
    for line in partition_lines:
        img_path, status = line.split()
        split[status].append(img2id[img_path])

    io.mkdir_if_missing(design_root + 'Split')
    io.save_json(split, design_root + 'Split/ca_split.json')

    print('create split')
    for status in ['train', 'val', 'test']:
        print('%s: %d' % (status, len(split[status])))

    # merged split: train+val for training, test unchanged
    split_trainval = {
        'train': split['train'] + split['val'],
        'test': split['test']
    }
    io.save_json(split_trainval, design_root + 'Split/ca_split_trainval.json')
Esempio n. 24
0
def create_category_label():
    '''
    Create clothing category annotations.

    Writes two files:
      - Label/cat_entry.json:    list of {entry, type} per category name
      - Label/ca_cat_label.pkl:  sample_id -> 0-based category index
    '''
    samples = io.load_json(design_root + 'Label/ca_samples.json')
    entry_lines = io.load_str_list(ca_root + 'Anno/list_category_cloth.txt')[2:]
    label_lines = io.load_str_list(ca_root + 'Anno/list_category_img.txt')[2:]

    # category entry lines have the form "<name words...> <type>"
    cat_entry = []
    for line in entry_lines:
        tokens = line.split()
        cat_entry.append({'entry': ' '.join(tokens[:-1]), 'type': int(tokens[-1])})

    io.save_json(cat_entry, design_root + 'Label/cat_entry.json')
    print('create category entry')

    # map the original relative image path ("img/...") back to the sample id
    img2id = {}
    for s_id, s in samples.iteritems():
        path = s['img_path_org']
        img2id[path[path.find('img'):]] = s_id

    # per-image label lines have the form "<img_path> <1-based category>"
    cat_label = {}
    for line in label_lines:
        tokens = line.split()
        cat_label[img2id[tokens[0]]] = int(tokens[1]) - 1  # convert to 0-based

    io.save_data(cat_label, design_root + 'Label/ca_cat_label.pkl')
    print('create category label')
Esempio n. 25
0
def create_pose_label():
    '''
    Create 18-keypoint pose labels, following the setting in VITON.

    For each sample id, stores a list of [x, y] keypoints; joints the pose
    estimator did not detect are stored as [-1, -1].
    '''
    pose = io.load_data(zalando_root + 'Source/pose.pkl')
    split = io.load_json(zalando_root + 'Split/zalando_split.json')
    id_list = split['train'] + split['test']

    pose_label = {}
    total = len(id_list)
    for idx, s_id in enumerate(id_list):
        print('%d / %d' % (idx, total))
        entry = pose[s_id + '_0']
        # subset: [i1, i2, ..., in, total_score, n]; the last two entries are
        # metadata, not joint indices
        joint_indices = entry['subset'][0][0:-2]
        # candidate: [[x_i, y_i, score_i, id_i], ...]
        candidate = entry['candidate']
        keypoints = []
        for j in joint_indices:
            j = int(j)
            if j == -1:
                keypoints.append([-1, -1])  # joint not detected
            else:
                keypoints.append([candidate[j][0], candidate[j][1]])
        pose_label[s_id] = keypoints

    io.save_data(pose_label, zalando_root + 'Label/zalando_pose_label.pkl')
Esempio n. 26
0
def pad_image_for_segmentation():
    '''
    Resize images for segmentation (using fashionGAN code) and rescale the
    landmark/bbox labels to match. The longer image side is scaled to sz_tar.
    Todo: add inshop version
    '''
    sz_tar = 256
    output_dir = 'datasets/DeepFashion/Fashion_design/Img/img_ca_pad'
    io.mkdir_if_missing(output_dir)

    samples = io.load_json(design_root + 'Label/ca_samples.json')
    split = io.load_json(design_root + 'Split/ca_gan_split_trainval.json')
    id_list = split['train'] + split['test']

    # landmark and bbox annotations are rescaled together with the images
    lm_label = io.load_data(design_root + 'Label/ca_landmark_label.pkl')
    bbox_label = io.load_data(design_root + 'Label/ca_bbox_label.pkl')
    lm_label_pad = {}
    bbox_label_pad = {}

    io.save_str_list(id_list, os.path.join(output_dir, 'img_ca_pad.txt'))
    total = len(id_list)
    for i, s_id in enumerate(id_list):
        img_org = image.imread(samples[s_id]['img_path_org'])
        h, w = img_org.shape[0:2]

        # scale so the longer side becomes sz_tar (-1 keeps the aspect ratio)
        if h > w:
            img = image.resize(img_org, (-1, sz_tar))
            scale = 1. * sz_tar / h
        else:
            img = image.resize(img_org, (sz_tar, -1))
            scale = 1. * sz_tar / w

        # img = image.pad_square(img, sz_tar, padding_value = 255, mode = 'lefttop')
        # image.imwrite(img, os.path.join(output_dir, s_id + '.jpg'))

        bbox_label_pad[s_id] = [c * scale for c in bbox_label[s_id]]
        lm_label_pad[s_id] = [[x * scale, y * scale, v]
                              for x, y, v in lm_label[s_id]]

        print('padding image %d / %d' % (i, total))

    io.save_data(lm_label_pad,
                 design_root + 'Label/ca_landmark_label_pad_%d.pkl' % sz_tar)
    io.save_data(bbox_label_pad,
                 design_root + 'Label/ca_bbox_label_pad_%d.pkl' % sz_tar)
Esempio n. 27
0
def revise_coat_label():
    '''
    Revise the segment labels of coat(7) and upperbody(3).

    For each person id, the frontal view (<pid>_1) is used as reference. When
    its segmentation contains both coat and upperbody pixels, BGR color
    histograms of both reference regions are computed; in every other view of
    the same person, each coat/upperbody region is relabeled according to
    which reference histogram its own color histogram correlates with.
    Persons without a usable reference are copied through unchanged.
    '''
    import cv2

    img_dir = 'datasets/DF_Pose/Img/img_df/'
    seg_dir = 'datasets/DF_Pose/Img/seg-lip_df/'
    output_dir = 'datasets/DF_Pose/Img/seg-lip_df_revised/'
    split = io.load_json('datasets/DF_Pose/Label/split.json')
    id_list = split['train'] + split['test']
    # group sample ids by person id (first 5 chars of a sid are the pid)
    pid_to_sids = defaultdict(lambda: [])
    for sid in id_list:
        pid = sid[0:5]
        pid_to_sids[pid].append(sid)
    print('find %d person ids' % len(pid_to_sids))

    n_revised = 0
    io.mkdir_if_missing(output_dir)
    for i, (pid, sids) in enumerate(pid_to_sids.items()):
        seg_0 = cv2.imread(seg_dir + pid + '_1.bmp',
                           cv2.IMREAD_GRAYSCALE)  # try to load frontal image
        if (seg_0 is not None) and (7 in seg_0) and (3 in seg_0):
            n_revised += 1
            img_0 = cv2.imread(img_dir + pid + '_1.jpg')
            # uint8 masks: cv2.calcHist requires an 8-bit mask image
            mask_u = (seg_0 == 3).astype(np.uint8)
            mask_c = (seg_0 == 7).astype(np.uint8)
            # 8x8x8-bin BGR histograms of the two reference regions
            hist_u = cv2.calcHist([img_0], [0, 1, 2], mask_u, [8] * 3,
                                  [0, 256] * 3)
            hist_c = cv2.calcHist([img_0], [0, 1, 2], mask_c, [8] * 3,
                                  [0, 256] * 3)
            for sid in sids:
                if sid == pid + '_1':
                    # the reference view itself is kept as-is
                    shutil.copyfile(seg_dir + sid + '.bmp',
                                    output_dir + sid + '.bmp')
                else:
                    seg_i = cv2.imread(seg_dir + sid + '.bmp',
                                       cv2.IMREAD_GRAYSCALE)
                    img_i = cv2.imread(img_dir + sid + '.jpg')
                    mask_u_i = (seg_i == 3).astype(np.uint8)
                    mask_c_i = (seg_i == 7).astype(np.uint8)
                    for mask_i in [mask_u_i, mask_c_i]:
                        if mask_i.any():
                            hist_i = cv2.calcHist([img_i], [0, 1, 2], mask_i,
                                                  [8] * 3, [0, 256] * 3)
                            # BUGFIX: mask_i is uint8, and numpy treats a
                            # non-bool index array as integer (row) indexing,
                            # so "seg_i[mask_i] = v" overwrote rows 0/1 of the
                            # image instead of the masked region. Cast to bool
                            # to get true boolean masking.
                            region = mask_i.astype(np.bool_)
                            # NOTE(review): HISTCMP_CORREL is larger for more
                            # similar histograms, so this branch assigns label
                            # 3 when the region matches the coat reference
                            # better — confirm the comparison direction is
                            # intended.
                            if cv2.compareHist(
                                    hist_i, hist_u,
                                    cv2.HISTCMP_CORREL) < cv2.compareHist(
                                        hist_i, hist_c, cv2.HISTCMP_CORREL):
                                seg_i[region] = 3
                            else:
                                seg_i[region] = 7
                    cv2.imwrite(output_dir + sid + '.bmp', seg_i)
        else:
            # no usable reference: copy all views of this person unchanged
            for sid in sids:
                shutil.copyfile(seg_dir + sid + '.bmp',
                                output_dir + sid + '.bmp')
        print('%d / %d (%d revised)' % (i, len(pid_to_sids), n_revised))
Esempio n. 28
0
def create_attribute_label():
    '''
    Create attribute labels using predefined attribute entries.

    Each item's color + language description is string-matched against the
    attribute entry names; matched entries get label 1. Item-level labels are
    then propagated to every sample of that item.
    '''
    # predefined attribute entries
    attr_entry = io.load_json(design_root + 'Label/attr_entry.json')
    num_attr = len(attr_entry)

    # punctuation becomes whitespace so entries match on word boundaries
    puncs = u'.,!?"%'
    trans_table = {ord(c): u' ' for c in puncs}

    item2attr = defaultdict(lambda: [0] * num_attr)

    # per-item language descriptions
    desc_list = io.load_json(inshop_root + 'Anno/list_description_inshop.json')
    item2desc = {d['item']: d for d in desc_list}

    # attribute matching
    i_item = 0
    total = len(item2desc)
    for item_id, d in item2desc.iteritems():
        color = d['color'].replace('-', ' ')
        # surround with spaces so substring search respects word boundaries
        text = ' ' + ' '.join([color] + d['description']) + ' '
        text = text.lower().translate(trans_table)
        label = item2attr[item_id]
        for idx, att in enumerate(attr_entry):
            if ' ' + att['entry'] + ' ' in text:
                label[idx] = 1
        print('extract attribute label: %d / %d' % (i_item, total))
        i_item += 1

    # propagate item-level labels to individual samples
    samples = io.load_json(design_root + 'Label/inshop_samples.json')
    attr_label = {
        s_id: item2attr[s['item_id']]
        for s_id, s in samples.iteritems()
    }

    io.save_data(attr_label, design_root + 'Label/inshop_attr_label.pkl')
    print('create attribute label')
Esempio n. 29
0
def create_edge_path():
    '''
    Create sample_id -> edge-image-path mappings.

    Writes one mapping for the full ca_gan trainval split and one for the
    debug split:
      - Label/ca_edge_paths.json
      - Label/debugca_edge_paths.json
    '''
    edge_root = design_root + 'Img/edge_ca_256'

    # both outputs share identical path-building logic; factor it into a loop
    # (also drops an unused load of ca_samples.json in the original)
    configs = [
        ('Split/ca_gan_split_trainval.json', 'Label/ca_edge_paths.json'),
        ('Split/debugca_gan_split.json', 'Label/debugca_edge_paths.json'),
    ]
    for split_fn, output_fn in configs:
        split = io.load_json(design_root + split_fn)
        edge_path = {
            s_id: os.path.join(edge_root, s_id + '.jpg')
            for s_id in split['train'] + split['test']
        }
        io.save_json(edge_path, design_root + output_fn)
Esempio n. 30
0
def resize_and_pad():
    '''
    Resize each image so that its longer side equals new_size, pad it to a
    [new_size, new_size] square, and create rescaled pose labels to match.

    Reads  Img/img_zalando/<id>_{0,1}.jpg and Label/zalando_pose_label.pkl;
    writes Img/img_zalando_<new_size>/ and
           Label/zalando_pose_label_<new_size>.pkl.
    '''
    # config
    new_size = 256

    img_root = zalando_root + 'Img/img_zalando/'
    output_dir = zalando_root + 'Img/img_zalando_%d/' % new_size
    split = io.load_json(zalando_root + 'Split/zalando_split.json')
    pose_label = io.load_data(zalando_root + 'Label/zalando_pose_label.pkl')

    io.mkdir_if_missing(output_dir)
    id_list = split['train'] + split['test']
    # id_list = id_list[0:10]
    new_pose_label = {}

    for i, s_id in enumerate(id_list):
        print('%d / %d' % (i, len(id_list)))
        # resize person image
        img = cv2.imread(img_root + s_id + '_0.jpg')
        w, h = img.shape[1], img.shape[0]
        if w < h:
            # portrait: pad left/right to make the image square
            top = 0
            bottom = 0
            left = (h - w) // 2
            right = h - w - left
            # BUGFIX: force float division. This file is Python 2 style
            # (iteritems elsewhere), where "new_size / h" truncates to 0 for
            # any h > new_size, zeroing out every rescaled pose coordinate.
            # Matches the "1. * sz_tar / h" idiom used elsewhere in the file.
            ratio = 1. * new_size / h
        else:
            # landscape/square: pad top/bottom
            top = (w - h) // 2
            bottom = w - h - top
            left = 0
            right = 0
            ratio = 1. * new_size / w

        img = cv2.copyMakeBorder(img, top, bottom, left, right, borderType=cv2.BORDER_REPLICATE)
        img = cv2.resize(img, dsize=(new_size, new_size), interpolation=cv2.INTER_LINEAR)
        cv2.imwrite(output_dir + s_id + '_0.jpg', img)

        # resize clothing image; align its size to the person image first so
        # the same padding offsets apply
        img1 = cv2.imread(img_root + s_id + '_1.jpg')
        if not (img1.shape[0] == h and img1.shape[1] == w):
            img1 = cv2.resize(img1, dsize=(w, h))
        img1 = cv2.copyMakeBorder(img1, top, bottom, left, right, borderType=cv2.BORDER_REPLICATE)
        img1 = cv2.resize(img1, dsize=(new_size, new_size), interpolation=cv2.INTER_LINEAR)
        cv2.imwrite(output_dir + s_id + '_1.jpg', img1)

        # shift by the padding offset and scale; [-1, -1] marks missing joints
        pose = pose_label[s_id]
        new_pose = [[(p[0] + left) * ratio, (p[1] + top) * ratio]
                    if p != [-1, -1] else [-1, -1] for p in pose]
        new_pose_label[s_id] = new_pose

    io.save_data(new_pose_label, zalando_root + 'Label/zalando_pose_label_%d.pkl' % new_size)