Example #1
def create_pose_label():
    '''
    create 18-keypoint pose label. follow the setting in VITON
    '''
    pose = io.load_data(zalando_root + 'Source/pose.pkl')
    split = io.load_json(zalando_root + 'Split/zalando_split.json')
    id_list = split['train'] + split['test']

    pose_label = {}
    for idx, s_id in enumerate(id_list):
        print('%d / %d' % (idx, len(id_list)))
        subset = pose[s_id + '_0']['subset'] # [i1, i2, ..., in, total_score, n]
        candidate = pose[s_id + '_0']['candidate'] # [[x_i, y_i, score_i, id_i]]
        label = []
        for i in subset[0][0:-2]:
            i = int(i)
            if i == -1:
                label.append([-1, -1])
            else:
                x = candidate[i][0]
                y = candidate[i][1]
                label.append([x, y])
        pose_label[s_id] = label

    io.save_data(pose_label, zalando_root + 'Label/zalando_pose_label.pkl')
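For reference, a minimal sketch with toy values (the field layout is an assumption based on the comments in the loop above) showing how one keypoint is recovered from the subset/candidate arrays:

# Toy OpenPose-style output (values are made up for illustration).
# candidate: one row per detected body-part candidate: [x, y, score, id]
candidate = [[120.0, 80.0, 0.9, 0], [130.0, 150.0, 0.8, 1]]
# subset: one row per person: 18 candidate indices, then total_score, then n
subset = [[0, 1] + [-1] * 16 + [1.7, 2]]

label = []
for i in subset[0][0:-2]:        # drop total_score and n
    i = int(i)
    if i == -1:                  # keypoint not detected
        label.append([-1, -1])
    else:
        label.append([candidate[i][0], candidate[i][1]])

assert label[0] == [120.0, 80.0] and label[2] == [-1, -1]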
Example #2
def gather_pose_estimation_result():
    '''
    We use the state-of-the-art human pose estimation method (https://github.com/tensorboy/pytorch_Realtime_Multi-Person_Pose_Estimation) to get key points
    This function is for gathering estimation results.
    '''
    num_key_p = 18
    rst_dir = 'datasets/DeepFashion/Fashion_design/Temp/pose_pkl/'
    split = io.load_json(
        'datasets/DeepFashion/Fashion_design/Split/ca_gan_split_trainval_upper.json'
    )
    id_list = split['train'] + split['test']

    pose_label = {}
    n_fail = 0
    for idx, s_id in enumerate(id_list):
        print('%d/%d : %s' % (idx, len(id_list), s_id))
        fn_pose = rst_dir + s_id + '.pkl'
        if not os.path.isfile(fn_pose):
            pose_label[s_id] = [[-1, -1] for _ in range(num_key_p)]
            n_fail += 1
        else:
            pose = io.load_data(fn_pose)
            assert len(pose) == num_key_p
            # p[i][j] = (x, y, score, id) is the j-th keypoint of the i-th type.
            # we assume j <= 1, because each image contains a single person
            pose_label[s_id] = [[p[0][0], p[0][1]] if len(p) > 0 else [-1, -1]
                                for p in pose]

    io.save_data(
        pose_label,
        'datasets/DeepFashion/Fashion_design/Label/ca_gan_pose_label_256.pkl')
    print('%d (out of %d) samples failed' % (n_fail, len(id_list)))
Example #3
def create_attribute_label():
    img_split = io.load_json('datasets/DF_Pose/Label/image_split_dfm_new.json')
    id_list = img_split['train'] + img_split['test']
    attr_entry = io.load_str_list(
        'datasets/DeepFashion/In-shop/Anno/list_attr_cloth.txt')[2:]
    attr_anno = io.load_str_list(
        'datasets/DeepFashion/In-shop/Anno/list_attr_items.txt')
    attr_anno = attr_anno[2:]
    attr_anno = [l.replace('-1', '0').split() for l in attr_anno]
    attr_anno = {l[0]: np.array(l[1:], dtype=int) for l in attr_anno}

    label = {}
    for sid in id_list:
        s = sid.index('id') + 2
        e = s + 8
        sid_ori = 'id_' + sid[s:e]
        label[sid] = attr_anno[sid_ori]

    # remove attribute entries with no positive sample
    label_mat = np.array(list(label.values()))
    valid_idx = label_mat.sum(axis=0) > 0
    print('%d valid attribute entries' % (valid_idx.sum()))
    label = {k: v[valid_idx] for k, v in label.items()}
    attr_entry = [e for i, e in enumerate(attr_entry) if valid_idx[i]]
    attr_label = {'label': label, 'entry': attr_entry}

    io.save_data(attr_label, 'datasets/DF_Pose/Label/attr_label.pkl')
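The column filtering above relies on a boolean mask over attribute entries; a minimal numpy sketch of the same idea on a dummy label matrix:

import numpy as np

# Dummy label matrix: 3 samples x 4 attribute entries.
label_mat = np.array([[1, 0, 0, 1],
                      [0, 0, 0, 1],
                      [1, 0, 0, 0]])
valid_idx = label_mat.sum(axis=0) > 0   # keep entries with at least one positive sample
print(valid_idx)                        # [ True False False  True]
print(label_mat[:, valid_idx])          # the two all-zero columns are dropped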
Example #4
def create_category_label():

    samples = io.load_json(design_root + 'Label/ca_samples.json')
    cat_entry_list = io.load_str_list(ca_root +
                                      'Anno/list_category_cloth.txt')[2::]
    cat_list = io.load_str_list(ca_root + 'Anno/list_category_img.txt')[2::]

    # create category entry
    cat_entry = []
    for cat_str in cat_entry_list:
        cat_name = ' '.join(cat_str.split()[0:-1])
        cat_type = int(cat_str.split()[-1])
        cat_entry.append({'entry': cat_name, 'type': cat_type})

    io.save_json(cat_entry, design_root + 'Label/cat_entry.json')
    print('create category entry')

    # create category label
    img2id = {
        s['img_path_org'][s['img_path_org'].find('img')::]: s_id
        for s_id, s in samples.items()
    }
    cat_label = {}

    for idx, s in enumerate(cat_list):
        s = s.split()
        s_id = img2id[s[0]]
        cat = int(s[1]) - 1
        cat_label[s_id] = cat

    io.save_data(cat_label, design_root + 'Label/ca_cat_label.pkl')
    print('create category label')
Example #5
def resize_and_pad():
    '''
    Resize the image so that its longer side equals new_size, then pad it to the size [new_size, new_size].
    Create the new pose label at the same time.
    '''

    # config
    new_size = 256

    img_root = zalando_root + 'Img/img_zalando/'
    output_dir = zalando_root + 'Img/img_zalando_%d/' % new_size
    split = io.load_json(zalando_root + 'Split/zalando_split.json')
    pose_label = io.load_data(zalando_root + 'Label/zalando_pose_label.pkl')

    io.mkdir_if_missing(output_dir)
    id_list = split['train'] + split['test']
    # id_list = id_list[0:10]
    new_pose_label = {}

    for i, s_id in enumerate(id_list):
        print('%d / %d' % (i, len(id_list)))
        # resize image
        img = cv2.imread(img_root + s_id + '_0.jpg')
        w, h = img.shape[1], img.shape[0]
        if w < h:
            top = 0
            bottom = 0
            left = (h-w)//2
            right = h-w-left
            ratio = new_size/h
        else:
            top = (w-h)//2
            bottom = w-h-top
            left = 0
            right = 0
            ratio = new_size/w

        img = cv2.copyMakeBorder(img, top, bottom, left, right, borderType=cv2.BORDER_REPLICATE)
        img = cv2.resize(img, dsize=(new_size, new_size), interpolation=cv2.INTER_LINEAR)
        cv2.imwrite(output_dir + s_id + '_0.jpg', img)

        # resize clothing image
        img1 = cv2.imread(img_root + s_id + '_1.jpg')
        if not (img1.shape[0] == h and img1.shape[1] == w):
            img1 = cv2.resize(img1, dsize=(w,h))
        img1 = cv2.copyMakeBorder(img1, top, bottom, left, right, borderType=cv2.BORDER_REPLICATE)
        img1 = cv2.resize(img1, dsize=(new_size, new_size), interpolation=cv2.INTER_LINEAR)
        cv2.imwrite(output_dir + s_id + '_1.jpg', img1)

        # modify pose label
        pose = pose_label[s_id]
        new_pose = [[(p[0]+left)*ratio, (p[1]+top)*ratio] if p != [-1,-1] else [-1,-1] for p in pose]
        new_pose_label[s_id] = new_pose

    io.save_data(new_pose_label, zalando_root + 'Label/zalando_pose_label_%d.pkl' % new_size)
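A small worked example of the coordinate mapping above (numbers are illustrative only): for a 400x600 (w x h) portrait image, the padding and scale parameters, and the update of one keypoint, look like this:

new_size = 256
w, h = 400, 600              # portrait: w < h
left = (h - w) // 2          # 100 columns of padding on the left
right = h - w - left         # 100 columns of padding on the right
ratio = new_size / h         # 256 / 600 ~= 0.4267

x, y = 200.0, 300.0          # keypoint in the original image
x_new = (x + left) * ratio   # (200 + 100) * 256 / 600 = 128.0
y_new = (y + 0) * ratio      # top padding is 0 for portrait images
print(x_new, y_new)          # 128.0 128.0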
Example #6
def extract_feature(opt, save_feat=True):

    fn_feat = os.path.join('checkpoints', opt.id, 'feat',
                           '%s.pkl' % opt.which_epoch)

    if os.path.isfile(fn_feat):
        print('loading feature from %s' % fn_feat)
        feat_data = io.load_data(fn_feat)
        # feat_data['feat_train']
        # feat_data['feat_test']
        # feat_data['id_list_train']
        # feat_data['id_list_test']

    else:
        # create model
        model = AttributeEncoder()
        model.initialize(opt)
        model.eval()
        feat_data = {
            'feat_train': [],
            'feat_test': [],
            'id_list_train': [],
            'id_list_test': []
        }

        for split in ['train', 'test']:
            loader = CreateDataLoader(opt, split=split)

            for i, data in enumerate(loader):
                model.set_input(data)
                model.extract_feat()
                feat_data['feat_%s' %
                          split].append(model.output['feat'].data.cpu().numpy(
                          ))  # size: batch_size * feat_size
                feat_data['id_list_%s' %
                          split] += data['id']  # list of length batch_size
                print('\r[%s] extract feature from %s samples %d/%d' %
                      (opt.id, split, i, len(loader)),
                      end='')
            print('\n')
            feat_data['feat_%s' % split] = np.vstack(feat_data['feat_%s' %
                                                               split])

        if save_feat:
            io.mkdir_if_missing(os.path.join('checkpoints', opt.id, 'feat'))
            io.save_data(feat_data, fn_feat)

    return feat_data
Example #7
def create_hmr_pose_label():
    '''
        name: [openpose_index, hmr_index]:
        
        nose: 0,14
        neck: 1, 12
        right_shoulder: 2, 8
        right_elbow: 3, 7
        right_wrist: 4, 6
        left_shoulder: 5, 9
        left_elbow: 6, 10
        left_wrist: 7, 11
        right_hip: 8, 2
        right_knee: 9, 1
        right_ankle: 10, 0
        left_hip: 11, 3
        left_knee: 12, 4
        left_ankle: 13, 5
        right_eye: 14, 16
        left_eye: 15, 15
        right_ear: 16, 18
        left_ear: 17, 17
        head_top: -, 13
    '''
    # DeepFashion
    #image_split = io.load_json('datasets/DF_Pose/Label/image_split_dfm.json')
    #smpl_pred_dir = 'datasets/DF_Pose/3d/hmr_dfm_v3/pred/'
    #fn_out = 'datasets/DF_pose/Label/pose_label_hmr.pkl'

    # Market-1501
    image_split = io.load_json('datasets/market1501/Label/image_split.json')
    smpl_pred_dir = 'datasets/market1501/3d/hmr/pred/'
    fn_out = 'datasets/market1501/Label/pose_label_hmr.pkl'

    id_list = image_split['train'] + image_split['test']
    kp_order_map = [
        14, 12, 8, 7, 6, 9, 10, 11, 2, 1, 0, 3, 4, 5, 16, 15, 18, 17
    ]

    pose_label_hmr = {}
    for sid in tqdm.tqdm(id_list):
        pred = io.load_data(smpl_pred_dir + '%s.pkl' % sid)
        joints = pred['joints']
        pts = joints[kp_order_map]
        pts[(pts[:, 0] < 0) | (pts[:, 0] > 255) | (pts[:, 1] < 0) |
            (pts[:, 1] > 255)] = -1
        pose_label_hmr[sid] = pts.tolist()
    io.save_data(pose_label_hmr, fn_out)
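The remapping above is plain numpy fancy indexing with kp_order_map; a minimal sketch with dummy joints (19 HMR joints as listed in the docstring, 2-D each) showing that row i of the result is the HMR joint at position i of the map:

import numpy as np

kp_order_map = [14, 12, 8, 7, 6, 9, 10, 11, 2, 1, 0, 3, 4, 5, 16, 15, 18, 17]

# Dummy HMR joints: joint k sits at (k, k), so the reordering is easy to read.
joints = np.stack([np.array([k, k], dtype=np.float32) for k in range(19)])

pts = joints[kp_order_map]    # shape (18, 2), now in openpose order
assert pts.shape == (18, 2)
assert (pts[0] == 14).all()   # nose <- hmr index 14
assert (pts[2] == 8).all()    # right_shoulder <- hmr index 8

# Out-of-frame joints are then invalidated row-wise, exactly as in the function above:
pts[(pts[:, 0] < 0) | (pts[:, 0] > 255) | (pts[:, 1] < 0) | (pts[:, 1] > 255)] = -1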
Example #8
def create_hmr_pose_label_adapt():
    '''
    This creates a version of the hmr_pose joints that is adapted to the openpose joints:
    - compute "neck" using a regressor trained on openpose joints (due to the different definition of the neck point)
    - invalidate the joint points that are invalid in dfm_pose
    '''
    from sklearn.linear_model import RidgeCV

    # DeepFashion
    #joint_label = io.load_data('datasets/DF_Pose/Label/pose_label_dfm.pkl')
    #joint_label_hmr = io.load_data('datasets/DF_Pose/Label/pose_label_hmr.pkl')
    #fn_out = 'datasets/DF_Pose/Label/pose_label_hmr_adapt.pkl'

    # Market-1501
    joint_label = io.load_data('datasets/market1501/Label/pose_label.pkl')
    joint_label_hmr = io.load_data(
        'datasets/market1501/Label/pose_label_hmr.pkl')
    fn_out = 'datasets/market1501/Label/pose_label_hmr_adapt.pkl'

    # train a linear regressor, which predict neck point location from left/right shoulder locations
    print('training regressor...')
    pts_dfm = np.array(list(joint_label.values()))  # (N, 18, 2)
    v = (pts_dfm[:, [1, 2, 5], :].reshape(-1, 6) >= 0).all(axis=1)
    x_train = (pts_dfm[v])[:, [2, 5]].reshape(-1, 4)  #shoulder points
    y_train = (pts_dfm[v])[:, 1].reshape(-1, 2)  #neck points
    reg = RidgeCV()
    reg.fit(x_train, y_train)

    pts_hmr = np.array(list(joint_label_hmr.values()))
    x_test = pts_hmr[:, [2, 5], :].reshape(-1, 4)
    y_test = reg.predict(x_test).reshape(-1, 2)

    # generate adapted joint label
    joint_label_adapt = {}
    for idx, sid in enumerate(tqdm.tqdm(joint_label_hmr.keys())):
        p_h = np.array(joint_label_hmr[sid])
        p_d = np.array(joint_label[sid])
        if (p_h[[2, 5], :] >= 0).all():
            p_h[1, :] = y_test[idx]

        inv = (p_d < 0).any(axis=1) | (p_h < 0).any(axis=1) | (p_h > 255).any(
            axis=1
        )  # invalid joint points in joint_dfm will also be marked as invalid in joint_hmr
        p_h[inv, :] = -1
        joint_label_adapt[sid] = p_h.tolist()

    io.save_data(joint_label_adapt, fn_out)
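A self-contained sketch of the regression step with synthetic points only (not the real labels): the regressor maps the 4-D input (right/left shoulder x, y) to the 2-D neck location. Here the synthetic "neck" is simply the shoulder midpoint, so the fit is easy to verify:

import numpy as np
from sklearn.linear_model import RidgeCV

rng = np.random.RandomState(0)
shoulders = rng.uniform(0, 255, size=(500, 2, 2))   # (N, {right, left} shoulder, (x, y))
necks = shoulders.mean(axis=1)                      # synthetic neck = shoulder midpoint

x_train = shoulders.reshape(-1, 4)                  # (N, 4)
y_train = necks                                     # (N, 2), multi-output regression

reg = RidgeCV()
reg.fit(x_train, y_train)

x_test = np.array([[100.0, 50.0, 140.0, 60.0]])     # one pair of shoulders
print(reg.predict(x_test))                          # approximately [[120., 55.]]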
Example #9
def pad_image_for_segmentation():
    '''
    Resize and pad images for segmentation (using the fashionGAN code).
    Todo: add inshop version
    '''

    sz_tar = 256
    output_dir = 'datasets/DeepFashion/Fashion_design/Img/img_ca_pad'

    io.mkdir_if_missing(output_dir)
    samples = io.load_json(design_root + 'Label/ca_samples.json')
    split = io.load_json(design_root + 'Split/ca_gan_split_trainval.json')
    id_list = split['train'] + split['test']

    # update landmark and bbox
    lm_label = io.load_data(design_root + 'Label/ca_landmark_label.pkl')
    bbox_label = io.load_data(design_root + 'Label/ca_bbox_label.pkl')
    lm_label_pad = {}
    bbox_label_pad = {}

    io.save_str_list(id_list, os.path.join(output_dir, 'img_ca_pad.txt'))
    for i, s_id in enumerate(id_list):
        img_org = image.imread(samples[s_id]['img_path_org'])
        h, w = img_org.shape[0:2]

        if h > w:
            img = image.resize(img_org, (-1, sz_tar))
            scale = 1. * sz_tar / h
        else:
            img = image.resize(img_org, (sz_tar, -1))
            scale = 1. * sz_tar / w

        # img = image.pad_square(img, sz_tar, padding_value = 255, mode = 'lefttop')
        # image.imwrite(img, os.path.join(output_dir, s_id + '.jpg'))

        bbox_label_pad[s_id] = [c * scale for c in bbox_label[s_id]]
        lm_label_pad[s_id] = []
        for x, y, v in lm_label[s_id]:
            lm_label_pad[s_id].append([x * scale, y * scale, v])

        print('padding image %d / %d' % (i, len(id_list)))

    io.save_data(lm_label_pad,
                 design_root + 'Label/ca_landmark_label_pad_%d.pkl' % sz_tar)
    io.save_data(bbox_label_pad,
                 design_root + 'Label/ca_bbox_label_pad_%d.pkl' % sz_tar)
Example #10
def create_attribute_label():
    '''
    Create attribute label using predefined attribute entries
    '''

    # config
    attr_entry = io.load_json(design_root + 'Label/attr_entry.json')

    puncs = u'.,!?"%'
    trans_table = {ord(c): u' ' for c in puncs}

    # load attribute entry
    num_attr = len(attr_entry)
    item2attr = defaultdict(lambda: [0] * num_attr)

    # load language description
    desc_list = io.load_json(inshop_root + 'Anno/list_description_inshop.json')
    item2desc = {d['item']: d for d in desc_list}

    # attribute matching
    i_item = 0

    for item_id, d in item2desc.items():

        color = d['color'].replace('-', ' ')
        d_str = ' ' + ' '.join([color] + d['description']) + ' '
        d_str = d_str.lower().translate(trans_table)
        label = item2attr[item_id]

        for idx, att in enumerate(attr_entry):

            if ' ' + att['entry'] + ' ' in d_str:
                label[idx] = 1

        print('extract attribute label: %d / %d' % (i_item, len(item2desc)))
        i_item += 1

    samples = io.load_json(design_root + 'Label/inshop_samples.json')
    attr_label = {
        s_id: item2attr[s['item_id']]
        for s_id, s in samples.items()
    }

    io.save_data(attr_label, design_root + 'Label/inshop_attr_label.pkl')
    print('create attribute label')
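The matching above depends on two small tricks: punctuation is mapped to spaces with str.translate, and both the description and each attribute entry are padded with spaces so that only whole-word/phrase matches count. A minimal sketch with a made-up description:

puncs = u'.,!?"%'
trans_table = {ord(c): u' ' for c in puncs}

desc = u'A Line Dress, sleeveless. 100% cotton.'
d_str = (u' ' + desc + u' ').lower().translate(trans_table)
print(repr(d_str))   # ' a line dress  sleeveless  100  cotton  '

# whole-phrase matching: ' sleeveless ' hits, but a fragment without
# surrounding spaces (e.g. 'eveless' inside 'sleeveless') does not
print(' ' + 'sleeveless' + ' ' in d_str)   # True
print(' ' + 'eveless' + ' ' in d_str)      # False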
Example #11
def create_debug_ca_dataset():
    '''
    Create a mini subset of Category_and_Attribute data. Assume standard CA index file and label files already exist.
    '''

    num_train = 10
    num_test = 10
    same_train_test = True

    samples = io.load_json(design_root + 'Label/ca_samples.json')
    attr_label = io.load_data(design_root + 'Label/ca_attr_label.pkl')
    bbox_label = io.load_data(design_root + 'Label/ca_bbox_label_256.pkl')
    lm_label = io.load_data(design_root + 'Label/ca_landmark_label_256.pkl')


    if same_train_test:
        id_list = list(samples.keys())[0:num_train]
        split = {'train': id_list, 'test': id_list}
    else:
        id_list = list(samples.keys())[0:(num_train + num_test)]
        split = {'train': id_list[0:num_train], 'test': id_list[num_train::]}


    samples = {s_id:samples[s_id] for s_id in id_list}
    attr_label = {s_id:attr_label[s_id] for s_id in id_list}
    bbox_label = {s_id:bbox_label[s_id] for s_id in id_list}
    lm_label = {s_id:lm_label[s_id] for s_id in id_list}
   

    io.save_json(samples, design_root + 'Label/debugca_samples.json')
    io.save_data(attr_label, design_root + 'Label/debugca_attr_label.pkl')
    io.save_data(bbox_label, design_root + 'Label/debugca_bbox_label.pkl')
    io.save_data(lm_label, design_root + 'Label/debugca_landmark_label.pkl')
    io.save_json(split, design_root + 'Split/debugca_split.json')
Example #12
def create_color_entry_and_label():
    '''
    Create color attribute entries and color labels
    '''

    print('loading data')
    # load description
    desc_list = io.load_json(inshop_root + 'Anno/list_description_inshop.json')
    item2color = {
        d['item']: d['color'].lower().replace('-', ' ').split()
        for d in desc_list
    }
    colors = set([c[0] for c in item2color.values() if len(c) == 1])
    color_entry = [{'entry': c, 'type': 0, 'pos_rate': -1} for c in colors]

    # load sample index
    samples = io.load_json(design_root + 'Label/inshop_samples.json')
    split = io.load_json(design_root + 'Split/inshop_split.json')
    train_ids = set(split['train'])

    print('computing positive rates')
    color_label = {}
    for s_id, s in samples.items():
        color = item2color[s['item_id']]
        label = [1 if c['entry'] in color else 0 for c in color_entry]
        color_label[s_id] = label

    color_mat = np.array(
        [v for k, v in color_label.items() if k in train_ids],
        dtype=np.float32)
    num_sample = len(train_ids)
    pos_rate = (color_mat.sum(axis=0) / num_sample).tolist()

    for idx, att in enumerate(color_entry):
        color_entry[idx]['pos_rate'] = pos_rate[idx]

    print('saving data')
    io.save_json(color_entry, design_root + 'Label/color_entry.json')
    io.save_data(color_label, design_root + 'Label/inshop_attr_label.pkl')
Example #13
def create_color_attribute_label():
    '''
    Attributes related to color.
    Attribute types: 1-texture, 2-fabric, 3-shape, 4-part, 5-style
    '''

    attr_entry = io.load_json(design_root + 'Label/attr_entry.json')
    attr_label = io.load_data(design_root + 'Label/ca_attr_label.pkl')
    index = [
        i for i, entry in enumerate(attr_entry) if entry['type'] in {1, 2, 5}
    ]
    n = 0
    new_attr_label = {}
    for s_id, label in attr_label.items():
        new_attr_label[s_id] = [label[i] for i in index]
        n += 1
        print('%d/%d' % (n, len(attr_label)))

    new_attr_entry = [attr_entry[i] for i in index]
    print('%d color-related attributes' % len(index))

    io.save_data(new_attr_label, design_root + 'Label/ca_color_attr_label.pkl')
    io.save_json(new_attr_entry, design_root + 'Label/color_attr_entry.json')
Example #14
def create_image_index():
    ''' create image index, split and pose label from original index.p file '''
    index = io.load_data(root + 'Anno/index.pkl')

    split = {'train': [], 'test': []}
    pose_label = {}

    for img, joints, is_train in zip(index['imgs'], index['joints'],
                                     index['train']):
        s_id = img.split('/')[1][0:7]
        if is_train:
            split['train'].append(s_id)
        else:
            split['test'].append(s_id)

        for i in range(len(joints)):
            if not (joints[i][0] == -1 and joints[i][1] == -1):
                joints[i][0] *= 256
                joints[i][1] *= 256
        pose_label[s_id] = joints.tolist()

    io.save_json(split, root + 'Label/split.json')
    io.save_data(pose_label, root + 'Label/pose_label.pkl')
Example #15
def _svm_test_attr_unit(worker_idx, idx_attr_rng, feat_train, feat_test,
                        label_train, label_test, attr_entry, cache_dir):
    idx_list = range(idx_attr_rng[0], idx_attr_rng[1])
    c_list = [0.1, 1., 10.]
    pred = np.zeros((label_test.shape[0], len(idx_list)), dtype=np.float32)
    for i, idx in enumerate(idx_list):
        t = time.time()
        l_train = label_train[:, idx].astype(int)
        l_test = label_test[:, idx].astype(int)
        w1 = l_train.size / l_train.sum() - 1
        # w1 = 1.
        # if param_C_by_CV:
        #     c, _ = liblinear.train(l_train, feat_train, '-s 0 -B 1. -C -w1 %f -q' % w1)
        #     c = max(0.1, c)
        # else:
        #     c = 512.
        best_acc = -1.
        for c in c_list:
            svm_model = liblinear.train(l_train, feat_train,
                                        '-s 0 -B 1. -c %f -w1 %f -q' % (c, w1))
            svm_out = liblinear.predict(l_test, feat_test, svm_model,
                                        '-b 1 -q')
            acc = svm_out[1][0]
            if acc > best_acc:
                best_acc = acc
                best_c = c
                k = svm_model.get_labels().index(1)
                prob = np.array(svm_out[2])[:, k]

        pred[:, i] = prob
        print(
            'worker [%d]: "%s(%d)" [%d/%d], acc: %f, c: %f, time cost: %.2f sec'
            % (worker_idx, attr_entry[idx]['entry'], idx, i, len(idx_list),
               best_acc, best_c, time.time() - t))

    io.save_data(pred, os.path.join(cache_dir, '%d.pkl' % worker_idx))
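The -w1 option weights the positive class; w1 = l_train.size / l_train.sum() - 1 is simply the negative-to-positive ratio of the training labels. A quick check on a toy label vector:

import numpy as np

l_train = np.array([1, 0, 0, 0, 1, 0, 0, 0, 0, 0])   # 2 positives, 8 negatives
w1 = l_train.size / l_train.sum() - 1                # 10 / 2 - 1 = 4.0
assert w1 == (l_train == 0).sum() / (l_train == 1).sum()
print(w1)   # 4.0, i.e. one positive is weighted like four negatives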
Example #16
def create_dug_ca_gan_dataset():
    num_train = 10
    num_test = 10
    same_train_test = True

    samples = io.load_json(design_root + 'Label/ca_samples.json')
    attr_label = io.load_data(design_root + 'Label/ca_attr_label.pkl')
    bbox_label = io.load_data(design_root + 'Label/ca_bbox_label_256.pkl')
    lm_label = io.load_data(design_root + 'Label/ca_landmark_label_256.pkl')
    seg_path_list = io.load_json(design_root + 'Label/ca_seg_paths.json')
    ca_split = io.load_json(design_root + 'Split/ca_gan_split_trainval_upper.json')

    if same_train_test:
        split = {
            'train': ca_split['train'][0:num_train],
            'test': ca_split['train'][0:num_train]
        }
        id_list = split['train']
    else:
        split = {
            'train': ca_split['train'][0:num_train],
            'test': ca_split['test'][0:num_test]
        }
        id_list = split['train'] + split['test']

    samples = {s_id: samples[s_id] for s_id in id_list}
    attr_label = {s_id:attr_label[s_id] for s_id in id_list}
    bbox_label = {s_id:bbox_label[s_id] for s_id in id_list}
    lm_label = {s_id:lm_label[s_id] for s_id in id_list}
    seg_path_list = {s_id: seg_path_list[s_id] for s_id in id_list}

    io.save_json(samples, design_root + 'Label/debugca_gan_samples.json')
    io.save_data(attr_label, design_root + 'Label/debugca_gan_attr_label.pkl')
    io.save_data(bbox_label, design_root + 'Label/debugca_gan_bbox_label.pkl')
    io.save_data(lm_label, design_root + 'Label/debugca_gan_landmark_label.pkl')
    io.save_json(seg_path_list, design_root + 'Label/debugca_seg_paths.json')
    io.save_json(split, design_root + 'Split/debugca_gan_split.json')
Example #17
def align_and_resize_image(benchmark):
    '''
    Resize images to standard size, and align the clothing region at the center of the image.
    '''

    ###################
    # config
    ###################
    img_size = 256
    region_rate = 0.8
    num_worker = 16

    ###################

    print('loading data')
    if benchmark == 'ca':
        samples = io.load_json(design_root + 'Label/ca_samples.json')
        bbox_label = io.load_data(design_root + 'Label/ca_bbox_label.pkl')
        lm_label = io.load_data(design_root + 'Label/ca_landmark_label.pkl')
        output_dir = design_root + 'Img/img_ca_%d/' % img_size
        fn_out_sample = design_root + 'Label/ca_samples.json'
        fn_out_bbox_label = design_root + 'Label/ca_bbox_label_%d.pkl' % img_size
        fn_out_lm_label = design_root + 'Label/ca_landmark_label_%d.pkl' % img_size
        interp_method = cv2.INTER_CUBIC
        update_label = True

    elif benchmark == 'inshop':
        samples = io.load_json(design_root + 'Label/inshop_samples.json')
        bbox_label = io.load_data(design_root + 'Label/inshop_bbox_label.pkl')
        lm_label = io.load_data(design_root +
                                'Label/inshop_landmark_label.pkl')
        output_dir = design_root + 'Img/img_inshop_%d' % img_size
        fn_out_sample = design_root + 'Label/inshop_samples.json'
        fn_out_bbox_label = design_root + 'Label/inshop_bbox_label_%d.pkl' % img_size
        fn_out_lm_label = design_root + 'Label/inshop_landmark_label_%d.pkl' % img_size
        interp_method = cv2.INTER_CUBIC
        update_label = True

    elif benchmark == 'ca_seg_pad':
        # this setting is to align segmentation map from org+pad size to standard size
        # this will not update landmark and bbox label
        input_dir = design_root + 'Img/seg_ca_pad_%d' % img_size
        bbox_label = io.load_data(design_root +
                                  'Label/ca_bbox_label_pad_%d.pkl' % img_size)
        output_dir = design_root + 'Img/seg_ca_%d/' % img_size
        lm_label = None
        fn_out_sample = fn_out_bbox_label = fn_out_lm_label = None
        interp_method = cv2.INTER_NEAREST
        update_label = False

        # change img_path_org to input segmentation map file (org+pad size)
        # change img_path to output segmentation map file (standard size)
        samples = io.load_json(design_root + 'Label/ca_samples.json')
        split = io.load_json(design_root + 'Split/ca_gan_split_trainval.json')
        samples = {
            s_id: samples[s_id]
            for s_id in split['train'] + split['test']
        }
        for s_id, s in samples.items():
            samples[s_id]['img_path_org'] = os.path.join(
                input_dir, s_id + '.bmp')
            samples[s_id]['img_path'] = os.path.join(output_dir, s_id + '.bmp')

    elif benchmark == 'ca_seg_syn':
        # this setting is to align segmentation map from org size to standard size
        # this will not update landmark and bbox label
        input_dir = design_root + 'Img/seg_ca_org_syn'
        bbox_label = io.load_data(design_root + 'Label/ca_bbox_label.pkl')
        output_dir = design_root + 'Img/seg_ca_syn_%d' % img_size
        lm_label = None
        fn_out_sample = fn_out_bbox_label = fn_out_lm_label = None
        interp_method = cv2.INTER_NEAREST
        update_label = False

        # change img_path_org to input segmentation map file (org+pad size)
        # change img_path to output segmentation map file (standard size)
        samples = io.load_json(design_root + 'Label/ca_samples.json')
        split = io.load_json(design_root + 'Split/ca_gan_split_trainval.json')
        samples = {
            s_id: samples[s_id]
            for s_id in split['train'] + split['test']
        }

        for s_id, s in samples.items():
            samples[s_id]['img_path_org'] = os.path.join(
                input_dir, s_id + '.bmp')
            samples[s_id]['img_path'] = os.path.join(output_dir, s_id + '.bmp')
            assert os.path.isfile(
                samples[s_id]['img_path_org']
            ), 'cannot find image: %s' % samples[s_id]['img_path_org']

    io.mkdir_if_missing(design_root + 'Img')
    io.mkdir_if_missing(output_dir)

    # update sample
    if update_label:
        print('updating sample index')
        for s_id in samples.keys():
            samples[s_id]['img_path'] = os.path.join(output_dir, s_id + '.jpg')
        io.save_json(samples, fn_out_sample)

    # process images
    from multiprocessing import Process, Manager

    id_list = list(samples.keys())
    block_size = len(id_list) // num_worker + 1

    if update_label:
        manager = Manager()
        aligned_bbox_label = manager.dict()
        aligned_lm_label = manager.dict()
    else:
        aligned_bbox_label = aligned_lm_label = None

    p_list = []
    for worker_idx in range(num_worker):
        id_sublist = id_list[block_size * worker_idx:block_size *
                             (worker_idx + 1)]
        p = Process(target = _align_and_resize_image_unit,\
            args = (worker_idx, id_sublist, samples, bbox_label, lm_label, img_size, region_rate, interp_method, aligned_bbox_label, aligned_lm_label))
        p.start()
        p_list.append(p)

    for p in p_list:
        p.join()

    if update_label:
        aligned_bbox_label = dict(aligned_bbox_label)
        aligned_lm_label = dict(aligned_lm_label)
        io.save_data(aligned_bbox_label, fn_out_bbox_label)
        io.save_data(aligned_lm_label, fn_out_lm_label)
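The worker function _align_and_resize_image_unit is not shown here; the sketch below (with a hypothetical stand-in worker, illustrative only) isolates the Manager().dict() pattern used above to collect per-sample results from multiple processes:

from multiprocessing import Manager, Process

def _worker(worker_idx, id_sublist, out_dict):
    # stand-in for _align_and_resize_image_unit: write one result per sample id
    for s_id in id_sublist:
        out_dict[s_id] = [worker_idx, len(s_id)]

if __name__ == '__main__':
    id_list = ['ca_%d' % i for i in range(8)]
    num_worker = 2
    block_size = len(id_list) // num_worker + 1

    manager = Manager()
    results = manager.dict()

    p_list = []
    for worker_idx in range(num_worker):
        sub = id_list[block_size * worker_idx:block_size * (worker_idx + 1)]
        p = Process(target=_worker, args=(worker_idx, sub, results))
        p.start()
        p_list.append(p)
    for p in p_list:
        p.join()

    results = dict(results)   # detach from the manager before saving
    print(len(results))       # 8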
Example #18
def create_sample_index_and_label():
    '''
    Create sample index and label for Category_and_Attribute data
    - sample index
    - landmark label
    - bbox label
    - attribute label
    '''

    # config
    dir_label = design_root + 'Label/'

    # create sample index and landmark label

    landmark_list = io.load_str_list(ca_root + 'Anno/list_landmarks.txt')[2::]
    img_root_org = ca_root + 'Img/'

    samples = {}
    landmark_label = {}

    for idx, s in enumerate(landmark_list):
        img_id = 'ca_' + str(idx)

        s = s.split()
        img_path_org = os.path.join(img_root_org, s[0])

        # 1: upper-body, 2: lower-body, 3: full-body
        cloth_type = int(s[1])
        pose_type = -1

        lm_str = s[2::]
        if cloth_type == 1:
            assert len(lm_str) == 18
        elif cloth_type == 2:
            assert len(lm_str) == 12
        elif cloth_type == 3:
            assert len(lm_str) == 24

        # lm is a list: [(x_i, y_i, v_i)]
        lm = [(float(lm_str[i + 1]), float(lm_str[i + 2]), int(lm_str[i]))
              for i in range(0, len(lm_str), 3)]

        samples[img_id] = {
            'img_id': img_id,
            'cloth_type': cloth_type,
            'pose_type': pose_type,
            'img_path_org': img_path_org
        }

        landmark_label[img_id] = lm

    io.mkdir_if_missing(dir_label)
    io.save_json(samples, os.path.join(dir_label, 'ca_samples.json'))
    io.save_data(landmark_label,
                 os.path.join(dir_label, 'ca_landmark_label.pkl'))

    print('create sample index (%d samples)' % len(samples))
    print('create landmark label')

    img2id = {
        s['img_path_org'][s['img_path_org'].find('img')::]: s_id
        for s_id, s in samples.items()
    }

    # create bbox label
    bbox_list = io.load_str_list(ca_root + 'Anno/list_bbox.txt')[2::]
    bbox_label = {}

    for s in bbox_list:
        s = s.split()
        assert len(s) == 5
        s_id = img2id[s[0]]
        bbox = [float(x) for x in s[1::]]
        bbox_label[s_id] = bbox

    io.save_data(bbox_label, os.path.join(dir_label, 'ca_bbox_label.pkl'))
    print('create bbox label')

    # create attribute label
    attr_list = io.load_str_list(ca_root + 'Anno/list_attr_img.txt')[2::]
    attr_label = {}

    for idx, s in enumerate(attr_list):
        s = s.split()
        s_id = img2id[s[0]]
        att = [1 if c == '1' else 0 for c in s[1::]]
        assert len(att) == 1000

        attr_label[s_id] = att
        print('\rcreating attribute label %d / %d' % (idx, len(attr_list)),
              end='')

    io.save_data(attr_label, os.path.join(dir_label, 'ca_attr_label.pkl'))
    print('\ncreate attribute label')
Example #19
def create_sample_index_and_label():
    '''
    Create sample index and label for In-shop datasets
    - sample index
    - landmark label
    - bbox label
    '''

    # config
    dir_label = design_root + 'Label/'

    # create sample index and landmark label
    landmark_list = io.load_str_list(inshop_root +
                                     'Anno/list_landmarks_inshop.txt')[2::]
    img_root_org = inshop_root + 'Img/'

    samples = {}
    landmark_label = {}

    for idx, s in enumerate(landmark_list):
        img_id = 'inshop_' + str(idx)

        s = s.split()
        img_path_org = os.path.join(img_root_org, s[0])

        item_id = img_path_org.split('/')[-2]
        category = img_path_org.split('/')[-3]

        # 1: upper-body, 2: lower-body, 3: full-body
        cloth_type = int(s[1])

        # 1: normal, 2: medium, 3: large, 4: medium zoom-in, 5: large zoom-in, 6: flat (no person)
        pose_type = int(s[2])

        lm_str = s[3::]
        if cloth_type == 1:
            assert len(lm_str) == 18
        elif cloth_type == 2:
            assert len(lm_str) == 12
        elif cloth_type == 3:
            assert len(lm_str) == 24

        # lm is a list: [(x_i, y_i, v_i)]
        lm = [(float(lm_str[i + 1]), float(lm_str[i + 2]), int(lm_str[i]))
              for i in range(0, len(lm_str), 3)]

        samples[img_id] = {
            'img_id': img_id,
            'item_id': item_id,
            'category': category,
            'cloth_type': cloth_type,
            'pose_type': pose_type,
            'img_path_org': img_path_org
        }

        landmark_label[img_id] = lm

    io.mkdir_if_missing(dir_label)
    io.save_json(samples, os.path.join(dir_label, 'inshop_samples.json'))
    io.save_data(landmark_label,
                 os.path.join(dir_label, 'inshop_landmark_label.pkl'))

    print('create sample index (%d samples)' % len(samples))
    print('create landmark label')

    img2id = {
        s['img_path_org'][s['img_path_org'].find('img')::]: s_id
        for s_id, s in samples.items()
    }

    # create bbox label
    bbox_list = io.load_str_list(inshop_root +
                                 'Anno/list_bbox_inshop.txt')[2::]
    bbox_label = {}

    for s in bbox_list:
        s = s.split()
        assert len(s) == 7
        s_id = img2id[s[0]]
        bbox = [float(x) for x in s[3::]]
        bbox_label[s_id] = bbox

    io.save_data(bbox_label, os.path.join(dir_label, 'inshop_bbox_label.pkl'))
    print('create bbox label')
Example #20
L, E, transforms = aligner.fit(data_keyframes, vol_shape)

# Align all frames in parallel
# Here we specify a smaller number of iterations as we're
# fixing the template and typically converge rapidly.
aligner.max_iter = 8
final_transforms = aligner.parallel_align(data, n_jobs=-1, chunks=10)  # , scales=[4, 3, 2]

# Save transforms
transform_fn = os.path.join(output_dir, base_name + '_transforms.npy')
np.save(transform_fn, final_transforms)

# Transform original dataset
# order specifies the interpolation order
# For now higher-order interpolation does
# some weird things so we stick with linear.
del data
datareader = load_data(output_h5)
vol_shape = datareader.vol_shape
data = datareader.data

aligned_fn = os.path.join(output_dir, base_name + '_aligned.h5')
aligned_data = transform_dataset(data, vol_shape, final_transforms, order=1)
save_data(aligned_data, vol_shape, aligned_fn)

# Generate output movie comparing unaligned (left) and aligned (right)
# You will need ffmpeg for this to work
aligned_movie_fn = os.path.join(output_dir, base_name + '_movie.mp4')
composite_movie([data, aligned_data], vol_shape, aligned_movie_fn)