Пример #1
0
def create_test_pair():
    '''
    Create (target, source) sample pairs within each category of the test
    set by pairing the original id list against a shuffled copy of itself,
    and save the pair files under design_root + 'Temp/'.
    '''
    # BUGFIX: the original called np.random.rand(0), which just draws an
    # empty array and has no effect; seeding the RNG was clearly intended.
    np.random.seed(0)
    split = io.load_json(design_root +
                         'Split/ca_gan_split_trainval_upper.json')
    cat_label = io.load_data(design_root + 'Label/ca_cat_label.pkl')
    cat_entry = io.load_json(design_root + 'Label/cat_entry.json')
    # group samples by category label
    cat_to_ids = defaultdict(list)
    for s_id in split['test']:
        cat_to_ids[cat_label[s_id]].append(s_id)
    pair_list = []
    for n, (c, s_list) in enumerate(cat_to_ids.items()):
        print('[%d/%d] %s: %d samples...' %
              (n, len(cat_to_ids), cat_entry[c]['entry'], len(s_list)))
        s_list_org = list(s_list)
        # Sattolo-style shuffle: j is always > i, so for lists with more
        # than one element no sample is ever paired with itself.
        for i in range(len(s_list) - 1):
            j = np.random.randint(i + 1, len(s_list))
            s_list[i], s_list[j] = s_list[j], s_list[i]
        pair_list += zip(s_list_org, s_list)

    # pair_dict maps target id -> source id
    pair_dict = {s_tar: s_src for s_tar, s_src in pair_list}
    io.save_json(pair_dict, design_root + 'Temp/ca_test_tps_pair.json')

    io.save_str_list(list(pair_dict.keys()),
                     design_root + 'Temp/ca_test_tps_tar.txt')
    io.save_str_list(list(pair_dict.values()),
                     design_root + 'Temp/ca_test_tps_src.txt')
Пример #2
0
 def visualize_results(self, visuals, filename):
     '''
     Render the given visuals as one image grid saved to `filename`, and
     write the visualized item names to vis_item_list.txt alongside it.
     '''
     out_dir = os.path.dirname(filename)
     io.mkdir_if_missing(out_dir)
     image_grid, item_names = merge_visual(visuals)
     torchvision.utils.save_image(image_grid,
                                  filename,
                                  nrow=len(visuals),
                                  normalize=True)
     io.save_str_list(item_names,
                      os.path.join(out_dir, 'vis_item_list.txt'))
Пример #3
0
def search_image(query_fn_list, gallery_fn_list, output_dir, method='cos'):
    '''
    For each query image, rank all gallery images by similarity and save
    the top candidates under output_dir/<query_index>/ (candidate images,
    a result.txt with scores, and the query image itself). A global
    result.txt mapping each query to its best match is saved at the end.

    Input:
        query_fn_list: list of query image file names
        gallery_fn_list: list of gallery image file names
        output_dir: root output directory
        method: similarity measure; only 'cos' is supported
    Raises:
        ValueError: if `method` is not a supported similarity measure
    '''
    num_cand = 20
    cache_image = True

    if method == 'cos':
        func_similarity = similarity_cos
    else:
        # BUGFIX: the original silently fell through and crashed later with
        # a NameError on func_similarity; fail fast instead.
        raise ValueError('unsupported similarity method: %s' % method)
    if cache_image:
        img_g_dict = dict()

    io.mkdir_if_missing(output_dir)
    result = []
    for idx, fn_q in enumerate(query_fn_list):
        print('searching sample %d/%d' % (idx, len(query_fn_list)))
        io.mkdir_if_missing(output_dir + '/%d/' % idx)
        img_q = imageio.imread(fn_q)
        # candidate list kept sorted by descending score; the sentinel keeps
        # the insertion logic simple. NOTE(review): scores below -1 would
        # never be inserted -- assumed impossible for cosine similarity.
        cand_list = [(None, None, -1)]
        for fn_g in tqdm.tqdm(gallery_fn_list):
            if cache_image:
                # lazily cache decoded gallery images across queries
                if fn_g in img_g_dict:
                    img_g = img_g_dict[fn_g]
                else:
                    img_g = imageio.imread(fn_g)
                    img_g_dict[fn_g] = img_g
            else:
                img_g = imageio.imread(fn_g)
            score = func_similarity(img_q, img_g)
            # find the insertion position that keeps cand_list sorted
            i_insert = -1
            for i in range(len(cand_list)):
                if score > cand_list[i][2]:
                    i_insert = i
                    break
            if i_insert >= 0:
                cand_list.insert(i_insert, (fn_g, img_g, score))
                # keep only the top num_cand candidates
                if len(cand_list) > num_cand:
                    cand_list = cand_list[0:num_cand]

        imageio.imwrite(output_dir + '/%d/query.jpg' % idx, img_q)
        for i, (fn_g, img_g, score) in enumerate(cand_list):
            if fn_g:  # skip the sentinel entry if it is still present
                imageio.imwrite(output_dir + '/%d/cand_%d.jpg' % (idx, i), img_g)
        output_info = [fn_q]
        output_info += ['%d %f %s' % (i, score, fn) for i, (fn, _, score) in enumerate(cand_list)]
        io.save_str_list(output_info, output_dir + '/%d/result.txt' % idx)
        result.append('%d %s %s' % (idx, fn_q, cand_list[0][0]))

    io.save_str_list(result, output_dir + 'result.txt')
Пример #4
0
def pad_image_for_segmentation():
    '''
    Resize and padding image for segmentation (using fashionGAN code), and
    rescale the landmark/bbox labels to match the resized images.
    Todo: add inshop version
    '''

    sz_tar = 256
    output_dir = 'datasets/DeepFashion/Fashion_design/Img/img_ca_pad'

    io.mkdir_if_missing(output_dir)
    samples = io.load_json(design_root + 'Label/ca_samples.json')
    split = io.load_json(design_root + 'Split/ca_gan_split_trainval.json')
    id_list = split['train'] + split['test']

    # rescaled landmark and bbox labels, keyed by sample id
    lm_label = io.load_data(design_root + 'Label/ca_landmark_label.pkl')
    bbox_label = io.load_data(design_root + 'Label/ca_bbox_label.pkl')
    lm_label_pad = {}
    bbox_label_pad = {}

    io.save_str_list(id_list, os.path.join(output_dir, 'img_ca_pad.txt'))
    for idx, s_id in enumerate(id_list):
        img_org = image.imread(samples[s_id]['img_path_org'])
        h, w = img_org.shape[0:2]

        # resize so that the longer side becomes sz_tar
        if h > w:
            img = image.resize(img_org, (-1, sz_tar))
            scale = 1. * sz_tar / h
        else:
            img = image.resize(img_org, (sz_tar, -1))
            scale = 1. * sz_tar / w

        # img = image.pad_square(img, sz_tar, padding_value = 255, mode = 'lefttop')
        # image.imwrite(img, os.path.join(output_dir, s_id + '.jpg'))

        bbox_label_pad[s_id] = [c * scale for c in bbox_label[s_id]]
        lm_label_pad[s_id] = [[x * scale, y * scale, v]
                              for x, y, v in lm_label[s_id]]

        print('padding image %d / %d' % (idx, len(id_list)))

    io.save_data(lm_label_pad,
                 design_root + 'Label/ca_landmark_label_pad_%d.pkl' % sz_tar)
    io.save_data(bbox_label_pad,
                 design_root + 'Label/ca_bbox_label_pad_%d.pkl' % sz_tar)
Пример #5
0
    def visualize_image(self, epoch, subset, visuals):
        '''
        Save a visualization grid of `visuals` for the given epoch and
        subset under checkpoints/<id>/vis/, plus the item-name list.
        '''
        opt = self.opt
        vis_dir = os.path.join('checkpoints', opt.id, 'vis')
        io.mkdir_if_missing(vis_dir)
        print('[%s] visualizing %s images' % (opt.id, subset))

        shape_with_face = 'shape_with_face' in opt and opt.shape_with_face
        imgs, vis_list = self.merge_visual(
            visuals, kword_params={'shape_with_face': shape_with_face})
        fn_img = os.path.join(vis_dir, '%s_epoch%s.jpg' % (subset, epoch))
        torchvision.utils.save_image(imgs, fn_img, nrow=len(visuals),
                                     normalize=True)
        io.save_str_list(vis_list, os.path.join(vis_dir, 'vis_name_list.txt'))
Пример #6
0
def create_vis_pair():
    '''
    select a small group of targets from test set. prepare several edge source items for each target.

    Targets are drawn at random from the test split; the sources for each
    target are random same-category samples (excluding the target itself).
    Results are saved under design_root + 'Temp/'.
    '''
    # BUGFIX: the original called np.random.rand(0), which just draws an
    # empty array and has no effect; seeding the RNG was clearly intended.
    np.random.seed(0)
    num_tar = 500
    num_src_per_tar = 6

    split = io.load_json(design_root +
                         'Split/ca_gan_split_trainval_upper.json')
    cat_label = io.load_data(design_root + 'Label/ca_cat_label.pkl')
    cat_entry = io.load_json(design_root + 'Label/cat_entry.json')
    # copy, so the in-place shuffle below does not mutate split['test']
    id_list = list(split['test'])
    # group samples by category label
    cat_to_ids = defaultdict(list)
    for s_id in split['test']:
        cat_to_ids[cat_label[s_id]].append(s_id)
    # target list
    np.random.shuffle(id_list)
    tar_list = id_list[0:num_tar]
    # select src for each target: same category, excluding the target itself
    group_dict = {}
    for tar_id in tar_list:
        c = cat_label[tar_id]
        src_list = [s_id for s_id in cat_to_ids[c] if s_id != tar_id]
        np.random.shuffle(src_list)
        group_dict[tar_id] = src_list[0:num_src_per_tar]

    io.save_json(group_dict, design_root + 'Temp/ca_vis_tps_group.json')

    # flatten to parallel (target, source) lists
    output_src_list = []
    output_tar_list = []
    for tar_id, src_list in group_dict.items():
        output_tar_list += [tar_id] * len(src_list)
        output_src_list += src_list
    io.save_str_list(output_tar_list, design_root + 'Temp/ca_vis_tps_tar.txt')
    io.save_str_list(output_src_list, design_root + 'Temp/ca_vis_tps_src.txt')
import tqdm

############################################
# Initialize
############################################
# Parse test-time options, then overwrite them with the options the model
# was trained with (except for those listed in preserved_opt).
opt = TestPoseParsingOptions().parse()
train_opt = io.load_json(os.path.join('checkpoints', opt.id, 'train_opt.json'))
preserved_opt = {'gpu_ids', 'is_train'}
for k, v in train_opt.iteritems():
    # only overwrite options that already exist in opt and are not preserved
    if k in opt and (k not in preserved_opt):
        setattr(opt, k, v)
# create model
model = PoseParsingModel()
model.initialize(opt)
# save terminal order line (the exact command used to launch this script)
io.save_str_list([' '.join(sys.argv)], os.path.join(model.save_dir, 'order_line.txt'))
# create data loader
val_loader = CreateDataLoader(opt, split='test')
# create visualizer
visualizer = GANVisualizer_V3(opt)

############################################
# Visualize
############################################
if opt.nvis > 0:
    print('visualizing first %d samples' % opt.nvis)
    # number of batches needed to cover opt.nvis samples
    num_vis_batch = int(np.ceil(1.0*opt.nvis/opt.batch_size))
    visuals = None
    for i, data in enumerate(val_loader):
        if i == num_vis_batch:
            break
        # NOTE(review): the loop body appears truncated here -- presumably
        # the batch is forwarded through the model and visualized; confirm
        # against the original script.
Пример #8
0
def warp_image_by_flow():
    '''
    Warp reference images to target poses using optical flow predicted by
    FlowNet2.0, then evaluate the warped images against the ground-truth
    targets with SSIM and PSNR. All outputs (pair list, .flo files, warped
    images and flow visualizations) go to a mode-specific folder.
    '''
    ################################
    # config
    ################################
    num_sample = 64
    # mode = 'real2real'
    # mode = 'real2fake'
    mode = 'fake2fake'

    # Each mode configures:
    #   img_flow1/img_flow2 : the image pair the flow is computed between
    #   img_warp1/img_warp2 : the pair used for warping (warp1 -> warp2)
    #   seg                 : segmentation maps used as warping masks
    #   img_tar             : ground-truth target images for SSIM/PSNR
    # The *_namefmt values select the file-name format (see _get_name).
    if mode == 'real2real':
        output_dir = 'temp/flow/flownet2.0_real2real/'
        io.mkdir_if_missing(output_dir)
        pairs = io.load_json('datasets/DF_Pose/Label/pair_split.json')['test'][0:num_sample]
        # flow direction: img_flow2 ->  img_flow1
        img_flow1_dir = 'datasets/DF_Pose/Img/img_df/'
        img_flow1_namefmt = 'id_1'
        img_flow2_dir = 'datasets/DF_Pose/Img/img_df/'
        img_flow2_namefmt = 'id_2'
        # warping direction: img_warp1 -> img_warp2
        img_warp1_dir = 'datasets/DF_Pose/Img/img_df/'
        img_warp1_namefmt = 'id_1'
        img_warp2_dir = 'datasets/DF_Pose/Img/img_df/'
        img_warp2_namefmt = 'id_2'
        # segmentation map as mask
        seg_dir = 'datasets/DF_Pose/Img/seg-lip_df/'
        seg_namefmt = 'id_2'
        # target image
        img_tar_dir = 'datasets/DF_Pose/Img/img_df/'
        img_tar_namefmt = 'id_2'
    elif mode == 'real2fake':
        output_dir = 'temp/flow/flownet2.0_real2fake/'
        io.mkdir_if_missing(output_dir)
        pairs = io.load_json('datasets/DF_Pose/Label/pair_split.json')['test'][0:num_sample]
        # flow direction: img_flow2 ->  img_flow1
        img_flow1_dir = 'datasets/DF_Pose/Img/img_df/'
        img_flow1_namefmt = 'id_1'
        img_flow2_dir = 'checkpoints/PoseTransfer_7.5/test/'
        img_flow2_namefmt = 'pair'
        # warping direction: img_warp1 -> img_warp2
        img_warp1_dir = 'datasets/DF_Pose/Img/img_df/'
        img_warp1_namefmt = 'id_1'
        img_warp2_dir = 'checkpoints/PoseTransfer_7.5/test/'
        img_warp2_namefmt = 'pair'
        # segmentation map as mask
        seg_dir = 'checkpoints/PoseTransfer_7.5/test_seg/'
        seg_namefmt = 'pair'
        # target image
        img_tar_dir = 'datasets/DF_Pose/Img/img_df/'
        img_tar_namefmt = 'id_2'
    elif mode == 'fake2fake':
        output_dir = 'temp/flow/flownet2.0_fake2fake/'
        io.mkdir_if_missing(output_dir)
        pairs = io.load_json('datasets/DF_Pose/Label/pair_split.json')['test'][0:num_sample]
        # flow direction: img_flow2 ->  img_flow1
        img_flow1_dir = 'checkpoints/PoseTransfer_7.5/test_ref/'
        img_flow1_namefmt = 'pair'
        img_flow2_dir = 'checkpoints/PoseTransfer_7.5/test/'
        img_flow2_namefmt = 'pair'
        # warping direction: img_warp1 -> img_warp2
        img_warp1_dir = 'datasets/DF_Pose/Img/img_df/'
        img_warp1_namefmt = 'id_1'
        img_warp2_dir = 'checkpoints/PoseTransfer_7.5/test/'
        img_warp2_namefmt = 'pair'
        # segmentation map as mask
        seg_dir = 'checkpoints/PoseTransfer_7.5/test_seg/'
        seg_namefmt = 'pair'
        # target image
        img_tar_dir = 'datasets/DF_Pose/Img/img_df/'
        img_tar_namefmt = 'id_2'
    else:
        raise Exception('invalid mode!')

    def _get_name(id_1, id_2=None, idx=None, fmt='id_1', ext='jpg'):
        # Build a file name from a sample pair according to format `fmt`:
        # 'id_1'/'id_2' -> a single id, 'pair' -> "<id1>_<id2>",
        # 'ipair' -> "<idx>_<id1>_<id2>".
        if fmt =='id_1':
            return '%s.%s'%(id_1, ext)
        elif fmt =='id_2':
            return '%s.%s'%(id_2, ext)
        elif fmt == 'pair':
            return '%s_%s.%s'%(id_1, id_2, ext)
        elif fmt == 'ipair':
            return '%s_%s_%s.%s' % (idx, id_1, id_2, ext)
        else:
            raise Exception('wrong name format %s' % fmt)

    ################################
    # compute flow
    ################################
    # create pair list file for flownet2.0
    pair_list = []
    flow_file_list = []
    for idx, (id_1, id_2) in enumerate(pairs):
        fn_1 = os.path.abspath(img_flow1_dir + _get_name(id_1, id_2, idx, img_flow1_namefmt))
        fn_2 = os.path.abspath(img_flow2_dir + _get_name(id_1, id_2, idx, img_flow2_namefmt))
        fn_out = os.path.abspath(output_dir + _get_name(id_1, id_2, idx, 'ipair', 'flo'))
        flow_file_list.append(fn_out)
        pair_list.append(' '.join([fn_2, fn_1, fn_out])) # note that the flow is from img_2(target pose) to img_1(reference pose)

    # run flownet2.0 script to compute optical flow
    if True:
        fn_pair_list = output_dir + 'pair_list.txt'
        io.save_str_list(pair_list, fn_pair_list)
        # compute flow
        fn_flownet_script = flownet2_dir + 'scripts/run-flownet-many.py'
        fn_flownet_model = flownet2_dir + 'models/FlowNet2/FlowNet2_weights.caffemodel.h5'
        fn_flownet_prototxt = flownet2_dir + 'models/FlowNet2/FlowNet2_deploy.prototxt.template'

        order = 'python %s %s %s %s --gpu 0' % (fn_flownet_script, fn_flownet_model, fn_flownet_prototxt, fn_pair_list)
        print('excute command:\n%s'%order)
        os.system(order)

    ################################
    # warp images and evaluate (SSIM/PSNR)
    ################################
    ssim_score = []
    psnr_score = []
    for idx, (id_1, id_2) in enumerate(tqdm.tqdm(pairs, desc='warping image')):
        fn_1 = img_warp1_dir + _get_name(id_1, id_2, idx, img_warp1_namefmt)
        fn_2 = img_warp2_dir + _get_name(id_1, id_2, idx, img_warp2_namefmt)
        fn_seg = seg_dir + _get_name(id_1, id_2, idx, seg_namefmt, 'bmp')
        fn_flow = flow_file_list[idx]

        img_1 = imageio.imread(fn_1)
        img_2 = imageio.imread(fn_2)
        seg = imageio.imread(fn_seg)
        # Binary mask from segmentation labels 3/4/7.
        # NOTE(review): presumably these are the clothing-region labels of
        # the segmentation scheme in use -- confirm against the seg data.
        mask = ((seg==3)|(seg==4)|(seg==7)).astype(np.uint8)[..., np.newaxis]
        flow_2to1 = flow_util.readFlow(fn_flow)

        # Warp the reference image with the (target -> reference) flow, then
        # composite: warped pixels inside the mask, original img_2 elsewhere.
        img_2_warp_raw = flow_util.warp_image(img_1, flow_2to1)
        img_2_warp = img_2_warp_raw * mask + img_2 * (1 - mask)
        img_flow = flow_util.flow_to_rgb(flow_2to1)

        fn_warp = output_dir + _get_name(id_1, id_2, idx, 'ipair')
        fn_warp_raw = output_dir + _get_name(id_1, id_2, idx, 'ipair', 'raw.jpg')
        fn_visflow = output_dir + _get_name(id_1, id_2, idx, 'ipair', 'flow.jpg')

        imageio.imwrite(fn_warp, img_2_warp)
        imageio.imwrite(fn_warp_raw, img_2_warp_raw)
        imageio.imwrite(fn_visflow, img_flow)

        # score the composited warp against the ground-truth target image
        img_tar = imageio.imread(img_tar_dir + _get_name(id_1, id_2, idx, img_tar_namefmt))
        ssim_score.append(compare_ssim(img_tar, img_2_warp, multichannel=True))
        psnr_score.append(compare_psnr(img_tar, img_2_warp))

    ################################
    # output result
    ################################
    print('Output Dir: %s' % output_dir)
    print('Result:')
    print('psnr: %f' % np.mean(psnr_score))
    print('ssim: %f' % np.mean(ssim_score))
Пример #9
0
def attribute_fusion_retrieval():
    '''
    Attribute-fusion retrieval demo: for random (source, target) image
    pairs, replace one attribute type (Color/Texture/...) of the source's
    attribute vector with the target's, retrieve the nearest item under L1
    distance, and save the retrieved images plus an info.txt per pair under
    temp/attribute_retrieval/<n>/.
    '''

    upper_categorys = {
        'Tees_Tanks', 'Blouses_Shirts', 'Sweaters', 'Jackets_Coats',
        'Graphic_Tees', 'Sweatshirts_Hoodies', 'Cardigans', 'Shirts_Polos',
        'Jackets_Vests'
    }

    type_map = {
        0: 'Color',
        1: 'Texture',
        2: 'Fabric',
        3: 'Shape',
        4: 'Part',
        5: 'Style'
    }

    samples = io.load_json(
        'datasets/DeepFashion/In-shop/Label/samples_attr.json')
    attr_label = io.load_json(
        'datasets/DeepFashion/In-shop/Label/attribute_label_top500.json')
    attr_entry = io.load_json(
        'datasets/DeepFashion/In-shop/Label/attribute_entry_top500.json')

    # keep only front-view images of upper-body categories
    samples = {
        s_id: s
        for s_id, s in samples.items()
        if s['category'] in upper_categorys and s['pose'] == 'front'
    }
    print('valid images: %d' % len(samples))

    # one representative sample per item (later samples overwrite earlier)
    items = {}
    for s_id, s in samples.items():
        items[s['item_id']] = {
            'id': s_id,
            'item_id': s['item_id'],
            'img_path': s['img_path'],
            'label': attr_label[s_id]
        }
    print('valid items: %d' % len(items))

    id_list = [it['id'] for it in items.values()]
    attr_mat = np.array([attr_label[s_id] for s_id in id_list],
                        dtype=np.float32)

    # reorder attribute columns so attributes of the same type are
    # contiguous; attr_group records the [i_start, i_end) range per type
    attr_indices_list = []
    attr_group = []

    i = 0
    for att_type in range(6):
        type_indices = [
            idx for idx, att in enumerate(attr_entry)
            if att['type'] == att_type
        ]
        attr_indices_list += type_indices

        type_size = len(type_indices)
        attr_group.append({
            'type_name': type_map[att_type],
            'i_start': i,
            'i_end': i + type_size,
        })
        i += type_size

        print('type [%s]: %d attributes' % (type_map[att_type], type_size))

    attr_mat = attr_mat[:, attr_indices_list]
    attr_entry = [attr_entry[i] for i in attr_indices_list]

    # normalize each type block by its size, so every attribute type
    # contributes equally to the L1 distance below
    for ag in attr_group:
        attr_mat[:, ag['i_start']:ag['i_end']] /= (ag['i_end'] - ag['i_start'])

    # perform retrieval and return results

    def _get_attr_str(label):
        # human-readable list of the positive attributes in a label vector
        label = label.tolist()
        attr_str = [
            '%s[%s]' % (att['entry'], type_map[att['type']])
            for i, att in enumerate(attr_entry) if label[i] > 0
        ]
        return ' '.join(attr_str)

    N = 100
    src_idx_list = np.random.choice(range(len(id_list)), N).tolist()
    tar_idx_list = np.random.choice(range(len(id_list)), N).tolist()

    for n, (src_idx, tar_idx) in enumerate(zip(src_idx_list, tar_idx_list)):
        dir_out = 'temp/attribute_retrieval/%d' % n
        io.mkdir_if_missing(dir_out)
        txt_list = []

        src_id = id_list[src_idx]
        tar_id = id_list[tar_idx]
        io.copy(samples[src_id]['img_path'],
                os.path.join(dir_out, 'input_src.jpg'))
        io.copy(samples[tar_id]['img_path'],
                os.path.join(dir_out, 'input_tar.jpg'))

        v_src = attr_mat[src_idx, :]
        v_tar = attr_mat[tar_idx, :]

        txt_list.append('src:\t %s' % _get_attr_str(v_src))
        txt_list.append('')
        txt_list.append('tar:\t %s' % _get_attr_str(v_tar))
        txt_list.append('')
        txt_list.append('')

        for att_type in range(6):
            ag = attr_group[att_type]
            # query = source vector with this attribute type's block
            # replaced by the target's values
            v_query = v_src.copy()
            v_query[ag['i_start']:ag['i_end']] = v_tar[
                ag['i_start']:ag['i_end']]
            dist = np.abs(attr_mat - v_query).sum(axis=1)

            # take the 2nd nearest item (index 0 is typically the best
            # match of the query with itself/the source)
            rtv_idx = dist.argsort()[1]
            rtv_id = id_list[rtv_idx]
            v_rtv = attr_mat[rtv_idx, :]

            io.copy(
                samples[rtv_id]['img_path'],
                os.path.join(
                    dir_out,
                    'retrieve_%d_%s.jpg' % (att_type, type_map[att_type])))
            txt_list.append('retrieve [%s]: %s' %
                            (type_map[att_type], _get_attr_str(v_rtv)))

        # NOTE: the original appended one more empty line *after* this save
        # call, which had no effect; that dead statement was removed.
        io.save_str_list(txt_list, os.path.join(dir_out, 'info.txt'))
Пример #10
0
def output_attribute_entry():
    '''
    Output formated attribute entry to txt file.

    Entries are grouped by attribute type and sorted by positive rate
    within each type; a by-type json is also saved. In addition, up to
    `num_example` positive example images are copied for every attribute
    entry under temp/attribute_entry/examples/.
    '''

    attr_entry = io.load_json(
        'datasets/DeepFashion/In-shop/Label/attribute_entry_top500.json')
    type_map = {
        0: 'Color',
        1: 'Texture',
        2: 'Fabric',
        3: 'Shape',
        4: 'Part',
        5: 'Style'
    }

    # output attribute entry txt, grouped by type, sorted by pos_rate
    attr_entry_list = []

    # iterate type ids in order (the type names are looked up later)
    for att_type in sorted(type_map):
        attr_this_type = [att for att in attr_entry if att['type'] == att_type]
        attr_this_type.sort(key=lambda x: x['pos_rate'], reverse=True)
        attr_entry_list += attr_this_type

    attr_txt_list = [
        '%s\t%s(%d)\t%.3f' %
        (att['entry'], type_map[att['type']], att['type'], att['pos_rate'])
        for att in attr_entry_list
    ]
    attr_name_list = [att['entry'] for att in attr_entry_list]

    dir_out = 'temp/attribute_entry/'
    io.mkdir_if_missing(dir_out)
    io.save_str_list(attr_txt_list, os.path.join(dir_out,
                                                 'attribute_entry.txt'))
    io.save_str_list(attr_name_list,
                     os.path.join(dir_out, 'attribute_entry_name.txt'))

    io.save_json(
        attr_entry_list,
        'datasets/DeepFashion/In-shop/Label/attribute_entry_top500_byType.json'
    )

    # output positive samples for each attribute entry
    num_example = 10
    samples = io.load_json(
        'datasets/DeepFashion/In-shop/Label/samples_attr.json')
    attr_label = io.load_json(
        'datasets/DeepFashion/In-shop/Label/attribute_label_top500.json')

    # one representative sample per item (later samples overwrite earlier)
    items = {}
    for s_id, s in samples.items():
        items[s['item_id']] = {
            'id': s_id,
            'item_id': s['item_id'],
            'img_path': s['img_path'],
            'label': attr_label[s_id]
        }

    # materialize as a list so it can be filtered/shuffled (Py3 view compat)
    item_list = list(items.values())

    dir_example = os.path.join(dir_out, 'examples')
    io.mkdir_if_missing(dir_example)

    for idx, att in enumerate(attr_entry):
        print('search examples for attribute %d / %d: %s' %
              (idx, len(attr_entry), att['entry']))
        dir_example_this_att = os.path.join(dir_example,
                                            att['entry'].replace(' ', '_'))
        io.mkdir_if_missing(dir_example_this_att)
        # items positively labeled with this attribute
        pos_list = [item for item in item_list if item['label'][idx] == 1]
        np.random.shuffle(pos_list)

        for item in pos_list[0:num_example]:
            fn_src = item['img_path']
            fn_tar = os.path.join(dir_example_this_att,
                                  item['item_id'] + '.jpg')
            io.copy(fn_src, fn_tar)
Пример #11
0
    def show_attr_pred_statistic(self, result):
        '''
        Save attribute predction statistic information into json and txt file.
        Input:
            result (dict): test result with fields 'AP_list', 'rec3_list'
                and 'rec5_list' (per-attribute metrics)
        '''

        if hasattr(self, 'attr_entry'):
            attr_entry = self.attr_entry
        else:
            attr_entry = io.load_json(
                os.path.join(self.opt.data_root, self.opt.fn_entry))

        # add results of interest: mean metrics per attribute type
        type_entry = {
            1: 'texture',
            2: 'fabric',
            3: 'shape',
            4: 'part',
            5: 'style'
        }
        result['type_info'] = []
        for t in range(1, 6):
            idx_list = [
                i for i, att in enumerate(attr_entry) if att['type'] == t
            ]
            ap = (np.array(result['AP_list'])[idx_list]).mean().tolist()
            rec3 = (np.array(result['rec3_list'])[idx_list]).mean().tolist()
            rec5 = (np.array(result['rec5_list'])[idx_list]).mean().tolist()
            result['type_info'].append({
                'type': '%d-%s' % (t, type_entry[t]),
                'ap': ap,
                'rec3': rec3,
                'rec5': rec5
            })

        # best / worst 20 attributes ranked by AP
        ap_order = np.argsort(result['AP_list'])
        result['top_attr_list'] = [(attr_entry[i]['entry'], attr_entry[i]['type'], result['AP_list'][i]) \
            for i in ap_order[::-1][0:20]]
        result['worst_attr_list'] = [(attr_entry[i]['entry'], attr_entry[i]['type'], result['AP_list'][i]) \
            for i in ap_order[0:20]]

        # display scalar results only
        result_disp = OrderedDict()
        for k, v in result.items():
            if isinstance(v, float):
                result_disp[k] = v
        self.print_error(result_disp)

        # save json (convert numpy arrays so they are serializable)
        dir_test = os.path.join('checkpoints', self.opt.id, 'test')
        io.mkdir_if_missing(dir_test)
        fn_json = os.path.join(dir_test, 'test_result.json')
        for k, v in result.items():
            if isinstance(v, np.ndarray):
                result[k] = v.tolist()

        io.save_json(result, fn_json)

        # save txt summary
        str_output = ['AttributeType\tAP\trec3\trec5']
        str_output += [
            '%s\t%f\t%f\t%f' % (t['type'], t['ap'], t['rec3'], t['rec5'])
            for t in result['type_info']
        ]
        str_output += ['', 'Top Attribute']
        str_output += ['%s(%d)\t%f' % a for a in result['top_attr_list']]
        str_output += ['', 'Worst Attribute']
        str_output += ['%s(%d)\t%f' % a for a in result['worst_attr_list']]

        fn_txt = os.path.join('checkpoints', self.opt.id, 'test',
                              'test_result_summary.txt')
        io.save_str_list(str_output, fn_txt)

        # save txt detail: per-attribute entry/type/pos_rate + metrics.
        # BUGFIX: the original zipped rec5_list twice and never used
        # rec3_list, so the rec3 column silently showed rec5 values.
        str_output = [
            '%s\t%d\t%f\t%.2f\t%.2f\t%.2f' %
            (att['entry'], att['type'], att['pos_rate'], ap, rec3, rec5)
            for (att, ap, rec3,
                 rec5) in zip(attr_entry, result['AP_list'],
                              result['rec3_list'], result['rec5_list'])
        ]
        fn_txt = os.path.join('checkpoints', self.opt.id, 'test',
                              'test_result_detail.txt')

        io.save_str_list(str_output, fn_txt)
Пример #12
0
def search_unmatched_HR_image():
    '''
    Search high-resolution images which do not match their low-resolution version.

    Matching compares the gray-level histograms of the central crops of each
    LR/HR pair; results are written to temp/check_HR_LR_matching/.
    '''

    # config
    hsz = 256  # histogram size (number of bins)
    threshold = 0.9  # minimum histogram similarity to count as a match

    # define matching function
    import cv2

    def _match_image_pair(img1, img2, hsz):
        # Similarity of the two images, computed as the dot product of the
        # L2-normalized gray histograms of their central square crops.
        hists = []
        for img in [img1, img2]:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

            # take a central square crop of side max(h, w) * 0.5
            h, w = img.shape
            sz = int(max(h, w) * 0.5)
            x1 = int((w - sz) / 2)
            y1 = int((h - sz) / 2)
            x2 = int(w - (w - sz) / 2)
            y2 = int(h - (h - sz) / 2)
            img = img[y1:y2, x1:x2]

            hist = cv2.calcHist([img], [0], None, [hsz], [0, 256])
            hist = hist / np.linalg.norm(hist)
            hists.append(hist)

        # unit-norm dot product: 1.0 means identical gray distribution
        return (hists[0] * hists[1]).sum()

    # matching images
    samples = io.load_json(
        'datasets/DeepFashion/Fashion_design/Label/inshop_samples.json')

    unmatch_list = []  # path of LR images which do not match HR images
    missing_list = []  # path of LR images which do not have HR images

    for idx, s in enumerate(samples.values()):
        img_path_lr = s['img_path_org']
        # HR images live in a parallel '/img_highres/' directory tree
        img_path_hr = img_path_lr.replace('/img/', '/img_highres/')
        assert os.path.isfile(img_path_lr)

        if not os.path.isfile(img_path_hr):
            # store the path starting from the '/img' component
            missing_list.append(img_path_lr[img_path_lr.find('/img')::])
        else:
            img_lr = image.imread(img_path_lr)
            img_hr = image.imread(img_path_hr)
            score = _match_image_pair(img_lr, img_hr, hsz)

            if score < threshold:
                unmatch_list.append(img_path_lr[img_path_lr.find('/img')::])
            print('score: %.3f, %d / %d' % (score, idx, len(samples)))

        # print('checking HR and LR images are matched: %d / %d' % (idx, len(samples)))

    unmatch_list.sort()
    missing_list.sort()

    print('')
    print('unmatched images: %d' % len(unmatch_list))
    print('missing images: %d' % len(missing_list))

    output_dir = 'temp/check_HR_LR_matching'
    io.mkdir_if_missing(output_dir)
    io.save_str_list(unmatch_list,
                     os.path.join(output_dir, 'unmatched_images.txt'))
    io.save_str_list(missing_list,
                     os.path.join(output_dir, 'missing_images.txt'))