def render_vocab(vocab):
    """
    Renders the average patch of each word. This is a quick visualization of
    the entire vocabulary.

    CommandLine:
        python -m ibeis.algo.smk.vocab_indexer render_vocab --show

    Example:
        >>> from ibeis.algo.smk.vocab_indexer import *  # NOQA
        >>> vocab = testdata_vocab('PZ_MTEST', num_words=64)
        >>> all_words = vocab.render_vocab()
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> pt.qt4ensure()
        >>> pt.imshow(all_words)
        >>> ut.show_if_requested()
    """
    import plottool as pt
    # Subsample at most 64 words so the mosaic stays a manageable size
    sampled_wxs = ut.strided_sample(list(range(len(vocab))), 64)
    patches = []
    for wx in ut.ProgIter(sampled_wxs, bs=True, lbl='building patches'):
        centroid = vocab.wx_to_word[wx]
        # Invert the SIFT centroid into an image patch, then draw its arrows
        patch = vt.inverted_sift_patch(centroid, 64)
        patch = pt.render_sift_on_patch(patch, centroid)
        patches.append(patch)
    # Tile the per-word patches into one square mosaic image
    return vt.stack_square_images(patches)
def visualize_vocab_word(ibs, invassign, wx, fnum=None):
    """
    Visualize a single visual word: the word centroid rendered as SIFT arrows
    on the average assigned patch, next to a mosaic of the assigned patches.

    Args:
        ibs: ibeis controller (kept for API symmetry; not used directly)
        invassign: inverted assignment structure (provides vocab and patches)
        wx (int): index of the word to visualize
        fnum (int, optional): figure number; defaults to 2 when None

    Example:
        >>> from ibeis.new_annots import *  # NOQA
        >>> import plottool as pt
        >>> pt.qt4ensure()
        >>> ibs, aid_list, vocab = testdata_vocab()
        >>> fstack = StackedFeatures(ibs, aid_list)
        >>> nAssign = 2
        >>> invassign = fstack.inverted_assignment(vocab, nAssign)
        >>> sortx = ut.argsort(invassign.num_list)[::-1]
        >>> wx_list = ut.take(invassign.wx_list, sortx)
        >>> wx = wx_list[0]
    """
    import plottool as pt
    pt.qt4ensure()
    word = invassign.vocab.wx2_word[wx]
    word_patches = invassign.get_patches(wx)
    average_patch = np.mean(word_patches, axis=0)
    # BUG FIX: fnum was previously hard-coded to 2, silently ignoring the
    # caller-supplied parameter. Only fall back to 2 when fnum is None.
    if fnum is None:
        fnum = 2
    fnum = pt.ensure_fnum(fnum)
    with_sift = True
    if with_sift:
        # The centroid descriptor itself is drawn over the average patch.
        # (Previously vecs.mean(axis=0) was computed and immediately
        # overwritten by the word centroid; the dead computation is removed.)
        patch_img = pt.render_sift_on_patch(average_patch, word)
    else:
        patch_img = average_patch
    stacked_patches = vt.stack_square_images(word_patches)
    # Solid vertical separator bar between the centroid image and the mosaic
    solidbar = np.zeros((patch_img.shape[0],
                         int(patch_img.shape[1] * .1), 3),
                        dtype=patch_img.dtype)
    border_color = (100, 10, 10)  # bgr, darkblue
    if ut.is_float(solidbar):
        solidbar[:, :, :] = (np.array(border_color) / 255)[None, None]
    else:
        solidbar[:, :, :] = np.array(border_color)[None, None]
    word_img = vt.stack_image_list([patch_img, solidbar, stacked_patches],
                                   vert=False, modifysize=True)
    pt.imshow(word_img, fnum=fnum)
    pt.iup()
def render_vocab(vocab, inva=None, use_data=False):
    """
    Renders the average patch of each word. This is a quick visualization of
    the entire vocabulary.

    Args:
        vocab: visual vocabulary (provides wx_to_word)
        inva: inverted index with assignment counts. When given, the most
            frequently assigned words are shown; when None, words are sampled
            uniformly from the vocabulary. (Previously inva=None crashed with
            an AttributeError.)
        use_data (bool): if True, show the average of the actually assigned
            data patches instead of inverting the SIFT centroid; requires inva.

    CommandLine:
        python -m ibeis.algo.smk.vocab_indexer render_vocab --show
        python -m ibeis.algo.smk.vocab_indexer render_vocab --show --use-data
        python -m ibeis.algo.smk.vocab_indexer render_vocab --show --debug-depc

    Example:
        >>> # FIXME(review): this example looks stale; `inva` was never
        >>> # constructed by testdata_vocab — confirm against current tests.
        >>> from ibeis.algo.smk.vocab_indexer import *  # NOQA
        >>> ibs, aid_list, vocab = testdata_vocab('PZ_MTEST', num_words=10000)
        >>> use_data = ut.get_argflag('--use-data')
        >>> vocab = inva.vocab
        >>> all_words = vocab.render_vocab(inva, use_data=use_data)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> pt.qt4ensure()
        >>> pt.imshow(all_words)
        >>> ut.show_if_requested()
    """
    # Hoisted out of the loop: previously re-imported on every iteration.
    import plottool as pt
    if inva is not None:
        # Prefer the words with the most assignments
        sortx = ut.argsort(list(inva.wx_to_num.values()))[::-1]
        wx_list = ut.take(list(inva.wx_to_num.keys()), sortx)
    else:
        # No inverted index available: sample uniformly over all words
        wx_list = list(range(len(vocab)))
    wx_list = ut.strided_sample(wx_list, 64)
    word_patch_list = []
    for wx in ut.ProgIter(wx_list, bs=True, lbl='building patches'):
        word = vocab.wx_to_word[wx]
        if use_data:
            # Average of real assigned patches; only meaningful with inva
            word_patch = inva.get_word_patch(wx)
        else:
            word_patch = vt.inverted_sift_patch(word, 64)
        word_patch = pt.render_sift_on_patch(word_patch, word)
        word_patch_list.append(word_patch)
    all_words = vt.stack_square_images(word_patch_list)
    return all_words
def render_inverted_vocab(inva, ibs, use_data=False):
    """
    Renders the average patch of each word. This is a visualization of the
    entire vocabulary.

    NOTE(review): `use_data` is currently unused here — kept for interface
    compatibility with callers; confirm whether it should select the data
    patch path.

    CommandLine:
        python -m ibeis.algo.smk.inverted_index render_inverted_vocab --show
        python -m ibeis.algo.smk.inverted_index render_inverted_vocab --show --use-data
        python -m ibeis.algo.smk.inverted_index render_inverted_vocab --show --debug-depc

    Example:
        >>> from ibeis.algo.smk.inverted_index import *  # NOQA
        >>> qreq_, inva = testdata_inva()
        >>> ibs = qreq_.ibs
        >>> all_words = inva.render_inverted_vocab(ibs)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> pt.qt4ensure()
        >>> pt.imshow(all_words)
        >>> ut.show_if_requested()
    """
    import plottool as pt
    # Fetch the vocabulary object backing this inverted index from the depcache
    vocab = ibs.depc['vocab'].get_row_data([inva.vocab_rowid], 'words')[0]
    # Subsample to at most 64 words so the mosaic stays readable
    sampled_wxs = ut.strided_sample(inva.wx_list, 64)
    rendered = []
    for wx in ut.ProgIter(sampled_wxs, bs=True, lbl='building patches'):
        centroid = vocab.wx_to_word[wx]
        patch = inva.get_word_patch(wx, ibs)
        rendered.append(pt.render_sift_on_patch(patch, centroid))
    return vt.stack_square_images(rendered)
def render_inverted_vocab_word(inva, wx, ibs, fnum=None):
    """
    Creates a visualization of a visual word. This includes the average patch,
    the SIFT-like representation of the centroid, and some of the patches that
    were assigned to it.

    CommandLine:
        python -m ibeis.algo.smk.inverted_index render_inverted_vocab_word --show

    Example:
        >>> from ibeis.algo.smk.inverted_index import *  # NOQA
        >>> import plottool as pt
        >>> qreq_, inva = testdata_inva()
        >>> ibs = qreq_.ibs
        >>> wx_list = list(inva.wx_to_aids.keys())
        >>> wx = wx_list[0]
        >>> ut.qtensure()
        >>> fnum = 2
        >>> fnum = pt.ensure_fnum(fnum)
        >>> # Interactive visualization of many words
        >>> for wx in ut.InteractiveIter(wx_list):
        >>>     word_img = inva.render_inverted_vocab_word(wx, ibs, fnum)
        >>>     pt.imshow(word_img, fnum=fnum, title='Word %r/%r' % (wx, '?'))
        >>>     pt.update()
    """
    import plottool as pt
    # --- Mosaic of (a sample of) the patches assigned to this word ---
    assigned_patches = inva.get_patches(wx, ibs)
    sampled = ut.strided_sample(assigned_patches, 64)
    stacked_patches = vt.stack_square_images(sampled)
    # --- Average patch with the centroid's SIFT arrows drawn over it ---
    vocab = ibs.depc['vocab'].get_row_data([inva.vocab_rowid], 'words')[0]
    centroid = vocab.wx_to_word[wx]
    average_patch = np.mean(assigned_patches, axis=0)
    with_sift = True
    if with_sift:
        patch_img = pt.render_sift_on_patch(average_patch, centroid)
    else:
        patch_img = average_patch
    # --- Solid vertical separator bar between the panels ---
    solidbar = np.zeros(
        (patch_img.shape[0], int(patch_img.shape[1] * .1), 3),
        dtype=patch_img.dtype)
    border_color = (100, 10, 10)  # bgr, darkblue
    if ut.is_float(solidbar):
        solidbar[:, :, :] = (np.array(border_color) / 255)[None, None]
    else:
        solidbar[:, :, :] = np.array(border_color)[None, None]
    # --- Direct inversion of the centroid descriptor ---
    inverted_img = vt.inverted_sift_patch(centroid)
    # Normalize dtypes and channel counts before horizontal stacking
    patch_img = vt.rectify_to_uint8(patch_img)
    inverted_img = vt.rectify_to_uint8(inverted_img)
    solidbar = vt.rectify_to_uint8(solidbar)
    stacked_patches = vt.rectify_to_uint8(stacked_patches)
    inverted_img, patch_img = vt.make_channels_comparable(
        inverted_img, patch_img)
    panels = [patch_img, solidbar, inverted_img, solidbar, stacked_patches]
    word_img = vt.stack_image_list(panels, vert=False, modifysize=True)
    return word_img