def show_many_chips(ibs, aid_list, config2_=None, fnum=None, pnum=None, vert=True):
    r"""
    CommandLine:
        python -m ibeis.viz.viz_chip --test-show_many_chips
        python -m ibeis.viz.viz_chip --test-show_many_chips --show
        python -m ibeis.viz.viz_chip --test-show_many_chips --show --db NNP_Master3 --aids=13276,14047,14489,14906,10194,10201,12656,10150,11002,15315,7191,13127,15591,12838,13970,14123,14167 --no-annote --dpath figures --save ~/latex/crall-candidacy-2015/figures/challengechips.jpg '--caption=challenging images'

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.viz.viz_chip import *  # NOQA
        >>> import numpy as np
        >>> in_image = False
        >>> ibs, aid_list, kwargs, config2_ = testdata_showchip()
        >>> # execute function
        >>> show_many_chips(ibs, aid_list, config2_)
        >>> ut.show_if_requested()
    """
    if ut.VERBOSE:
        print('[viz] show_many_chips')
    in_image = False
    chip_list = vh.get_chips(ibs, aid_list, in_image=in_image, config2_=config2_)
    import vtool_ibeis as vt
    stacked_chips = vt.stack_image_recurse(chip_list, modifysize=True, vert=vert)
    # Pass the requested figure / plot numbers through instead of dropping them
    pt.imshow(stacked_chips, fnum=fnum, pnum=pnum)
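
# A minimal numpy sketch (not the vtool_ibeis implementation) of what the
# vertical stacking above roughly produces when the chips already share a
# width; vt.stack_image_recurse additionally resizes mismatched chips when
# modifysize=True. The chip shapes below are hypothetical.
def _sketch_vstack_chips():
    import numpy as np
    chip_a = np.zeros((120, 200, 3), dtype=np.uint8)
    chip_b = np.full((80, 200, 3), 255, dtype=np.uint8)
    stacked = np.vstack([chip_a, chip_b])  # shape: (200, 200, 3)
    return stacked
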
def chip_montage(ibs, qaids, config=None):
    r"""
    CommandLine:
        python -m ibeis.viz.viz_other chip_montage --show --db PZ_MTEST
        python -m ibeis.viz.viz_other chip_montage --show --db GZ_ALL

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.viz.viz_other import *  # NOQA
        >>> defaultdb = 'seaturtles'
        >>> import ibeis
        >>> a = ['default']
        >>> ibs = ibeis.opendb(defaultdb=defaultdb)
        >>> ibs, qaids, daids = ibeis.testdata_expanded_aids(ibs=ibs, a=a)
        >>> config = None
        >>> chip_montage(ibs, qaids, config)
        >>> ut.quit_if_noshow()
        >>> import plottool_ibeis as pt
        >>> ut.show_if_requested()
    """
    import vtool_ibeis as vt
    chip_list = ibs.get_annot_chips(qaids, config2_=config)
    height = 2000
    dsize = (int(height * ut.PHI), height)
    dst = vt.montage(chip_list, dsize)
    pt.imshow(dst)
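
# A small sketch of the montage canvas sizing used above, assuming ut.PHI is
# the golden ratio (~1.618); the height value mirrors the one in chip_montage.
def _sketch_montage_dsize(height=2000):
    PHI = (1 + 5 ** 0.5) / 2  # stand-in for ut.PHI (assumption)
    return (int(height * PHI), height)  # e.g. (3236, 2000)
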
def show_single_coverage_mask(qreq_, cm, weight_mask_m, weight_mask, daids, fnum=None):
    """
    DEPRECATE
    """
    import plottool_ibeis as pt
    import vtool_ibeis as vt
    from ibeis import viz
    fnum = pt.ensure_fnum(fnum)
    idx_list = ut.dict_take(cm.daid2_idx, daids)
    nPlots = len(idx_list) + 1
    nRows, nCols = pt.get_square_row_cols(nPlots)
    pnum_ = pt.make_pnum_nextgen(nRows, nCols)
    pt.figure(fnum=fnum, pnum=(1, 2, 1))
    # Draw coverage masks with bbox
    # <FlipHack>
    #weight_mask_m = np.fliplr(np.flipud(weight_mask_m))
    #weight_mask = np.fliplr(np.flipud(weight_mask))
    # </FlipHack>
    stacked_weights, offset_tup, sf_tup = vt.stack_images(weight_mask_m, weight_mask,
                                                          return_sf=True)
    (woff, hoff) = offset_tup[1]
    wh1 = weight_mask_m.shape[0:2][::-1]
    wh2 = weight_mask.shape[0:2][::-1]
    pt.imshow(255 * (stacked_weights), fnum=fnum, pnum=pnum_(0),
              title='(query image) What did match vs what should match')
    pt.draw_bbox((0, 0) + wh1, bbox_color=(0, 0, 1))
    pt.draw_bbox((woff, hoff) + wh2, bbox_color=(0, 0, 1))
    # Get contributing matches
    qaid = cm.qaid
    daid_list = daids
    fm_list = ut.take(cm.fm_list, idx_list)
    fs_list = ut.take(cm.fs_list, idx_list)
    # Draw matches
    for px, (daid, fm, fs) in enumerate(zip(daid_list, fm_list, fs_list), start=1):
        viz.viz_matches.show_matches2(qreq_.ibs, qaid, daid, fm, fs,
                                      draw_pts=False, draw_lines=True,
                                      draw_ell=False, fnum=fnum, pnum=pnum_(px),
                                      darken=.5)
    coverage_score = score_matching_mask(weight_mask_m, weight_mask)
    pt.set_figtitle('score=%.4f' % (coverage_score,))
def update_ui(co_wgt):
    if not co_wgt.hack:
        if co_wgt.current_gindex == 0:
            co_wgt.button_list[0].setEnabled(False)
        else:
            co_wgt.button_list[0].setEnabled(True)
        if co_wgt.current_gindex == len(co_wgt.gid_list) - 1:
            co_wgt.button_list[1].setEnabled(False)
        else:
            co_wgt.button_list[1].setEnabled(True)

    def extract_tuple(li, idx):
        # zip() returns an iterator in Python 3, so materialize it before indexing
        return list(zip(*li))[idx]
    # Update option setting, assume datetime has been updated
    co_wgt.combo_list[1].setCurrentIndex(extract_tuple(co_wgt.opt_list['year'], 1).index(co_wgt.dtime.year))
    co_wgt.combo_list[3].setCurrentIndex(extract_tuple(co_wgt.opt_list['month'], 1).index(co_wgt.dtime.month))
    co_wgt.combo_list[5].setCurrentIndex(extract_tuple(co_wgt.opt_list['day'], 1).index(co_wgt.dtime.day))
    co_wgt.combo_list[7].setCurrentIndex(extract_tuple(co_wgt.opt_list['hour'], 1).index(co_wgt.dtime.hour))
    co_wgt.combo_list[9].setCurrentIndex(extract_tuple(co_wgt.opt_list['minute'], 1).index(co_wgt.dtime.minute))
    co_wgt.combo_list[11].setCurrentIndex(extract_tuple(co_wgt.opt_list['second'], 1).index(co_wgt.dtime.second))
    # Redraw image
    if not co_wgt.hack:
        if co_wgt.imfig is not None:
            pt.close_figure(co_wgt.imfig)
        image = co_wgt.ibs.get_images(co_wgt.gid_list[co_wgt.current_gindex])
        figtitle = "Time Synchronization Picture"
        co_wgt.imfig, co_wgt.imax = pt.imshow(image, fnum=co_wgt.fnum, title=figtitle)
        co_wgt.imfig.show()
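
# A minimal sketch of the combo-index lookup in update_ui, assuming each
# opt_list entry is a (label, value) pair; the option data here is hypothetical.
def _sketch_combo_index(opt_pairs, value):
    # Equivalent of extract_tuple(opt_pairs, 1).index(value)
    values = [val for (_, val) in opt_pairs]
    return values.index(value)

# e.g. _sketch_combo_index([('Jan', 1), ('Feb', 2), ('Mar', 3)], 2) -> 1
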
def HARDCODE_SHOW_PB_PAIR():
    """
    python -m ibeis.viz.viz_chip HARDCODE_SHOW_PB_PAIR --show

    Example:
        >>> # SCRIPT
        >>> from ibeis.viz.viz_chip import *  # NOQA
        >>> import plottool_ibeis as pt
        >>> HARDCODE_SHOW_PB_PAIR()
        >>> pt.show_if_requested()
    """
    # TODO: generalize into testdata_annotmatches which filters ams properly
    # Then a function to show these ams
    import ibeis
    import ibeis.viz
    has_any = ut.get_argval('--has_any', default=['photobomb'])
    index = ut.get_argval('--index', default=0)

    ibs = ibeis.opendb(defaultdb='PZ_Master1')
    ams = ibs._get_all_annotmatch_rowids()
    tags = ibs.get_annotmatch_case_tags(ams)
    flags = ut.filterflags_general_tags(tags, has_any=has_any)
    selected_ams = ut.compress(ams, flags)
    aid_pairs = ibs.get_annotmatch_aids(selected_ams)
    aid1, aid2 = aid_pairs[index]

    import plottool_ibeis as pt
    fnum = 1
    if ut.get_argflag('--match'):
        request = ibs.depc_annot.new_request('vsone', [aid1], [aid2])
        res_list2 = request.execute()
        match = res_list2[0]
        match.show_single_annotmatch(qreq_=request, vert=False,
                                     colorbar_=False, notitle=True,
                                     draw_lbl=False, draw_border=False)
    else:
        chip1, chip2 = ibs.get_annot_chips([aid1, aid2])
        pt.imshow(chip1, pnum=(1, 2, 1), fnum=fnum)
        pt.imshow(chip2, pnum=(1, 2, 2), fnum=fnum)
def show_chip(ibs, aid, in_image=False, annote=True, title_suffix='',
              weight_label=None, weights=None, config2_=None, **kwargs):
    r"""
    Driver function to show chips

    Args:
        ibs (ibeis.IBEISController):
        aid (int): annotation rowid
        in_image (bool): displays annotation with the context of its source image
        annote (bool): enables overlay annotations
        title_suffix (str):
        weight_label (None): (default = None)
        weights (None): (default = None)
        config2_ (dict): (default = None)

    Kwargs:
        enable_chip_title_prefix, nokpts, kpts_subset, kpts, text_color,
        notitle, draw_lbls, show_aidstr, show_gname, show_name, show_nid,
        show_exemplar, show_num_gt, show_quality_text, show_viewcode, fnum,
        title, figtitle, pnum, interpolation, cmap, heatmap, data_colorbar,
        darken, update, xlabel, redraw_image, ax, alpha, docla, doclf,
        projection, pts, ell
        color (3/4-tuple, ndarray, or str): colors for keypoints

    CommandLine:
        python -m ibeis.viz.viz_chip show_chip --show --ecc
        python -c "import utool as ut; ut.print_auto_docstr('ibeis.viz.viz_chip', 'show_chip')"
        python -m ibeis.viz.viz_chip show_chip --show --db NNP_Master3 --aids 14047 --no-annote
        python -m ibeis.viz.viz_chip show_chip --show --db PZ_MTEST --aid 1 --bgmethod=cnn
        python -m ibeis.viz.viz_chip show_chip --show --db PZ_MTEST --aid 1 --bgmethod=cnn --scale_max=30
        python -m ibeis.viz.viz_chip show_chip --show --db PZ_MTEST --aid 1 --ecc --draw_lbls=False --notitle --save=~/slides/lnbnn_query.jpg --dpi=300

    Example:
        >>> # VIZ_TEST
        >>> from ibeis.viz.viz_chip import *  # NOQA
        >>> import numpy as np
        >>> import vtool_ibeis as vt
        >>> in_image = False
        >>> ibs, aid_list, kwargs, config2_ = testdata_showchip()
        >>> aid = aid_list[0]
        >>> if True:
        >>>     import matplotlib as mpl
        >>>     from ibeis.scripts.thesis import TMP_RC
        >>>     mpl.rcParams.update(TMP_RC)
        >>> if ut.get_argflag('--ecc'):
        >>>     kpts = ibs.get_annot_kpts(aid, config2_=config2_)
        >>>     weights = ibs.get_annot_fgweights([aid], ensure=True, config2_=config2_)[0]
        >>>     kpts = ut.random_sample(kpts[weights > .9], 200, seed=0)
        >>>     ecc = vt.get_kpts_eccentricity(kpts)
        >>>     scale = 1 / vt.get_scales(kpts)
        >>>     #s = ecc if config2_.affine_invariance else scale
        >>>     s = scale
        >>>     colors = pt.scores_to_color(s, cmap_='jet')
        >>>     kwargs['color'] = colors
        >>>     kwargs['kpts'] = kpts
        >>>     kwargs['ell_linewidth'] = 3
        >>>     kwargs['ell_alpha'] = .7
        >>> show_chip(ibs, aid, in_image=in_image, config2_=config2_, **kwargs)
        >>> pt.show_if_requested()
    """
    if ut.VERBOSE:
        print('[viz] show_chip(aid=%r)' % (aid,))
    #ibs.assert_valid_aids((aid,))
    # Get chip
    #print('in_image = %r' % (in_image,))
    chip = vh.get_chips(ibs, aid, in_image=in_image, config2_=config2_)
    # Create chip title
    chip_text = vh.get_annot_texts(ibs, [aid], **kwargs)[0]
    if kwargs.get('enable_chip_title_prefix', True):
        chip_title_text = chip_text + title_suffix
    else:
        chip_title_text = title_suffix
    chip_title_text = chip_title_text.strip('\n')
    # Draw chip
    fig, ax = pt.imshow(chip, **kwargs)
    # Populate axis user data
    vh.set_ibsdat(ax, 'viztype', 'chip')
    vh.set_ibsdat(ax, 'aid', aid)
    if annote and not kwargs.get('nokpts', False):
        # Get and draw keypoints
        if 'color' not in kwargs:
            if weight_label == 'fg_weights':
                if weights is None and ibs.has_species_detector(
                        ibs.get_annot_species_texts(aid)):
                    weight_label = 'fg_weights'
                    weights = ibs.get_annot_fgweights([aid], ensure=True,
                                                      config2_=config2_)[0]
            if weights is not None:
                cmap_ = 'hot'
                #if weight_label == 'dstncvs':
                #    cmap_ = 'rainbow'
                color = pt.scores_to_color(weights, cmap_=cmap_, reverse_cmap=False)
                kwargs['color'] = color
                kwargs['ell_color'] = color
                kwargs['pts_color'] = color
        kpts_ = vh.get_kpts(ibs, aid, in_image, config2_=config2_,
                            kpts_subset=kwargs.get('kpts_subset', None),
                            kpts=kwargs.pop('kpts', None))
        pt.viz_keypoints._annotate_kpts(kpts_, **kwargs)
        if kwargs.get('draw_lbls', True):
            pt.upperleft_text(chip_text, color=kwargs.get('text_color', None))
    use_title = not kwargs.get('notitle', False)
    if use_title:
        pt.set_title(chip_title_text)
    if in_image:
        gid = ibs.get_annot_gids(aid)
        aid_list = ibs.get_image_aids(gid)
        annotekw = viz_image.get_annot_annotations(
            ibs, aid_list, sel_aids=[aid],
            draw_lbls=kwargs.get('draw_lbls', True))
        # Put annotation centers in the axis
        ph.set_plotdat(ax, 'annotation_bbox_list', annotekw['bbox_list'])
        ph.set_plotdat(ax, 'aid_list', aid_list)
        pt.viz_image2.draw_image_overlay(ax, **annotekw)

        zoom_ = ut.get_argval('--zoom', type_=float, default=None)
        if zoom_ is not None:
            import vtool_ibeis as vt
            # Zoom into the chip for some image context
            rotated_verts = ibs.get_annot_rotated_verts(aid)
            bbox = ibs.get_annot_bboxes(aid)
            #print(bbox)
            #print(rotated_verts)
            rotated_bbox = vt.bbox_from_verts(rotated_verts)
            imgw, imgh = ibs.get_image_sizes(gid)

            pad_factor = zoom_
            pad_length = min(bbox[2], bbox[3]) * pad_factor
            minx = max(rotated_bbox[0] - pad_length, 0)
            miny = max(rotated_bbox[1] - pad_length, 0)
            maxx = min((rotated_bbox[0] + rotated_bbox[2]) + pad_length, imgw)
            maxy = min((rotated_bbox[1] + rotated_bbox[3]) + pad_length, imgh)
            #maxy = imgh - maxy
            #miny = imgh - miny
            ax = pt.gca()
            ax.set_xlim(minx, maxx)
            ax.set_ylim(miny, maxy)
            ax.invert_yaxis()
    else:
        ph.set_plotdat(ax, 'chipshape', chip.shape)

    #if 'featweights' in vars() and 'color' in kwargs:
    if weights is not None and weight_label is not None:
        ## HACK HACK HACK
        if len(weights) > 0:
            cb = pt.colorbar(weights, kwargs['color'])
            cb.set_label(weight_label)
    return fig, ax
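
# A minimal sketch of the --zoom window math in show_chip: pad the rotated
# bbox by a fraction of the smaller annotation side and clamp to the image.
# Inputs here are plain (x, y, w, h) tuples; the values are illustrative only.
def _sketch_zoom_window(rotated_bbox, bbox, img_wh, zoom):
    imgw, imgh = img_wh
    pad = min(bbox[2], bbox[3]) * zoom
    minx = max(rotated_bbox[0] - pad, 0)
    miny = max(rotated_bbox[1] - pad, 0)
    maxx = min(rotated_bbox[0] + rotated_bbox[2] + pad, imgw)
    maxy = min(rotated_bbox[1] + rotated_bbox[3] + pad, imgh)
    return minx, miny, maxx, maxy

# e.g. _sketch_zoom_window((10, 10, 50, 40), (10, 10, 50, 40), (200, 150), 0.5)
# -> (0, 0, 80, 70)
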
def get_injured_sharks():
    """
    >>> from ibeis.scripts.getshark import *  # NOQA
    """
    import requests
    url = 'http://www.whaleshark.org/getKeywordImages.jsp'
    resp = requests.get(url)
    assert resp.status_code == 200
    keywords = resp.json()['keywords']
    key_list = ut.take_column(keywords, 'indexName')
    key_to_nice = {k['indexName']: k['readableName'] for k in keywords}

    injury_patterns = [
        'injury', 'net', 'hook', 'trunc', 'damage', 'scar', 'nicks', 'bite',
    ]

    injury_keys = [key for key in key_list
                   if any([pat in key for pat in injury_patterns])]
    noninjury_keys = ut.setdiff(key_list, injury_keys)
    injury_nice = ut.lmap(lambda k: key_to_nice[k], injury_keys)  # NOQA
    noninjury_nice = ut.lmap(lambda k: key_to_nice[k], noninjury_keys)  # NOQA
    key_list = injury_keys

    keyed_images = {}
    for key in ut.ProgIter(key_list, lbl='reading index', bs=True):
        key_url = url + '?indexName={indexName}'.format(indexName=key)
        key_resp = requests.get(key_url)
        assert key_resp.status_code == 200
        key_imgs = key_resp.json()['images']
        keyed_images[key] = key_imgs

    key_hist = {key: len(imgs) for key, imgs in keyed_images.items()}
    key_hist = ut.sort_dict(key_hist, 'vals')
    print(ut.repr3(key_hist))
    nice_key_hist = ut.map_dict_keys(lambda k: key_to_nice[k], key_hist)
    nice_key_hist = ut.sort_dict(nice_key_hist, 'vals')
    print(ut.repr3(nice_key_hist))

    key_to_urls = {key: ut.take_column(vals, 'url')
                   for key, vals in keyed_images.items()}
    overlaps = {}
    import itertools
    overlap_img_list = []
    for k1, k2 in itertools.combinations(key_to_urls.keys(), 2):
        overlap_imgs = ut.isect(key_to_urls[k1], key_to_urls[k2])
        num_overlap = len(overlap_imgs)
        overlaps[(k1, k2)] = num_overlap
        overlaps[(k1, k1)] = len(key_to_urls[k1])
        if num_overlap > 0:
            #print('[%s][%s], overlap=%r' % (k1, k2, num_overlap))
            overlap_img_list.extend(overlap_imgs)

    all_img_urls = list(set(ut.flatten(key_to_urls.values())))
    num_all = len(all_img_urls)  # NOQA
    print('num_all = %r' % (num_all,))

    # Determine super-categories
    categories = ['nicks', 'scar', 'trunc']

    # Force these keys into these categories
    key_to_cat = {'scarbite': 'other_injury'}

    cat_to_keys = ut.ddict(list)

    for key in key_to_urls.keys():
        flag = 1
        if key in key_to_cat:
            cat = key_to_cat[key]
            cat_to_keys[cat].append(key)
            continue
        for cat in categories:
            if cat in key:
                cat_to_keys[cat].append(key)
                flag = 0
        if flag:
            cat = 'other_injury'
            cat_to_keys[cat].append(key)

    cat_urls = ut.ddict(list)
    for cat, keys in cat_to_keys.items():
        for key in keys:
            cat_urls[cat].extend(key_to_urls[key])

    cat_hist = {}
    for cat in list(cat_urls.keys()):
        cat_urls[cat] = list(set(cat_urls[cat]))
        cat_hist[cat] = len(cat_urls[cat])

    print(ut.repr3(cat_to_keys))
    print(ut.repr3(cat_hist))

    key_to_cat = dict([(val, key) for key, vals in cat_to_keys.items() for val in vals])

    #ingestset = {
    #    '__class__': 'ImageSet',
    #    'images': ut.ddict(dict)
    #}
    #for key, key_imgs in keyed_images.items():
    #    for imgdict in key_imgs:
    #        url = imgdict['url']
    #        encid = imgdict['correspondingEncounterNumber']
    #        # Make structure
    #        encdict = encounters[encid]
    #        encdict['__class__'] = 'Encounter'
    #        imgdict = ut.delete_keys(imgdict.copy(), ['correspondingEncounterNumber'])
    #        imgdict['__class__'] = 'Image'
    #        cat = key_to_cat[key]
    #        annotdict = {'relative_bbox': [.01, .01, .98, .98], 'tags': [cat, key]}
    #        annotdict['__class__'] = 'Annotation'
    #        # Ensure structures exist
    #        encdict['images'] = encdict.get('images', [])
    #        imgdict['annots'] = imgdict.get('annots', [])
    #        # Add an image to this encounter
    #        encdict['images'].append(imgdict)
    #        # Add an annotation to this image
    #        imgdict['annots'].append(annotdict)

    ##http://springbreak.wildbook.org/rest/org.ecocean.Encounter/1111
    #get_enc_url = 'http://www.whaleshark.org/rest/org.ecocean.Encounter/%s' % (encid,)
    #resp = requests.get(get_enc_url)
    #print(ut.repr3(encdict))
    #print(ut.repr3(encounters))

    # Download the files to the local disk
    #fpath_list =

    all_urls = ut.unique(ut.take_column(
        ut.flatten(
            ut.dict_subset(keyed_images, ut.flatten(cat_to_keys.values())).values()
        ), 'url'))

    dldir = ut.truepath('~/tmpsharks')
    from os.path import commonprefix, basename  # NOQA
    prefix = commonprefix(all_urls)
    suffix_list = [url_[len(prefix):] for url_ in all_urls]
    fname_list = [suffix.replace('/', '--') for suffix in suffix_list]

    fpath_list = []
    for url, fname in ut.ProgIter(zip(all_urls, fname_list), lbl='downloading imgs', freq=1):
        fpath = ut.grab_file_url(url, download_dir=dldir, fname=fname, verbose=False)
        fpath_list.append(fpath)

    # Make sure we keep orig info
    #url_to_keys = ut.ddict(list)
    url_to_info = ut.ddict(dict)
    for key, imgdict_list in keyed_images.items():
        for imgdict in imgdict_list:
            url = imgdict['url']
            info = url_to_info[url]
            for k, v in imgdict.items():
                info[k] = info.get(k, [])
                info[k].append(v)
            info['keys'] = info.get('keys', [])
            info['keys'].append(key)
            #url_to_keys[url].append(key)

    info_list = ut.take(url_to_info, all_urls)
    for info in info_list:
        if len(set(info['correspondingEncounterNumber'])) > 1:
            assert False, 'url with two different encounter nums'
    # Combine duplicate tags

    hashid_list = [ut.get_file_uuid(fpath_, stride=8)
                   for fpath_ in ut.ProgIter(fpath_list, bs=True)]
    groupxs = ut.group_indices(hashid_list)[1]

    # Group properties by duplicate images
    #groupxs = [g for g in groupxs if len(g) > 1]
    fpath_list_ = ut.take_column(ut.apply_grouping(fpath_list, groupxs), 0)
    url_list_ = ut.take_column(ut.apply_grouping(all_urls, groupxs), 0)
    info_list_ = [ut.map_dict_vals(ut.flatten, ut.dict_accum(*info_))
                  for info_ in ut.apply_grouping(info_list, groupxs)]

    encid_list_ = [ut.unique(info_['correspondingEncounterNumber'])[0]
                   for info_ in info_list_]
    keys_list_ = [ut.unique(info_['keys']) for info_ in info_list_]
    cats_list_ = [ut.unique(ut.take(key_to_cat, keys)) for keys in keys_list_]

    clist = ut.ColumnLists({
        'gpath': fpath_list_,
        'url': url_list_,
        'encid': encid_list_,
        'key': keys_list_,
        'cat': cats_list_,
    })

    #for info_ in ut.apply_grouping(info_list, groupxs):
    #    info = ut.dict_accum(*info_)
    #    info = ut.map_dict_vals(ut.flatten, info)
    #    x = ut.unique(ut.flatten(ut.dict_accum(*info_)['correspondingEncounterNumber']))
    #    if len(x) > 1:
    #        info = info.copy()
    #        del info['keys']
    #        print(ut.repr3(info))

    flags = ut.lmap(ut.fpath_has_imgext, clist['gpath'])
    clist = clist.compress(flags)

    import ibeis
    ibs = ibeis.opendb('WS_Injury', allow_newdir=True)

    gid_list = ibs.add_images(clist['gpath'])
    clist['gid'] = gid_list
    failed_flags = ut.flag_None_items(clist['gid'])
    print('# failed %s' % (sum(failed_flags),))
    passed_flags = ut.not_list(failed_flags)
    clist = clist.compress(passed_flags)
    ut.assert_all_not_None(clist['gid'])
    #ibs.get_image_uris_original(clist['gid'])
    ibs.set_image_uris_original(clist['gid'], clist['url'], overwrite=True)

    #ut.zipflat(clist['cat'], clist['key'])
    if False:
        # Can run detection instead
        clist['tags'] = ut.zipflat(clist['cat'])
        aid_list = ibs.use_images_as_annotations(clist['gid'], adjust_percent=0.01,
                                                 tags_list=clist['tags'])
        aid_list

    import plottool_ibeis as pt
    from ibeis import core_annots
    pt.qt4ensure()
    #annots = ibs.annots()
    #aids = [1, 2]
    #ibs.depc_annot.get('hog', aids , 'hog')
    #ibs.depc_annot.get('chip', aids, 'img')
    for aid in ut.InteractiveIter(ibs.get_valid_aids()):
        hogs = ibs.depc_annot.d.get_hog_hog([aid])
        chips = ibs.depc_annot.d.get_chips_img([aid])
        chip = chips[0]
        hogimg = core_annots.make_hog_block_image(hogs[0])
        pt.clf()
        pt.imshow(hogimg, pnum=(1, 2, 1))
        pt.imshow(chip, pnum=(1, 2, 2))
        fig = pt.gcf()
        fig.show()
        fig.canvas.draw()

    #print(len(groupxs))

    #if False:
    #    groupxs = ut.find_duplicate_items(ut.lmap(basename, suffix_list)).values()
    #    print(ut.repr3(ut.apply_grouping(all_urls, groupxs)))
    #    # FIX
    #    for fpath, fname in zip(fpath_list, fname_list):
    #        if ut.checkpath(fpath):
    #            ut.move(fpath, join(dirname(fpath), fname))
    #            print('fpath = %r' % (fpath,))

    #import ibeis
    #from ibeis.dbio import ingest_dataset
    #dbdir = ibeis.sysres.lookup_dbdir('WS_ALL')
    #self = ingest_dataset.Ingestable2(dbdir)

    if False:
        # Show overlap matrix
        import plottool_ibeis as pt
        import pandas as pd
        import numpy as np
        dict_ = overlaps
        s = pd.Series(dict_, index=pd.MultiIndex.from_tuples(overlaps))
        df = s.unstack()
        lhs, rhs = df.align(df.T)
        df = lhs.add(rhs, fill_value=0).fillna(0)

        label_texts = df.columns.values

        def label_ticks(label_texts):
            import plottool_ibeis as pt
            truncated_labels = [repr(lbl[0:100]) for lbl in label_texts]
            ax = pt.gca()
            ax.set_xticks(list(range(len(label_texts))))
            ax.set_xticklabels(truncated_labels)
            [lbl.set_rotation(-55) for lbl in ax.get_xticklabels()]
            [lbl.set_horizontalalignment('left') for lbl in ax.get_xticklabels()]

            #xgrid, ygrid = np.meshgrid(range(len(label_texts)), range(len(label_texts)))
            #pt.plot_surface3d(xgrid, ygrid, disjoint_mat)
            ax.set_yticks(list(range(len(label_texts))))
            ax.set_yticklabels(truncated_labels)
            [lbl.set_horizontalalignment('right') for lbl in ax.get_yticklabels()]
            [lbl.set_verticalalignment('center') for lbl in ax.get_yticklabels()]
            #[lbl.set_rotation(20) for lbl in ax.get_yticklabels()]

        #df = df.sort(axis=0)
        #df = df.sort(axis=1)

        sortx = np.argsort(df.sum(axis=1).values)[::-1]
        df = df.take(sortx, axis=0)
        df = df.take(sortx, axis=1)

        fig = pt.figure(fnum=1)
        fig.clf()
        mat = df.values.astype(np.int32)
        mat[np.diag_indices(len(mat))] = 0
        # plain bool avoids the removed np.bool alias
        vmax = mat[(1 - np.eye(len(mat))).astype(bool)].max()
        import matplotlib.colors
        norm = matplotlib.colors.Normalize(vmin=0, vmax=vmax, clip=True)
        pt.plt.imshow(mat, cmap='hot', norm=norm, interpolation='none')
        pt.plt.colorbar()
        pt.plt.grid('off')
        label_ticks(label_texts)
        fig.tight_layout()

    #overlap_df = pd.DataFrame.from_dict(overlap_img_list)

    class TmpImage(ut.NiceRepr):
        pass

    from skimage.feature import hog
    from skimage import data, color, exposure
    import plottool_ibeis as pt
    image2 = color.rgb2gray(data.astronaut())  # NOQA

    fpath = './GOPR1120.JPG'

    import vtool_ibeis as vt
    for fpath in [fpath]:
        """
        http://scikit-image.org/docs/dev/auto_examples/plot_hog.html
        """
        image = vt.imread(fpath, grayscale=True)
        image = pt.color_funcs.to_base01(image)

        fig = pt.figure(fnum=2)
        fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),
                            cells_per_block=(1, 1), visualise=True)

        fig, (ax1, ax2) = pt.plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True)

        ax1.axis('off')
        ax1.imshow(image, cmap=pt.plt.cm.gray)
        ax1.set_title('Input image')
        ax1.set_adjustable('box-forced')

        # Rescale histogram for better display
        hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))

        ax2.axis('off')
        ax2.imshow(hog_image_rescaled, cmap=pt.plt.cm.gray)
        ax2.set_title('Histogram of Oriented Gradients')
        ax2.set_adjustable('box-forced')
        pt.plt.show()
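
# A minimal pure-Python sketch of the keyword -> super-category bucketing in
# get_injured_sharks above (a key goes to every category whose name it
# contains, to a forced category if listed, otherwise to 'other_injury');
# the keyword names below are hypothetical.
def _sketch_bucket_keys(keys, categories, forced_cats=None):
    forced_cats = {} if forced_cats is None else forced_cats
    cat_to_keys = {}
    for key in keys:
        if key in forced_cats:
            cat_to_keys.setdefault(forced_cats[key], []).append(key)
            continue
        matched = False
        for cat in categories:
            if cat in key:
                cat_to_keys.setdefault(cat, []).append(key)
                matched = True
        if not matched:
            cat_to_keys.setdefault('other_injury', []).append(key)
    return cat_to_keys

# e.g. _sketch_bucket_keys(['scar2', 'nicks1', 'scarbite', 'hook'],
#                          ['nicks', 'scar', 'trunc'],
#                          forced_cats={'scarbite': 'other_injury'})
# -> {'scar': ['scar2'], 'nicks': ['nicks1'],
#     'other_injury': ['scarbite', 'hook']}
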
def show_multichip_match(rchip1, rchip2_list, kpts1, kpts2_list, fm_list,
                         fs_list, featflag_list, fnum=None, pnum=None, **kwargs):
    """
    move to df2
    rchip = rchip1
    H = H1 = None
    target_wh = None
    """
    import vtool_ibeis.image as gtool
    import plottool_ibeis as pt
    import numpy as np
    import vtool_ibeis as vt
    kwargs = kwargs.copy()

    colorbar_ = kwargs.pop('colorbar_', True)
    stack_larger = kwargs.pop('stack_larger', False)
    stack_side = kwargs.pop('stack_side', False)
    # mode for features disabled by name scoring
    NONVOTE_MODE = kwargs.get('nonvote_mode', 'filter')

    def preprocess_chips(rchip, H, target_wh):
        rchip_ = rchip if H is None else gtool.warpHomog(rchip, H, target_wh)
        return rchip_

    if fnum is None:
        fnum = pt.next_fnum()

    target_wh1 = None
    H1 = None
    rchip1_ = preprocess_chips(rchip1, H1, target_wh1)
    # Hack to visually identify the query
    rchip1_ = vt.draw_border(
        rchip1_, out=rchip1_, thickness=15,
        color=(pt.UNKNOWN_PURP[0:3] * 255).astype(np.uint8).tolist())
    wh1 = gtool.get_size(rchip1_)
    rchip2_list_ = [preprocess_chips(rchip2, None, wh1) for rchip2 in rchip2_list]
    wh2_list = [gtool.get_size(rchip2) for rchip2 in rchip2_list_]

    num = 0 if len(rchip2_list) < 3 else 1
    #vert = True if len(rchip2_list) > 1 else False
    vert = True if len(rchip2_list) > 1 else None
    #num = 0

    if False and kwargs.get('fastmode', False):
        # This doesn't actually help the speed very much
        stackkw = dict(
            # Hack draw results faster Q
            #initial_sf=.4,
            #initial_sf=.9,
            use_larger=stack_larger,
            #use_larger=True,
        )
    else:
        stackkw = dict()
    #use_larger = True
    #vert = kwargs.get('fastmode', False)

    if stack_side:
        # hack to stack all database images vertically
        num = 0
    # TODO: heatmask
    match_img, offset_list, sf_list = vt.stack_image_list_special(
        rchip1_, rchip2_list_, num=num, vert=vert, **stackkw)
    wh_list = np.array(ut.flatten([[wh1], wh2_list])) * sf_list

    offset1 = offset_list[0]
    wh1 = wh_list[0]
    sf1 = sf_list[0]

    fig, ax = pt.imshow(match_img, fnum=fnum, pnum=pnum)

    if kwargs.get('show_matches', True):
        ut.flatten(fs_list)
        #ut.embed()
        flat_fs, cumlen_list = ut.invertible_flatten2(fs_list)
        flat_colors = pt.scores_to_color(np.array(flat_fs), 'hot')
        colors_list = ut.unflatten2(flat_colors, cumlen_list)
        for _tup in zip(offset_list[1:], wh_list[1:], sf_list[1:], kpts2_list,
                        fm_list, fs_list, featflag_list, colors_list):
            offset2, wh2, sf2, kpts2, fm2_, fs2_, featflags, colors = _tup
            xywh1 = (offset1[0], offset1[1], wh1[0], wh1[1])
            xywh2 = (offset2[0], offset2[1], wh2[0], wh2[1])
            #colors = pt.scores_to_color(fs2)
            if kpts1 is not None and kpts2 is not None:
                if NONVOTE_MODE == 'filter':
                    fm2 = fm2_.compress(featflags, axis=0)
                    fs2 = fs2_.compress(featflags, axis=0)
                elif NONVOTE_MODE == 'only':
                    fm2 = fm2_.compress(np.logical_not(featflags), axis=0)
                    fs2 = fs2_.compress(np.logical_not(featflags), axis=0)
                else:
                    # TODO: optional coloring of nonvotes instead
                    fm2 = fm2_
                    fs2 = fs2_
                pt.plot_fmatch(xywh1, xywh2, kpts1, kpts2, fm2, fs2, fm_norm=None,
                               H1=None, H2=None, scale_factor1=sf1,
                               scale_factor2=sf2, colorbar_=False, colors=colors,
                               **kwargs)
        if colorbar_:
            pt.colorbar(flat_fs, flat_colors)
    bbox_list = [(x, y, w, h) for (x, y), (w, h) in zip(offset_list, wh_list)]
    return offset_list, sf_list, bbox_list
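
# A minimal numpy sketch of the NONVOTE_MODE handling in show_multichip_match:
# 'filter' keeps matches whose feature flag is set, 'only' keeps the
# complement, and anything else passes matches through. Arrays are synthetic.
def _sketch_nonvote_select(fm, fs, featflags, mode='filter'):
    import numpy as np
    featflags = np.asarray(featflags, dtype=bool)
    if mode == 'filter':
        keep = featflags
    elif mode == 'only':
        keep = np.logical_not(featflags)
    else:
        return fm, fs
    return fm.compress(keep, axis=0), fs.compress(keep, axis=0)

# e.g. with fm = np.array([[0, 1], [2, 3]]), fs = np.array([.9, .1]) and
# featflags = [True, False], mode='filter' keeps only the first match.
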