def get_name_shortlist_aids(
    daid_list,
    dnid_list,
    annot_score_list,
    name_score_list,
    nid2_nidx,
    nNameShortList,
    nAnnotPerName,
):
    r"""
    Select a shortlist of database annot ids: keep the top ``nNameShortList``
    names by name score, then up to ``nAnnotPerName`` annots per surviving
    name ordered by annotation score.

    CommandLine:
        python -m wbia.algo.hots.scoring --test-get_name_shortlist_aids

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.algo.hots.scoring import *  # NOQA
        >>> daid_list = np.array([11, 12, 13, 14, 15, 16, 17])
        >>> dnid_list = np.array([21, 21, 21, 22, 22, 23, 24])
        >>> annot_score_list = np.array([ 6, 2, 3, 5, 6, 3, 2])
        >>> name_score_list = np.array([ 8, 9, 5, 4])
        >>> nid2_nidx = {21:0, 22:1, 23:2, 24:3}
        >>> nNameShortList, nAnnotPerName = 3, 2
        >>> args = (daid_list, dnid_list, annot_score_list, name_score_list,
        ...         nid2_nidx, nNameShortList, nAnnotPerName)
        >>> top_daids = get_name_shortlist_aids(*args)
        >>> result = str(top_daids)
        >>> print(result)
        [15, 14, 11, 13, 16]
    """
    # Group database annots (and their scores) by the name they belong to
    nids, groupxs = vt.group_indices(np.array(dnid_list))
    ascore_groups = vt.apply_grouping(annot_score_list, groupxs)
    daid_groups = vt.apply_grouping(np.array(daid_list), groupxs)
    # Align name scores with the grouped name order
    nscores = name_score_list.take(ut.dict_take(nid2_nidx, nids))
    # Keep only the best-scoring names
    name_sortx = nscores.argsort()[::-1]
    best_daid_groups = ut.listclip(ut.take(daid_groups, name_sortx), nNameShortList)
    best_ascore_groups = ut.listclip(ut.take(ascore_groups, name_sortx), nNameShortList)
    # Within each surviving name, keep only the best-scoring annots
    within_sortx = [scores.argsort()[::-1] for scores in best_ascore_groups]
    sorted_daid_groups = vt.ziptake(best_daid_groups, within_sortx)
    clipped_groups = [
        ut.listclip(daid_group, nAnnotPerName) for daid_group in sorted_daid_groups
    ]
    return ut.flatten(clipped_groups)
def build_vsone_shortlist(ibs, qaid2_qres_vsmany):
    """
    looks that the top N names in a vsmany query to apply vsone reranking

    Args:
        ibs (IBEISController):  ibeis controller object
        qaid2_qres_vsmany (dict):  dict of query result objects

    Returns:
        list: vsone_query_pairs

    CommandLine:
        python -m ibeis.algo.hots.special_query --test-build_vsone_shortlist

    Example:
        >>> # SLOW_DOCTEST
        >>> from ibeis.algo.hots.special_query import *  # NOQA
        >>> ibs, valid_aids = testdata_special_query()
        >>> qaids = valid_aids[0:1]
        >>> daids = valid_aids[1:]
        >>> qaid2_qres_vsmany, qreq_vsmany_ = query_vsmany_initial(ibs, qaids, daids)
        >>> # execute function
        >>> vsone_query_pairs = build_vsone_shortlist(ibs, qaid2_qres_vsmany)
        >>> qaid, top_aid_list = vsone_query_pairs[0]
        >>> top_nid_list = ibs.get_annot_name_rowids(top_aid_list)
        >>> assert top_nid_list.index(1) == 0, 'name 1 should be rank 1'
        >>> assert len(top_nid_list) == 5, 'should have 3 names and up to 2 image per name'
        [(1, [3, 2, 6, 5, 4])]
        [(1, [2, 3, 6, 5, 4])]
    """
    nNameShortlist = 3
    nAnnotPerName = 2
    vsone_query_pairs = []
    for qaid, qres_vsmany in six.iteritems(qaid2_qres_vsmany):
        # nscoretup is (sorted_nids, sorted_nscores, sorted_aids, sorted_scores);
        # only the per-name aid groups are needed here.
        sorted_aids = qres_vsmany.get_nscoretup()[2]
        # Clip to the top names, then to the top annots within each name
        name_groups = ut.listclip(sorted_aids, nNameShortlist)
        clipped_groups = [ut.listclip(aids, nAnnotPerName) for aids in name_groups]
        top_aid_list = ut.flatten(clipped_groups)
        # TODO: allow annots not in daids to be included
        vsone_query_pairs.append((qaid, top_aid_list))
    print('built %d pairs' % (len(vsone_query_pairs),))
    return vsone_query_pairs
def build_vsone_shortlist(ibs, qaid2_qres_vsmany):
    """
    looks that the top N names in a vsmany query to apply vsone reranking

    Args:
        ibs (IBEISController):  ibeis controller object
        qaid2_qres_vsmany (dict):  dict of query result objects

    Returns:
        list: vsone_query_pairs

    CommandLine:
        python -m ibeis.algo.hots.special_query --test-build_vsone_shortlist

    Example:
        >>> # SLOW_DOCTEST
        >>> from ibeis.algo.hots.special_query import *  # NOQA
        >>> ibs, valid_aids = testdata_special_query()
        >>> qaids = valid_aids[0:1]
        >>> daids = valid_aids[1:]
        >>> qaid2_qres_vsmany, qreq_vsmany_ = query_vsmany_initial(ibs, qaids, daids)
        >>> # execute function
        >>> vsone_query_pairs = build_vsone_shortlist(ibs, qaid2_qres_vsmany)
        >>> qaid, top_aid_list = vsone_query_pairs[0]
        >>> top_nid_list = ibs.get_annot_name_rowids(top_aid_list)
        >>> assert top_nid_list.index(1) == 0, 'name 1 should be rank 1'
        >>> assert len(top_nid_list) == 5, 'should have 3 names and up to 2 image per name'
        [(1, [3, 2, 6, 5, 4])]
        [(1, [2, 3, 6, 5, 4])]
    """
    NUM_NAMES = 3
    NUM_ANNOTS_PER_NAME = 2
    pairs = []
    for qaid, qres_vsmany in six.iteritems(qaid2_qres_vsmany):
        # Unpack only the per-name annot groups from the name score tuple
        _, _, sorted_aids, _ = qres_vsmany.get_nscoretup()
        shortlist = []
        # Take up to NUM_ANNOTS_PER_NAME annots from each of the top names
        # TODO: allow annots not in daids to be included
        for aids in ut.listclip(sorted_aids, NUM_NAMES):
            shortlist.extend(ut.listclip(aids, NUM_ANNOTS_PER_NAME))
        pairs.append((qaid, shortlist))
    print('built %d pairs' % (len(pairs),))
    return pairs
def get_summary(profile_block_list, maxlines=20):
    """
    Build a text summary of profiled blocks, sorted ascending by total time
    and clipped to the last ``maxlines`` entries (the slowest blocks).

    References:
        https://github.com/rkern/line_profiler
    """
    # Total time per block; unmeasured blocks get a -1 sentinel so they sort first
    times = []
    for block in profile_block_list:
        total = get_block_totaltime(block)
        times.append(-1 if total is None else total)
    block_ids = [get_block_id(block) for block in profile_block_list]
    # Order both lists ascending by time
    sortx = ut.list_argsort(times)
    times_sorted = ut.take(times, sortx)
    ids_aligned = ut.util_str.align_lines(ut.take(block_ids, sortx), ':')
    lines = [
        '%6.2f seconds - ' % total + line
        for total, line in zip(times_sorted, ids_aligned)
    ]
    # Clip from the front so only the slowest (tail) entries survive
    return '\n'.join(ut.listclip(lines, maxlines, fromback=True))
def get_name_shortlist_aids(daid_list, dnid_list, annot_score_list,
                            name_score_list, nid2_nidx,
                            nNameShortList, nAnnotPerName):
    r"""
    CommandLine:
        python -m ibeis.algo.hots.scoring --test-get_name_shortlist_aids

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.scoring import *  # NOQA
        >>> # build test data
        >>> daid_list = np.array([11, 12, 13, 14, 15, 16, 17])
        >>> dnid_list = np.array([21, 21, 21, 22, 22, 23, 24])
        >>> annot_score_list = np.array([ 6, 2, 3, 5, 6, 3, 2])
        >>> name_score_list = np.array([ 8, 9, 5, 4])
        >>> nid2_nidx = {21:0, 22:1, 23:2, 24:3}
        >>> nNameShortList, nAnnotPerName = 3, 2
        >>> # execute function
        >>> args = (daid_list, dnid_list, annot_score_list, name_score_list,
        ...         nid2_nidx, nNameShortList, nAnnotPerName)
        >>> top_daids = get_name_shortlist_aids(*args)
        >>> # verify results
        >>> result = str(top_daids)
        >>> print(result)
        [15, 14, 11, 13, 16]
    """
    # Partition the database annots by name
    unique_nids, groupxs = vt.group_indices(np.array(dnid_list))
    score_groups = vt.apply_grouping(annot_score_list, groupxs)
    aid_groups = vt.apply_grouping(np.array(daid_list), groupxs)
    # Look up each unique name's score so the two orderings agree
    name_scores = name_score_list.take(ut.dict_take(nid2_nidx, unique_nids))
    # Rank names best-first and keep the shortlist
    rank_order = name_scores.argsort()[::-1]
    shortlist_aid_groups = ut.listclip(ut.take(aid_groups, rank_order), nNameShortList)
    shortlist_score_groups = ut.listclip(ut.take(score_groups, rank_order), nNameShortList)
    # Rank annots best-first inside each shortlisted name, then clip per name
    inner_orders = [scores.argsort()[::-1] for scores in shortlist_score_groups]
    ranked_aid_groups = vt.ziptake(shortlist_aid_groups, inner_orders)
    top_daids = ut.flatten(
        [ut.listclip(aids, nAnnotPerName) for aids in ranked_aid_groups]
    )
    return top_daids
def convert_name_suggestion_to_aids(ibs, choicetup, name_suggest_tup):
    """
    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.user_dialogs import *  # NOQA
        >>> import ibeis
        >>> # build test data
        >>> ibs = ibeis.opendb('testdb1')
        >>> comp_aids = [2, 3, 4]
        >>> comp_names = ['fred', 'sue', 'alice']
        >>> chosen_names = ['fred']
        >>> # execute function
        >>> result = convert_name_suggestion_to_aids(ibs, choicetup, name_suggest_tup)
        >>> # verify results
        >>> print(result)
    """
    NUM_TOP = 3
    # Only the chosen names matter here; message and confidence are unused
    _autoname_msg, chosen_names, _name_confidence = name_suggest_tup
    # Representative (first) aid from each candidate group, clipped to the top few
    candidate_aids = ut.listclip(
        ut.get_list_column(choicetup.sorted_aids, 0), NUM_TOP
    )
    candidate_names = ibs.get_annot_names(candidate_aids)
    # Flag candidates whose name was among the suggested names
    is_suggested = ut.list_cover(candidate_names, chosen_names)
    suggest_aids = ut.compress(candidate_aids, is_suggested)
    return candidate_aids, suggest_aids
def dev_train_distinctiveness(species=None):
    r"""
    Train (or load from cache) a distinctiveness normalizer for a species.

    Args:
        ibs (IBEISController):  wbia controller object
        species (None): species tag; must be 'zebra_grevys' or 'zebra_plains'

    Raises:
        ValueError: if ``species`` has no configured training database

    CommandLine:
        python -m wbia.algo.hots.distinctiveness_normalizer --test-dev_train_distinctiveness

        alias dev_train_distinctiveness='python -m wbia.algo.hots.distinctiveness_normalizer --test-dev_train_distinctiveness'
        # Publishing (uses cached normalizers if available)
        dev_train_distinctiveness --species GZ --publish
        dev_train_distinctiveness --species PZ --publish
        dev_train_distinctiveness --species PZ --retrain

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.algo.hots.distinctiveness_normalizer import *  # NOQA
        >>> import wbia
        >>> species = ut.get_argval('--species', str, 'zebra_grevys')
        >>> dev_train_distinctiveness(species)
    """
    import wbia

    # Map the species tag to its training database. Fail fast on an
    # unconfigured species instead of crashing later with a NameError
    # (the original left dbname unbound for any other value).
    if species == 'zebra_grevys':
        dbname = 'GZ_ALL'
    elif species == 'zebra_plains':
        dbname = 'PZ_Master0'
    else:
        raise ValueError(
            'no training database configured for species=%r' % (species,)
        )
    ibs = wbia.opendb(dbname)
    global_distinctdir = ibs.get_global_distinctiveness_modeldir()
    cachedir = global_distinctdir
    dstcnvs_normer = DistinctivnessNormalizer(species, cachedir=cachedir)
    try:
        if ut.get_argflag('--retrain'):
            # Force retraining by pretending the cache load failed
            raise IOError('force cache miss')
        with ut.Timer('loading distinctiveness'):
            dstcnvs_normer.load(cachedir)
        # Cache hit
        logger.info('distinctivness model cache hit')
    except IOError:
        logger.info('distinctivness model cache miss')
        with ut.Timer('training distinctiveness'):
            # Need to train: use one exemplar annot per name.
            # TODO: add one exemplar per viewpoint for each name
            max_annots = 975
            nid_list = ibs.get_valid_nids()
            aids_list = ibs.get_name_aids(nid_list)
            # remove junk
            aids_list = ibs.unflat_map(ibs.filter_junk_annotations, aids_list)
            # remove empty
            aids_list = [aids for aids in aids_list if len(aids) > 0]
            num_annots_list = list(map(len, aids_list))
            # names with more annots are preferred when clipping below
            aids_list = ut.sortedby(aids_list, num_annots_list, reverse=True)
            # take only one annot per name
            aid_list = ut.get_list_column(aids_list, 0)
            # Keep only a certain number of annots for distinctiveness mapping
            aid_list_ = ut.listclip(aid_list, max_annots)
            logger.info('total num named annots = %r' % (sum(num_annots_list)))
            logger.info(
                'training distinctiveness using %d/%d singleton annots'
                % (len(aid_list_), len(aid_list))
            )
            # vec
            # FIXME: qreq_ params for config rowid
            vecs_list = ibs.get_annot_vecs(aid_list_)
            num_vecs = sum(list(map(len, vecs_list)))
            logger.info('num_vecs = %r' % (num_vecs,))
            vecs = np.vstack(vecs_list)
            logger.info('vecs size = %r' % (ut.get_object_size_str(vecs),))
            dstcnvs_normer.init_support(vecs)
            dstcnvs_normer.save(global_distinctdir)
    if ut.get_argflag('--publish'):
        dstcnvs_normer.publish()
def dev_train_distinctiveness(species=None):
    r"""
    Train (or load from cache) a distinctiveness normalizer for a species.

    Args:
        ibs (IBEISController):  ibeis controller object
        species (None): species tag; must be 'zebra_grevys' or 'zebra_plains'

    Raises:
        ValueError: if ``species`` has no configured training database

    CommandLine:
        python -m ibeis.algo.hots.distinctiveness_normalizer --test-dev_train_distinctiveness

        alias dev_train_distinctiveness='python -m ibeis.algo.hots.distinctiveness_normalizer --test-dev_train_distinctiveness'
        # Publishing (uses cached normalizers if available)
        dev_train_distinctiveness --species GZ --publish
        dev_train_distinctiveness --species PZ --publish
        dev_train_distinctiveness --species PZ --retrain

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.distinctiveness_normalizer import *  # NOQA
        >>> import ibeis
        >>> species = ut.get_argval('--species', str, 'zebra_grevys')
        >>> dev_train_distinctiveness(species)
    """
    import ibeis

    # Map the species tag to its training database. Fail fast on an
    # unconfigured species instead of crashing later with a NameError
    # (the original left dbname unbound for any other value).
    if species == 'zebra_grevys':
        dbname = 'GZ_ALL'
    elif species == 'zebra_plains':
        dbname = 'PZ_Master0'
    else:
        raise ValueError(
            'no training database configured for species=%r' % (species,)
        )
    ibs = ibeis.opendb(dbname)
    global_distinctdir = ibs.get_global_distinctiveness_modeldir()
    cachedir = global_distinctdir
    dstcnvs_normer = DistinctivnessNormalizer(species, cachedir=cachedir)
    try:
        if ut.get_argflag('--retrain'):
            # Force retraining by pretending the cache load failed
            raise IOError('force cache miss')
        with ut.Timer('loading distinctiveness'):
            dstcnvs_normer.load(cachedir)
        # Cache hit
        print('distinctivness model cache hit')
    except IOError:
        print('distinctivness model cache miss')
        with ut.Timer('training distinctiveness'):
            # Need to train: use one exemplar annot per name.
            # TODO: add one exemplar per viewpoint for each name
            max_annots = 975
            nid_list = ibs.get_valid_nids()
            aids_list = ibs.get_name_aids(nid_list)
            # remove junk
            aids_list = ibs.unflat_map(ibs.filter_junk_annotations, aids_list)
            # remove empty
            aids_list = [aids for aids in aids_list if len(aids) > 0]
            num_annots_list = list(map(len, aids_list))
            # names with more annots are preferred when clipping below
            aids_list = ut.sortedby(aids_list, num_annots_list, reverse=True)
            # take only one annot per name
            aid_list = ut.get_list_column(aids_list, 0)
            # Keep only a certain number of annots for distinctiveness mapping
            aid_list_ = ut.listclip(aid_list, max_annots)
            print('total num named annots = %r' % (sum(num_annots_list)))
            print('training distinctiveness using %d/%d singleton annots'
                  % (len(aid_list_), len(aid_list)))
            # vec
            # FIXME: qreq_ params for config rowid
            vecs_list = ibs.get_annot_vecs(aid_list_)
            num_vecs = sum(list(map(len, vecs_list)))
            print('num_vecs = %r' % (num_vecs,))
            vecs = np.vstack(vecs_list)
            print('vecs size = %r' % (ut.get_object_size_str(vecs),))
            dstcnvs_normer.init_support(vecs)
            dstcnvs_normer.save(global_distinctdir)
    if ut.get_argflag('--publish'):
        dstcnvs_normer.publish()