def assert_cache_hits(ibs, ismiss_list, rowid_list, kwargs_hash, **kwargs):
    """Sanity-check that cached column values agree with a fresh database read.

    NOTE(review): ``tblname``, ``colname``, ``cfgkeys`` and ``getter_func`` are
    free variables here -- presumably bound by an enclosing closure or
    decorator factory; confirm against the full module.
    """
    # Rowids whose ismiss flag is False were cache hits
    cached_rowid_list = ut.filterfalse_items(rowid_list, ismiss_list)
    cache_ = ibs.table_cache[tblname][colname][kwargs_hash]
    # Load cached values for each rowid
    cache_vals_list = ut.dict_take_list(cache_, cached_rowid_list, None)
    # Re-read the same rowids straight from the database for comparison
    db_vals_list = getter_func(ibs, cached_rowid_list, **kwargs)
    # Assert everything is valid
    msg_fmt = ut.codeblock(
        '''
        [assert_cache_hits] tblname = %r
        [assert_cache_hits] colname = %r
        [assert_cache_hits] cfgkeys = %r
        [assert_cache_hits] CACHE INVALID: %r != %r
        '''
    )
    msg = msg_fmt % (tblname, colname, cfgkeys, cache_vals_list, db_vals_list, )
    try:
        list1 = cache_vals_list
        list2 = db_vals_list
        assert ut.lists_eq(list1, list2), msg
        #if isinstance(db_vals_list, list):
        #    assert cache_vals_list == db_vals_list, msg
        #else:
        #    assert np.all(cache_vals_list == db_vals_list), msg
    except AssertionError as ex:
        # A real cache mismatch: propagate unchanged
        raise ex
    except Exception as ex2:
        # Unexpected comparison failure (e.g. incomparable types):
        # dump diagnostics and drop into an interactive shell before re-raising
        print(type(cache_vals_list))
        print(type(db_vals_list))
        ut.printex(ex2)
        ut.embed()
        raise
def filter_part_set(
    ibs,
    part_rowid_list,
    include_only_aid_list=None,
    is_staged=False,
    viewpoint='no-filter',
    minqual=None,
):
    """Filter a set of part rowids by staging, owning image, viewpoint and quality.

    Returns the surviving rowids sorted ascending.

    NOTE(review): ``include_only_aid_list`` is compared against part *gids* --
    presumably intentional upstream, but the naming is suspect; confirm.
    """
    parts = part_rowid_list
    # Staged filtering: True keeps staged parts, False keeps unstaged,
    # anything else (e.g. None) skips the filter entirely.
    if is_staged is True:
        staged_flags = ibs.get_part_staged_flags(parts)
        parts = ut.compress(parts, staged_flags)
    elif is_staged is False:
        staged_flags = ibs.get_part_staged_flags(parts)
        parts = ut.filterfalse_items(parts, staged_flags)
    if include_only_aid_list is not None:
        owning_gids = ibs.get_part_gids(parts)
        keep = [gid in include_only_aid_list for gid in owning_gids]
        parts = ut.compress(parts, keep)
    if viewpoint != 'no-filter':
        keep = [vp == viewpoint for vp in ibs.get_part_viewpoints(parts)]
        parts = ut.compress(parts, keep)
    if minqual is not None:
        parts = ibs.filter_part_rowids_to_quality(parts, minqual, unknown_ok=True)
    return sorted(parts)
def get_turk_image_args(is_reviewed_func):
    """ Helper to return gids in an imageset or a group review """
    ibs = current_app.ibs

    def _ensureid(_id):
        # Request values arrive as strings; '' / 'None' mean "not supplied".
        return None if _id == 'None' or _id == '' else int(_id)

    imgsetid = _ensureid(request.args.get('imgsetid', ''))
    print('NOT GROUP_REVIEW')
    gid_list = ibs.get_valid_gids(imgsetid=imgsetid)
    reviewed_list = is_reviewed_func(ibs, gid_list)
    try:
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(gid_list), )
    except ZeroDivisionError:
        # No images at all -> zero progress
        progress = '0.00'
    gid_arg = request.args.get('gid', '')
    if len(gid_arg) > 0:
        gid = int(gid_arg)
    else:
        # Pick a random image that has not been reviewed yet
        unreviewed = ut.filterfalse_items(gid_list, reviewed_list)
        gid = random.choice(unreviewed) if len(unreviewed) > 0 else None
    previous = request.args.get('previous', None)
    print('gid = %r' % (gid, ))
    return gid_list, reviewed_list, imgsetid, progress, gid, previous
def get_turk_annot_args(is_reviewed_func, speed_hack=False):
    """ Helper to return aids in an imageset or a group review """
    ibs = current_app.ibs

    def _ensureid(_id):
        # Request values arrive as strings; '' / 'None' mean "not supplied".
        return None if _id == 'None' or _id == '' else int(_id)

    imgsetid = _ensureid(request.args.get('imgsetid', ''))
    src_ag = _ensureid(request.args.get('src_ag', ''))
    dst_ag = _ensureid(request.args.get('dst_ag', ''))
    group_review_flag = src_ag is not None and dst_ag is not None
    if group_review_flag:
        # Group review: candidates come from the source annot-group; an annot
        # counts as reviewed once it also appears in the destination group.
        src_gar_rowids = ibs.get_annotgroup_gar_rowids(src_ag)
        dst_gar_rowids = ibs.get_annotgroup_gar_rowids(dst_ag)
        aid_list = ibs.get_gar_aid(src_gar_rowids)
        dst_aids = ibs.get_gar_aid(dst_gar_rowids)
        reviewed_list = [aid in dst_aids for aid in aid_list]
    else:
        print('NOT GROUP_REVIEW')
        if speed_hack:
            aid_list = ibs.get_valid_aids()
        else:
            gids = ibs.get_valid_gids(imgsetid=imgsetid)
            aid_list = ut.flatten(ibs.get_image_aids(gids, is_staged=False))
        reviewed_list = is_reviewed_func(ibs, aid_list)
    try:
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(aid_list), )
    except ZeroDivisionError:
        progress = '0.00'
    aid_arg = request.args.get('aid', '')
    if len(aid_arg) > 0:
        aid = int(aid_arg)
    else:
        pending = ut.filterfalse_items(aid_list, reviewed_list)
        if len(pending) == 0:
            aid = None
        elif group_review_flag:
            # Group review proceeds in order; normal turking is randomized
            aid = pending[0]
        else:
            aid = random.choice(pending)
    previous = request.args.get('previous', None)
    print('aid = %r' % (aid, ))
    return aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous
def imageset_annot_demographics_processed(ibs, aid_list):
    """Return a per-annotation flag for whether demographics (sex + age) are set.

    Unnamed annotations default to True (nothing to review for them).
    """
    logger.info('[demographics] Check %d total annotations' % (len(aid_list), ))
    nids = ibs.get_annot_nids(aid_list)
    unnamed_flags = [nid <= 0 for nid in nids]
    named_aids = ut.filterfalse_items(aid_list, unnamed_flags)
    logger.info('[demographics] Found %d named annotations' % (len(named_aids), ))
    sex_done = {
        aid: sex in [0, 1, 2]
        for aid, sex in zip(named_aids, ibs.get_annot_sex(named_aids))
    }
    logger.info('[demographics] Found %d set sex annotations' % (sum(sex_done.values()), ))
    # Age is checked for every annotation, not just the named ones
    age_done = {
        aid: -1 not in age and age.count(None) <= 1
        for aid, age in zip(aid_list, ibs.get_annot_age_months_est(aid_list))
    }
    logger.info('[demographics] Found %d set age annotations' % (sum(age_done.values()), ))
    annots_reviewed = [
        sex_done.get(aid, True) and age_done.get(aid, True) for aid in aid_list
    ]
    logger.info('[demographics] Found %d reviewed annotations' % (sum(annots_reviewed), ))
    return annots_reviewed
def assert_cache_hits(ibs, ismiss_list, rowid_list, kwargs_hash, **kwargs):
    """Sanity-check that cached column values agree with a fresh database read.

    NOTE(review): duplicate of an earlier ``assert_cache_hits`` in this file
    (different formatting only). ``tblname``, ``colname``, ``cfgkeys`` and
    ``getter_func`` are free variables -- presumably closure-bound; confirm.
    """
    # Rowids whose ismiss flag is False were cache hits
    cached_rowid_list = ut.filterfalse_items(rowid_list, ismiss_list)
    cache_ = ibs.table_cache[tblname][colname][kwargs_hash]
    # Load cached values for each rowid
    cache_vals_list = ut.dict_take_list(cache_, cached_rowid_list, None)
    db_vals_list = getter_func(ibs, cached_rowid_list, **kwargs)
    # Assert everything is valid
    msg_fmt = ut.codeblock(
        """
        [assert_cache_hits] tblname = %r
        [assert_cache_hits] colname = %r
        [assert_cache_hits] cfgkeys = %r
        [assert_cache_hits] CACHE INVALID: %r != %r
        """
    )
    msg = msg_fmt % (tblname, colname, cfgkeys, cache_vals_list, db_vals_list)
    try:
        list1 = cache_vals_list
        list2 = db_vals_list
        assert ut.lists_eq(list1, list2), msg
        # if isinstance(db_vals_list, list):
        #     assert cache_vals_list == db_vals_list, msg
        # else:
        #     assert np.all(cache_vals_list == db_vals_list), msg
    except AssertionError as ex:
        # A real cache mismatch: propagate unchanged
        raise ex
    except Exception as ex2:
        # Unexpected comparison failure: dump diagnostics, open a shell, re-raise
        print(type(cache_vals_list))
        print(type(db_vals_list))
        ut.printex(ex2)
        ut.embed()
        raise
def merge_nonjunk_into_new_name(self, event=None):
    """Assign every non-junk annotation the SAME freshly-minted name."""
    all_aids = self.all_aid_list
    junk_flags = self.ibs.get_annot_isjunk(all_aids)
    keep_aids = ut.filterfalse_items(all_aids, junk_flags)
    self.ibs.set_annot_names_to_same_new_name(keep_aids)
    # Notify listeners and redraw the page
    self.update_callback()
    self.backend_callback()
    self.show_page()
def resize_imagelist_to_sqrtarea(gpath_list, new_gpath_list=None,
                                 sqrt_area=800, output_dir=None,
                                 checkexists=True, **kwargs):
    """Resize images so each has roughly ``sqrt_area ** 2`` pixels, keeping
    aspect ratio, and return the output paths. Already-existing outputs are
    skipped when ``checkexists`` is True.
    """
    import vtool as vt
    target_area = sqrt_area ** 2
    # Measure the source images and derive aspect-preserving target sizes
    src_sizes = [vt.open_image_size(gpath) for gpath in gpath_list]
    dst_sizes = [vt.ScaleStrat.area(target_area, wh) for wh in src_sizes]
    if new_gpath_list is None:
        # No explicit destinations: derive names from the sources + new size
        if output_dir is None:
            output_dir = 'resized_sqrtarea%r' % sqrt_area
        ut.ensuredir(output_dir)
        suffixes = ['_' + repr(sz).replace(' ', '') for sz in dst_sizes]
        from os.path import basename
        src_names = [basename(p) for p in gpath_list]
        dst_names = [ut.augpath(p, suffix=s) for p, s in zip(src_names, suffixes)]
        new_gpath_list = [join(output_dir, name) for name in dst_names]
        new_gpath_list = list(map(ut.unixpath, new_gpath_list))
    assert len(new_gpath_list) == len(gpath_list), 'unequal len'
    assert len(dst_sizes) == len(gpath_list), 'unequal len'
    if checkexists:
        # Only process images whose output does not already exist
        already = list(map(exists, new_gpath_list))
        todo_src = ut.filterfalse_items(gpath_list, already)
        todo_dst = ut.filterfalse_items(new_gpath_list, already)
        todo_sizes = ut.filterfalse_items(dst_sizes, already)
    else:
        todo_src = gpath_list
        todo_dst = new_gpath_list
        todo_sizes = dst_sizes
    # Drain the generator to force the resizes to actually run
    generator = resize_imagelist_generator(todo_src, todo_dst, todo_sizes, **kwargs)
    for _ in generator:
        pass
    return new_gpath_list
def merge_nonjunk_into_new_name(self, event=None):
    """Give all non-junk annotations one shared new name, then refresh the UI."""
    aids = self.all_aid_list
    nonjunk = ut.filterfalse_items(aids, self.ibs.get_annot_isjunk(aids))
    # Rename the survivors to a single new name
    self.ibs.set_annot_names_to_same_new_name(nonjunk)
    self.update_callback()
    self.backend_callback()
    self.show_page()
def get_turk_annot_args(is_reviewed_func):
    """ Helper to return aids in an imageset or a group review """
    ibs = current_app.ibs

    def _ensureid(_id):
        # Request values arrive as strings; '' / 'None' mean "not supplied".
        return None if _id == 'None' or _id == '' else int(_id)

    imgsetid = _ensureid(request.args.get('imgsetid', ''))
    src_ag = _ensureid(request.args.get('src_ag', ''))
    dst_ag = _ensureid(request.args.get('dst_ag', ''))
    group_review_flag = src_ag is not None and dst_ag is not None
    if group_review_flag:
        # Group review: candidates come from the source annot-group; an annot
        # counts as reviewed once it also appears in the destination group.
        src_gar_rowids = ibs.get_annotgroup_gar_rowids(src_ag)
        dst_gar_rowids = ibs.get_annotgroup_gar_rowids(dst_ag)
        aid_list = ibs.get_gar_aid(src_gar_rowids)
        dst_aids = ibs.get_gar_aid(dst_gar_rowids)
        reviewed_list = [aid in dst_aids for aid in aid_list]
    else:
        gids = ibs.get_valid_gids(imgsetid=imgsetid)
        aid_list = ut.flatten(ibs.get_image_aids(gids))
        reviewed_list = is_reviewed_func(ibs, aid_list)
    try:
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(aid_list), )
    except ZeroDivisionError:
        progress = '0.00'
    aid_arg = request.args.get('aid', '')
    if len(aid_arg) > 0:
        aid = int(aid_arg)
    else:
        pending = ut.filterfalse_items(aid_list, reviewed_list)
        if len(pending) == 0:
            aid = None
        elif group_review_flag:
            # Group review proceeds in order; normal turking is randomized
            aid = pending[0]
        else:
            aid = random.choice(pending)
    previous = request.args.get('previous', None)
    print('aid = %r' % (aid,))
    print(ut.obj_str(ibs.get_annot_info(aid, default=True, nl=True)))
    return aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous
def get_name_linked_imagesets_by_imgsetid(ibs, imgsetid):
    """Return texts of imagesets linked to the names occurring in this imageset."""
    import utool as ut
    aids = ibs.get_imageset_aids(imgsetid)
    # Drop annotations with unknown identity before resolving names
    known_aids = ut.filterfalse_items(aids, ibs.is_aid_unknown(aids))
    unique_nids = list(set(ibs.get_annot_nids(known_aids, distinguish_unknowns=False)))
    name_imgsetids = ibs.get_name_imgsetids(unique_nids)
    return ibs.get_imageset_text(name_imgsetids)
def get_name_linked_imagesets_by_imgsetid(ibs, imgsetid):
    """For an imageset, find the imagesets associated with its (known) names
    and return their text labels."""
    import utool as ut
    aid_list_ = ibs.get_imageset_aids(imgsetid)
    unknown_flags = ibs.is_aid_unknown(aid_list_)
    aid_list = ut.filterfalse_items(aid_list_, unknown_flags)
    # Deduplicate the name rowids of the remaining annotations
    nid_set = set(ibs.get_annot_nids(aid_list, distinguish_unknowns=False))
    name_imgsetids = ibs.get_name_imgsetids(list(nid_set))
    name_imagesettexts = ibs.get_imageset_text(name_imgsetids)
    return name_imagesettexts
def imageset_annot_demographics_processed(ibs, aid_list):
    """Per-annotation flag: True when sex and age are both recorded (or the
    annotation is outside the named subset and so has nothing to review)."""
    nids = ibs.get_annot_nids(aid_list)
    negative_flags = [nid < 0 for nid in nids]
    kept_aids = ut.filterfalse_items(aid_list, negative_flags)
    # Treat a missing sex value as -2 so the >= 0 test reads uniformly
    sexes = [-2 if s is None else s for s in ibs.get_annot_sex(kept_aids)]
    sex_done = {aid: s >= 0 for aid, s in zip(kept_aids, sexes)}
    age_done = {
        aid: -1 not in age and age.count(None) < 2
        for aid, age in zip(kept_aids, ibs.get_annot_age_months_est(kept_aids))
    }
    return [
        sex_done.get(aid, True) and age_done.get(aid, True) for aid in aid_list
    ]
def __init__(split_index, ibs, daid_list, num_forests=8):
    """Build an NNSplitIndex: partition database annots by name into up to
    ``num_forests`` NNIndex forests, with overflow and unknown-name extras.

    NOTE(review): first parameter is named ``split_index`` instead of
    ``self`` -- presumably deliberate project style; behavior is identical.
    """
    print('[nnsindex] make NNSplitIndex over %d annots' % (len(daid_list),))
    aid_list = daid_list
    nid_list = ibs.get_annot_nids(aid_list)
    #flag_list = ibs.get_annot_exemplar_flag(aid_list)
    # Group annotation ids by their name id
    nid2_aids = utool.group_items(aid_list, nid_list)
    key_list = nid2_aids.keys()
    aids_list = nid2_aids.values()
    isunknown_list = ibs.is_nid_unknown(key_list)
    # Split groups into known-name groups and a flat list of unknown-name aids
    known_aids = utool.filterfalse_items(aids_list, isunknown_list)
    uknown_aids = utool.flatten(utool.filter_items(aids_list, isunknown_list))
    # Never build more forests than the largest group has members
    num_forests_ = min(max(map(len, aids_list)), num_forests)
    # Put one name per forest
    forest_aids, overflow_aids = utool.sample_zip(
        known_aids, num_forests_, allow_overflow=True, per_bin=1)
    forest_indexes = []
    extra_indexes = []
    for tx, aids in enumerate(forest_aids):
        print('[nnsindex] building forest %d/%d with %d aids' %
              (tx + 1, num_forests_, len(aids)))
        if len(aids) > 0:
            nn_index = NNIndex(ibs, aids)
            forest_indexes.append(nn_index)
    if len(overflow_aids) > 0:
        # Annots that did not fit into the per-name forests
        print('[nnsindex] building overflow forest')
        overflow_index = NNIndex(ibs, overflow_aids)
        extra_indexes.append(overflow_index)
    if len(uknown_aids) > 0:
        print('[nnsindex] building unknown forest')
        unknown_index = NNIndex(ibs, uknown_aids)
        extra_indexes.append(unknown_index)
    #print('[nnsindex] building normalizer forest')  # TODO
    split_index.forest_indexes = forest_indexes
    split_index.extra_indexes = extra_indexes
def get_test_qaids(ibs, default_qaids=None, return_annot_info=False, aidcfg=None):
    """
    Gets test annot_rowids based on command line arguments

    DEPRICATE

    Args:
        ibs (IBEISController):  ibeis controller object
        default_qaids (None): if list then used only if no other aids are available
            (default = [1]) as a string it mimics the command line

    Returns:
        list: available_qaids

    CommandLine:
        python -m ibeis.init.main_helpers --test-get_test_qaids
        python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_Master0
        python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_Master0 --qaid 1
        python -m ibeis.init.main_helpers --test-get_test_qaids --allgt --db PZ_MTEST
        python -m ibeis.init.main_helpers --test-get_test_qaids --qaid 4 5 8 --verbmhelp
        python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_MTEST
        python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_MTEST --qaid 2 --verbmhelp
        python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_MTEST --qaid 2
        python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_Master0 --qindex 0:10 --verbmhelp
        python -m ibeis.init.main_helpers --exec-get_test_qaids --controlled --db PZ_Master0 --exec-mode
        python -m ibeis.init.main_helpers --exec-get_test_qaids --db testdb1 --allgt --qindex 0:256

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.init.main_helpers import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='testdb1')
        >>> default_qaids = None
        >>> available_qaids = get_test_qaids(ibs, default_qaids)
        >>> ibeis.other.dbinfo.get_dbinfo(ibs, aid_list=available_qaids, with_contrib=False, short=True)
        >>> result = 'available_qaids = ' + ut.obj_str(available_qaids, truncate=True, nl=False)
        >>> print('len(available_qaids) = %d' % len(available_qaids))
        >>> print(result)
        available_qaids = [1]
    """
    # Records which selection criteria actually fired, for the caller's benefit
    qaid_request_info = {}
    if VERB_MAIN_HELPERS:
        print('[get_test_qaids] + --- GET_TEST_QAIDS ---')
    # Old version of this function
    if VERB_MAIN_HELPERS:
        print('[get_test_qaids] + --- GET_TEST_QAIDS ---')
        print('[get_test_qaids] * default_qaids = %s' % (
            ut.obj_str(default_qaids, truncate=True, nl=False)))
    valid_aids = ibs.get_valid_aids()
    if len(valid_aids) == 0:
        print('[get_test_qaids] WARNING no annotations available')
    # ---- INCLUDING STEP
    if VERB_MAIN_HELPERS:
        print('[get_test_qaids] * include step')
    available_qaids = []
    #ut.get_argflag(('--all-cases', '--all'))
    #ut.get_argflag(('--all-gt-cases', '--allgt'))
    #ut.get_argflag(('--all-hard-cases', '--allhard'))
    #ut.get_argflag(('--qaid', '--qaids'))
    #ut.get_argflag('--controlled') or ut.get_argflag('--controlled_qaids')
    #not ut.get_argflag('--junk')
    # Selection flags come from either the parsed args or the default_qaids
    # string sentinel values ('all' / 'gt')
    ALL_CASES = params.args.all_cases or default_qaids == 'all'
    GT_CASES = params.args.all_gt_cases or default_qaids == 'gt'
    HARD_CASES = params.args.all_hard_cases or ut.get_argflag(
        ('--all-hard-cases', '--allhard', '--hard'))
    NO_JUNK = not ut.get_argflag('--junk')
    CONTROLLED_CASES = ut.get_argflag('--controlled') or ut.get_argflag('--controlled_qaids')
    NO_REVIEWED = ut.get_argflag('--unreviewed')
    species = ut.get_argval('--species')
    #QAID = params.args.qaid
    QAID = ut.get_argval('--qaid', type_='fuzzy_subset', default=None)
    QINDEX = params.args.qindex
    QSHUFFLE = ut.get_argval('--qshuffle')
    if QAID is not None:
        if VERB_MAIN_HELPERS:
            print('[get_test_qaids] * Including cmdline specified qaids')
        try:
            args_qaid = ensure_flatlistlike(QAID)
        except Exception:
            args_qaid = QAID
        available_qaids.extend(args_qaid)
        qaid_request_info['custom_commandline'] = args_qaid
    if ALL_CASES:
        if VERB_MAIN_HELPERS:
            print('[get_test_qaids] * Including all qaids')
        available_qaids.extend(valid_aids)
        qaid_request_info['all_cases'] = True
    if HARD_CASES:
        if VERB_MAIN_HELPERS:
            print('[get_test_qaids] * Including hard qaids')
        is_hard_list = ibs.get_annot_is_hard(valid_aids)
        hard_aids = ut.compress(valid_aids, is_hard_list)
        available_qaids.extend(hard_aids)
        qaid_request_info['hard_cases'] = True
    if GT_CASES:
        if VERB_MAIN_HELPERS:
            print('[get_test_qaids] * Including groundtruth qaids')
        has_gt_list = ibs.get_annot_has_groundtruth(valid_aids)
        hasgt_aids = ut.compress(valid_aids, has_gt_list)
        print('[get_test_qaids] Adding all %d/%d ground-truthed test cases' % (
            len(hasgt_aids), len(valid_aids)))
        available_qaids.extend(hasgt_aids)
        qaid_request_info['gt_cases'] = True
    if CONTROLLED_CASES:
        if VERB_MAIN_HELPERS:
            print('[get_test_qaids] * Including controlled qaids')
        from ibeis import ibsfuncs
        # Override all other gts with controlled
        controlled_qaids = ibsfuncs.get_two_annots_per_name_and_singletons(
            ibs, onlygt=True)
        available_qaids.extend(controlled_qaids)
        qaid_request_info['controlled'] = True
    else:
        qaid_request_info['controlled'] = False
    # ---- CHECK_DEFAULTS QUERY
    if VERB_MAIN_HELPERS:
        print('[get_test_qaids] * len(available_qaids) = %r' % (len(available_qaids)))
    if len(available_qaids) == 0:
        # Nothing selected on the command line: fall back to defaults
        print('[get_test_qaids] * ... defaulting, no available qaids on command line.')
        if default_qaids is None:
            default_qaids = valid_aids[0:1]
            qaid_request_info['default_one'] = True
        elif isinstance(default_qaids, six.string_types):
            if default_qaids == 'gt' or default_qaids == 'allgt':
                default_qaids = ibs.get_valid_aids(hasgt=True)
                qaid_request_info['default_gt'] = True
        available_qaids = default_qaids
    else:
        if VERB_MAIN_HELPERS:
            print('[get_test_qaids] * ... not defaulting')
    # Deduplicate while preserving first-seen order
    available_qaids = ut.unique_keep_order(available_qaids)
    # ---- EXCLUSION STEP
    if VERB_MAIN_HELPERS:
        print('[get_test_qaids] * len(available_qaids) = %r' % (len(available_qaids)))
        print('[get_test_qaids] * exclude step')
    if NO_JUNK:
        if VERB_MAIN_HELPERS:
            print('[get_test_qaids] * Filtering junk')
        available_qaids = ibs.filter_junk_annotations(available_qaids)
        qaid_request_info['has_junk'] = False
    if NO_REVIEWED:
        if VERB_MAIN_HELPERS:
            print('[get_test_qaids] * Filtering unreviewed')
        isreviewed_list = ibs.get_annot_has_reviewed_matching_aids(available_qaids)
        available_qaids = ut.filterfalse_items(available_qaids, isreviewed_list)
        qaid_request_info['has_unreviewed'] = False
    if species is not None:
        if species == 'primary':
            if VERB_MAIN_HELPERS:
                print('[get_test_qaids] * Finiding primary species')
            #species = ibs.get_primary_database_species(available_qaids)
            species = ibs.get_primary_database_species()
            qaid_request_info['primary_species'] = True
        if VERB_MAIN_HELPERS:
            print('[get_test_qaids] * Filtering to species=%r' % (species,))
        isvalid_list = np.array(ibs.get_annot_species(available_qaids)) == species
        available_qaids = ut.compress(available_qaids, isvalid_list)
        qaid_request_info['species_filter'] = species
    if VERB_MAIN_HELPERS:
        print('[get_test_qaids] * len(available_qaids) = %r' % (len(available_qaids)))
        print('[get_test_qaids] * subindex step')
    # ---- INDEX SUBSET
    #ut.get_argval('--qshuffle')
    if QSHUFFLE:
        # Determenistic shuffling
        available_qaids = ut.take(
            available_qaids, ut.random_indexes(len(available_qaids), seed=42))
        qaid_request_info['shuffled'] = True
    # Sample a large pool of chosen query qindexes
    if QINDEX is not None:
        # FIXME: should use a slice of the list or a sublist
        qindexes = ensure_flatlistlike(QINDEX)
        _test_qaids = [available_qaids[qx] for qx in qindexes
                       if qx < len(available_qaids)]
        print('[get_test_qaids] Chose subset of size %d/%d' % (
            len(_test_qaids), len(available_qaids)))
        available_qaids = _test_qaids
        qaid_request_info['subset'] = qindexes
    if VERB_MAIN_HELPERS:
        print('[get_test_qaids] * len(available_qaids) = %r' % (len(available_qaids)))
        print('[get_test_qaids] L ___ GET_TEST_QAIDS ___')
    if return_annot_info:
        return available_qaids, qaid_request_info
    else:
        return available_qaids
def setup_incremental_test(ibs_gt, clear_names=True, aid_order='shuffle'):
    r"""
    Prepare an empty test database seeded with annotations copied from a
    ground-truth database, for incremental-query testing.

    CommandLine:
        python -m ibeis.algo.hots.automated_helpers --test-setup_incremental_test:0
        python dev.py -t custom --cfg codename:vsone_unnorm --db PZ_MTEST --allgt --vf --va
        python dev.py -t custom --cfg codename:vsone_unnorm --db PZ_MTEST --allgt --vf --va --index 0 4 8 --verbose

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.automated_helpers import *  # NOQA
        >>> import ibeis  # NOQA
        >>> ibs_gt = ibeis.opendb('PZ_MTEST')
        >>> ibs2, aid_list1, aid1_to_aid2 = setup_incremental_test(ibs_gt)

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.automated_helpers import *  # NOQA
        >>> import ibeis  # NOQA
        >>> ibs_gt = ibeis.opendb('GZ_ALL')
        >>> ibs2, aid_list1, aid1_to_aid2 = setup_incremental_test(ibs_gt)
    """
    print('\n\n---- SETUP INCREMENTAL TEST ---\n\n')
    # Take a known database and create an empty database to test in
    ONLY_GT = True
    if ONLY_GT:
        # use only annotations that will have matches in test
        aid_list1_ = ibs_gt.get_aids_with_groundtruth()
    else:
        # use every annotation in test
        aid_list1_ = ibs_gt.get_valid_aids()
    if ut.get_argflag('--gzdev'):
        # Use a custom selection of gzall
        from ibeis.algo.hots import devcases
        assert ibs_gt.get_dbname() == 'GZ_ALL', 'not gzall'
        vuuid_list, ignore_vuuids = devcases.get_gzall_small_test()
        # TODO; include all names of these annots too
        aid_list = ibs_gt.get_annot_aids_from_visual_uuid(vuuid_list)
        ignore_aid_list = ibs_gt.get_annot_aids_from_visual_uuid(ignore_vuuids)
        ignore_nid_list = ibs_gt.get_annot_nids(ignore_aid_list)
        ut.assert_all_not_None(aid_list)
        # Pull in the groundtruth partners of the selected annots as well
        other_aids = ut.flatten(ibs_gt.get_annot_groundtruth(aid_list))
        aid_list.extend(other_aids)
        aid_list = sorted(set(aid_list))
        nid_list = ibs_gt.get_annot_nids(aid_list)
        isinvalid_list = [nid in ignore_nid_list for nid in nid_list]
        print('Filtering %r annots specified to ignore' % (sum(isinvalid_list),))
        aid_list = ut.filterfalse_items(aid_list, isinvalid_list)
        #ut.embed()
        aid_list1_ = aid_list
    #ut.embed()
    # Add aids in a random order
    VALID_ORDERS = ['shuffle', 'stagger', 'same']
    #AID_ORDER = 'shuffle'
    aid_order = ut.get_argval('--aid-order', default=aid_order)
    # .index raises ValueError for an unknown order, so this assert is
    # effectively a membership check
    assert VALID_ORDERS.index(aid_order) > -1
    if aid_order == 'shuffle':
        aid_list1 = ut.deterministic_shuffle(aid_list1_[:])
    elif aid_order == 'stagger':
        # Interleave annots across names so no name is added all at once
        from six.moves import zip_longest, filter
        aid_groups, unique_nid_list = ibs_gt.group_annots_by_name(aid_list1_)

        def stagger_group(list_):
            return ut.filter_Nones(ut.iflatten(zip_longest(*list_)))

        aid_multiton_group = list(filter(lambda aids: len(aids) > 1, aid_groups))
        aid_list1 = stagger_group(aid_multiton_group)
        pass
    elif aid_order == 'same':
        aid_list1 = aid_list1_
    # If reset is true the test database is started completely from scratch
    reset = ut.get_argflag('--reset')
    aid1_to_aid2 = {}  # annotation mapping
    ibs2 = make_incremental_test_database(ibs_gt, aid_list1, reset)
    # Pre-add all annotations to the test database
    aids_chunk1 = aid_list1
    aid_list2 = add_annot_chunk(ibs_gt, ibs2, aids_chunk1, aid1_to_aid2)
    #ut.embed()
    # Assert annotation visual uuids are in agreement
    if ut.DEBUG2:
        annot_testdb_consistency_checks(ibs_gt, ibs2, aid_list1, aid_list2)
    # Remove names and exemplar information from test database
    if clear_names:
        ensure_testdb_clean_data(ibs_gt, ibs2, aid_list1, aid_list2)
    # Preprocess features before testing
    ibs2.ensure_annotation_data(aid_list2, featweights=True)
    return ibs2, aid_list1, aid1_to_aid2
def sort_module_functions():
    """Alphabetize the function blocks of ``manual_annot_funcs.py`` in place.

    Splits the module text into blocks separated by two blank lines, sorts the
    function blocks by function name, keeps non-function blocks in their
    original relative order, moves the ``__main__`` guard to the end, and
    rewrites the file (backing up the original first).

    Side effects: writes a backup copy and overwrites the target file.
    """
    from os.path import dirname, join
    import utool as ut
    import ibeis.control
    import re
    modfpath = dirname(ibeis.control.__file__)
    fpath = join(modfpath, 'manual_annot_funcs.py')
    #fpath = join(modfpath, 'manual_dependant_funcs.py')
    #fpath = join(modfpath, 'manual_lblannot_funcs.py')
    #fpath = join(modfpath, 'manual_name_species_funcs.py')
    text = ut.read_from(fpath, verbose=False)
    # NOTE: a previous version built per-line indent/blank/decorator markers
    # here (``tmp``); they were never used, so that dead code was removed.
    # Top-level blocks are separated by two blank lines (PEP8 spacing)
    block_list = re.split('\n\n\n', text, flags=re.MULTILINE)
    isfunc_list = [
        re.search('^def ', block, re.MULTILINE) is not None for block in block_list
    ]
    whole_varname = ut.whole_word(ut.REGEX_VARNAME)
    funcname_regex = r'def\s+' + ut.named_field('funcname', whole_varname)

    def findfuncname(block):
        # Extract the name of the first function defined in a block
        match = re.search(funcname_regex, block)
        return match.group('funcname')

    funcnameblock_list = [
        findfuncname(block) if isfunc else None
        for isfunc, block in zip(isfunc_list, block_list)
    ]
    funcblock_list = ut.filter_items(block_list, isfunc_list)
    funcname_list = ut.filter_items(funcnameblock_list, isfunc_list)
    # BUGFIX: this statement was previously duplicated; once is enough
    nonfunc_list = ut.filterfalse_items(block_list, isfunc_list)
    # Separate out the ``if __name__ == '__main__'`` block so it stays last
    ismain_list = [
        re.search('^if __name__ == ["\']__main__["\']', nonfunc) is not None
        for nonfunc in nonfunc_list
    ]
    mainblock_list = ut.filter_items(nonfunc_list, ismain_list)
    nonfunc_list = ut.filterfalse_items(nonfunc_list, ismain_list)
    newtext_list = []
    for nonfunc in nonfunc_list:
        newtext_list.append(nonfunc)
        newtext_list.append('\n')
    for funcblock in ut.sortedby(funcblock_list, funcname_list):
        newtext_list.append(funcblock)
        newtext_list.append('\n')
    for mainblock in mainblock_list:
        newtext_list.append(mainblock)
    newtext = '\n'.join(newtext_list)
    print('newtext = %s' % (newtext,))
    print('len(newtext) = %r' % (len(newtext),))
    print('len(text) = %r' % (len(text),))
    # Keep a backup of the original before overwriting
    backup_fpath = ut.augpath(fpath, augext='.bak', augdir='_backup', ensure=True)
    ut.write_to(backup_fpath, text)
    ut.write_to(fpath, newtext)
def sort_module_functions():
    """Alphabetize the function blocks of ``manual_annot_funcs.py`` in place,
    backing up the original file first.

    NOTE(review): duplicate of an earlier ``sort_module_functions`` in this
    file (formatting differences only).
    """
    from os.path import dirname, join
    import utool as ut
    import ibeis.control
    import re
    #import re
    #regex = r'[^@]*\ndef'
    modfpath = dirname(ibeis.control.__file__)
    fpath = join(modfpath, 'manual_annot_funcs.py')
    #fpath = join(modfpath, 'manual_dependant_funcs.py')
    #fpath = join(modfpath, 'manual_lblannot_funcs.py')
    #fpath = join(modfpath, 'manual_name_species_funcs.py')
    text = ut.read_from(fpath, verbose=False)
    lines = text.splitlines()
    # NOTE(review): ``indent_list``/``isblank_list``/``isdec_list``/``tmp``
    # are computed but never used afterwards -- dead code kept for parity.
    indent_list = [ut.get_indentation(line) for line in lines]
    isfunc_list = [line.startswith('def ') for line in lines]
    isblank_list = [len(line.strip(' ')) == 0 for line in lines]
    isdec_list = [line.startswith('@') for line in lines]
    tmp = [
        'def' if isfunc else indent
        for isfunc, indent in zip(isfunc_list, indent_list)
    ]
    tmp = ['b' if isblank else t for isblank, t in zip(isblank_list, tmp)]
    tmp = ['@' if isdec else t for isdec, t in zip(isdec_list, tmp)]
    #print('\n'.join([str((t, count + 1)) for (count, t) in enumerate(tmp)]))
    # Top-level blocks are separated by two blank lines (PEP8 spacing)
    block_list = re.split('\n\n\n', text, flags=re.MULTILINE)
    #for block in block_list:
    #    print('#====')
    #    print(block)
    isfunc_list = [
        re.search('^def ', block, re.MULTILINE) is not None for block in block_list
    ]
    whole_varname = ut.whole_word(ut.REGEX_VARNAME)
    funcname_regex = r'def\s+' + ut.named_field('funcname', whole_varname)

    def findfuncname(block):
        # Extract the name of the first function defined in a block
        match = re.search(funcname_regex, block)
        return match.group('funcname')

    funcnameblock_list = [
        findfuncname(block) if isfunc else None
        for isfunc, block in zip(isfunc_list, block_list)
    ]
    funcblock_list = ut.filter_items(block_list, isfunc_list)
    funcname_list = ut.filter_items(funcnameblock_list, isfunc_list)
    nonfunc_list = ut.filterfalse_items(block_list, isfunc_list)
    # NOTE(review): duplicate of the previous statement -- redundant but harmless
    nonfunc_list = ut.filterfalse_items(block_list, isfunc_list)
    # Separate out the ``if __name__ == '__main__'`` block so it stays last
    ismain_list = [
        re.search('^if __name__ == ["\']__main__["\']', nonfunc) is not None
        for nonfunc in nonfunc_list
    ]
    mainblock_list = ut.filter_items(nonfunc_list, ismain_list)
    nonfunc_list = ut.filterfalse_items(nonfunc_list, ismain_list)
    newtext_list = []
    for nonfunc in nonfunc_list:
        newtext_list.append(nonfunc)
        newtext_list.append('\n')
    #funcname_list
    for funcblock in ut.sortedby(funcblock_list, funcname_list):
        newtext_list.append(funcblock)
        newtext_list.append('\n')
    for mainblock in mainblock_list:
        newtext_list.append(mainblock)
    newtext = '\n'.join(newtext_list)
    print('newtext = %s' % (newtext, ))
    print('len(newtext) = %r' % (len(newtext), ))
    print('len(text) = %r' % (len(text), ))
    # Keep a backup of the original before overwriting
    backup_fpath = ut.augpath(fpath, augext='.bak', augdir='_backup', ensure=True)
    ut.write_to(backup_fpath, text)
    ut.write_to(fpath, newtext)
def compute_or_read_chip_images(ibs, cid_list, ensure=True, config2_=None):
    """Reads chips and tries to compute them if they do not exist

    Args:
        ibs (IBEISController):
        cid_list (list):
        ensure (bool):

    Returns:
        chip_list

    CommandLine:
        python -m ibeis.algo.preproc.preproc_chip --test-compute_or_read_chip_images

    Example:
        >>> # SLOW_DOCTEST
        >>> from ibeis.algo.preproc.preproc_chip import *  # NOQA
        >>> from ibeis.algo.preproc import preproc_chip
        >>> import numpy as np
        >>> ibs, aid_list = testdata_ibeis()
        >>> cid_list = ibs.get_annot_chip_rowids(aid_list, ensure=True)
        >>> chip_list = compute_or_read_chip_images(ibs, cid_list)
        >>> result = np.array(list(map(np.shape, chip_list))).sum(0).tolist()
        >>> print(result)
        [1434, 2274, 12]

    Example:
        >>> # SLOW_DOCTEST
        >>> from ibeis.algo.preproc.preproc_chip import *  # NOQA
        >>> import numpy as np
        >>> ibs, aid_list = testdata_ibeis()
        >>> cid_list = ibs.get_annot_chip_rowids(aid_list, ensure=True)
        >>> # Do a bad thing. Remove from disk without removing from sql
        >>> on_delete(ibs, cid_list)
        >>> # Now compute_or_read_chip_images should catch the bad thing
        >>> # we did and correct for it.
        >>> chip_list = compute_or_read_chip_images(ibs, cid_list)
        >>> result = np.array(list(map(np.shape, chip_list))).sum(0).tolist()
        >>> print(result)
        [1434, 2274, 12]
    """
    cfpath_list = ibs.get_chip_fpath(cid_list)
    try:
        if ensure:
            # All chip rowids must be present when ensure is requested
            try:
                ut.assert_all_not_None(cid_list, "cid_list")
            except AssertionError as ex:
                ut.printex(ex, key_list=["cid_list"])
                raise
            else:
                chip_list = [vt.imread(cfpath) for cfpath in cfpath_list]
        else:
            # Best-effort: unknown chips read as None
            chip_list = [None if cfpath is None else vt.imread(cfpath)
                         for cfpath in cfpath_list]
    except IOError as ex:
        # A chip file vanished from disk: purge the stale sql rows,
        # recompute the chips, and retry the read.
        if not ut.QUIET:
            ut.printex(ex, "[preproc_chip] Handing Exception: ", iswarning=True)
        # Remove bad annotations from the sql database
        aid_list = ibs.get_chip_aids(cid_list)
        valid_list = [cid is not None for cid in cid_list]
        valid_aids = ut.compress(aid_list, valid_list)
        valid_cfpaths = ut.compress(cfpath_list, valid_list)
        # NOTE(review): ``map`` yields an iterator on Py3 -- presumably
        # ut.filterfalse_items accepts any iterable; confirm.
        invalid_aids = ut.filterfalse_items(valid_aids, map(exists, valid_cfpaths))
        ibs.delete_annot_chips(invalid_aids)
        # Try readding things
        new_cid_list = ibs.add_annot_chips(aid_list)
        cfpath_list = ibs.get_chip_fpath(new_cid_list)
        chip_list = [vt.imread(cfpath) for cfpath in cfpath_list]
    return chip_list
def turk_additional():
    """Web-turk page for reviewing additional annotation metadata (sex / age).

    Reads ``imgsetid``, ``aid``, ``previous`` and ``review`` from the request
    args, picks an unreviewed annotation when none is specified, and renders
    the 'turk additional' template with the annotation's current sex/age
    values mapped to the form's choice indexes.

    Returns:
        rendered flask template via ``appf.template``

    Note:
        BUGFIX: integer comparisons previously used ``is`` (e.g.
        ``value_age_min is -1``), which only works because CPython caches
        small ints; replaced with ``==``. ``is None`` checks are kept.
    """
    ibs = current_app.ibs
    imgsetid = request.args.get('imgsetid', '')
    imgsetid = None if imgsetid == 'None' or imgsetid == '' else int(imgsetid)

    gid_list = ibs.get_valid_gids(imgsetid=imgsetid)
    aid_list = ut.flatten(ibs.get_image_aids(gid_list))
    nid_list = ibs.get_annot_nids(aid_list)
    reviewed_list = appf.imageset_annot_additional_processed(ibs, aid_list, nid_list)
    try:
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(aid_list), )
    except ZeroDivisionError:
        # Empty image set: report zero progress rather than crash
        progress = '0.00'
    imagesettext = None if imgsetid is None else ibs.get_imageset_text(imgsetid)
    aid = request.args.get('aid', '')
    if len(aid) > 0:
        aid = int(aid)
    else:
        # No aid requested: pick a random still-unreviewed annotation
        aid_list_ = ut.filterfalse_items(aid_list, reviewed_list)
        if len(aid_list_) == 0:
            aid = None
        else:
            # aid = aid_list_[0]
            aid = random.choice(aid_list_)
    previous = request.args.get('previous', None)
    value_sex = ibs.get_annot_sex([aid])[0]
    if value_sex >= 0:
        # Shift db sex code into the template's select-option index space
        value_sex += 2
    else:
        value_sex = None
    value_age_min, value_age_max = ibs.get_annot_age_months_est([aid])[0]
    value_age = None
    # Map the (min, max) month estimate onto the form's age buckets.
    # Use == for integers; `is` identity checks are only safe for None.
    if (value_age_min == -1 or value_age_min is None) and (value_age_max == -1 or value_age_max is None):
        value_age = 1
    if (value_age_min == 0 or value_age_min is None) and value_age_max == 2:
        value_age = 2
    elif value_age_min == 3 and value_age_max == 5:
        value_age = 3
    elif value_age_min == 6 and value_age_max == 11:
        value_age = 4
    elif value_age_min == 12 and value_age_max == 23:
        value_age = 5
    elif value_age_min == 24 and value_age_max == 35:
        value_age = 6
    elif value_age_min == 36 and (value_age_max > 36 or value_age_max is None):
        value_age = 7

    review = 'review' in request.args.keys()
    finished = aid is None
    display_instructions = request.cookies.get('additional_instructions_seen', 1) == 0
    if not finished:
        gid = ibs.get_annot_gids(aid)
        gpath = ibs.get_annot_chip_fpath(aid)
        image = vt.imread(gpath)
        image_src = appf.embed_image_html(image)
    else:
        gid = None
        gpath = None
        image_src = None
    name_aid_list = None
    nid = ibs.get_annot_name_rowids(aid)
    if nid is not None:
        # Show all annotations sharing this name, best quality first
        name_aid_list = ibs.get_name_aids(nid)
        quality_list = ibs.get_annot_qualities(name_aid_list)
        quality_text_list = ibs.get_annot_quality_texts(name_aid_list)
        yaw_text_list = ibs.get_annot_yaw_texts(name_aid_list)
        name_aid_combined_list = list(zip(
            name_aid_list,
            quality_list,
            quality_text_list,
            yaw_text_list,
        ))
        name_aid_combined_list.sort(key=lambda t: t[1], reverse=True)
    else:
        name_aid_combined_list = []
    return appf.template('turk', 'additional',
                         imgsetid=imgsetid,
                         gid=gid,
                         aid=aid,
                         value_sex=value_sex,
                         value_age=value_age,
                         image_path=gpath,
                         name_aid_combined_list=name_aid_combined_list,
                         image_src=image_src,
                         previous=previous,
                         imagesettext=imagesettext,
                         progress=progress,
                         finished=finished,
                         display_instructions=display_instructions,
                         review=review)
def setup_incremental_test(ibs_gt, clear_names=True, aid_order='shuffle'):
    r"""
    Builds an empty test database seeded from a known ground-truth database.

    Args:
        ibs_gt (IBEISController): known ground-truth database
        clear_names (bool): when True, strip name/exemplar info from the test db
        aid_order (str): one of 'shuffle', 'stagger', 'same' (overridable with
            ``--aid-order`` on the command line)

    Returns:
        tuple: (ibs2, aid_list1, aid1_to_aid2)

    CommandLine:
        python -m ibeis.algo.hots.automated_helpers --test-setup_incremental_test:0
        python dev.py -t custom --cfg codename:vsone_unnorm --db PZ_MTEST --allgt --vf --va
        python dev.py -t custom --cfg codename:vsone_unnorm --db PZ_MTEST --allgt --vf --va --index 0 4 8 --verbose

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.automated_helpers import *  # NOQA
        >>> import ibeis  # NOQA
        >>> ibs_gt = ibeis.opendb('PZ_MTEST')
        >>> ibs2, aid_list1, aid1_to_aid2 = setup_incremental_test(ibs_gt)

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.automated_helpers import *  # NOQA
        >>> import ibeis  # NOQA
        >>> ibs_gt = ibeis.opendb('GZ_ALL')
        >>> ibs2, aid_list1, aid1_to_aid2 = setup_incremental_test(ibs_gt)
    """
    print('\n\n---- SETUP INCREMENTAL TEST ---\n\n')
    # Take a known dataase
    # Create an empty database to test in
    ONLY_GT = True
    if ONLY_GT:
        # use only annotations that will have matches in test
        aid_list1_ = ibs_gt.get_aids_with_groundtruth()
    else:
        # use every annotation in test
        aid_list1_ = ibs_gt.get_valid_aids()

    if ut.get_argflag('--gzdev'):
        # Use a custom selection of gzall
        from ibeis.algo.hots import devcases
        assert ibs_gt.get_dbname() == 'GZ_ALL', 'not gzall'
        vuuid_list, ignore_vuuids = devcases.get_gzall_small_test()
        # TODO; include all names of these annots too
        aid_list = ibs_gt.get_annot_aids_from_visual_uuid(vuuid_list)
        ignore_aid_list = ibs_gt.get_annot_aids_from_visual_uuid(ignore_vuuids)
        ignore_nid_list = ibs_gt.get_annot_nids(ignore_aid_list)
        ut.assert_all_not_None(aid_list)
        # Pull in all groundtruth partners of the selected annots
        other_aids = ut.flatten(ibs_gt.get_annot_groundtruth(aid_list))
        aid_list.extend(other_aids)
        aid_list = sorted(set(aid_list))
        nid_list = ibs_gt.get_annot_nids(aid_list)
        isinvalid_list = [nid in ignore_nid_list for nid in nid_list]
        print('Filtering %r annots specified to ignore' % (sum(isinvalid_list), ))
        aid_list = ut.filterfalse_items(aid_list, isinvalid_list)
        #ut.embed()
        aid_list1_ = aid_list

    #ut.embed()

    # Add aids in a random order
    VALID_ORDERS = ['shuffle', 'stagger', 'same']
    #AID_ORDER = 'shuffle'
    aid_order = ut.get_argval('--aid-order', default=aid_order)
    # BUGFIX: ``VALID_ORDERS.index(aid_order) > -1`` was vacuous — .index()
    # raises ValueError on a miss and never returns a negative value, so the
    # assertion could never fire. A membership test expresses the intent.
    assert aid_order in VALID_ORDERS, 'invalid aid_order=%r' % (aid_order,)

    if aid_order == 'shuffle':
        aid_list1 = ut.deterministic_shuffle(aid_list1_[:])
    elif aid_order == 'stagger':
        # Interleave annotations name-by-name so names accumulate gradually
        from six.moves import zip_longest, filter
        aid_groups, unique_nid_list = ibs_gt.group_annots_by_name(aid_list1_)

        def stagger_group(list_):
            # Round-robin flatten, dropping the padding Nones from zip_longest
            return ut.filter_Nones(ut.iflatten(zip_longest(*list_)))
        aid_multiton_group = list(
            filter(lambda aids: len(aids) > 1, aid_groups))
        aid_list1 = stagger_group(aid_multiton_group)
    elif aid_order == 'same':
        aid_list1 = aid_list1_

    # If reset is true the test database is started completely from scratch
    reset = ut.get_argflag('--reset')

    aid1_to_aid2 = {}  # annotation mapping

    ibs2 = make_incremental_test_database(ibs_gt, aid_list1, reset)

    # Preadd all annotatinos to the test database
    aids_chunk1 = aid_list1
    aid_list2 = add_annot_chunk(ibs_gt, ibs2, aids_chunk1, aid1_to_aid2)

    #ut.embed()

    # Assert annotation visual uuids are in agreement
    if ut.DEBUG2:
        annot_testdb_consistency_checks(ibs_gt, ibs2, aid_list1, aid_list2)

    # Remove names and exemplar information from test database
    if clear_names:
        ensure_testdb_clean_data(ibs_gt, ibs2, aid_list1, aid_list2)

    # Preprocess features before testing
    ibs2.ensure_annotation_data(aid_list2, featweights=True)

    return ibs2, aid_list1, aid1_to_aid2
def compute_or_read_chip_images(ibs, cid_list, ensure=True, config2_=None):
    """Read chip images from disk, repairing and recomputing missing files.

    Args:
        ibs (IBEISController): controller providing chip paths and sql tables
        cid_list (list): chip rowids to read
        ensure (bool): require every rowid to be valid; when False a None
            rowid yields a None chip
        config2_: unused here; kept for call-signature compatibility

    Returns:
        list: chip_list

    CommandLine:
        python -m ibeis.algo.preproc.preproc_chip --test-compute_or_read_chip_images

    Example:
        >>> # SLOW_DOCTEST
        >>> from ibeis.algo.preproc.preproc_chip import *  # NOQA
        >>> from ibeis.algo.preproc import preproc_chip
        >>> import numpy as np
        >>> ibs, aid_list = testdata_ibeis()
        >>> cid_list = ibs.get_annot_chip_rowids(aid_list, ensure=True)
        >>> chip_list = compute_or_read_chip_images(ibs, cid_list)
        >>> result = np.array(list(map(np.shape, chip_list))).sum(0).tolist()
        >>> print(result)
        [1434, 2274, 12]

    Example:
        >>> # SLOW_DOCTEST
        >>> from ibeis.algo.preproc.preproc_chip import *  # NOQA
        >>> import numpy as np
        >>> ibs, aid_list = testdata_ibeis()
        >>> cid_list = ibs.get_annot_chip_rowids(aid_list, ensure=True)
        >>> # Do a bad thing. Remove from disk without removing from sql
        >>> on_delete(ibs, cid_list)
        >>> # Now compute_or_read_chip_images should catch the bad thing
        >>> # we did and correct for it.
        >>> chip_list = compute_or_read_chip_images(ibs, cid_list)
        >>> result = np.array(list(map(np.shape, chip_list))).sum(0).tolist()
        >>> print(result)
        [1434, 2274, 12]
    """
    cfpath_list = ibs.get_chip_fpath(cid_list)
    try:
        if not ensure:
            # Lenient mode: a None rowid maps to a None chip
            chip_list = [
                None if fpath is None else vt.imread(fpath)
                for fpath in cfpath_list
            ]
        else:
            try:
                ut.assert_all_not_None(cid_list, 'cid_list')
            except AssertionError as ex:
                # Report which rowids were bad, then let the error propagate
                ut.printex(ex, key_list=['cid_list'])
                raise
            chip_list = [vt.imread(fpath) for fpath in cfpath_list]
    except IOError as ex:
        # A chip file vanished from disk while its rowid stayed in sql.
        if not ut.QUIET:
            ut.printex(ex, '[preproc_chip] Handing Exception: ', iswarning=True)
        # Purge the stale sql rows whose backing files no longer exist
        aid_list = ibs.get_chip_aids(cid_list)
        has_rowid = [cid is not None for cid in cid_list]
        aids_with_rowid = ut.compress(aid_list, has_rowid)
        fpaths_with_rowid = ut.compress(cfpath_list, has_rowid)
        missing_aids = ut.filterfalse_items(
            aids_with_rowid, map(exists, fpaths_with_rowid))
        ibs.delete_annot_chips(missing_aids)
        # Recompute chips for every annot and read them back
        new_cid_list = ibs.add_annot_chips(aid_list)
        cfpath_list = ibs.get_chip_fpath(new_cid_list)
        chip_list = [vt.imread(fpath) for fpath in cfpath_list]
    return chip_list
def get_test_qaids(ibs, default_qaids=None, return_annot_info=False, aidcfg=None):
    """ Gets test annot_rowids based on command line arguments

    DEPRICATE

    Args:
        ibs (IBEISController):  ibeis controller object
        default_qaids (None): if list then used only if no other aids are available
            (default = [1]) as a string it mimics the command line

    Returns:
        list: available_qaids

    CommandLine:
        python -m ibeis.init.main_helpers --test-get_test_qaids
        python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_Master0
        python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_Master0 --qaid 1
        python -m ibeis.init.main_helpers --test-get_test_qaids --allgt --db PZ_MTEST
        python -m ibeis.init.main_helpers --test-get_test_qaids --qaid 4 5 8 --verbmhelp
        python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_MTEST
        python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_MTEST --qaid 2 --verbmhelp
        python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_MTEST --qaid 2
        python -m ibeis.init.main_helpers --test-get_test_qaids --controlled --db PZ_Master0 --qindex 0:10 --verbmhelp
        python -m ibeis.init.main_helpers --exec-get_test_qaids --controlled --db PZ_Master0 --exec-mode
        python -m ibeis.init.main_helpers --exec-get_test_qaids --db testdb1 --allgt --qindex 0:256

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.init.main_helpers import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='testdb1')
        >>> default_qaids = None
        >>> available_qaids = get_test_qaids(ibs, default_qaids)
        >>> ibeis.other.dbinfo.get_dbinfo(ibs, aid_list=available_qaids, with_contrib=False, short=True)
        >>> result = 'available_qaids = ' + ut.obj_str(available_qaids, truncate=True, nl=False)
        >>> print('len(available_qaids) = %d' % len(available_qaids))
        >>> print(result)
        available_qaids = [1]
    """
    # Records which selection/filter steps were applied, for the caller
    qaid_request_info = {}
    if VERB_MAIN_HELPERS:
        print('[get_test_qaids] + --- GET_TEST_QAIDS ---')

    # Old version of this function
    # NOTE(review): this second banner duplicates the one above — kept as-is
    # since output text may be relied on by log scrapers; confirm before removing.
    if VERB_MAIN_HELPERS:
        print('[get_test_qaids] + --- GET_TEST_QAIDS ---')
        print('[get_test_qaids] * default_qaids = %s' % (ut.obj_str(default_qaids, truncate=True, nl=False)))

    valid_aids = ibs.get_valid_aids()

    if len(valid_aids) == 0:
        print('[get_test_qaids] WARNING no annotations available')

    # ---- INCLUDING STEP
    if VERB_MAIN_HELPERS:
        print('[get_test_qaids] * include step')

    available_qaids = []

    #ut.get_argflag(('--all-cases', '--all'))
    #ut.get_argflag(('--all-gt-cases', '--allgt'))
    #ut.get_argflag(('--all-hard-cases', '--allhard'))
    #ut.get_argflag(('--qaid', '--qaids'))
    #ut.get_argflag('--controlled') or ut.get_argflag('--controlled_qaids')
    #not ut.get_argflag('--junk')

    # Each flag below enables one inclusion source; a string default_qaids
    # ('all' / 'gt') mimics the corresponding command-line flag.
    ALL_CASES = params.args.all_cases or default_qaids == 'all'
    GT_CASES = params.args.all_gt_cases or default_qaids == 'gt'
    HARD_CASES = params.args.all_hard_cases or ut.get_argflag(('--all-hard-cases', '--allhard', '--hard'))
    NO_JUNK = not ut.get_argflag('--junk')
    CONTROLLED_CASES = ut.get_argflag('--controlled') or ut.get_argflag('--controlled_qaids')
    NO_REVIEWED = ut.get_argflag('--unreviewed')
    species = ut.get_argval('--species')
    #QAID = params.args.qaid
    QAID = ut.get_argval('--qaid', type_='fuzzy_subset', default=None)
    QINDEX = params.args.qindex
    QSHUFFLE = ut.get_argval('--qshuffle')

    if QAID is not None:
        if VERB_MAIN_HELPERS:
            print('[get_test_qaids] * Including cmdline specified qaids')
        try:
            args_qaid = ensure_flatlistlike(QAID)
        except Exception:
            # Fall back to the raw value if it cannot be flattened
            args_qaid = QAID
        available_qaids.extend(args_qaid)
        qaid_request_info['custom_commandline'] = args_qaid

    if ALL_CASES:
        if VERB_MAIN_HELPERS:
            print('[get_test_qaids] * Including all qaids')
        available_qaids.extend(valid_aids)
        qaid_request_info['all_cases'] = True

    if HARD_CASES:
        if VERB_MAIN_HELPERS:
            print('[get_test_qaids] * Including hard qaids')
        is_hard_list = ibs.get_annot_is_hard(valid_aids)
        hard_aids = ut.compress(valid_aids, is_hard_list)
        available_qaids.extend(hard_aids)
        qaid_request_info['hard_cases'] = True

    if GT_CASES:
        if VERB_MAIN_HELPERS:
            print('[get_test_qaids] * Including groundtruth qaids')
        has_gt_list = ibs.get_annot_has_groundtruth(valid_aids)
        hasgt_aids = ut.compress(valid_aids, has_gt_list)
        print('[get_test_qaids] Adding all %d/%d ground-truthed test cases' % (len(hasgt_aids), len(valid_aids)))
        available_qaids.extend(hasgt_aids)
        qaid_request_info['gt_cases'] = True

    if CONTROLLED_CASES:
        if VERB_MAIN_HELPERS:
            print('[get_test_qaids] * Including controlled qaids')
        from ibeis.other import ibsfuncs
        # Override all other gts with controlled
        controlled_qaids = ibsfuncs.get_two_annots_per_name_and_singletons(ibs, onlygt=True)
        available_qaids.extend(controlled_qaids)
        qaid_request_info['controlled'] = True
    else:
        qaid_request_info['controlled'] = False

    # ---- CHECK_DEFAULTS QUERY
    if VERB_MAIN_HELPERS:
        print('[get_test_qaids] * len(available_qaids) = %r' % (len(available_qaids)))

    if len(available_qaids) == 0:
        # Nothing was requested explicitly; fall back to defaults
        print('[get_test_qaids] * ... defaulting, no available qaids on command line.')
        if default_qaids is None:
            default_qaids = valid_aids[0:1]
            qaid_request_info['default_one'] = True
        elif isinstance(default_qaids, six.string_types):
            if default_qaids == 'gt' or default_qaids == 'allgt':
                default_qaids = ibs.get_valid_aids(hasgt=True)
                qaid_request_info['default_gt'] = True
        available_qaids = default_qaids
    else:
        if VERB_MAIN_HELPERS:
            print('[get_test_qaids] * ... not defaulting')

    # Inclusion sources may overlap; dedupe while preserving order
    available_qaids = ut.unique_ordered(available_qaids)

    # ---- EXCLUSION STEP
    if VERB_MAIN_HELPERS:
        print('[get_test_qaids] * len(available_qaids) = %r' % (len(available_qaids)))
        print('[get_test_qaids] * exclude step')

    if NO_JUNK:
        if VERB_MAIN_HELPERS:
            print('[get_test_qaids] * Filtering junk')
        available_qaids = ibs.filter_junk_annotations(available_qaids)
        qaid_request_info['has_junk'] = False

    if NO_REVIEWED:
        if VERB_MAIN_HELPERS:
            print('[get_test_qaids] * Filtering unreviewed')
        isreviewed_list = ibs.get_annot_has_reviewed_matching_aids(available_qaids)
        available_qaids = ut.filterfalse_items(available_qaids, isreviewed_list)
        qaid_request_info['has_unreviewed'] = False

    if species is not None:
        if species == 'primary':
            if VERB_MAIN_HELPERS:
                print('[get_test_qaids] * Finiding primary species')
            #species = ibs.get_primary_database_species(available_qaids)
            species = ibs.get_primary_database_species()
            qaid_request_info['primary_species'] = True

        if VERB_MAIN_HELPERS:
            print('[get_test_qaids] * Filtering to species=%r' % (species,))
        isvalid_list = np.array(ibs.get_annot_species(available_qaids)) == species
        available_qaids = ut.compress(available_qaids, isvalid_list)
        qaid_request_info['species_filter'] = species

    if VERB_MAIN_HELPERS:
        print('[get_test_qaids] * len(available_qaids) = %r' % (len(available_qaids)))
        print('[get_test_qaids] * subindex step')

    # ---- INDEX SUBSET
    #ut.get_argval('--qshuffle')
    if QSHUFFLE:
        # Determenistic shuffling
        available_qaids = ut.take(available_qaids, ut.random_indexes(len(available_qaids), seed=42))
        qaid_request_info['shuffled'] = True

    # Sample a large pool of chosen query qindexes
    if QINDEX is not None:
        # FIXME: should use a slice of the list or a sublist
        qindexes = ensure_flatlistlike(QINDEX)
        _test_qaids = [available_qaids[qx] for qx in qindexes if qx < len(available_qaids)]
        print('[get_test_qaids] Chose subset of size %d/%d' % (len(_test_qaids), len(available_qaids)))
        available_qaids = _test_qaids
        qaid_request_info['subset'] = qindexes

    if VERB_MAIN_HELPERS:
        print('[get_test_qaids] * len(available_qaids) = %r' % (len(available_qaids)))
        print('[get_test_qaids] L ___ GET_TEST_QAIDS ___')
    if return_annot_info:
        return available_qaids, qaid_request_info
    else:
        return available_qaids
def assign_parts_one_image(ibs, aid_list, cutoff_score=0.5):
    r"""
    Main assigner method; makes assignments on all_aids based on assigner scores.

    Args:
        ibs (IBEISController): IBEIS / WBIA controller object
        aid_list (int): aids in question
        cutoff_score: the threshold for the aids' assigner scores, under
            which no assignments are made

    Returns:
        tuple of two lists: all_assignments (a list of tuples, each tuple
            grouping aids assigned to a single animal), and
            all_unassigned_aids, which are the aids that did not meet the
            cutoff_score or whose body/part

    CommandLine:
        python -m wbia.algo.detect.assigner _are_part_annots

    Example:
        >>> # ENABLE_DOCTEST
        >>> import utool as ut
        >>> from wbia.algo.detect.assigner import *
        >>> from wbia.algo.detect.train_assigner import *
        >>> ibs = assigner_testdb_ibs()
        >>> gid = 1
        >>> aids = ibs.get_image_aids(gid)
        >>> result = ibs.assign_parts_one_image(aids)
        >>> assigned_pairs = result[0]
        >>> unassigned_aids = result[1]
        >>> assigned_aids = [item for pair in assigned_pairs for item in pair]
        >>> # no overlap between assigned and unassigned aids
        >>> assert (set(assigned_aids) & set(unassigned_aids) == set({}))
        >>> # all aids are either assigned or unassigned
        >>> assert (set(assigned_aids) | set(unassigned_aids) == set(aids))
        >>> ([(3, 1)], [2, 4])
    """
    # Annots whose (part-stripped) species has no assigner config are
    # routed straight to the unassigned output.
    species_texts = ibs.get_annot_species(aid_list)
    base_species = [text.split('+')[0] for text in species_texts]
    supported_flags = [
        base in SPECIES_CONFIG_MAP.keys() for base in base_species
    ]
    unassigned_aids_noconfig = ut.filterfalse_items(aid_list, supported_flags)
    aid_list = ut.compress(aid_list, supported_flags)

    # Split the remaining annots into part annots and body annots.
    part_flags = _are_part_annots(ibs, aid_list)
    part_aids = ut.compress(aid_list, part_flags)
    body_aids = ut.compress(aid_list, [not flag for flag in part_flags])

    # Sanity check: every annot must belong to at most one image.
    gids = ibs.get_annot_gids(list(set(part_aids)) + list(set(body_aids)))
    num_images = len(set(gids))
    assert num_images <= 1, "assign_parts_one_image called on multiple images' aids"

    # parallel lists representing all possible part/body pairs
    all_pairs_parallel = _all_pairs_parallel(part_aids, body_aids)
    pair_parts, pair_bodies = all_pairs_parallel

    if len(pair_parts) == 0 or len(pair_bodies) == 0:
        # No candidate pairs at all: nothing can be assigned.
        good_pairs = []
        unassigned_aids = aid_list
    else:
        assigner_features = ibs.depc_annot.get(
            'assigner_viewpoint_features', all_pairs_parallel)
        # send all aids to this call just so it can find the right classifier model
        assigner_classifier = load_assigner_classifier(ibs, body_aids + part_aids)
        probs = assigner_classifier.predict_proba(assigner_features)
        # predict_proba yields [P_false, P_true] rows summing to 1;
        # keep only the true-class probability for each pair.
        assigner_scores = [prob[1] for prob in probs]
        good_pairs, unassigned_aids = _make_assignments(
            pair_parts, pair_bodies, assigner_scores, cutoff_score)

    return good_pairs, unassigned_aids_noconfig + unassigned_aids
def add_species(ibs, species_nice_list, species_text_list=None,
                species_code_list=None, species_uuid_list=None,
                species_note_list=None):
    r"""
    Adds a list of species.

    Returns:
        list: speciesid_list - species rowids

    RESTful:
        Method: POST
        URL:    /api/species/

    CommandLine:
        python -m ibeis.control.manual_species_funcs --test-add_species

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.control.manual_species_funcs import *  # NOQA
        >>> import ibeis
        >>> import utool as ut
        >>> ibs = ibeis.opendb('testdb1')
        >>> species_text_list = [
        ...     'jaguar', 'zebra_plains', 'zebra_plains', '____', 'TYPO',
        ...     '____', 'zebra_grevys', 'bear_polar']
        >>> species_rowid_list = ibs.add_species(species_text_list)
        >>> print(ut.list_str(list(zip(species_text_list, species_rowid_list))))
        >>> ibs.print_species_table()
        >>> species_text = ibs.get_species_texts(species_rowid_list)
        >>> # Ensure we leave testdb1 in a clean state
        >>> ibs.delete_species(ibs.get_species_rowids_from_text(['jaguar', 'TYPO']))
        >>> all_species_rowids = ibs._get_all_species_rowids()
        >>> result = ut.list_str(species_text, nl=False) + '\n'
        >>> result += ut.list_str(all_species_rowids, nl=False) + '\n'
        >>> result += ut.list_str(ibs.get_species_texts(all_species_rowids), nl=False)
        >>> print(result)
        ['jaguar', 'zebra_plains', 'zebra_plains', '____', 'typo', '____', 'zebra_grevys', 'bear_polar']
        [1, 2, 3]
        ['zebra_plains', 'zebra_grevys', 'bear_polar']
        [u'jaguar', u'zebra_plains', u'zebra_plains', '____', '____', '____', u'zebra_grevys', u'bear_polar']
        [8, 9, 10]
        [u'zebra_plains', u'zebra_grevys', u'bear_polar']
    """
    # Strip all spaces.
    # BUGFIX: a None entry used to crash on `.strip()` even though the
    # sanitize step below explicitly handles `species_nice is None`; map
    # None to the unknown species first (consistent with the newer
    # skip_cleaning variant of add_species).
    species_nice_list = [
        const.UNKNOWN if _ is None else _.strip() for _ in species_nice_list
    ]
    if species_text_list is None:
        species_text_list = _convert_species_nice_to_text(species_nice_list)
    if species_code_list is None:
        species_code_list = _convert_species_nice_to_code(species_nice_list)
    if species_note_list is None:
        species_note_list = [''] * len(species_text_list)
    if species_uuid_list is None:
        species_uuid_list = [uuid.uuid4() for _ in range(len(species_text_list))]
    # Sanatize to remove invalid names
    flag_list = np.array([
        species_nice is None or
        species_nice.strip() in ['_', const.UNKNOWN, 'none', 'None', '']
        for species_nice in species_nice_list
    ])
    # Keep only rows with a real species name; flagged rows are mapped to
    # the unknown-species rowid after the db insert.
    species_uuid_list = ut.filterfalse_items(species_uuid_list, flag_list)
    species_nice_list = ut.filterfalse_items(species_nice_list, flag_list)
    species_text_list = ut.filterfalse_items(species_text_list, flag_list)
    species_code_list = ut.filterfalse_items(species_code_list, flag_list)
    species_note_list = ut.filterfalse_items(species_note_list, flag_list)

    superkey_paramx = (1,)
    # TODO Allow for better ensure=False without using partial
    # Just autogenerate these functions
    get_rowid_from_superkey = functools.partial(ibs.get_species_rowids_from_text, ensure=False)
    colnames = [SPECIES_UUID, SPECIES_TEXT, SPECIES_NICE, SPECIES_CODE, SPECIES_NOTE]
    params_iter = list(zip(species_uuid_list, species_text_list, species_nice_list,
                           species_code_list, species_note_list))
    species_rowid_list = ibs.db.add_cleanly(const.SPECIES_TABLE, colnames, params_iter,
                                            get_rowid_from_superkey, superkey_paramx)
    # Scatter the inserted rowids back into the original positions; invalid
    # entries become the unknown-species rowid.
    temp_list = np.array([-1] * len(flag_list))
    temp_list[flag_list == False] = np.array(species_rowid_list)  # NOQA
    temp_list[flag_list == True] = const.UNKNOWN_SPECIES_ROWID  # NOQA
    species_rowid_list = list(temp_list)
    assert -1 not in species_rowid_list
    # Clean species
    ibs._clean_species()
    return species_rowid_list
def add_species(ibs, species_nice_list, species_text_list=None, species_code_list=None, species_uuid_list=None, species_note_list=None, skip_cleaning=False): r""" Adds a list of species. Returns: list: speciesid_list - species rowids RESTful: Method: POST URL: /api/species/ CommandLine: python -m ibeis.control.manual_species_funcs --test-add_species Example: >>> # ENABLE_DOCTEST >>> from ibeis.control.manual_species_funcs import * # NOQA >>> import ibeis >>> import utool as ut >>> ibs = ibeis.opendb('testdb1') >>> species_text_list = [ ... 'jaguar', 'zebra_plains', 'zebra_plains', '____', 'TYPO', ... '____', 'zebra_grevys', 'bear_polar+head'] >>> species_rowid_list = ibs.add_species(species_text_list) >>> print(ut.repr2(list(zip(species_text_list, species_rowid_list)))) >>> ibs.print_species_table() >>> species_text = ibs.get_species_texts(species_rowid_list) >>> # Ensure we leave testdb1 in a clean state >>> ibs.delete_species(ibs.get_species_rowids_from_text(['jaguar', 'TYPO'])) >>> all_species_rowids = ibs._get_all_species_rowids() >>> result = ut.repr2(species_text, nl=False) + '\n' >>> result += ut.repr2(all_species_rowids, nl=False) + '\n' >>> result += ut.repr2(ibs.get_species_texts(all_species_rowids), nl=False) + '\n' >>> result += ut.repr2(ibs.get_species_codes(all_species_rowids), nl=False) >>> print(result) ['jaguar', 'zebra_plains', 'zebra_plains', '____', 'typo', '____', 'zebra_grevys', 'bear_polar+head'] [1, 2, 3, 6] ['zebra_plains', 'zebra_grevys', 'bear_polar', 'bear_polar+head'] ['PZ', 'GZ', 'PB', 'BP+H'] """ # Strip all spaces species_nice_list = [ const.UNKNOWN if _ is None else _.strip() for _ in species_nice_list ] if species_text_list is None: species_text_list = _convert_species_nice_to_text(species_nice_list) if species_code_list is None: species_code_list = _convert_species_nice_to_code(species_nice_list) if species_note_list is None: species_note_list = [''] * len(species_text_list) if species_uuid_list is None: species_uuid_list = 
[uuid.uuid4() for _ in range(len(species_text_list))] # Sanatize to remove invalid names flag_list = np.array([ species_nice is None or species_nice.strip() in ['_', const.UNKNOWN, 'none', 'None', ''] for species_nice in species_nice_list ]) species_uuid_list = ut.filterfalse_items(species_uuid_list, flag_list) species_nice_list = ut.filterfalse_items(species_nice_list, flag_list) species_text_list = ut.filterfalse_items(species_text_list, flag_list) species_code_list = ut.filterfalse_items(species_code_list, flag_list) species_note_list = ut.filterfalse_items(species_note_list, flag_list) superkey_paramx = (1,) # TODO Allow for better ensure=False without using partial # Just autogenerate these functions get_rowid_from_superkey = functools.partial(ibs.get_species_rowids_from_text, ensure=False) colnames = [SPECIES_UUID, SPECIES_TEXT, SPECIES_NICE, SPECIES_CODE, SPECIES_NOTE] params_iter = list(zip(species_uuid_list, species_text_list, species_nice_list, species_code_list, species_note_list)) species_rowid_list = ibs.db.add_cleanly(const.SPECIES_TABLE, colnames, params_iter, get_rowid_from_superkey, superkey_paramx) temp_list = np.array([-1] * len(flag_list)) temp_list[flag_list == False] = np.array(species_rowid_list) # NOQA temp_list[flag_list == True] = const.UNKNOWN_SPECIES_ROWID # NOQA species_rowid_list = list(temp_list) assert -1 not in species_rowid_list # Clean species if not skip_cleaning: species_mapping_dict = ibs._clean_species() if species_mapping_dict is not None: species_rowid_list = [ species_mapping_dict.get(species_rowid, species_rowid) for species_rowid in species_rowid_list ] return species_rowid_list
def turk_detection():
    """Web-turk page for reviewing image detections (bounding boxes).

    Reads ``imgsetid``, ``gid``, ``refer_aid``, ``previous`` and ``review``
    from the request args, picks a random unreviewed image when no gid is
    given, and renders the 'turk detection' template with the image, its
    annotation boxes (as percentages of image size), and a default species.

    Returns:
        rendered flask template via ``appf.template``

    Note:
        BUGFIX: an empty image set used to raise ZeroDivisionError when
        computing ``progress``; now guarded like ``turk_additional``.
    """
    ibs = current_app.ibs
    refer_aid = request.args.get('refer_aid', None)
    imgsetid = request.args.get('imgsetid', '')
    imgsetid = None if imgsetid == 'None' or imgsetid == '' else int(imgsetid)

    gid_list = ibs.get_valid_gids(imgsetid=imgsetid)
    reviewed_list = appf.imageset_image_processed(ibs, gid_list)
    try:
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(gid_list), )
    except ZeroDivisionError:
        # Empty image set: report zero progress rather than crash
        progress = '0.00'

    imagesettext = None if imgsetid is None else ibs.get_imageset_text(imgsetid)
    gid = request.args.get('gid', '')
    if len(gid) > 0:
        gid = int(gid)
    else:
        # No gid requested: pick a random still-unreviewed image
        gid_list_ = ut.filterfalse_items(gid_list, reviewed_list)
        if len(gid_list_) == 0:
            gid = None
        else:
            # gid = gid_list_[0]
            gid = random.choice(gid_list_)
    previous = request.args.get('previous', None)
    finished = gid is None
    review = 'review' in request.args.keys()
    display_instructions = request.cookies.get('detection_instructions_seen', 1) == 0
    display_species_examples = False  # request.cookies.get('detection_example_species_seen', 0) == 0
    if not finished:
        gpath = ibs.get_image_thumbpath(gid, ensure_paths=True, draw_annots=False)
        image = ibs.get_images(gid)
        image_src = appf.embed_image_html(image)
        # Get annotations
        width, height = ibs.get_image_sizes(gid)
        aid_list = ibs.get_image_aids(gid)
        annot_bbox_list = ibs.get_annot_bboxes(aid_list)
        annot_thetas_list = ibs.get_annot_thetas(aid_list)
        species_list = ibs.get_annot_species_texts(aid_list)
        # Get annotation bounding boxes
        # Boxes are expressed as percentages of the image dimensions so the
        # template can scale them to any display size.
        annotation_list = []
        for aid, annot_bbox, annot_theta, species in zip(aid_list, annot_bbox_list,
                                                         annot_thetas_list, species_list):
            temp = {}
            temp['left'] = 100.0 * (annot_bbox[0] / width)
            temp['top'] = 100.0 * (annot_bbox[1] / height)
            temp['width'] = 100.0 * (annot_bbox[2] / width)
            temp['height'] = 100.0 * (annot_bbox[3] / height)
            temp['label'] = species
            temp['id'] = aid
            temp['theta'] = float(annot_theta)
            annotation_list.append(temp)
        if len(species_list) > 0:
            species = max(set(species_list), key=species_list.count)  # Get most common species
        elif appf.default_species(ibs) is not None:
            species = appf.default_species(ibs)
        else:
            species = KEY_DEFAULTS[SPECIES_KEY]
    else:
        gpath = None
        species = None
        image_src = None
        annotation_list = []
    callback_url = '%s?imgsetid=%s' % (url_for('submit_detection'), imgsetid, )
    return appf.template('turk', 'detection',
                         imgsetid=imgsetid,
                         gid=gid,
                         refer_aid=refer_aid,
                         species=species,
                         image_path=gpath,
                         image_src=image_src,
                         previous=previous,
                         imagesettext=imagesettext,
                         progress=progress,
                         finished=finished,
                         annotation_list=annotation_list,
                         display_instructions=display_instructions,
                         display_species_examples=display_species_examples,
                         callback_url=callback_url,
                         callback_method='POST',
                         EMBEDDED_CSS=None,
                         EMBEDDED_JAVASCRIPT=None,
                         review=review)