def pie_testdb_ibs():
    """Open the PIE 'manta-test' database, creating it from the plugin's example images if needed."""
    testdb_name = 'manta-test'
    try:
        ans_ibs = wbia.opendb(testdb_name)
        aids = ans_ibs.get_valid_annots()
        assert len(aids) > 3
        return ans_ibs
    except Exception:
        print("PIE testdb does not exist; creating it with PIE's example images")

    ans_ibs = wbia.opendb(testdb_name, allow_newdir=True)

    test_image_folder = os.path.join(_PLUGIN_FOLDER, 'examples/manta-demo/test')
    test_images = os.listdir(test_image_folder)
    test_images = [fname for fname in test_images if fname.lower().endswith('.png')]
    test_images = sorted(test_images)

    gpaths = [os.path.join(test_image_folder, fname) for fname in test_images]
    names = [fname.split('-')[0] for fname in test_images]

    gid_list = ans_ibs.add_images(gpaths)
    nid_list = ans_ibs.add_names(names)
    species = ['Mobula birostris'] * len(gid_list)

    # these images are pre-cropped aka trivial annotations
    wh_list = ans_ibs.get_image_sizes(gid_list)
    bbox_list = [[0, 0, w, h] for (w, h) in wh_list]
    ans_ibs.add_annots(
        gid_list, bbox_list=bbox_list, species_list=species, nid_list=nid_list
    )

    return ans_ibs
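# A hedged usage sketch (added here, not part of the original plugin): round-trip the
# PIE test database built above. The _demo_* helper name is hypothetical; the
# get_valid_aids/get_annot_names calls are standard wbia controller methods.
def _demo_pie_testdb_usage():
    ibs = pie_testdb_ibs()
    aids = ibs.get_valid_aids()
    # the example images are pre-cropped, so each carries one full-frame annotation
    assert len(aids) > 3
    return ibs.get_annot_names(aids)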
def experiment_init_db(tag):
    import wbia

    if tag in DBDIR_DICT:
        dbdir = abspath(expanduser(DBDIR_DICT[tag]))
        DB_DICT[tag] = wbia.opendb(dbdir=dbdir, web=False)
    return DB_DICT.get(tag, None)
def testdata_nnindexer(dbname='testdb1', with_indexer=True, use_memcache=True):
    r"""
    Ignore:
        >>> # ENABLE_DOCTEST
        >>> from wbia.algo.hots.neighbor_index_cache import *  # NOQA
        >>> nnindexer, qreq_, ibs = testdata_nnindexer('PZ_Master1')
        >>> S = np.cov(nnindexer.idx2_vec.T)
        >>> import wbia.plottool as pt
        >>> pt.ensureqt()
        >>> pt.plt.imshow(S)

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.algo.hots.neighbor_index_cache import *  # NOQA
        >>> nnindexer, qreq_, ibs = testdata_nnindexer()
    """
    import wbia

    daid_list = [7, 8, 9, 10, 11]
    ibs = wbia.opendb(db=dbname)
    # use_memcache isn't used here because we aren't lazy loading the indexer
    cfgdict = dict(fg_on=False)
    qreq_ = ibs.new_query_request(
        daid_list, daid_list, use_memcache=use_memcache, cfgdict=cfgdict
    )
    if with_indexer:
        # we do an explicit creation of an indexer for these tests
        nnindexer = request_wbia_nnindexer(qreq_, use_memcache=use_memcache)
    else:
        nnindexer = None
    return nnindexer, qreq_, ibs
def testdata_showchip():
    import wbia

    ibs = wbia.opendb(defaultdb='PZ_MTEST')
    aid_list = ut.get_argval(('--aids', '--aid'), type_=list, default=None)
    if aid_list is None:
        aid_list = ibs.get_valid_aids()[0:4]
    weight_label = ut.get_argval('--weight_label', type_=str, default='fg_weights')
    annote = not ut.get_argflag('--no-annote')
    kwargs = dict(
        ori=ut.get_argflag('--ori'), weight_label=weight_label, annote=annote
    )
    kwargs['notitle'] = ut.get_argflag('--notitle')
    kwargs['pts'] = ut.get_argflag('--drawpts')
    kwargs['ell'] = True or ut.get_argflag('--drawell')
    kwargs['ell_alpha'] = ut.get_argval('--ellalpha', default=0.4)
    kwargs['ell_linewidth'] = ut.get_argval('--ell_linewidth', default=2)
    kwargs['draw_lbls'] = ut.get_argval('--draw_lbls', default=True)
    logger.info('kwargs = ' + ut.repr4(kwargs, nl=True))
    default_config = dict(wbia.algo.Config.FeatureWeightConfig().parse_items())
    cfgdict = ut.argparse_dict(default_config)
    logger.info('[viz_chip.testdata] cfgdict = %r' % (cfgdict,))
    config2_ = cfgdict
    logger.info('[viz_chip.testdata] aid_list = %r' % (aid_list,))
    return ibs, aid_list, kwargs, config2_
def load_oxford_wbia():
    import wbia

    ibs = wbia.opendb('Oxford')
    dim_size = None
    _dannots = ibs.annots(
        ibs.filter_annots_general(has_none='query'), config=dict(dim_size=dim_size)
    )
    _qannots = ibs.annots(
        ibs.filter_annots_general(has_any='query'), config=dict(dim_size=dim_size)
    )

    with ut.Timer('reading info'):
        vecs_list = _dannots.vecs
        kpts_list = _dannots.kpts
        nfeats_list = np.array(_dannots.num_feats)

    with ut.Timer('stacking info'):
        all_vecs = np.vstack(vecs_list)
        all_kpts = np.vstack(kpts_list)
        offset_list = np.hstack(([0], nfeats_list.cumsum())).astype(np.int64)
        # data_annots = reorder_annots(_dannots, data_uri_order)

    data_uri_order = get_annots_imgid(_dannots)
    query_uri_order = get_annots_imgid(_qannots)
    data = {
        'offset_list': offset_list,
        'all_kpts': all_kpts,
        'all_vecs': all_vecs,
        'data_uri_order': data_uri_order,
        'query_uri_order': query_uri_order,
    }
    return data
def demodata_infr2(defaultdb='PZ_MTEST'):
    defaultdb = 'PZ_MTEST'
    import wbia

    ibs = wbia.opendb(defaultdb=defaultdb)
    annots = ibs.annots()
    names = list(annots.group_items(annots.nids).values())[0:20]

    def dummy_phi(c, n):
        x = np.arange(n)
        phi = c * x / (c * x + 1)
        phi = phi / phi.sum()
        phi = np.diff(phi)
        return phi

    phis = {c: dummy_phi(c, 30) for c in range(1, 4)}
    aids = ut.flatten(names)
    infr = wbia.AnnotInference(ibs, aids, autoinit=True)
    infr.init_termination_criteria(phis)
    infr.init_refresh_criteria()

    # Partially review
    n1, n2, n3, n4 = names[0:4]
    for name in names[4:]:
        for a, b in ut.itertwo(name.aids):
            infr.add_feedback((a, b), POSTV)

    for name1, name2 in it.combinations(names[4:], 2):
        infr.add_feedback((name1.aids[0], name2.aids[0]), NEGTV)
    return infr
def load_ordered_annots(data_uri_order, query_uri_order):
    # Open the wbia version of oxford
    import wbia

    ibs = wbia.opendb('Oxford')

    def reorder_annots(_annots, uri_order):
        intern_uris = get_annots_imgid(_annots)
        lookup = ut.make_index_lookup(intern_uris)
        _reordered = _annots.take(ut.take(lookup, uri_order))
        return _reordered

    # Load database annotations and reorder them to agree with internals
    _dannots = ibs.annots(ibs.filter_annots_general(has_none='query'))
    data_annots = reorder_annots(_dannots, data_uri_order)

    # Load query annotations and reorder to standard order
    _qannots = ibs.annots(ibs.filter_annots_general(has_any='query'))
    query_annots = reorder_annots(_qannots, query_uri_order)

    # Map each query annot to its corresponding data index
    dgid_to_dx = ut.make_index_lookup(data_annots.gids)
    qx_to_dx = ut.take(dgid_to_dx, query_annots.gids)

    return ibs, query_annots, data_annots, qx_to_dx
def testdata_ibs():
    r""" """
    import wbia

    ibs = wbia.opendb('testdb1')
    qreq_ = None
    return ibs, qreq_
def testdata_ibs():
    r""" """
    import wbia

    ibs = wbia.opendb('testdb1')
    config2_ = None
    return ibs, config2_
def shark_misc():
    import wbia

    ibs = wbia.opendb('WS_ALL')
    aid_list = ibs.get_valid_aids()
    flag_list = ibs.get_annot_been_adjusted(aid_list)
    adjusted_aids = ut.compress(aid_list, flag_list)
    return adjusted_aids
def assigner_testdb_ibs():
    import wbia
    from wbia import sysres

    dbdir = sysres.ensure_testdb_assigner()
    # dbdir = '/data/testdb_assigner'
    ibs = wbia.opendb(dbdir=dbdir)
    return ibs
def test_lnbnn():
    import wbia

    ibs = wbia.opendb('PZ_MTEST')
    annots = ibs.annots()
    qaids = daids = annots.aids
    qreq = ibs.new_query_request(qaids, daids)
    cm_list = qreq.execute(use_cache=False)  # NOQA
def testdata_ibs():
    r"""
    Auto-docstr for 'testdata_ibs'
    """
    import wbia

    ibs = wbia.opendb('testdb1')
    qreq_ = None
    return ibs, qreq_
def testdata_my_exmaples(index):
    r"""
    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.algo.hots.devcases import *  # NOQA
        >>> index = 1
    """
    import wbia
    from uuid import UUID

    ibs = wbia.opendb('GZ_ALL')
    vsone_pair_examples = [
        [
            UUID('8415b50f-2c98-0d52-77d6-04002ff4d6f8'),
            UUID('308fc664-7990-91ad-0576-d2e8ea3103d0'),
        ],
        [
            UUID('490f76bf-7616-54d5-576a-8fbc907e46ae'),
            UUID('2046509f-0a9f-1470-2b47-5ea59f803d4b'),
        ],
        [
            UUID('5cdf68ab-be49-ee3f-94d8-5483772c8618'),
            UUID('879977a7-b841-d223-dd91-761dfa58d486'),
        ],
    ]
    gf_mapping = {
        UUID('8415b50f-2c98-0d52-77d6-04002ff4d6f8'): [
            UUID('38211759-8fa7-875b-1f3e-39a630653f66')
        ],
        UUID('490f76bf-7616-54d5-576a-8fbc907e46ae'): [
            UUID('58920d6e-31ba-307c-2ac8-e56aff2b2b9e')
        ],  # other bad_aid is actually a good partial match
        UUID('5cdf68ab-be49-ee3f-94d8-5483772c8618'): [
            UUID('5a8c8ad7-873a-e6ed-98df-56a452e0a93e')
        ],
    }

    # ibs.get_annot_visual_uuids([36, 3])
    vuuid_pair = vsone_pair_examples[index]
    vuuid1, vuuid2 = vuuid_pair
    aid1, aid2 = ibs.get_annot_aids_from_visual_uuid(vuuid_pair)
    assert aid1 is not None
    assert aid2 is not None
    # daids = ibs.get_valid_aids()
    tn_vuuid = gf_mapping.get(vuuid1)
    if tn_vuuid is None:
        qaids = [aid1]
        find_close_incorrect_match(ibs, qaids)
        logger.info('paste the result in gf_mapping')
        return
    tn_aids = ibs.get_annot_aids_from_visual_uuid(tn_vuuid)
    tn_aid = tn_aids[0]
    return ibs, aid1, aid2, tn_aid
def demodata_mtest_infr(state='empty'):
    import wbia

    ibs = wbia.opendb(db='PZ_MTEST')
    annots = ibs.annots()
    names = list(annots.group_items(annots.nids).values())
    ut.shuffle(names, rng=321)
    test_aids = ut.flatten(names[1::2])
    infr = wbia.AnnotInference(ibs, test_aids, autoinit=True)
    infr.reset(state=state)
    return infr
def testdata_newqreq(defaultdb='testdb1'):
    """
    Returns:
        (wbia.IBEISController, list, list)
    """
    import wbia

    ibs = wbia.opendb(defaultdb=defaultdb)
    qaid_list = [1]
    daid_list = [1, 2, 3, 4, 5]
    return ibs, qaid_list, daid_list
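# A minimal sketch (not in the original source) of how the triple returned by
# testdata_newqreq() is typically consumed; ibs.new_query_request() is the same call
# used by test_lnbnn() above, so only the wiring and the _demo_* name are illustrative.
def _demo_newqreq_usage():
    ibs, qaid_list, daid_list = testdata_newqreq(defaultdb='testdb1')
    qreq_ = ibs.new_query_request(qaid_list, daid_list)
    cm_list = qreq_.execute(use_cache=False)
    return cm_list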
def slow_merge_test():
    r"""
    CommandLine:
        python -m wbia.dbio.export_subset --test-slow_merge_test

    Example:
        >>> # SLOW_DOCTEST
        >>> from wbia.dbio.export_subset import *  # NOQA
        >>> result = slow_merge_test()
        >>> print(result)
    """
    from wbia.dbio import export_subset
    import wbia

    ibs1 = wbia.opendb('testdb2')
    ibs1.fix_invalid_annotmatches()
    ibs_dst = wbia.opendb(db='testdb_dst2', allow_newdir=True, delete_ibsdir=True)
    export_subset.merge_databases(ibs1, ibs_dst)
    # ibs_src = ibs1
    check_merge(ibs1, ibs_dst)

    ibs2 = wbia.opendb('testdb1')
    ibs1.print_dbinfo()
    ibs2.print_dbinfo()
    ibs_dst.print_dbinfo()

    ibs_dst.print_dbinfo()
    export_subset.merge_databases(ibs2, ibs_dst)
    # ibs_src = ibs2
    check_merge(ibs2, ibs_dst)

    ibs3 = wbia.opendb('PZ_MTEST')
    export_subset.merge_databases(ibs3, ibs_dst)
    # ibs_src = ibs2
    check_merge(ibs3, ibs_dst)
    ibs_dst.print_dbinfo()

    return ibs_dst
def __setstate__(request, state_dict):
    import wbia

    dbdir = state_dict['dbdir']
    del state_dict['dbdir']
    params = state_dict['params']
    depc = wbia.opendb(dbdir=dbdir, web=False).depc
    configclass = depc.configclass_dict[state_dict['tablename']]
    config = configclass(**params)
    state_dict['depc'] = depc
    state_dict['config'] = config
    request.__dict__.update(state_dict)
def export_data(ibs, gid_list, aid_list, nid_list, new_dbpath=None):
    """
    exports a subset of data and other required info

    Args:
        ibs (IBEISController):  wbia controller object
        gid_list (list):  list of image rowids
        aid_list (list):  list of annotation rowids
        nid_list (list):  list of name rowids
        imgsetid_list (list):  list of imageset rowids
        gsgrid_list (list):  list of imageset-image pairs rowids
        new_dbpath (None): (default = None)

    Returns:
        str: new_dbpath
    """
    import wbia

    imgsetid_list = ut.unique_unordered(ut.flatten(ibs.get_image_imgsetids(gid_list)))
    gsgrid_list = ut.unique_unordered(ut.flatten(ibs.get_image_gsgrids(gid_list)))

    # TODO: write SQL query to do this
    am_rowids = ibs._get_all_annotmatch_rowids()
    flags1_list = [aid in set(aid_list) for aid in ibs.get_annotmatch_aid1(am_rowids)]
    flags2_list = [aid in set(aid_list) for aid in ibs.get_annotmatch_aid2(am_rowids)]
    flag_list = ut.and_lists(flags1_list, flags2_list)
    am_rowids = ut.compress(am_rowids, flag_list)
    # am_rowids = ibs.get_valid_aids(ibs.get_valid_aids())

    rowid_subsets = {
        const.ANNOTATION_TABLE: aid_list,
        const.NAME_TABLE: nid_list,
        const.IMAGE_TABLE: gid_list,
        const.ANNOTMATCH_TABLE: am_rowids,
        const.GSG_RELATION_TABLE: gsgrid_list,
        const.IMAGESET_TABLE: imgsetid_list,
    }
    ibs_dst = wbia.opendb(dbdir=new_dbpath, allow_newdir=True)
    # Main merge driver
    merge_databases(ibs, ibs_dst, rowid_subsets=rowid_subsets)
    logger.info('Exported to %r' % (new_dbpath,))
    return new_dbpath
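# A hedged usage sketch (added, not from the source): export the first couple of images
# of an already-open controller into a throwaway database. The destination path and the
# _demo_* helper name are illustrative only; export_data() derives the imageset and
# annotmatch subsets itself.
def _demo_export_subset(ibs, new_dbpath='/tmp/testdb_export_demo'):
    gid_list = ibs.get_valid_gids()[0:2]
    aid_list = ut.flatten(ibs.get_image_aids(gid_list))
    nid_list = ibs.get_annot_nids(aid_list)
    return export_data(ibs, gid_list, aid_list, nid_list, new_dbpath=new_dbpath)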
def testdata_showname():
    import wbia

    ibs = wbia.opendb(defaultdb='testdb1')
    default = None
    if ibs.dbname == 'testdb1':
        default = 'easy'
    name_text = ut.get_argval('--name', type_=str, default=default)
    if name_text is None:
        nid = 1
    else:
        nid = ibs.get_name_rowids_from_text(name_text)
    in_image = not ut.get_argflag('--no-inimage')
    index_list = ut.get_argval('--index_list', type_=list, default=None)
    return ibs, nid, in_image, index_list
def __setstate__(ibs, state):
    # Hack to allow for wbia objects to be pickled
    import wbia

    dbdir = state['dbdir']
    machine_name = state.pop('machine_name')
    try:
        assert (
            machine_name == ut.get_computer_name()
        ), 'wbia objects can only be pickled and unpickled on the same machine'
    except AssertionError as ex:
        iswarning = ut.checkpath(dbdir)
        ut.printex(ex, iswarning=iswarning)
        if not iswarning:
            raise
    ibs2 = wbia.opendb(dbdir=dbdir, web=False)
    ibs.__dict__.update(**ibs2.__dict__)
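# A hedged round-trip sketch (not in the original source): __setstate__ above rebuilds a
# controller from its dbdir, so pickling an ibs object on the same machine and loading it
# back should yield a controller pointing at the same database. This assumes the matching
# __getstate__ hook (recording dbdir and machine_name) is defined elsewhere on the class;
# the _demo_* name is hypothetical.
def _demo_ibs_pickle_roundtrip(ibs):
    import pickle

    data = pickle.dumps(ibs)
    ibs2 = pickle.loads(data)  # re-opens the database via wbia.opendb(dbdir=...)
    assert ibs2.get_dbdir() == ibs.get_dbdir()
    return ibs2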
def HARDCODE_SHOW_PB_PAIR():
    """
    python -m wbia.viz.viz_chip HARDCODE_SHOW_PB_PAIR --show

    Example:
        >>> # SCRIPT
        >>> from wbia.viz.viz_chip import *  # NOQA
        >>> import wbia.plottool as pt
        >>> HARDCODE_SHOW_PB_PAIR()
        >>> pt.show_if_requested()
    """
    # TODO: generalize into testdata_annotmatches which filters ams properly
    # Then a function to show these ams
    import wbia
    import wbia.viz

    has_any = ut.get_argval('--has_any', default=['photobomb'])
    index = ut.get_argval('--index', default=0)

    ibs = wbia.opendb(defaultdb='PZ_Master1')
    ams = ibs._get_all_annotmatch_rowids()
    tags = ibs.get_annotmatch_case_tags(ams)
    flags = ut.filterflags_general_tags(tags, has_any=has_any)
    selected_ams = ut.compress(ams, flags)
    aid_pairs = ibs.get_annotmatch_aids(selected_ams)
    aid1, aid2 = aid_pairs[index]

    import wbia.plottool as pt

    fnum = 1
    if ut.get_argflag('--match'):
        request = ibs.depc_annot.new_request('vsone', [aid1], [aid2])
        res_list2 = request.execute()
        match = res_list2[0]
        match.show_single_annotmatch(
            qreq_=request,
            vert=False,
            colorbar_=False,
            notitle=True,
            draw_lbl=False,
            draw_border=False,
        )
    else:
        chip1, chip2 = ibs.get_annot_chips([aid1, aid2])
        pt.imshow(chip1, pnum=(1, 2, 1), fnum=fnum)
        pt.imshow(chip2, pnum=(1, 2, 2), fnum=fnum)
def testdata_multichips():
    import wbia

    ibs = wbia.opendb(defaultdb='testdb1')
    nid = ut.get_argval('--nid', type_=int, default=None)
    tags = ut.get_argval('--tags', type_=list, default=None)

    if nid is not None:
        aid_list = ibs.get_name_aids(nid)
    elif tags is not None:
        index = ut.get_argval('--index', default=0)
        aid_list = ibs.filter_aidpairs_by_tags(any_tags=tags)[index]
    else:
        # aid_list = ut.get_argval('--aids', type_=list, default=[1, 2, 3])
        aid_list = wbia.testdata_aids(default_aids=[1, 2, 3], ibs=ibs)

    in_image = not ut.get_argflag('--no-inimage')
    return ibs, aid_list, in_image
def start(actor, dbdir, aids='all', config={}, **kwargs):
    import wbia

    assert dbdir is not None, 'must specify dbdir'
    assert actor.infr is None, 'AnnotInference already running'
    ibs = wbia.opendb(dbdir=dbdir, use_cache=False, web=False, force_serial=True)

    # Create the AnnotInference
    log.info('starting via actor with ibs = %r' % (ibs,))
    actor.infr = wbia.AnnotInference(ibs=ibs, aids=aids, autoinit=True)
    actor.infr.print('started via actor')
    actor.infr.print('config = {}'.format(ut.repr3(config)))
    # Configure query_annot_infr
    for key in config:
        actor.infr.params[key] = config[key]

    # Initialize
    # TODO: Initialize state from staging reviews after annotmatch
    # timestamps (in case of crash)
    actor.infr.print('Initializing infr tables')
    table = kwargs.get('init', 'staging')
    actor.infr.reset_feedback(table, apply=True)
    actor.infr.ensure_mst()
    actor.infr.apply_nondynamic_update()

    actor.infr.print('infr.status() = {}'.format(ut.repr4(actor.infr.status())))

    # Load random forests (TODO: should this be config specifiable?)
    actor.infr.print('loading published models')
    try:
        actor.infr.load_published()
    except Exception:
        pass

    # Start actor.infr Main Loop
    actor.infr.print('start id review')
    actor.infr.start_id_review()
    return 'initialized'
def train_part_detector():
    """
    Problem:
        healthy sharks usually have a mostly whole body shot
        injured sharks usually have a close up shot.
        This distribution of images is likely what the injur-shark net is picking up on.

        The goal is to train a detector that looks for things that look
        like the distribution of injured sharks. We will run this on
        healthy sharks to find the parts of
    """
    import wbia

    ibs = wbia.opendb('WS_ALL')
    imgset = ibs.imagesets(text='Injured Sharks')
    injured_annots = imgset.annots[0]  # NOQA

    # config = {
    #     'dim_size': (224, 224),
    #     'resize_dim': 'wh',
    # }

    from pydarknet import Darknet_YOLO_Detector

    data_path = ibs.export_to_xml()
    output_path = join(ibs.get_cachedir(), 'training', 'localizer')
    ut.ensuredir(output_path)
    dark = Darknet_YOLO_Detector()
    results = dark.train(data_path, output_path)
    del dark

    localizer_weight_path, localizer_config_path, localizer_class_path = results
    classifier_model_path = ibs.classifier_train()
    labeler_model_path = ibs.labeler_train()

    output_path = join(ibs.get_cachedir(), 'training', 'detector')
    ut.ensuredir(output_path)
    ut.copy(localizer_weight_path, join(output_path, 'localizer.weights'))
    ut.copy(localizer_config_path, join(output_path, 'localizer.config'))
    ut.copy(localizer_class_path, join(output_path, 'localizer.classes'))
    ut.copy(classifier_model_path, join(output_path, 'classifier.npy'))
    ut.copy(labeler_model_path, join(output_path, 'labeler.npy'))
def cfg_deepcopy_test():
    """
    TESTING FUNCTION

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.algo.hots.query_request import *  # NOQA
        >>> result = cfg_deepcopy_test()
        >>> print(result)
    """
    import wbia

    ibs = wbia.opendb('testdb1')
    cfg1 = ibs.cfg.query_cfg
    cfg2 = cfg1.deepcopy()
    cfg3 = cfg2
    assert cfg1.get_cfgstr() == cfg2.get_cfgstr()
    assert cfg2.sv_cfg is not cfg1.sv_cfg
    assert cfg3.sv_cfg is cfg2.sv_cfg
    cfg2.update_query_cfg(sv_on=False)
    assert cfg1.get_cfgstr() != cfg2.get_cfgstr()
    assert cfg2.get_cfgstr() == cfg3.get_cfgstr()
def subindexer_time_experiment():
    """
    builds plot of number of annotations vs indexer build time.

    TODO: time experiment
    """
    import wbia
    import utool as ut
    from vtool._pyflann_backend import pyflann as pyflann
    import wbia.plottool as pt

    ibs = wbia.opendb(db='PZ_Master0')
    daid_list = ibs.get_valid_aids()
    count_list = []
    time_list = []
    flann_params = vt.get_flann_params()
    for count in ut.ProgressIter(range(1, 301)):
        daids_ = daid_list[:]
        np.random.shuffle(daids_)
        daids = daids_[0:count]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        with ut.Timer(verbose=False) as t:
            flann = pyflann.FLANN()
            flann.build_index(vecs, **flann_params)
        count_list.append(count)
        time_list.append(t.ellapsed)
    count_arr = np.array(count_list)
    time_arr = np.array(time_list)
    pt.plot2(
        count_arr,
        time_arr,
        marker='-',
        equal_aspect=False,
        x_label='num_annotations',
        y_label='FLANN build time',
    )
def testsdata_match_verification(defaultdb='testdb1', aid1=1, aid2=2):
    r"""
    CommandLine:
        main.py --imgsetid 2
        main.py --imgsetid 13 --db PZ_MUGU_19

    CommandLine:
        python -m wbia.viz.interact.interact_name --test-testsdata_match_verification --show
        python -m wbia.viz.interact.interact_name --test-testsdata_match_verification --aid1 2 --aid2 3 --show

        # Merge case
        python -m wbia.viz.interact.interact_name --test-testsdata_match_verification --show --db PZ_MTEST --aid1 1 --aid2 30

        # Split case
        python -m wbia.viz.interact.interact_name --test-testsdata_match_verification --show --db PZ_MTEST --aid1 30 --aid2 32

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.viz.interact.interact_name import *  # NOQA
        >>> self = testsdata_match_verification()
        >>> # verify results
        >>> ut.quit_if_noshow()
        >>> self.show_page()
        >>> ut.show_if_requested()
    """
    # from wbia.viz.interact.interact_name import *  # NOQA
    import wbia

    # ibs = wbia.opendb(defaultdb='PZ_Master0')
    ibs = wbia.opendb(defaultdb=defaultdb)
    # aid1 = ut.get_argval('--aid1', int, 14)
    # aid2 = ut.get_argval('--aid2', int, 5545)
    aid1 = ut.get_argval('--aid1', int, aid1)
    aid2 = ut.get_argval('--aid2', int, aid2)
    self = MatchVerificationInteraction(ibs, aid1, aid2, dodraw=False)
    return self
def testdata_distinctiveness():
    """
    Example:
        >>> # SLOW_DOCTEST
        >>> from wbia.algo.hots.distinctiveness_normalizer import *  # NOQA
        >>> dstcnvs_normer, qreq_ = testdata_distinctiveness()
    """
    import wbia

    # build test data
    db = ut.get_argval('--db', str, 'testdb1')
    species = ut.get_argval('--species', str, None)
    aid = ut.get_argval('--aid', int, None)
    ibs = wbia.opendb(db)
    if aid is not None:
        species = ibs.get_annot_species_texts(aid)
    if species is None:
        if db == 'testdb1':
            species = wbia.const.TEST_SPECIES.ZEB_PLAIN
    daids = ibs.get_valid_aids(species=species)
    qaids = [aid] if aid is not None else daids
    qreq_ = ibs.new_query_request(qaids, daids)
    dstcnvs_normer = request_wbia_distinctiveness_normalizer(qreq_)
    return dstcnvs_normer, qreq_
def dev_train_distinctiveness(species=None):
    r"""
    Args:
        ibs (IBEISController): wbia controller object
        species (None):

    CommandLine:
        python -m wbia.algo.hots.distinctiveness_normalizer --test-dev_train_distinctiveness

        alias dev_train_distinctiveness='python -m wbia.algo.hots.distinctiveness_normalizer --test-dev_train_distinctiveness'
        # Publishing (uses cached normalizers if available)
        dev_train_distinctiveness --species GZ --publish
        dev_train_distinctiveness --species PZ --publish
        dev_train_distinctiveness --species PZ --retrain

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.algo.hots.distinctiveness_normalizer import *  # NOQA
        >>> import wbia
        >>> species = ut.get_argval('--species', str, 'zebra_grevys')
        >>> dev_train_distinctiveness(species)
    """
    import wbia

    # if 'species' not in vars() or species is None:
    #     species = 'zebra_grevys'
    if species == 'zebra_grevys':
        dbname = 'GZ_ALL'
    elif species == 'zebra_plains':
        dbname = 'PZ_Master0'
    ibs = wbia.opendb(dbname)
    global_distinctdir = ibs.get_global_distinctiveness_modeldir()
    cachedir = global_distinctdir
    dstcnvs_normer = DistinctivnessNormalizer(species, cachedir=cachedir)
    try:
        if ut.get_argflag('--retrain'):
            raise IOError('force cache miss')
        with ut.Timer('loading distinctiveness'):
            dstcnvs_normer.load(cachedir)
        # Cache hit
        logger.info('distinctiveness model cache hit')
    except IOError:
        logger.info('distinctiveness model cache miss')
        with ut.Timer('training distinctiveness'):
            # Need to train
            # Add one example from each name
            # TODO: add one exemplar per viewpoint for each name
            # max_vecs = 1E6
            # max_annots = 975
            max_annots = 975
            # ibs.fix_and_clean_database()
            nid_list = ibs.get_valid_nids()
            aids_list = ibs.get_name_aids(nid_list)
            # remove junk
            aids_list = ibs.unflat_map(ibs.filter_junk_annotations, aids_list)
            # remove empty
            aids_list = [aids for aids in aids_list if len(aids) > 0]
            num_annots_list = list(map(len, aids_list))
            aids_list = ut.sortedby(aids_list, num_annots_list, reverse=True)
            # take only one annot per name
            aid_list = ut.get_list_column(aids_list, 0)
            # Keep only a certain number of annots for distinctiveness mapping
            aid_list_ = ut.listclip(aid_list, max_annots)
            logger.info('total num named annots = %r' % (sum(num_annots_list)))
            logger.info(
                'training distinctiveness using %d/%d singleton annots'
                % (len(aid_list_), len(aid_list))
            )
            # vec
            # FIXME: qreq_ params for config rowid
            vecs_list = ibs.get_annot_vecs(aid_list_)
            num_vecs = sum(list(map(len, vecs_list)))
            logger.info('num_vecs = %r' % (num_vecs,))
            vecs = np.vstack(vecs_list)
            logger.info('vecs size = %r' % (ut.get_object_size_str(vecs),))
            dstcnvs_normer.init_support(vecs)
            dstcnvs_normer.save(global_distinctdir)

    if ut.get_argflag('--publish'):
        dstcnvs_normer.publish()