Example #1
def multidb_montage():
    r"""
    CommandLine:
        python -m ibeis.scripts.specialdraw multidb_montage --save montage.jpg --dpath ~/slides --diskshow --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.scripts.specialdraw import *  # NOQA
        >>> multidb_montage()
    """
    import ibeis
    import plottool as pt
    import utool as ut
    import vtool as vt
    import numpy as np
    pt.ensure_pylab_qt4()
    ibs1 = ibeis.opendb('PZ_MTEST')
    ibs2 = ibeis.opendb('GZ_ALL')
    ibs3 = ibeis.opendb('GIRM_Master1')

    chip_lists = []
    aids_list = []

    for ibs in [ibs1, ibs2, ibs3]:
        aids = ibs.sample_annots_general(minqual='good', sample_size=400)
        aids_list.append(aids)

    print(ut.depth_profile(aids_list))

    for ibs, aids in zip([ibs1, ibs2, ibs3], aids_list):
        chips = ibs.get_annot_chips(aids)
        chip_lists.append(chips)

    chip_list = ut.flatten(chip_lists)
    np.random.shuffle(chip_list)

    widescreen_ratio = 16 / 9
    ratio = widescreen_ratio  # alternative: ut.PHI (the golden ratio)

    fpath = pt.get_save_directions()

    #height = 6000
    width = 6000
    #width = int(height * ratio)
    height = int(width / ratio)
    dsize = (width, height)
    dst = vt.montage(chip_list, dsize)
    vt.imwrite(fpath, dst)
    if ut.get_argflag('--show'):
        pt.imshow(dst)
Example #2
def testdata_ibeis():
    r"""testdata function """
    import ibeis

    ibs = ibeis.opendb("testdb1")
    aid_list = ibs.get_valid_aids()[0::4]
    return ibs, aid_list
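
A minimal usage sketch of the fixture above (assumes the bundled testdb1 database is available locally):

ibs, aid_list = testdata_ibeis()
# every fourth valid annotation was sampled
print('%s: %d annots' % (ibs.get_dbname(), len(aid_list)))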
Example #3
def test():
    r"""
    >>> from ibeis.algo.hots.score_normalization import *  # NOQA
    """
    #from ibeis.algo.hots import query_request
    import ibeis
    import six
    ibs = ibeis.opendb(db='PZ_MTEST')
    qaid_list = [1, 2, 3, 4, 5]
    daid_list = [1, 2, 3, 4, 5]
    cfgdict = {'codename': 'nsum'}
    cm_list, qreq_ = ibs.query_chips(qaid_list, daid_list, use_cache=False, return_cm=True, cfgdict=cfgdict, return_request=True)
    qreq_.load_score_normalizer(qreq_.ibs)
    normalizer = qreq_.normalizer

    for cm in cm_list:
        aid_list = list(six.iterkeys(cm.aid2_score))
        score_list = list(six.itervalues(cm.aid2_score))
        prob_list = normalizer.normalize_score_list(score_list)
        cm.qaid2_score = dict(zip(aid_list, prob_list))
    for cm in cm_list:
        print(list(six.itervalues(cm.qaid2_score)))
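
The loop above only re-keys the normalizer output; the core pattern in isolation, with a hypothetical stand-in for normalize_score_list (min-max scaling is an assumption, not the real score normalizer):

def normalize_score_list(score_list):
    # Hypothetical stand-in: squash raw scores into [0, 1].
    lo, hi = min(score_list), max(score_list)
    return [(s - lo) / (hi - lo + 1e-9) for s in score_list]

aid2_score = {1: 10.0, 2: 35.0, 3: 20.0}
prob_list = normalize_score_list(list(aid2_score.values()))
aid2_prob = dict(zip(aid2_score.keys(), prob_list))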
Example #4
def test_nnindexer(dbname='testdb1', with_indexer=True, use_memcache=True):
    r"""

    Ignore:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index_cache import *  # NOQA
        >>> nnindexer, qreq_, ibs = test_nnindexer('PZ_Master1')
        >>> S = np.cov(nnindexer.idx2_vec.T)
        >>> import plottool as pt
        >>> pt.ensure_pylab_qt4()
        >>> pt.plt.imshow(S)

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index_cache import *  # NOQA
        >>> nnindexer, qreq_, ibs = test_nnindexer()
    """
    import ibeis
    daid_list = [7, 8, 9, 10, 11]
    ibs = ibeis.opendb(db=dbname)
    # use_memcache isn't used here because we aren't lazy-loading the indexer
    cfgdict = dict(fg_on=False)
    qreq_ = ibs.new_query_request(daid_list, daid_list,
                                  use_memcache=use_memcache, cfgdict=cfgdict)
    if with_indexer:
        # we do an explicit creation of an indexer for these tests
        nnindexer = request_ibeis_nnindexer(qreq_, use_memcache=use_memcache)
    else:
        nnindexer = None
    return nnindexer, qreq_, ibs
Example #5
def subindexer_time_experiment():
    """
    builds plot of number of annotations vs indexer build time.

    TODO: time experiment
    """
    import ibeis
    import numpy as np
    import utool as ut
    import pyflann
    import plottool as pt
    ibs = ibeis.opendb(db='PZ_Master0')
    daid_list = ibs.get_valid_aids()
    count_list = []
    time_list = []
    flann_params = ibs.cfg.query_cfg.flann_cfg.get_flann_params()
    for count in ut.ProgressIter(range(1, 301)):
        daids_ = daid_list[:]
        np.random.shuffle(daids_)
        daids = daids_[0:count]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        with ut.Timer(verbose=False) as t:
            flann = pyflann.FLANN()
            flann.build_index(vecs, **flann_params)
        count_list.append(count)
        time_list.append(t.ellapsed)
    count_arr = np.array(count_list)
    time_arr = np.array(time_list)
    pt.plot2(count_arr, time_arr, marker='-', equal_aspect=False,
             x_label='num_annotations', y_label='FLANN build time')
Example #6
def __setstate__(qreq_, state_dict):
    import ibeis
    dbdir = state_dict['dbdir']
    del state_dict['dbdir']
    state_dict['ibs'] = ibeis.opendb(dbdir=dbdir, web=False)
    qreq_.__dict__.update(state_dict)
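
A pickled query request only round-trips if the class also defines the matching __getstate__ hook. A minimal sketch of what that counterpart could look like, assuming the controller exposes get_dbdir() (hypothetical; not the actual ibeis implementation):

def __getstate__(qreq_):
    # Assumed counterpart: swap the live controller for its dbdir so that
    # __setstate__ above can re-open it with ibeis.opendb on load.
    state_dict = qreq_.__dict__.copy()
    state_dict['dbdir'] = state_dict['ibs'].get_dbdir()  # assumed accessor
    del state_dict['ibs']
    return state_dict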
Example #7
def testdata_ibs():
    r"""
    """
    import ibeis
    ibs = ibeis.opendb('testdb1')
    config2_ = None
    return ibs, config2_
Example #8
def testdata_ibs():
    r"""
    """
    import ibeis
    ibs = ibeis.opendb('testdb1')
    qreq_ = None
    return ibs, qreq_
Example #9
def testdata_automatch(dbname=None):
    if dbname is None:
        dbname = 'testdb1'
    import ibeis
    ibs = ibeis.opendb(dbname)
    qaid_chunk = ibs.get_valid_aids()[0:1]
    return ibs, qaid_chunk
Example #10
def testdata_annotmatch(defaultdb='testdb1'):
    import ibeis
    ibs = ibeis.opendb(defaultdb=defaultdb)
    config2_ = None  # qreq_.qparams
    #from ibeis.hots import query_config
    #config2_ = query_config.QueryParams(cfgdict=dict())
    return ibs, config2_
Example #11
def testsdata_match_verification(defaultdb='testdb1', aid1=1, aid2=2):
    r"""
    CommandLine:
        main.py --imgsetid 2
        main.py --imgsetid 13 --db PZ_MUGU_19

    CommandLine:
        python -m ibeis.viz.interact.interact_name --test-testsdata_match_verification --show
        python -m ibeis.viz.interact.interact_name --test-testsdata_match_verification --aid1 2 --aid2 3 --show

        # Merge case
        python -m ibeis.viz.interact.interact_name --test-testsdata_match_verification --show --db PZ_MTEST --aid1 1 --aid2 30

        # Split case
        python -m ibeis.viz.interact.interact_name --test-testsdata_match_verification --show --db PZ_MTEST --aid1 30 --aid2 32

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.viz.interact.interact_name import *  # NOQA
        >>> self = testsdata_match_verification()
        >>> # verify results
        >>> ut.quit_if_noshow()
        >>> self.show_page()
        >>> ut.show_if_requested()
    """
    #from ibeis.viz.interact.interact_name import *  # NOQA
    import ibeis
    import utool as ut
    #ibs = ibeis.opendb(defaultdb='PZ_Master0')
    ibs = ibeis.opendb(defaultdb=defaultdb)
    #aid1 = ut.get_argval('--aid1', int, 14)
    #aid2 = ut.get_argval('--aid2', int, 5545)
    aid1 = ut.get_argval('--aid1', int, aid1)
    aid2 = ut.get_argval('--aid2', int, aid2)
    self = MatchVerificationInteraction(ibs, aid1, aid2, dodraw=False)
    return self
Example #12
def shark_misc():
    import ibeis
    import utool as ut
    ibs = ibeis.opendb('WS_ALL')
    aid_list = ibs.get_valid_aids()
    flag_list = ibs.get_annot_been_adjusted(aid_list)
    adjusted_aids = ut.compress(aid_list, flag_list)
    return adjusted_aids
Example #13
def testdb2_stuff():
    """
    tar -zcvf testdb2.tar.gz testdb2/
    """
    import ibeis
    import utool as ut
    ibs = ibeis.opendb('testdb2')

    #ibs.ensure_contributor_rowids()

    gid_list = ibs.get_valid_gids()

    # Group gids by species
    image_species_list = ut.get_list_column(
        ibs.unflat_map(ibs.get_annot_species_rowids, ibs.get_image_aids(gid_list)), 0)

    new_contrib_rowid1 = ibs.add_new_temp_contributor(offset=len(ibs.get_valid_contrib_rowids()))
    new_contrib_rowid2 = ibs.add_new_temp_contributor(offset=len(ibs.get_valid_contrib_rowids()))

    # NOTE: two-way unpacking assumes testdb2 contains exactly two species
    gids1, gids2 = list(ut.group_items(gid_list, image_species_list).values())

    party_rowids = ibs.add_party(['TestCar1', 'TestCar2'])
    partyid1, partyid2 = party_rowids
    ibs.set_image_contributor_rowid(gids1, [new_contrib_rowid1] * len(gids1))
    ibs.set_image_contributor_rowid(gids2, [new_contrib_rowid2] * len(gids2))
    ibs.set_image_party_rowids(gids1, [partyid1] * len(gids1))
    ibs.set_image_party_rowids(gids2, [partyid2] * len(gids2))
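
The two-way unpacking above relies on testdb2 containing exactly two species. A tiny sketch of the assumed ut.group_items behavior (grouping items by parallel keys; hedged on utool's documented convention):

import utool as ut
groups = ut.group_items([1, 2, 3, 4], ['zebra', 'giraffe', 'zebra', 'giraffe'])
# expected (assumption): {'zebra': [1, 3], 'giraffe': [2, 4]}
print(groups)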
Example #14
def testdata_ibeis(**kwargs):
    """
    DEPRICATE
    Step 1

    builds ibs for testing

    Example:
        >>> from ibeis.algo.hots.smk.smk_debug import *  # NOQA
        >>> kwargs = {}
    """
    print(' === Test Data IBEIS ===')
    print('kwargs = ' + ut.dict_str(kwargs))
    print('[smk_debug] testdata_ibeis')
    db = kwargs.get('db', ut.get_argval('--db', str, 'PZ_MTEST'))
    #with ut.Indenter('ENSURE'):
    if db == 'PZ_MTEST':
        ibeis.ensure_pz_mtest()
    ibs = ibeis.opendb(db=db)
    ibs._default_config()
    aggregate = kwargs.get('aggregate', ut.get_argflag(('--agg', '--aggregate')))
    nWords  = kwargs.get('nWords', ut.get_argval(('--nWords', '--nCentroids'), int, default=8E3))
    nAssign = kwargs.get('nAssign', ut.get_argval(('--nAssign', '--K'), int, default=10))
    # Configs
    ibs.cfg.query_cfg.pipeline_root = 'smk'
    ibs.cfg.query_cfg.smk_cfg.aggregate = aggregate
    ibs.cfg.query_cfg.smk_cfg.smk_alpha = 3
    ibs.cfg.query_cfg.smk_cfg.smk_thresh = 0
    ibs.cfg.query_cfg.smk_cfg.vocabtrain_cfg.nWords = nWords
    ibs.cfg.query_cfg.smk_cfg.vocabassign_cfg.nAssign = nAssign
    if ut.VERYVERBOSE:
        ibs.cfg.query_cfg.smk_cfg.printme3()
    return ibs
Example #15
def test_windex():
    from ibeis.algo.hots.query_request import new_ibeis_query_request
    import ibeis
    daid_list = [7, 8, 9, 10, 11]
    ibs = ibeis.opendb(db='testdb1')
    qreq_ = new_ibeis_query_request(ibs, daid_list, daid_list)
    windex = new_ibeis_windex(ibs, qreq_.get_internal_daids())
    return windex, qreq_, ibs
Example #16
def testdata_ibs(defaultdb='testdb1'):
    r"""
    Auto-docstr for 'testdata_ibs'
    """
    import ibeis
    ibs = ibeis.opendb(defaultdb=defaultdb)
    config2_ = None  # qreq_.qparams
    return ibs, config2_
Example #17
def testdata_ibs():
    r"""
    Auto-docstr for 'testdata_ibs'
    """
    import ibeis
    ibs = ibeis.opendb('testdb1')
    qreq_ = None
    return ibs, qreq_
Example #18
def testdata_humpbacks():
    import ibeis
    import utool as ut
    ibs = ibeis.opendb(defaultdb='humpbacks')
    all_aids = ibs.get_valid_aids()
    isvalid = ibs.depc.get('Has_Notch', all_aids, 'flag')
    aid_list = ut.compress(all_aids, isvalid)
    aid_list = aid_list[0:10]
    return ibs, aid_list
Example #19
def ensure_pz_mtest():
    """
    Ensures that you have the PZ_MTEST dataset

    CommandLine:
        python -m ibeis.init.sysres --exec-ensure_pz_mtest
        python -m ibeis --tf ensure_pz_mtest

    Example:
        >>> # SCRIPT
        >>> from ibeis.init.sysres import *  # NOQA
        >>> ensure_pz_mtest()
    """
    print('ensure_pz_mtest')
    import utool as ut
    from ibeis import constants as const
    from ibeis import sysres
    workdir = sysres.get_workdir()
    mtest_zipped_url = const.ZIPPED_URLS.PZ_MTEST
    mtest_dir = ut.grab_zipped_url(mtest_zipped_url, ensure=True, download_dir=workdir)
    print('have mtest_dir=%r' % (mtest_dir,))
    # update to the newest database version
    import ibeis
    ibs = ibeis.opendb('PZ_MTEST')
    print('cleaning up old database and ensuring everything is properly computed')
    ibs.db.vacuum()
    valid_aids = ibs.get_valid_aids()
    assert len(valid_aids) == 119
    ibs.update_annot_semantic_uuids(valid_aids)
    if ut.VERYVERBOSE:
        ibs.print_annotation_table()
    nid = ibs.get_name_rowids_from_text('', ensure=False)
    if nid is not None:
        ibs.set_name_texts([nid], ['lostname'])

    # Remove old imagesets and update to new special ones
    all_imgset_ids = ibs.get_valid_imgsetids()
    special_imgset_ids = ibs.get_special_imgsetids()
    other_imgset_ids = ut.setdiff(all_imgset_ids, special_imgset_ids)
    ibs.delete_imagesets(other_imgset_ids)
    ibs.set_exemplars_from_quality_and_viewpoint()
    ibs.update_all_image_special_imageset()

    occurrence_gids = [2, 9, 12, 16, 25, 26, 29, 30, 32, 33, 35, 46, 47, 52,
                       57, 61, 66, 70, 71, 73, 74, 76, 77, 78, 79, 87, 88, 90,
                       96, 97, 103, 106, 108, 110, 112, 113]

    other_gids = ut.setdiff(ibs.get_valid_gids(), occurrence_gids)
    other_gids1 = other_gids[0::2]
    other_gids2 = other_gids[1::2]
    ibs.set_image_imagesettext(occurrence_gids, ['Occurrence 1'] * len(occurrence_gids))
    ibs.set_image_imagesettext(other_gids1, ['Occurrence 2'] * len(other_gids1))
    ibs.set_image_imagesettext(other_gids2, ['Occurrence 3'] * len(other_gids2))

    # hack in some tags
    print('Hacking in some tags')
    foal_aids = [4, 8, 15, 21, 28, 34, 38, 41, 45, 49, 51, 56, 60, 66, 69, 74, 80, 83, 91, 97, 103, 107, 109, 119]
    mother_aids = [9, 16, 35, 42, 52, 57, 61, 67, 75, 84, 98, 104, 108, 114]
    ibs.append_annot_case_tags(foal_aids, ['foal'] * len(foal_aids))
    ibs.append_annot_case_tags(mother_aids, ['mother'] * len(mother_aids))
Example #20
def sift_stats():
    import ibeis
    import numpy as np
    ibs = ibeis.opendb('PZ_Mothers')
    aid_list = ibs.get_valid_aids()
    stacked_sift = np.vstack(ibs.get_annot_vecs(aid_list))
    vector_stats(stacked_sift, 'sift')
    # We see that SIFT vectors are actually normalized:
    # scaled between 0 and 512 and clamped to uint8
    vector_stats(stacked_sift.astype(np.float32) / 512.0, 'sift')
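
vector_stats is not defined in this snippet; a hypothetical stand-in that prints the statistics the comments refer to might look like this (name and output format are assumptions):

import numpy as np

def vector_stats(vecs, lbl):
    # Hypothetical helper: summarize the value range of stacked descriptors.
    print('%s: dtype=%s, shape=%r, min=%r, max=%r, mean=%.3f' % (
        lbl, vecs.dtype, vecs.shape, vecs.min(), vecs.max(), vecs.mean()))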
Example #21
def testdata_mindexer():
    import ibeis
    ibs = ibeis.opendb(db='PZ_MTEST')
    daid_list = ibs.get_valid_aids()[1:60]
    cfgdict = dict(fg_on=False)
    qreq_ = ibs.new_query_request(daid_list, daid_list, cfgdict=cfgdict)
    index_method = 'name'
    mxer = request_ibeis_mindexer(qreq_, index_method)
    return mxer, qreq_, ibs
Example #22
def testdata_expts(defaultdb='testdb1',
                   default_acfgstr_name_list=['default:qindex=0:10:4,dindex=0:20'],
                   default_test_cfg_name_list=['default'],
                   a=None,
                   t=None,
                   qaid_override=None,
                   daid_override=None,
                   initial_aids=None,
                   ):
    """
    Use this if you want data from an experiment.
    Command line interface to quickly get testdata for test_results.

    Command line flags can be used to specify db, aidcfg, pipecfg, qaid
    override, daid override (and maybe initial aids).

    """
    print('[main_helpers] testdata_expts')
    import ibeis
    import six
    import utool as ut
    from ibeis.expt import experiment_harness
    from ibeis.expt import test_result
    if a is not None:
        default_acfgstr_name_list = a
    if t is not None:
        default_test_cfg_name_list = t

    if isinstance(default_acfgstr_name_list, six.string_types):
        default_acfgstr_name_list = [default_acfgstr_name_list]
    if isinstance(default_test_cfg_name_list, six.string_types):
        default_test_cfg_name_list = [default_test_cfg_name_list]

    #from ibeis.expt import experiment_helpers
    ibs = ibeis.opendb(defaultdb=defaultdb)
    acfg_name_list = ut.get_argval(('--aidcfg', '--acfg', '-a'), type_=list,
                                   default=default_acfgstr_name_list)
    test_cfg_name_list = ut.get_argval(('-t', '-p'), type_=list, default=default_test_cfg_name_list)
    daid_override = ut.get_argval(('--daid-override', '--daids-override'), type_=list, default=daid_override)
    qaid_override = ut.get_argval(('--qaid', '--qaids-override', '--qaid-override'), type_=list, default=qaid_override)

    # Hack a cache here
    use_bigtest_cache3 = not ut.get_argflag(('--nocache', '--nocache-hs'))
    use_bigtest_cache3 &= ut.is_developer()
    use_bigtest_cache3 &= False
    if use_bigtest_cache3:
        from os.path import dirname, join
        cache_dir = ut.ensuredir(join(dirname(ut.get_module_dir(ibeis)), 'BIG_TESTLIST_CACHE3'))
        load_testres = ut.cached_func('testreslist', cache_dir=cache_dir)(experiment_harness.run_test_configurations2)
    else:
        load_testres = experiment_harness.run_test_configurations2
    testres_list = load_testres(
        ibs, acfg_name_list, test_cfg_name_list, qaid_override=qaid_override,
        daid_override=daid_override, initial_aids=initial_aids)
    testres = test_result.combine_testres_list(ibs, testres_list)

    print(testres)

    return ibs, testres
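
A hedged usage sketch (the database and config names are the defaults used above; the command-line flags map to the ut.get_argval calls in the body):

# programmatic call, assuming testdb1 is available
ibs, testres = testdata_expts('testdb1', t=['default'])
# roughly equivalent CLI override parsed by the flags above:
#   --db testdb1 -a default -t default --qaid 1,2,3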
Example #23
def testdata_special_query(dbname=None):
    """ test data for special query doctests """
    import ibeis
    if dbname is None:
        dbname = 'testdb1'
    # build test data
    ibs = ibeis.opendb(dbname)
    #ibs = ibeis.opendb('PZ_MTEST')
    valid_aids = ibs.get_valid_aids(species='zebra_plains')
    return ibs, valid_aids
Example #24
def testdata_newqreq(defaultdb):
    """
    Returns:
        (ibeis.IBEISController, list, list)
    """
    import ibeis
    ibs = ibeis.opendb(defaultdb=defaultdb)
    qaid_list = [1]
    daid_list = [1, 2, 3, 4, 5]
    return ibs, qaid_list, daid_list
Example #25
def learn_k():
    r"""
    CommandLine:
        python -m ibeis.other.optimize_k --test-learn_k
        python -m ibeis.other.optimize_k --test-learn_k --show
        python -m ibeis.other.optimize_k --test-learn_k --show --dummy

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.other.optimize_k import *  # NOQA
        >>> import plottool as pt
        >>> # build test data
        >>> # execute function
        >>> known_nd_data, known_target_points, given_data_dims, opt_model_params = learn_k()
        >>> # verify results
        >>> ut.quit_if_noshow()
        >>> plot_search_surface(known_nd_data, known_target_points, given_data_dims, opt_model_params)
        >>> pt.all_figures_bring_to_front()
        >>> pt.show_if_requested()
    """
    import ibeis
    import numpy as np
    import utool as ut
    # Compute Training Data
    varydict = {
        #'K': [4, 7, 10, 13, 16, 19, 22, 25][:4],
        #'K': [1, 2, 3, 4, 8, 10, 13, 15],
        'K': [1, 2, 4, 8, 16],
        #'nDaids': [20, 100, 250, 500, 750, 1000],
    }
    nDaids_basis = [20, 30, 50, 75, 100, 200, 250, 300, 325, 350, 400, 500, 600, 750, 800, 900, 1000, 1500]
    DUMMY = ut.get_argflag('--dummy')
    if DUMMY:
        nDaids_list, K_list, nError_list = test_training_data(varydict, nDaids_basis)
        nError_list = nError_list.astype(np.float32) / nError_list.max()
    else:
        dbname = ut.get_argval('--db', default='PZ_Master0')
        ibs = ibeis.opendb(dbname)
        verbose = False
        qaids, daids_list = collect_ibeis_training_annotations(ibs, nDaids_basis, verbose=verbose)
        nDaids_list, K_list, nError_list = evaluate_training_data(ibs, qaids, daids_list, varydict, nDaids_basis, verbose=verbose)
        nError_list = nError_list.astype(np.float32) / len(qaids)
        print('\nFinished Get Training Data')
        print('len(qaids) = %r' % (len(qaids)))
        print(ut.get_stats_str(nError_list))

    #unique_nDaids = np.unique(nDaids_list)

    # Alias to general optimization problem
    known_nd_data = np.vstack([nDaids_list, K_list]).T
    known_target_points = nError_list
    # Mark the data we are given vs what we want to learn
    given_data_dims = [0]
    #learn_data_dims = [1]

    # Minimize K params
    opt_model_params = minimize_compute_K_params(known_nd_data, known_target_points, given_data_dims)
    return known_nd_data, known_target_points, given_data_dims, opt_model_params
Example #26
def testdata_qreq():
    """
    Returns:
        (ibeis.QueryRequest, ibeis.IBEISController)
    """
    import ibeis
    qaid_list = [1, 2]
    daid_list = [1, 2, 3, 4, 5]
    ibs = ibeis.opendb(db='testdb1')
    qreq_ = new_ibeis_query_request(ibs, qaid_list, daid_list)
    return qreq_, ibs
Example #27
def __setstate__(request, state_dict):
    import ibeis
    dbdir = state_dict['dbdir']
    del state_dict['dbdir']
    params = state_dict['params']
    depc = ibeis.opendb(dbdir=dbdir, web=False).depc
    configclass = depc.configclass_dict[state_dict['tablename']]
    config = configclass(**params)
    state_dict['depc'] = depc
    state_dict['config'] = config
    request.__dict__.update(state_dict)
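
As in Example #6, this hook is only exercised when the object is actually pickled. A hedged round-trip sketch (assumes the class also defines the matching __getstate__ that records 'dbdir', 'params', and 'tablename'):

import pickle
# Serializing drops the live depcache handles; loading re-opens the database
# through the __setstate__ above.
request2 = pickle.loads(pickle.dumps(request))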
Example #28
def test_openworkdirs():
    """
    problems:
        PZ_DanExt_All
        PZ_DanExt_Test
        GZ_March2012
        Wildebeest_ONLY_MATCHES

    python dev.py --convert --dbdir /raid/work/PZ_Marianne --force-delete
    python dev.py --convert --dbdir /raid/work/SL_Siva --force-delete
    python dev.py --convert --dbdir /raid/work/PZ_SweatwaterSmall --force-delete
    """
    canskip = [
        '/raid/work/NAUT_test2', '/raid/work/WD_Siva',
        '/raid/work/PZ_FlankHack', '/raid/work/PZ_Mothers',
        '/raid/work/GZ_Foals', '/raid/work/PZ_MTEST', '/raid/work/GIR_Tanya',
        '/raid/work/GZ_Siva', '/raid/work/Wildebeest', '/raid/work/sonograms',
        '/raid/work/MISC_Jan12', '/raid/work/GZ_Master0',
        '/raid/work/LF_OPTIMIZADAS_NI_V_E', '/raid/work/LF_Bajo_bonito',
        '/raid/work/Frogs', '/raid/work/GZ_ALL', '/raid/work/JAG_Kelly',
        '/raid/work/NAUT_test (copy)', '/raid/work/WS_hard',
        '/raid/work/WY_Toads', '/raid/work/NAUT_Dan',
        '/raid/work/LF_WEST_POINT_OPTIMIZADAS', '/raid/work/Seals',
        '/raid/work/Rhinos_Stewart', '/raid/work/Elephants_Stewart',
        '/raid/work/NAUT_test',
    ]
    import ibeis
    from ibeis.init import sysres
    import os
    import utool as ut  # NOQA
    from os.path import join
    from ibeis.dbio import ingest_hsdb
    import ibeis.other.dbinfo
    ibeis.other.dbinfo.rrr()
    workdir = sysres.get_workdir()
    dbname_list = os.listdir(workdir)
    dbpath_list = [join(workdir, name) for name in dbname_list]
    is_hsdb_list    = list(map(ingest_hsdb.is_hsdb, dbpath_list))
    hsdb_list = ut.compress(dbpath_list, is_hsdb_list)
    #is_ibs_cvt_list = np.array(list(map(is_succesful_convert, dbpath_list)))
    regen_cmds = []
    for hsdb_dpath in hsdb_list:
        if hsdb_dpath in canskip:
            continue
        try:
            ibs = ibeis.opendb(hsdb_dpath)  # NOQA
            print('Successfully opened hsdb: ' + hsdb_dpath)
            print(ibs.get_dbinfo_str())
        except Exception as ex:
            ut.printex(ex, 'Failed to convert hsdb: ' + hsdb_dpath)
            regen_cmd = 'python dev.py --convert --dbdir ' + hsdb_dpath
            regen_cmds.append(regen_cmd)
    print('\n'.join(regen_cmds))
Example #29
def export_images(ibs, gid_list, new_dbdir_):
    """ See ibeis/tests/test_ibs_export.py """
    from ibeis.dbio import export_subset
    print('[ibsfuncs] exporting to new_dbdir_=%r' % new_dbdir_)
    print('[ibsfuncs] opening database')
    ibs_dst = ibeis.opendb(dbdir=new_dbdir_, allow_newdir=True)
    print('[ibsfuncs] beginning transfer')
    ibs_src = ibs
    gid_list1 = gid_list
    aid_list1 = None
    include_image_annots = True
    status = export_subset.execute_transfer(ibs_src, ibs_dst, gid_list1,
                                            aid_list1, include_image_annots)
    return status
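
A hypothetical invocation of export_images (the destination path is a placeholder):

import ibeis
ibs = ibeis.opendb('testdb1')
gid_list = ibs.get_valid_gids()[0:5]
status = export_images(ibs, gid_list, '/tmp/exported_testdb1')
print(status)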
Example #30
def test_open_to_convert():
    r"""
    CommandLine:
        python -m ibeis.dbio.ingest_hsdb --test-test_open_to_convert

    Example:
        >>> # VERY_UNSTABLE_DOCTEST
        >>> from ibeis.dbio.ingest_hsdb import *  # NOQA
        >>> result = test_open_to_convert()
        >>> print(result)
    """
    import ibeis
    hsdb_dir = testdata_ensure_unconverted_hsdb()
    ibs = ibeis.opendb(dbdir=hsdb_dir)
    ibs.print_cachestats_str()
Example #31
def augment_nnindexer_experiment():
    """

    References:
        http://answers.opencv.org/question/44592/flann-index-training-fails-with-segfault/

    CommandLine:
        utprof.py -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment
        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment

        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_MTEST --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6
        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6 --nosave-flann --show
        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6 --nosave-flann --show


        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6 --nosave-flann --no-api-cache --nocache-uuids

        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_MTEST --show
        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --show

        # RUNS THE SEGFAULTING CASE
        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --show
        # Debug it
        gdb python
        run -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --show
        gdb python
        run -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6


    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots._neighbor_experiment import *  # NOQA
        >>> # execute function
        >>> augment_nnindexer_experiment()
        >>> # verify results
        >>> ut.show_if_requested()

    """
    import ibeis
    import utool as ut
    from ibeis.algo.hots import neighbor_index_cache
    # build test data
    #ibs = ibeis.opendb('PZ_MTEST')
    ibs = ibeis.opendb(defaultdb='PZ_Master0')
    if ibs.get_dbname() == 'PZ_MTEST':
        initial = 1
        addition_stride = 4
        max_ceiling = 100
    elif ibs.get_dbname() == 'PZ_Master0':
        initial = 128
        #addition_stride = 64
        #addition_stride = 128
        addition_stride = 256
        max_ceiling = 10000
        #max_ceiling = 4000
        #max_ceiling = 2000
        #max_ceiling = 600
    else:
        assert False
    all_daids = ibs.get_valid_aids(species='zebra_plains')
    qreq_ = ibs.new_query_request(all_daids, all_daids)
    max_num = min(max_ceiling, len(all_daids))

    # Clear Caches
    ibs.delete_flann_cachedir()
    neighbor_index_cache.clear_memcache()
    neighbor_index_cache.clear_uuid_cache(qreq_)

    # Setup
    all_randomize_daids_ = ut.deterministic_shuffle(all_daids[:])
    # ensure all features are computed

    nnindexer_list = []
    addition_lbl = 'Addition'
    _addition_iter = list(range(initial + 1, max_num, addition_stride))
    addition_iter = iter(ut.ProgressIter(_addition_iter, lbl=addition_lbl,
                                         freq=1, autoadjust=False))
    time_list_addition = []
    #time_list_reindex = []
    addition_count_list = []
    tmp_cfgstr_list = []

    #for _ in range(80):
    #    next(addition_iter)
    try:
        memtrack = ut.MemoryTracker(disable=False)
        for count in addition_iter:
            aid_list_ = all_randomize_daids_[0:count]
            # Request an indexer which could be an augmented version of an existing indexer.
            with ut.Timer(verbose=False) as t:
                memtrack.report('BEFORE AUGMENT')
                nnindexer_ = neighbor_index_cache.request_augmented_ibeis_nnindexer(qreq_, aid_list_)
                memtrack.report('AFTER AUGMENT')
            nnindexer_list.append(nnindexer_)
            addition_count_list.append(count)
            time_list_addition.append(t.ellapsed)
            tmp_cfgstr_list.append(nnindexer_.cfgstr)
            print('===============\n\n')
        print(ut.repr2(time_list_addition))
        print(ut.repr2(list(map(id, nnindexer_list))))
        print(ut.repr2(tmp_cfgstr_list))
        print(ut.repr2(list([nnindxer.cfgstr for nnindxer in nnindexer_list])))

        IS_SMALL = False

        if IS_SMALL:
            nnindexer_list = []
        reindex_label = 'Reindex'
        # go backwards for reindex
        _reindex_iter = list(range(initial + 1, max_num, addition_stride))[::-1]
        reindex_iter = ut.ProgressIter(_reindex_iter, lbl=reindex_label)
        time_list_reindex = []
        #time_list_reindex = []
        reindex_count_list = []

        for count in reindex_iter:
            print('\n+===PREDONE====================\n')
            # check only a single size for memory leaks
            #count = max_num // 16 + ((x % 6) * 1)
            #x += 1

            aid_list_ = all_randomize_daids_[0:count]
            # Call the same code, but force rebuilds
            memtrack.report('BEFORE REINDEX')
            with ut.Timer(verbose=False) as t:
                nnindexer_ = neighbor_index_cache.request_augmented_ibeis_nnindexer(
                    qreq_, aid_list_, force_rebuild=True, memtrack=memtrack)
            memtrack.report('AFTER REINDEX')
            ibs.print_cachestats_str()
            print('[nnindex.MEMCACHE] size(NEIGHBOR_CACHE) = %s' % (
                ut.get_object_size_str(neighbor_index_cache.NEIGHBOR_CACHE.items()),))
            print('[nnindex.MEMCACHE] len(NEIGHBOR_CACHE) = %s' % (
                len(neighbor_index_cache.NEIGHBOR_CACHE.items()),))
            print('[nnindex.MEMCACHE] size(UUID_MAP_CACHE) = %s' % (
                ut.get_object_size_str(neighbor_index_cache.UUID_MAP_CACHE),))
            print('totalsize(nnindexer) = ' + ut.get_object_size_str(nnindexer_))
            memtrack.report_type(neighbor_index_cache.NeighborIndex)
            ut.print_object_size_tree(nnindexer_, lbl='nnindexer_')
            if IS_SMALL:
                nnindexer_list.append(nnindexer_)
            reindex_count_list.append(count)
            time_list_reindex.append(t.ellapsed)
            #import cv2
            #import matplotlib as mpl
            #print(mem_top.mem_top(limit=30, width=120,
            #                      #exclude_refs=[cv2.__dict__, mpl.__dict__]
            #     ))
            print('L___________________\n\n\n')
        print(ut.repr2(time_list_reindex))
        if IS_SMALL:
            print(ut.repr2(list(map(id, nnindexer_list))))
            print(ut.repr2(list([nnindxer.cfgstr for nnindxer in nnindexer_list])))
    except KeyboardInterrupt:
        print('\n[train] Caught CTRL+C')
        resolution = ''
        from six.moves import input
        while not resolution.isdigit():
            print('\n[train] What do you want to do?')
            print('[train]     0 - Continue')
            print('[train]     1 - Embed')
            print('[train]  ELSE - Stop network training')
            resolution = input('[train] Resolution: ')
        resolution = int(resolution)
        # We have a resolution
        if resolution == 0:
            print('resuming training...')
        elif resolution == 1:
            ut.embed()

    import plottool_ibeis as pt

    next_fnum = iter(range(0, 1)).__next__  # Python 3: use __next__, not .next
    pt.figure(fnum=next_fnum())
    if len(addition_count_list) > 0:
        pt.plot2(addition_count_list, time_list_addition, marker='-o', equal_aspect=False,
                 x_label='num_annotations', label=addition_lbl + ' Time')

    if len(reindex_count_list) > 0:
        pt.plot2(reindex_count_list, time_list_reindex, marker='-o', equal_aspect=False,
                 x_label='num_annotations', label=reindex_label + ' Time')

    pt.set_figtitle('Augmented indexer experiment')

    pt.legend()
Example #32
def flann_add_time_experiment():
    """
    builds plot of number of annotations vs indexer build time.

    TODO: time experiment

    CommandLine:
        python -m ibeis.algo.hots._neighbor_experiment --test-flann_add_time_experiment --db PZ_MTEST --show
        python -m ibeis.algo.hots._neighbor_experiment --test-flann_add_time_experiment --db PZ_Master0 --show
        utprof.py -m ibeis.algo.hots._neighbor_experiment --test-flann_add_time_experiment --show

        valgrind --tool=memcheck --suppressions=valgrind-python.supp python -m ibeis.algo.hots._neighbor_experiment --test-flann_add_time_experiment --db PZ_MTEST --no-with-reindex

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots._neighbor_experiment import *  # NOQA
        >>> import ibeis
        >>> #ibs = ibeis.opendb('PZ_MTEST')
        >>> result = flann_add_time_experiment()
        >>> # verify results
        >>> print(result)
        >>> ut.show_if_requested()

    """
    import ibeis
    import utool as ut
    import numpy as np
    import pyflann
    import plottool_ibeis as pt
    import vtool as vt

    def make_flann_index(vecs, flann_params):
        flann = pyflann.FLANN()
        flann.build_index(vecs, **flann_params)
        return flann

    db = ut.get_argval('--db')
    ibs = ibeis.opendb(db=db)

    # Input
    if ibs.get_dbname() == 'PZ_MTEST':
        initial = 1
        reindex_stride = 16
        addition_stride = 4
        max_ceiling = 120
    elif ibs.get_dbname() == 'PZ_Master0':
        #ibs = ibeis.opendb(db='GZ_ALL')
        initial = 32
        reindex_stride = 32
        addition_stride = 16
        max_ceiling = 300001
    else:
        assert False
    #max_ceiling = 32
    all_daids = ibs.get_valid_aids()
    max_num = min(max_ceiling, len(all_daids))
    flann_params = vt.get_flann_params()

    # Output
    count_list,  time_list_reindex  = [], []
    count_list2, time_list_addition = [], []

    # Setup
    #all_randomize_daids_ = ut.deterministic_shuffle(all_daids[:])
    all_randomize_daids_ = all_daids
    # ensure all features are computed
    ibs.get_annot_vecs(all_randomize_daids_)

    def reindex_step(count, count_list, time_list_reindex):
        daids    = all_randomize_daids_[0:count]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        with ut.Timer(verbose=False) as t:
            flann = make_flann_index(vecs, flann_params)  # NOQA
        count_list.append(count)
        time_list_reindex.append(t.ellapsed)

    def addition_step(count, flann, count_list2, time_list_addition):
        daids = all_randomize_daids_[count:count + 1]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        with ut.Timer(verbose=False) as t:
            flann.add_points(vecs)
        count_list2.append(count)
        time_list_addition.append(t.ellapsed)

    def make_initial_index(initial):
        daids = all_randomize_daids_[0:initial + 1]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        flann = make_flann_index(vecs, flann_params)
        return flann

    WITH_REINDEX = not ut.get_argflag('--no-with-reindex')
    if WITH_REINDEX:
        # Reindex Part
        reindex_lbl = 'Reindexing'
        _reindex_iter = range(1, max_num, reindex_stride)
        reindex_iter = ut.ProgressIter(_reindex_iter, lbl=reindex_lbl, freq=1)
        for count in reindex_iter:
            reindex_step(count, count_list, time_list_reindex)

    # Add Part
    flann = make_initial_index(initial)
    addition_lbl = 'Addition'
    _addition_iter = range(initial + 1, max_num, addition_stride)
    addition_iter = ut.ProgressIter(_addition_iter, lbl=addition_lbl)
    for count in addition_iter:
        addition_step(count, flann, count_list2, time_list_addition)

    print('---')
    print('Reindex took %.2f seconds total' % sum(time_list_reindex))
    print('Addition took %.2f seconds total' % sum(time_list_addition))
    print('---')
    statskw = dict(precision=2, newlines=True)
    print('Reindex stats ' + ut.get_stats_str(time_list_reindex, **statskw))
    print('Addition stats ' + ut.get_stats_str(time_list_addition, **statskw))

    print('Plotting')

    #with pt.FigureContext:

    next_fnum = iter(range(0, 2)).__next__  # Python 3: use __next__, not .next
    pt.figure(fnum=next_fnum())
    if WITH_REINDEX:
        pt.plot2(count_list, time_list_reindex, marker='-o', equal_aspect=False,
                 x_label='num_annotations', label=reindex_lbl + ' Time', dark=False)

    #pt.figure(fnum=next_fnum())
    pt.plot2(count_list2, time_list_addition, marker='-o', equal_aspect=False,
             x_label='num_annotations', label=addition_lbl + ' Time')

    pt.legend()
Example #33
def testdata_ibs(defaultdb='testdb1'):
    import ibeis
    ibs = ibeis.opendb(defaultdb=defaultdb)
    config2_ = None  # qreq_.qparams
    return ibs, config2_
Example #34
def ggr_random_name_splits():
    """
    CommandLine:
        python -m ibeis.viz.viz_graph2 ggr_random_name_splits --show

    Ignore:
        sshfs -o idmap=user lev:/ ~/lev

    Example:
        >>> # DISABLE_DOCTEST GGR
        >>> from ibeis.viz.viz_graph2 import *  # NOQA
        >>> ggr_random_name_splits()
    """
    import guitool as gt
    import numpy as np
    import utool as ut
    gt.ensure_qtapp()
    #nid_list = ibs.get_valid_nids(filter_empty=True)
    import ibeis
    dbdir = '/media/danger/GGR/GGR-IBEIS'
    dbdir = dbdir if ut.checkpath(dbdir) else ut.truepath(
        '~/lev/media/danger/GGR/GGR-IBEIS')
    ibs = ibeis.opendb(dbdir=dbdir, allow_newdir=False)

    import datetime
    day1 = datetime.date(2016, 1, 30)
    day2 = datetime.date(2016, 1, 31)

    orig_filter_kw = {
        'multiple': None,
        #'view': ['right'],
        #'minqual': 'good',
        'is_known': True,
        'min_pername': 2,
    }
    orig_aids = ibs.filter_annots_general(filter_kw=ut.dict_union(
        orig_filter_kw, {
            'min_unixtime':
            ut.datetime_to_posixtime(ut.date_to_datetime(day1, 0.0)),
            'max_unixtime':
            ut.datetime_to_posixtime(ut.date_to_datetime(day2, 1.0)),
        }))
    orig_all_annots = ibs.annots(orig_aids)
    orig_unique_nids, orig_grouped_annots_ = orig_all_annots.group(
        orig_all_annots.nids)
    # Ensure we get everything
    orig_grouped_annots = [
        ibs.annots(aids_) for aids_ in ibs.get_name_aids(orig_unique_nids)
    ]

    # pip install quantumrandom
    if False:
        import quantumrandom
        data = quantumrandom.uint16()
        seed = data.sum()
        print('seed = %r' % (seed, ))

    orig_rand_idxs = ut.random_indexes(len(orig_grouped_annots), seed=3340258)
    orig_sample_size = 75
    random_annot_groups = ut.take(orig_grouped_annots, orig_rand_idxs)
    orig_annot_sample = random_annot_groups[:orig_sample_size]

    # Oops, the first filter was too loose; redo with the corrected one ----

    filter_kw = {
        'multiple': None,
        'view': ['right'],
        'minqual': 'good',
        'is_known': True,
        'min_pername': 2,
    }
    filter_kw_ = ut.dict_union(
        filter_kw, {
            'min_unixtime':
            ut.datetime_to_posixtime(ut.date_to_datetime(day1, 0.0)),
            'max_unixtime':
            ut.datetime_to_posixtime(ut.date_to_datetime(day2, 1.0)),
        })
    refiltered_sample = [
        ibs.filter_annots_general(annot.aids, filter_kw=filter_kw_)
        for annot in orig_annot_sample
    ]
    is_ok = (np.array(ut.lmap(len, refiltered_sample)) >= 2)
    ok_part_orig_sample = ut.compress(orig_annot_sample, is_ok)
    ok_part_orig_nids = [x.nids[0] for x in ok_part_orig_sample]

    # Now compute real sample
    aids = ibs.filter_annots_general(filter_kw=filter_kw_)
    all_annots = ibs.annots(aids)
    unique_nids, grouped_annots_ = all_annots.group(all_annots.nids)
    grouped_annots = grouped_annots_
    # Ensure we get everything
    #grouped_annots = [ibs.annots(aids_) for aids_ in ibs.get_name_aids(unique_nids)]

    pop = len(grouped_annots)
    pername_list = ut.lmap(len, grouped_annots)
    groups = ibeis.annots.AnnotGroups(grouped_annots, ibs)
    match_tags = [ut.unique(ut.flatten(t)) for t in groups.match_tags]
    tag_case_hist = ut.dict_hist(ut.flatten(match_tags))
    print('name_pop = %r' % (pop, ))
    print('Annots per Multiton Name' +
          ut.repr3(ut.get_stats(pername_list, use_median=True)))
    print('Name Tag Hist ' + ut.repr3(tag_case_hist))
    print('Percent Photobomb: %.2f%%' %
          (tag_case_hist['photobomb'] / pop * 100))
    print('Percent Split: %.2f%%' % (tag_case_hist['splitcase'] / pop * 100))

    # Remove the ok part from this sample
    remain_unique_nids = ut.setdiff(unique_nids, ok_part_orig_nids)
    remain_grouped_annots = [
        ibs.annots(aids_) for aids_ in ibs.get_name_aids(remain_unique_nids)
    ]

    sample_size = 75
    import vtool as vt
    vt.calc_sample_from_error_bars(.05, pop, conf_level=.95, prior=.05)

    remain_rand_idxs = ut.random_indexes(len(remain_grouped_annots),
                                         seed=3340258)
    remain_sample_size = sample_size - len(ok_part_orig_nids)
    remain_random_annot_groups = ut.take(remain_grouped_annots,
                                         remain_rand_idxs)
    remain_annot_sample = remain_random_annot_groups[:remain_sample_size]

    annot_sample_nofilter = ok_part_orig_sample + remain_annot_sample
    # Filter out all bad parts
    annot_sample_filter = [
        ibs.annots(ibs.filter_annots_general(annot.aids, filter_kw=filter_kw_))
        for annot in annot_sample_nofilter
    ]
    annot_sample = annot_sample_filter

    win = None
    from ibeis.viz import viz_graph2
    for annots in ut.InteractiveIter(annot_sample):
        if win is not None:
            win.close()
        win = viz_graph2.make_qt_graph_interface(ibs,
                                                 aids=annots.aids,
                                                 init_mode='rereview')
        print(win)

    sample_groups = ibeis.annots.AnnotGroups(annot_sample, ibs)

    flat_tags = [ut.unique(ut.flatten(t)) for t in sample_groups.match_tags]

    print('Using Split and Photobomb')
    is_positive = ['photobomb' in t or 'splitcase' in t for t in flat_tags]
    num_positive = sum(is_positive)
    vt.calc_error_bars_from_sample(sample_size,
                                   num_positive,
                                   pop,
                                   conf_level=.95)

    print('Only Photobomb')
    is_positive = ['photobomb' in t for t in flat_tags]
    num_positive = sum(is_positive)
    vt.calc_error_bars_from_sample(sample_size,
                                   num_positive,
                                   pop,
                                   conf_level=.95)

    print('Only SplitCase')
    is_positive = ['splitcase' in t for t in flat_tags]
    num_positive = sum(is_positive)
    vt.calc_error_bars_from_sample(sample_size,
                                   num_positive,
                                   pop,
                                   conf_level=.95)
Example #35
    def _precollect(self):
        """
        Sets up an ibs object with an aids_pool

        Example:
            >>> # DISABLE_DOCTEST
            >>> from ibeis.scripts.thesis import *
            >>> self = Chap3('humpbacks_fb')
            >>> self = Chap3('GZ_Master1')
            >>> self = Chap3('GIRM_Master1')
            >>> self = Chap3('PZ_MTEST')
            >>> self = Chap3('PZ_PB_RF_TRAIN')
            >>> self = Chap3('PZ_Master1')
            >>> self = Chap3('RotanTurtles')
            >>> self._precollect()

            >>> from ibeis.scripts.thesis import *
            >>> self = Chap4('PZ_Master1')
            >>> self._precollect()
        """
        import ibeis
        import numpy as np
        import utool as ut
        from ibeis.init import main_helpers
        self.dbdir = ibeis.sysres.lookup_dbdir(self.dbname)
        ibs = ibeis.opendb(dbdir=self.dbdir)
        if ibs.dbname.startswith('PZ_PB_RF_TRAIN'):
            aids = ibs.get_valid_aids()
        elif ibs.dbname.startswith('LF_ALL'):
            aids = ibs.get_valid_aids()
        elif ibs.dbname.startswith('PZ_Master'):
            # PZ_Master is too big to run in full.  Select a smaller sample.
            # Be sure to include photobomb and incomparable cases.
            aids = ibs.filter_annots_general(
                require_timestamp=True, species='primary', is_known=True,
                minqual='poor',
            )
            infr = ibeis.AnnotInference(ibs=ibs, aids=aids)
            infr.reset_feedback('staging', apply=True)
            minority_ccs = find_minority_class_ccs(infr)
            minority_aids = set(ut.flatten(minority_ccs))

            # We need to do our best to select a small sample here
            flags = ['left' in text for text in ibs.annots(aids).viewpoint_code]
            left_aids = ut.compress(aids, flags)

            majority_aids = set(ibs.filter_annots_general(
                left_aids, require_timestamp=True, species='primary',
                minqual='poor', require_quality=True, min_pername=2,
                max_pername=15
            ))
            # This produces 5720 annotations
            aids = sorted(majority_aids.union(minority_aids))
        else:
            aids = ibs.filter_annots_general(require_timestamp=True,
                                             is_known=True,
                                             species='primary',
                                             minqual='poor')

        if ibs.dbname.startswith('MantaMatcher'):
            # Remove some of the singletons for this db
            annots = ibs.annots(aids)
            names = annots.group2(annots.nids)
            multis = [aids for aids in names if len(aids) > 1]
            singles = [aids for aids in names if len(aids) == 1]
            rng = np.random.RandomState(3988708794)
            aids = ut.flatten(multis)
            aids += ut.shuffle(ut.flatten(singles), rng=rng)[0:358]

        # ibs.print_annot_stats(aids, prefix='P')
        main_helpers.monkeypatch_encounters(ibs, aids, minutes=30)
        print('post monkey patch')
        # if False:
        #     ibs.print_annot_stats(aids, prefix='P')
        self.ibs = ibs
        self.aids_pool = aids