Example #1
def filter_duplicate_acfgs(expanded_aids_list, acfg_list, acfg_name_list, verbose=None):
    """
    Removes configs with the same expanded aids list

    CommandLine:
        # The following will trigger this function:
        wbia -m wbia get_annotcfg_list:0 -a timectrl timectrl:view=left --db PZ_MTEST

    """
    from wbia.expt import annotation_configs

    if verbose is None:
        verbose = ut.VERBOSE
    acfg_list_ = []
    expanded_aids_list_ = []
    seen_ = ut.ddict(list)
    for acfg, (qaids, daids) in zip(acfg_list, expanded_aids_list):
        key = (ut.hashstr_arr27(qaids, 'qaids'), ut.hashstr_arr27(daids, 'daids'))
        if key in seen_:
            seen_[key].append(acfg)
            continue
        else:
            seen_[key].append(acfg)
            expanded_aids_list_.append((qaids, daids))
            acfg_list_.append(acfg)
    if verbose:
        duplicate_configs = dict(
            [(key_, val_) for key_, val_ in seen_.items() if len(val_) > 1]
        )
        if len(duplicate_configs) > 0:
            logger.info('The following configs produced duplicate annotation configs')
            for key, val in duplicate_configs.items():
                # Print the difference between the duplicate configs
                _tup = annotation_configs.compress_acfg_list_for_printing(val)
                nonvaried_compressed_dict, varied_compressed_dict_list = _tup
                logger.info('+--')
                logger.info('key = %r' % (key,))
                logger.info(
                    'duplicate_varied_cfgs = %s'
                    % (ut.repr2(varied_compressed_dict_list),)
                )
                logger.info(
                    'duplicate_nonvaried_cfgs = %s'
                    % (ut.repr2(nonvaried_compressed_dict),)
                )
                logger.info('L__')

        if verbose >= 1:
            logger.info(
                '[harn.help] parsed %d / %d unique annot configs'
                % (len(acfg_list_), len(acfg_list))
            )
        if verbose > 2:
            logger.info('[harn.help] parsed from: %r' % (acfg_name_list,))
    return expanded_aids_list_, acfg_list_
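The function above is essentially a first-seen-wins de-duplication keyed on a hash of the (qaids, daids) arrays. Below is a minimal standalone sketch of the same pattern; hashlib and the _aid_key helper are hypothetical stand-ins for ut.hashstr_arr27, and the toy configs are made up for illustration.

import hashlib
from collections import defaultdict

def _aid_key(qaids, daids):
    # Stand-in for ut.hashstr_arr27: reduce the two id sequences to a short key
    raw = repr((list(qaids), list(daids))).encode()
    return hashlib.sha1(raw).hexdigest()[:27]

def dedup_by_expanded_aids(acfg_list, expanded_aids_list):
    seen = defaultdict(list)
    kept_aids, kept_acfgs = [], []
    for acfg, (qaids, daids) in zip(acfg_list, expanded_aids_list):
        key = _aid_key(qaids, daids)
        seen[key].append(acfg)
        if len(seen[key]) == 1:  # first config with this expansion wins
            kept_aids.append((qaids, daids))
            kept_acfgs.append(acfg)
    return kept_aids, kept_acfgs

# Toy usage: two configs expand to the same aids, so only one survives
aids, cfgs = dedup_by_expanded_aids(
    [{'_cfgname': 'timectrl'}, {'_cfgname': 'timectrl:view=left'}],
    [([1, 2], [3, 4]), ([1, 2], [3, 4])])
assert len(cfgs) == 1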
Example #2
def get_flann_cfgstr(dpts,
                     flann_params,
                     cfgstr='',
                     use_params_hash=True,
                     use_data_hash=True):
    """

    CommandLine:
        python -m vtool.nearest_neighbors --test-get_flann_cfgstr

    Example:
        >>> # ENABLE_DOCTEST
        >>> from vtool.nearest_neighbors import *  # NOQA
        >>> rng = np.random.RandomState(1)
        >>> dpts = rng.randint(0, 255, (10, 128)).astype(np.uint8)
        >>> cache_dir = '.'
        >>> cfgstr = '_FEAT(alg=heshes)'
        >>> flann_params = get_kdtree_flann_params()
        >>> result = get_flann_cfgstr(dpts, flann_params, cfgstr)
        >>> print(result)
        _FEAT(alg=heshes)_FLANN(4kdtree)_DPTS((10,128)xxaotseonmfjkzcr)
    """
    flann_cfgstr = cfgstr
    if use_params_hash:
        flann_valsig = get_flann_params_cfgstr(flann_params)
        flann_cfgstr += '_FLANN(' + flann_valsig + ')'
    # Generate a unique filename for dpts and flann parameters
    if use_data_hash:
        # flann is dependent on the dpts
        data_hashstr = utool.hashstr_arr27(dpts, '_DPTS')
        flann_cfgstr += data_hashstr
    return flann_cfgstr
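The string returned here typically becomes part of a cache filename, so a stored FLANN index is rebuilt whenever either the descriptors or the FLANN parameters change. A rough standalone sketch of that idea follows; hashlib stands in for the utool hash helpers and the file layout is hypothetical.

import os
import hashlib
import numpy as np

def flann_cfgstr_sketch(dpts, flann_params, cfgstr=''):
    # Same composition as get_flann_cfgstr: feature tag + parameter signature + data hash
    params_sig = hashlib.sha1(repr(sorted(flann_params.items())).encode()).hexdigest()[:8]
    data_sig = hashlib.sha1(dpts.tobytes()).hexdigest()[:8]
    return cfgstr + '_FLANN(' + params_sig + ')_DPTS(' + data_sig + ')'

dpts = np.random.RandomState(1).randint(0, 255, (10, 128)).astype(np.uint8)
flann_params = {'algorithm': 'kdtree', 'trees': 4}
cfgstr = flann_cfgstr_sketch(dpts, flann_params, '_FEAT(alg=heshes)')
cache_fpath = os.path.join('.', 'flann_index' + cfgstr + '.flann')
# The index at cache_fpath is reused only while neither the descriptors nor
# the FLANN parameters change; otherwise the name, and hence the cache, misses.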
Example #3
def get_flann_cfgstr(dpts, flann_params, cfgstr='', use_params_hash=True,
                     use_data_hash=True):
    """

    CommandLine:
        python -m vtool.nearest_neighbors --test-get_flann_cfgstr

    Example:
        >>> # ENABLE_DOCTEST
        >>> from vtool.nearest_neighbors import *  # NOQA
        >>> rng = np.random.RandomState(1)
        >>> dpts = rng.randint(0, 255, (10, 128)).astype(np.uint8)
        >>> cache_dir = '.'
        >>> cfgstr = '_FEAT(alg=heshes)'
        >>> flann_params = get_kdtree_flann_params()
        >>> result = get_flann_cfgstr(dpts, flann_params, cfgstr)
        >>> print(result)
        _FEAT(alg=heshes)_FLANN(4kdtree)_DPTS((10,128)xxaotseonmfjkzcr)
    """
    flann_cfgstr = cfgstr
    if use_params_hash:
        flann_valsig = get_flann_params_cfgstr(flann_params)
        flann_cfgstr += '_FLANN(' + flann_valsig + ')'
    # Generate a unique filename for dpts and flann parameters
    if use_data_hash:
        # flann is dependent on the dpts
        data_hashstr = utool.hashstr_arr27(dpts, '_DPTS')
        flann_cfgstr += data_hashstr
    return flann_cfgstr
Example #4
File: base.py Project: SU-ECE-18-7/dtool
def _get_rootset_hashid(request, root_rowids, prefix):
    uuid_type = 'V'
    label = ''.join((prefix, uuid_type, 'UUIDS'))
    # Hack: allow general specification of uuid types
    uuid_list = request.depc.get_root_uuid(root_rowids)
    #uuid_hashid = ut.hashstr_arr27(uuid_list, label, pathsafe=True)
    uuid_hashid = ut.hashstr_arr27(uuid_list, label, pathsafe=False)
    return uuid_hashid
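For context, ut.hashstr_arr27 condenses a sequence into a short labeled hash string, which is what makes it convenient for building row-set identifiers like the one above. A hedged one-liner, assuming utool is importable; the exact output characters depend on the input data.

import utool as ut

# Output has the general shape '<label>((<len>)<short hash>)', as in the
# get_flann_cfgstr doctest above; exact characters vary with the data.
print(ut.hashstr_arr27(['uuid-one', 'uuid-two'], 'qVUUIDS', pathsafe=False))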
Example #5
File: base.py Project: Erotemic/dtool
def _get_rootset_hashid(request, root_rowids, prefix):
    uuid_type = 'V'
    label = ''.join((prefix, uuid_type, 'UUIDS'))
    # Hack: allow general specification of uuid types
    uuid_list = request.depc.get_root_uuid(root_rowids)
    #uuid_hashid = ut.hashstr_arr27(uuid_list, label, pathsafe=True)
    uuid_hashid = ut.hashstr_arr27(uuid_list, label, pathsafe=False)
    return uuid_hashid
Example #6
def filter_duplicate_acfgs(expanded_aids_list, acfg_list, acfg_name_list, verbose=ut.NOT_QUIET):
    """
    Removes configs with the same expanded aids list

    CommandLine:
        # The following will trigger this function:
        ibeis -e print_acfg -a timectrl timectrl:view=left --db PZ_MTEST

    """
    from ibeis.expt import annotation_configs
    acfg_list_ = []
    expanded_aids_list_ = []
    seen_ = ut.ddict(list)
    for acfg, (qaids, daids) in zip(acfg_list, expanded_aids_list):
        key = (ut.hashstr_arr27(qaids, 'qaids'), ut.hashstr_arr27(daids, 'daids'))
        if key in seen_:
            seen_[key].append(acfg)
            continue
        else:
            seen_[key].append(acfg)
            expanded_aids_list_.append((qaids, daids))
            acfg_list_.append(acfg)
    if verbose:
        duplicate_configs = dict(
            [(key_, val_) for key_, val_ in seen_.items() if len(val_) > 1])
        if len(duplicate_configs) > 0:
            print('The following configs produced duplicate annotation configs')
            for key, val in duplicate_configs.items():
                # Print the semantic difference between the duplicate configs
                _tup = annotation_configs.compress_acfg_list_for_printing(val)
                nonvaried_compressed_dict, varied_compressed_dict_list = _tup
                print('+--')
                print('key = %r' % (key,))
                print('duplicate_varied_cfgs = %s' % (
                    ut.list_str(varied_compressed_dict_list),))
                print('duplicate_nonvaried_cfgs = %s' % (
                    ut.dict_str(nonvaried_compressed_dict),))
                print('L__')

        print('[harn.help] parsed %d / %d unique annot configs from: %r' % (
            len(acfg_list_), len(acfg_list), acfg_name_list))
    return expanded_aids_list_, acfg_list_
Example #7
File: harness.py Project: whaozl/ibeis
def get_big_test_cache_info(ibs, cfgx2_qreq_):
    """
    Args:
        ibs (ibeis.IBEISController):
        cfgx2_qreq_ (dict):
    """
    if ut.is_developer():
        import ibeis
        repodir = dirname(ut.get_module_dir(ibeis))
        bt_cachedir = join(repodir, 'BIG_TEST_CACHE2')
    else:
        bt_cachedir = join(ibs.get_cachedir(), 'BIG_TEST_CACHE2')
        #bt_cachedir = './localdata/BIG_TEST_CACHE2'
    ut.ensuredir(bt_cachedir)
    bt_cachestr = ut.hashstr_arr27(
        [qreq_.get_cfgstr(with_input=True) for qreq_ in cfgx2_qreq_],
        ibs.get_dbname() + '_cfgs')
    bt_cachename = 'BIGTESTCACHE2'
    return bt_cachedir, bt_cachename, bt_cachestr
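The returned triple is meant for utool's on-disk cache helpers, exactly as make_single_testres further below uses it. A hedged sketch of that call pattern with a toy payload; it assumes ut.load_cache raises IOError on a miss, as the later example relies on, and the directory name is made up.

import utool as ut

cachedir = ut.ensuredir('./BIG_TEST_CACHE2_demo')  # hypothetical demo directory
cachename = 'BIGTESTCACHE2'
cachestr = ut.hashstr_arr27(['cfgstr1', 'cfgstr2'], 'demo_cfgs')
try:
    payload = ut.load_cache(cachedir, cachename, cachestr)
except IOError:
    payload = {'result': 'expensive test computation goes here'}  # stand-in payload
    ut.save_cache(cachedir, cachename, cachestr, payload)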
Example #8
File: harness.py Project: Erotemic/ibeis
def get_big_test_cache_info(ibs, cfgx2_qreq_):
    """
    Args:
        ibs (ibeis.IBEISController):
        cfgx2_qreq_ (dict):
    """
    if ut.is_developer():
        import ibeis
        repodir = dirname(ut.get_module_dir(ibeis))
        bt_cachedir = join(repodir, 'BIG_TEST_CACHE2')
    else:
        bt_cachedir = join(ibs.get_cachedir(), 'BIG_TEST_CACHE2')
        #bt_cachedir = './localdata/BIG_TEST_CACHE2'
    ut.ensuredir(bt_cachedir)
    bt_cachestr = ut.hashstr_arr27([
        qreq_.get_cfgstr(with_input=True)
        for qreq_ in cfgx2_qreq_],
        ibs.get_dbname() + '_cfgs')
    bt_cachename = 'BIGTESTCACHE2'
    return bt_cachedir, bt_cachename, bt_cachestr
Example #9
def print_acfg_list(acfg_list, expanded_aids_list=None, ibs=None,
                    combined=False, **kwargs):
    r"""
    Args:
        acfg_list (list):
        expanded_aids_list (list): (default = None)
        ibs (IBEISController):  ibeis controller object (default = None)
        combined (bool): (default = False)

    CommandLine:
        python -m ibeis.expt.annotation_configs --exec-print_acfg_list --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.expt.annotation_configs import *  # NOQA
        >>> import ibeis
        >>> acfg_list = '?'
        >>> expanded_aids_list = None
        >>> ibs = None
        >>> combined = False
        >>> result = print_acfg_list(acfg_list, expanded_aids_list, ibs, combined)
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    _tup = compress_acfg_list_for_printing(acfg_list)
    nonvaried_compressed_dict, varied_compressed_dict_list = _tup

    ut.colorprint('+=== <Info acfg_list> ===', 'white')
    #print('Printing acfg_list info. len(acfg_list) = %r' % (len(acfg_list),))
    print('non-varied aidcfg = ' + ut.dict_str(nonvaried_compressed_dict))
    seen_ = ut.ddict(list)

    # get default kwkeys for annot info
    if ibs is not None:
        annotstats_kw = kwargs.copy()
        kwkeys = ut.parse_func_kwarg_keys(ibs.get_annot_stats_dict)
        annotstats_kw.update(ut.argparse_dict(
            dict(zip(kwkeys, [None] * len(kwkeys))), only_specified=True))

    for acfgx in range(len(acfg_list)):
        acfg = acfg_list[acfgx]
        title = ('q_cfgname=' + acfg['qcfg']['_cfgname'] +
                 ' d_cfgname=' + acfg['dcfg']['_cfgname'])

        ut.colorprint('+--- acfg %d / %d -- %s ---- ' %
                      (acfgx + 1, len(acfg_list), title), 'lightgray')
        print('acfg = ' + ut.dict_str(varied_compressed_dict_list[acfgx],
                                      strvals=True))

        if expanded_aids_list is not None:
            qaids, daids = expanded_aids_list[acfgx]
            key = (ut.hashstr_arr27(qaids, 'qaids'),
                   ut.hashstr_arr27(daids, 'daids'))
            if key not in seen_:
                if ibs is not None:
                    seen_[key].append(acfgx)
                    annotconfig_stats_strs, _ = ibs.get_annotconfig_stats(
                        qaids, daids, verbose=True, combined=combined,
                        **annotstats_kw)
            else:
                dupindex = seen_[key]
                print('DUPLICATE of index %r' % (dupindex,))
                dupdict = varied_compressed_dict_list[dupindex[0]]
                print('DUP OF acfg = ' + ut.dict_str(dupdict, strvals=True))
    ut.colorprint('L___ </Info acfg_list> ___', 'white')
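Example #10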
def bigcache_vsone(qreq_, hyper_params):
    """
    Cached output of one-vs-one matches

        >>> from wbia.scripts.script_vsone import *  # NOQA
        >>> self = OneVsOneProblem()
        >>> qreq_ = self.qreq_
        >>> hyper_params = self.hyper_params
    """
    import vtool as vt
    import wbia

    # Get a set of training pairs
    ibs = qreq_.ibs
    cm_list = qreq_.execute()
    infr = wbia.AnnotInference.from_qreq_(qreq_, cm_list, autoinit=True)

    # Per query choose a set of correct, incorrect, and random training pairs
    aid_pairs_ = infr._cm_training_pairs(
        rng=np.random.RandomState(42), **hyper_params.pair_sample
    )

    aid_pairs_ = vt.unique_rows(np.array(aid_pairs_), directed=False).tolist()

    pb_aid_pairs_ = photobomb_samples(ibs)

    # TODO: try to add in more non-comparable samples
    aid_pairs_ = pb_aid_pairs_ + aid_pairs_
    aid_pairs_ = vt.unique_rows(np.array(aid_pairs_))

    # ======================================
    # Compute one-vs-one scores and local_measures
    # ======================================

    # Prepare lazy attributes for annotations
    qreq_ = infr.qreq_
    ibs = qreq_.ibs
    qconfig2_ = qreq_.extern_query_config2
    dconfig2_ = qreq_.extern_data_config2
    qannot_cfg = ibs.depc.stacked_config(None, 'featweight', qconfig2_)
    dannot_cfg = ibs.depc.stacked_config(None, 'featweight', dconfig2_)

    # Remove any pairs missing features
    if dannot_cfg == qannot_cfg:
        unique_annots = ibs.annots(np.unique(np.array(aid_pairs_)), config=dannot_cfg)
        bad_aids = unique_annots.compress(~(np.array(unique_annots.num_feats) > 0)).aids
        bad_aids = set(bad_aids)
    else:
        annots1_ = ibs.annots(ut.unique(ut.take_column(aid_pairs_, 0)), config=qannot_cfg)
        annots2_ = ibs.annots(ut.unique(ut.take_column(aid_pairs_, 1)), config=dannot_cfg)
        bad_aids1 = annots1_.compress(~(np.array(annots1_.num_feats) > 0)).aids
        bad_aids2 = annots2_.compress(~(np.array(annots2_.num_feats) > 0)).aids
        bad_aids = set(bad_aids1 + bad_aids2)
    subset_idxs = np.where(
        [not (a1 in bad_aids or a2 in bad_aids) for a1, a2 in aid_pairs_]
    )[0]
    # Keep only a random subset
    if hyper_params.subsample:
        rng = np.random.RandomState(3104855634)
        num_max = hyper_params.subsample
        if num_max < len(subset_idxs):
            subset_idxs = rng.choice(subset_idxs, size=num_max, replace=False)
            subset_idxs = sorted(subset_idxs)

    # Take the current selection
    aid_pairs = ut.take(aid_pairs_, subset_idxs)

    if True:
        # NEW WAY
        config = hyper_params.vsone_assign
        # TODO: ensure annot probs like chips and features can be appropriately
        # set via qreq_ config or whatever
        matches = infr.exec_vsone_subset(aid_pairs, config=config)
    else:
        query_aids = ut.take_column(aid_pairs, 0)
        data_aids = ut.take_column(aid_pairs, 1)
        # OLD WAY
        # Determine a unique set of annots per config
        configured_aids = ut.ddict(set)
        configured_aids[qannot_cfg].update(query_aids)
        configured_aids[dannot_cfg].update(data_aids)

        # Make efficient annot-object representation
        configured_obj_annots = {}
        for config, aids in configured_aids.items():
            annots = ibs.annots(sorted(list(aids)), config=config)
            configured_obj_annots[config] = annots

        annots1 = configured_obj_annots[qannot_cfg].loc(query_aids)
        annots2 = configured_obj_annots[dannot_cfg].loc(data_aids)

        # Get hash based on visual annotation appearance of each pair
        # as well as algorithm configurations used to compute those properties
        qvuuids = annots1.visual_uuids
        dvuuids = annots2.visual_uuids
        qcfgstr = annots1._config.get_cfgstr()
        dcfgstr = annots2._config.get_cfgstr()
        annots_cfgstr = ut.hashstr27(qcfgstr) + ut.hashstr27(dcfgstr)
        vsone_uuids = [
            ut.combine_uuids(uuids, salt=annots_cfgstr)
            for uuids in ut.ProgIter(
                zip(qvuuids, dvuuids), length=len(qvuuids), label='hashing ids'
            )
        ]

        # Combine into a big cache for the entire 1-v-1 matching run
        big_uuid = ut.hashstr_arr27(vsone_uuids, '', pathsafe=True)
        cacher = ut.Cacher('vsone_v7', cfgstr=str(big_uuid), appname='vsone_rf_train')

        cached_data = cacher.tryload()
        if cached_data is not None:
            # Caching doesn't work 100% for PairwiseMatch object, so we need to do
            # some postprocessing
            configured_lazy_annots = ut.ddict(dict)
            for config, annots in configured_obj_annots.items():
                annot_dict = configured_lazy_annots[config]
                for _annot in ut.ProgIter(annots.scalars(), label='make lazy dict'):
                    annot_dict[_annot.aid] = _annot._make_lazy_dict()

            # Extract pairs of annot objects (with shared caches)
            lazy_annots1 = ut.take(configured_lazy_annots[qannot_cfg], query_aids)
            lazy_annots2 = ut.take(configured_lazy_annots[dannot_cfg], data_aids)

            # Create a set of PairwiseMatches with the correct annot properties
            matches = [
                vt.PairwiseMatch(annot1, annot2)
                for annot1, annot2 in zip(lazy_annots1, lazy_annots2)
            ]

            # Updating a new matches dictionary ensures the annot1/annot2 properties
            # are set correctly
            for key, cached_matches in list(cached_data.items()):
                fixed_matches = [match.copy() for match in matches]
                for fixed, internal in zip(fixed_matches, cached_matches):
                    dict_ = internal.__dict__
                    ut.delete_dict_keys(dict_, ['annot1', 'annot2'])
                    fixed.__dict__.update(dict_)
                cached_data[key] = fixed_matches
        else:
            cached_data = vsone_(
                qreq_,
                query_aids,
                data_aids,
                qannot_cfg,
                dannot_cfg,
                configured_obj_annots,
                hyper_params,
            )
            cacher.save(cached_data)
        # key_ = 'SV_LNBNN'
        key_ = 'RAT_SV'
        # for key in list(cached_data.keys()):
        #     if key != 'SV_LNBNN':
        #         del cached_data[key]
        matches = cached_data[key_]
    return matches, infr
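The cached branch above goes through ut.Cacher, which keys a pickled payload on a cfgstr. A minimal hedged sketch of that pattern with a toy cfgstr and payload; it assumes tryload returns None on a miss, which is what the code above relies on.

import utool as ut

cfgstr = ut.hashstr_arr27(['visual-uuid-1', 'visual-uuid-2'], '', pathsafe=True)
cacher = ut.Cacher('vsone_demo', cfgstr=str(cfgstr), appname='vsone_rf_train')
cached = cacher.tryload()
if cached is None:
    cached = {'RAT_SV': []}  # stand-in for the real per-key match lists
    cacher.save(cached)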
Example #11
def print_acfg_list(acfg_list,
                    expanded_aids_list=None,
                    ibs=None,
                    combined=False,
                    **kwargs):
    r"""
    Args:
        acfg_list (list):
        expanded_aids_list (list): (default = None)
        ibs (IBEISController):  ibeis controller object (default = None)
        combined (bool): (default = False)

    CommandLine:
        python -m ibeis.expt.annotation_configs --exec-print_acfg_list --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.expt.annotation_configs import *  # NOQA
        >>> import ibeis
        >>> acfg_list = '?'
        >>> expanded_aids_list = None
        >>> ibs = None
        >>> combined = False
        >>> result = print_acfg_list(acfg_list, expanded_aids_list, ibs, combined)
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    _tup = compress_acfg_list_for_printing(acfg_list)
    nonvaried_compressed_dict, varied_compressed_dict_list = _tup

    ut.colorprint('+=== <Info acfg_list> ===', 'white')
    #print('Printing acfg_list info. len(acfg_list) = %r' % (len(acfg_list),))
    print('non-varied aidcfg = ' + ut.dict_str(nonvaried_compressed_dict))
    seen_ = ut.ddict(list)

    # get default kwkeys for annot info
    if ibs is not None:
        annotstats_kw = kwargs.copy()
        kwkeys = ut.parse_func_kwarg_keys(ibs.get_annot_stats_dict)
        annotstats_kw.update(
            ut.argparse_dict(dict(zip(kwkeys, [None] * len(kwkeys))),
                             only_specified=True))

    hashid_list = []
    for acfgx in range(len(acfg_list)):
        acfg = acfg_list[acfgx]
        title = ('q_cfgname=' + acfg['qcfg']['_cfgname'] + ' d_cfgname=' +
                 acfg['dcfg']['_cfgname'])

        ut.colorprint(
            '+--- acfg %d / %d -- %s ---- ' %
            (acfgx + 1, len(acfg_list), title), 'lightgray')
        print('acfg = ' +
              ut.dict_str(varied_compressed_dict_list[acfgx], strvals=True))

        if expanded_aids_list is not None:
            qaids, daids = expanded_aids_list[acfgx]
            key = (ut.hashstr_arr27(qaids,
                                    'qaids'), ut.hashstr_arr27(daids, 'daids'))
            if key not in seen_:
                if ibs is not None:
                    seen_[key].append(acfgx)
                    stats_, locals_ = ibs.get_annotconfig_stats(
                        qaids,
                        daids,
                        verbose=False,
                        combined=combined,
                        **annotstats_kw)
                    hashids = (stats_['qaid_stats']['qhashid'],
                               stats_['daid_stats']['dhashid'])
                    hashid_list.append(hashids)
                    stats_str2 = ut.dict_str(stats_,
                                             strvals=True,
                                             newlines=True,
                                             explicit=False,
                                             nobraces=False)
                    print('annot_config_stats = ' + stats_str2)
            else:
                dupindex = seen_[key]
                print('DUPLICATE of index %r' % (dupindex, ))
                dupdict = varied_compressed_dict_list[dupindex[0]]
                print('DUP OF acfg = ' + ut.dict_str(dupdict, strvals=True))
    print('hashid summary = ' + ut.list_str(hashid_list, nl=1))
    ut.colorprint('L___ </Info acfg_list> ___', 'white')
Example #12
def make_single_testres(
    ibs,
    qaids,
    daids,
    pipecfg_list,
    cfgx2_lbl,
    cfgdict_list,
    lbl,
    testnameid,
    use_cache=None,
    subindexer_partial=ut.ProgIter,
):
    """
    CommandLine:
        python -m wbia run_expt
    """
    cfgslice = None
    if cfgslice is not None:
        pipecfg_list = pipecfg_list[cfgslice]

    dbname = ibs.get_dbname()

    # if ut.NOT_QUIET:
    #     logger.info('[harn] Make single testres')

    cfgx2_qreq_ = [
        ibs.new_query_request(qaids, daids, verbose=False, query_cfg=pipe_cfg)
        for pipe_cfg in ut.ProgIter(
            pipecfg_list, lbl='Building qreq_', enabled=False)
    ]

    if use_cache is None:
        use_cache = USE_BIG_TEST_CACHE

    if use_cache:
        try:
            bt_cachedir = ut.ensuredir(
                (ibs.get_cachedir(), 'BULK_TEST_CACHE2'))
            cfgstr_list = [
                qreq_.get_cfgstr(with_input=True) for qreq_ in cfgx2_qreq_
            ]
            bt_cachestr = ut.hashstr_arr27(cfgstr_list,
                                           ibs.get_dbname() + '_cfgs')
            bt_cachename = 'BULKTESTCACHE2_v2'
            testres = ut.load_cache(bt_cachedir, bt_cachename, bt_cachestr)
            testres.cfgdict_list = cfgdict_list
            testres.cfgx2_lbl = cfgx2_lbl  # hack override
        except IOError:
            pass
        else:
            if ut.NOT_QUIET:
                ut.colorprint('[harn] single testres cache hit... returning',
                              'brightcyan')
            return testres

    if ibs.table_cache:
        # HACK
        prev_feat_cfgstr = None

    cfgx2_cmsinfo = []
    cfgiter = subindexer_partial(range(len(cfgx2_qreq_)),
                                 lbl='pipe config',
                                 freq=1,
                                 adjust=False)
    # Run each pipeline configuration
    for cfgx in cfgiter:
        qreq_ = cfgx2_qreq_[cfgx]
        cprint = ut.colorprint
        cprint('testnameid=%r' % (testnameid, ), 'green')
        cprint(
            'annot_cfgstr = %s' %
            (qreq_.get_cfgstr(with_input=True, with_pipe=False), ),
            'yellow',
        )
        cprint('pipe_cfgstr= %s' % (qreq_.get_cfgstr(with_data=False), ),
               'brightcyan')
        cprint('pipe_hashstr = %s' % (qreq_.get_pipe_hashid(), ), 'cyan')
        if DRY_RUN:
            continue

        indent_prefix = '[%s cfg %d/%d]' % (
            dbname,
            # cfgiter.count (doesnt work when quiet)
            (cfgiter.parent_index * cfgiter.length) + cfgx,
            cfgiter.length * cfgiter.parent_length,
        )

        with ut.Indenter(indent_prefix):
            # Run the test / read cache
            _need_compute = True
            if use_cache:
                # smaller cache for individual configuration runs
                st_cfgstr = qreq_.get_cfgstr(with_input=True)
                st_cachedir = ut.unixjoin(bt_cachedir, 'small_tests')
                st_cachename = 'smalltest'
                ut.ensuredir(st_cachedir)
                try:
                    cmsinfo = ut.load_cache(st_cachedir, st_cachename,
                                            st_cfgstr)
                except IOError:
                    _need_compute = True
                else:
                    _need_compute = False
            if _need_compute:
                assert not ibs.table_cache
                if ibs.table_cache:
                    if (prev_feat_cfgstr is not None
                            and prev_feat_cfgstr != qreq_.qparams.feat_cfgstr):
                        # Clear features to preserve memory
                        ibs.clear_table_cache()
                        # qreq_.ibs.print_cachestats_str()
                cm_list = qreq_.execute()
                cmsinfo = test_result.build_cmsinfo(cm_list, qreq_)
                # record previous feature configuration
                if ibs.table_cache:
                    prev_feat_cfgstr = qreq_.qparams.feat_cfgstr
                if use_cache:
                    ut.save_cache(st_cachedir, st_cachename, st_cfgstr,
                                  cmsinfo)
        if not NOMEMORY:
            # Store the results
            cfgx2_cmsinfo.append(cmsinfo)
        else:
            cfgx2_qreq_[cfgx] = None
    if ut.NOT_QUIET:
        ut.colorprint('[harn] Completed running test configurations', 'white')
    if DRY_RUN:
        logger.info('ran tests in dryrun mode.')
        return
    if NOMEMORY:
        logger.info('ran tests in memory savings mode. Cannot Print. exiting')
        return
    # Store all pipeline config results in a test result object
    testres = test_result.TestResult(pipecfg_list, cfgx2_lbl, cfgx2_cmsinfo,
                                     cfgx2_qreq_)
    testres.testnameid = testnameid
    testres.lbl = lbl
    testres.cfgdict_list = cfgdict_list
    testres.aidcfg = None
    if use_cache:
        try:
            ut.save_cache(bt_cachedir, bt_cachename, bt_cachestr, testres)
        except Exception as ex:
            ut.printex(ex, 'error saving testres cache', iswarning=True)
            if ut.SUPER_STRICT:
                raise
    return testres
def print_acfg_list(
    acfg_list,
    expanded_aids_list=None,
    ibs=None,
    combined=False,
    only_summary=False,
    **kwargs
):
    r"""
    Args:
        acfg_list (list):
        expanded_aids_list (list): (default = None)
        ibs (IBEISController):  wbia controller object (default = None)
        combined (bool): (default = False)

    CommandLine:
        python -m wbia.expt.annotation_configs --exec-print_acfg_list

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.expt.annotation_configs import *  # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb('testdb1')
        >>> a = ['default']
        >>> acfg_list, expanded_aids_list = wbia.expt.experiment_helpers.get_annotcfg_list(
        >>>     ibs, acfg_name_list=a, verbose=0)
        >>> combined = False
        >>> result = print_acfg_list(acfg_list, expanded_aids_list, ibs, combined)
        >>> print(result)
    """
    _tup = compress_acfg_list_for_printing(acfg_list)
    nonvaried_compressed_dict, varied_compressed_dict_list = _tup

    ut.colorprint('+=== <Info acfg_list> ===', 'white')
    # logger.info('Printing acfg_list info. len(acfg_list) = %r' % (len(acfg_list),))
    logger.info('non-varied aidcfg = ' + ut.repr2(nonvaried_compressed_dict))
    seen_ = ut.ddict(list)

    # get default kwkeys for annot info
    if ibs is not None:
        annotstats_kw = kwargs.copy()
        kwkeys = ut.parse_func_kwarg_keys(ibs.get_annot_stats_dict)
        annotstats_kw.update(
            ut.argparse_dict(dict(zip(kwkeys, [None] * len(kwkeys))), only_specified=True)
        )

    hashid_list = []
    for acfgx in range(len(acfg_list)):
        acfg = acfg_list[acfgx]
        title = (
            'q_cfgname='
            + acfg['qcfg']['_cfgname']
            + ' d_cfgname='
            + acfg['dcfg']['_cfgname']
        )

        if not only_summary:
            ut.colorprint(
                '+--- acfg %d / %d -- %s ---- ' % (acfgx + 1, len(acfg_list), title),
                'gray',
            )
            logger.info('acfg = ' + ut.repr2(varied_compressed_dict_list[acfgx], si=True))

        if expanded_aids_list is not None:
            qaids, daids = expanded_aids_list[acfgx]
            key = (ut.hashstr_arr27(qaids, 'qaids'), ut.hashstr_arr27(daids, 'daids'))
            if key not in seen_:
                if ibs is not None:
                    seen_[key].append(acfgx)
                    stats_ = ibs.get_annotconfig_stats(
                        qaids, daids, verbose=False, combined=combined, **annotstats_kw
                    )
                    hashids = (
                        stats_['qaid_stats']['qhashid'],
                        stats_['daid_stats']['dhashid'],
                    )
                    hashid_list.append(hashids)
                    stats_str2 = ut.repr2(
                        stats_, si=True, nl=True, explicit=False, nobraces=False
                    )
                    if not only_summary:
                        logger.info('annot_config_stats = ' + stats_str2)
            else:
                dupindex = seen_[key]
                dupdict = varied_compressed_dict_list[dupindex[0]]
                if not only_summary:
                    logger.info('DUPLICATE of index %r' % (dupindex,))
                    logger.info('DUP OF acfg = ' + ut.repr2(dupdict, si=True))
    logger.info('hashid summary = ' + ut.repr2(hashid_list, nl=1))
    ut.colorprint('L___ </Info acfg_list> ___', 'white')