Example #1
        def make_cm_very_old_tuple(qres_copy):
            assert ut.listfind(qres_copy.filtkey_list, filtkey) is None
            weight_filters = hstypes.WEIGHT_FILTERS
            weight_filtxs, nonweight_filtxs = special_query.index_partition(
                qres_copy.filtkey_list, weight_filters)

            aid2_fsv = {}
            aid2_fs = {}
            aid2_score = {}

            for new_fsv_vsone, daid in zip(newfsv_list, newscore_aids):
                #scorex_vsone = ut.listfind(qres_copy.filtkey_list, filtkey)
                #if scorex_vsone is None:
                # TODO: add spatial verification as a filter score
                # augment the vsone scores
                # TODO: parameterize
                weighted_ave_score = True
                if weighted_ave_score:
                    # weighted average scoring
                    new_fs_vsone = special_query.weighted_average_scoring(
                        new_fsv_vsone, weight_filtxs, nonweight_filtxs)
                else:
                    # product scoring
                    new_fs_vsone = special_query.product_scoring(new_fsv_vsone)
                new_score_vsone = new_fs_vsone.sum()
                aid2_fsv[daid] = new_fsv_vsone
                aid2_fs[daid] = new_fs_vsone
                aid2_score[daid] = new_score_vsone
            return aid2_fsv, aid2_fs, aid2_score
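
Both scoring branches above collapse each match's feature score vector (fsv) into one score per match. special_query.weighted_average_scoring and special_query.product_scoring themselves are not shown on this page; the following is only a hedged numpy sketch of one plausible reading, assuming new_fsv_vsone has shape (num_matches, num_filters) and that the weight columns act as per-match weights:

import numpy as np

def weighted_average_scoring_sketch(fsv, weight_filtxs, nonweight_filtxs):
    # Assumed semantics: the weight columns (e.g. fg weights) form a
    # per-match weight, which scales the average of the score columns.
    weights = fsv.take(weight_filtxs, axis=1).prod(axis=1)
    scores = fsv.take(nonweight_filtxs, axis=1).mean(axis=1)
    return weights * scores

def product_scoring_sketch(fsv):
    # Assumed semantics: multiply every filter column together per match.
    return fsv.prod(axis=1)

Either way, the per-match result is summed into a single new_score_vsone, exactly as the loop above does.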
Example #2
def get_new_qres_distinctiveness(qres_vsone, qres_vsmany, top_aids, filtkey):
    """
    gets the distinctiveness score from vsmany and applies it to vsone

    CommandLine:
        python -m ibeis.algo.hots.special_query --exec-get_new_qres_distinctiveness

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.special_query import *  # NOQA
        >>> ibs, valid_aids = testdata_special_query()
        >>> qaids = valid_aids[0:1]
        >>> daids = valid_aids[1:]
        >>> qaid = qaids[0]
        >>> filtkey = hstypes.FiltKeys.DISTINCTIVENESS
        >>> use_cache = False
        >>> # execute function
        >>> qaid2_qres_vsmany, qreq_vsmany_ = query_vsmany_initial(ibs, qaids, daids, use_cache)
        >>> vsone_query_pairs = build_vsone_shortlist(ibs, qaid2_qres_vsmany)
        >>> qaid2_qres_vsone, qreq_vsone_ = query_vsone_pairs(ibs, vsone_query_pairs, use_cache)
        >>> qreq_vsone_.load_score_normalizer()
        >>> qres_vsone = qaid2_qres_vsone[qaid]
        >>> qres_vsmany = qaid2_qres_vsmany[qaid]
        >>> top_aids = vsone_query_pairs[0][1]
        >>> # verify results
        >>> newfsv_list, newscore_aids = get_new_qres_distinctiveness(qres_vsone, qres_vsmany, top_aids, filtkey)
    """
    newfsv_list = []
    newscore_aids = []

    # make sure filter does not already exist
    scorex_vsone  = ut.listfind(qres_vsone.filtkey_list, filtkey)
    # Make new filtkey_list
    new_filtkey_list = qres_vsone.filtkey_list[:]
    new_filtkey_list.append(filtkey)
    newscore_aids = top_aids[:]
    for daid in top_aids:
        # Distinctiveness is mostly independent of the vsmany database results
        if daid not in qres_vsone.aid2_fm:  # or daid not in qres_vsmany.aid2_fm
            # no matches to work with
            continue
        if scorex_vsone is None:
            new_fsv_vsone = new_feature_score_dimension(qres_vsone, daid)
            assert len(new_filtkey_list) == len(new_fsv_vsone.T), 'filter length is not consistent'
        fm_vsone  = qres_vsone.aid2_fm[daid]
        qfx_vsone = fm_vsone.T[0]
        # Use vsmany as the distinctiveness
        # Get the distinctiveness score from the neighborhood
        # around each query point in the vsmany query result
        norm_squared_dist = qres_vsmany.qfx2_dist.T[-1].take(qfx_vsone)
        norm_dist = np.sqrt(norm_squared_dist)
        # FIXME: params not used
        # but this is probably deprecated anyway
        dcvs_power, dcvs_max_clip, dcvs_min_clip = 1.0, 1.0, 0.0
        dstncvs = distinctiveness_normalizer.compute_distinctiveness_from_dist(
            norm_dist, dcvs_power, dcvs_max_clip, dcvs_min_clip)
        # Copy the new scores into the last column of the new fsv vector
        new_fsv_vsone.T[-1][:] = dstncvs
        newfsv_list.append(new_fsv_vsone)
    return newfsv_list, newscore_aids
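
compute_distinctiveness_from_dist is defined in ibeis's distinctiveness_normalizer module and is not shown here. As a loudly hedged sketch, the parameter names (dcvs_power, dcvs_max_clip, dcvs_min_clip) suggest a clip-then-power mapping from normalizer distance to a [0, 1] distinctiveness value; the semantics below are assumptions, not the actual formula:

import numpy as np

def compute_distinctiveness_from_dist_sketch(norm_dist, power=1.0,
                                             max_clip=1.0, min_clip=0.0):
    # Assumption: a larger distance to the nearest normalizer neighbor
    # means a more distinctive keypoint; clip, rescale to [0, 1], power.
    dstncvs = np.clip(norm_dist, min_clip, max_clip) / max_clip
    return dstncvs ** power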
Example #3
def find_row_from_id(self, _id):
    """
    Given an id (like an ibeis rowid), find the row of this item.
    """
    children = self.get_children()
    id_list = [child.get_id() for child in children]
    row = ut.listfind(id_list, _id)
    return row
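
Every example on this page relies on the same ut.listfind contract: it returns the index of the first occurrence of an item, or None when the item is absent, which is why results are always checked against None before being used as an index. A pure-Python equivalent consistent with that usage:

def listfind(list_, item):
    # Return the index of the first occurrence of item in list_, or
    # None if item does not occur (mirrors how ut.listfind is used above)
    try:
        return list_.index(item)
    except ValueError:
        return None

For example, listfind(['a', 'b'], 'b') returns 1, while listfind(['a', 'b'], 'z') returns None.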
Example #4
def get_prefered_browser(pref_list=[], fallback=True):
    r"""
    Args:
        pref_list (list): browser preference names (default = [])
        fallback (bool): uses the system default if none of the preferences work (default = True)

    CommandLine:
        python -m utool.util_grabdata --test-get_prefered_browser

    Ignore:
        import webbrowser
        webbrowser._tryorder
        pref_list = ['chrome', 'firefox', 'google-chrome']
        pref_list = ['firefox', 'google-chrome']

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_grabdata import *  # NOQA
        >>> browser_preferences = ['firefox', 'chrome', 'safari']
        >>> fallback = True
        >>> browser = get_prefered_browser(browser_preferences, fallback)
        >>> result = ('browser = %s' % (str(browser),))
        >>> print(result)
        >>> ut.quit_if_noshow()
    """
    import webbrowser
    import utool as ut
    pref_list = ut.ensure_iterable(pref_list)
    error_list = []

    # Hack for finding chrome on win32
    if ut.WIN32:
        # http://stackoverflow.com/questions/24873302/webbrowser-chrome-exe-does-not-work
        win32_chrome_fpath = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe'
        win32_chrome_browsername = win32_chrome_fpath + ' %s'
        win32_map = {
            'chrome': win32_chrome_browsername,
            'google-chrome': win32_chrome_browsername,
        }
        for browsername, win32_browsername in win32_map.items():
            index = ut.listfind(pref_list, browsername)
            if index is not None:  # TODO: verify with ut.checkpath(win32_browsername)
                pref_list.insert(index + 1, win32_browsername)

    for browsername in pref_list:
        try:
            browser = webbrowser.get(browsername)
            return browser
        except webbrowser.Error as ex:
            error_list.append(ex)
            print(str(browsername) + ' failed. Reason: ' + str(ex))

    if fallback:
        browser = webbrowser
        return browser
    else:
        raise AssertionError('No browser meets preferences=%r. error_list=%r' %
                             (pref_list, error_list,))
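
The win32 hack leans on documented stdlib behavior: webbrowser.get() accepts either a registered browser name or a command line containing '%s', which it wraps in a GenericBrowser without checking that the executable exists. A minimal usage sketch (the path is illustrative):

import webbrowser

# a command-line string ending in ' %s', the form the win32 hack inserts
cmd = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'
browser = webbrowser.get(cmd)
browser.open('https://example.com')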
Example #5
def apply_new_qres_filter_scores(qreq_vsone_, qres_vsone, newfsv_list,
                                 newscore_aids, filtkey):
    r"""
    applies the new filter score vectors to a query result and updates the
    other scores

    Args:
        qres_vsone (QueryResult):  object of feature correspondences and scores
        newfsv_list (list):
        newscore_aids (?):
        filtkey (?):

    CommandLine:
        python -m ibeis.algo.hots.special_query --test-apply_new_qres_filter_scores

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.special_query import *  # NOQA
        >>> ibs, valid_aids = testdata_special_query()
        >>> qaids = valid_aids[0:1]
        >>> daids = valid_aids[1:]
        >>> qaid = qaids[0]
        >>> filtkey = hstypes.FiltKeys.DISTINCTIVENESS
        >>> use_cache = False
        >>> qaid2_qres_vsmany, qreq_vsmany_ = query_vsmany_initial(ibs, qaids, daids, use_cache)
        >>> vsone_query_pairs = build_vsone_shortlist(ibs, qaid2_qres_vsmany)
        >>> qaid2_qres_vsone, qreq_vsone_ = query_vsone_pairs(ibs, vsone_query_pairs, use_cache)
        >>> qreq_vsone_.load_score_normalizer()
        >>> qres_vsone = qaid2_qres_vsone[qaid]
        >>> qres_vsmany = qaid2_qres_vsmany[qaid]
        >>> top_aids = vsone_query_pairs[0][1]
        >>> newfsv_list, newscore_aids = get_new_qres_distinctiveness(qres_vsone, qres_vsmany, top_aids, filtkey)
        >>> apply_new_qres_filter_scores(qreq_vsone_, qres_vsone, newfsv_list, newscore_aids, filtkey)

    Ignore:
        qres_vsone.show_top(ibs, name_scoring=True)
        print(qres_vsone.get_inspect_str(ibs=ibs, name_scoring=True))

        print(qres_vsmany.get_inspect_str(ibs=ibs, name_scoring=True))

    """
    assert ut.listfind(qres_vsone.filtkey_list, filtkey) is None
    # HACK to update result cfgstr
    qres_vsone.filtkey_list.append(filtkey)
    qres_vsone.cfgstr = qreq_vsone_.get_cfgstr()
    # Find positions of weight filters and score filters
    # so we can apply a weighted average
    #numer_filters  = [hstypes.FiltKeys.LNBNN, hstypes.FiltKeys.RATIO]

    weight_filters = hstypes.WEIGHT_FILTERS
    weight_filtxs, nonweight_filtxs = vt.index_partition(
        qres_vsone.filtkey_list, weight_filters)

    for new_fsv_vsone, daid in zip(newfsv_list, newscore_aids):
        #scorex_vsone  = ut.listfind(qres_vsone.filtkey_list, filtkey)
        #if scorex_vsone is None:
        # TODO: add spatial verification as a filter score
        # augment the vsone scores
        # TODO: parameterize
        weighted_ave_score = True
        if weighted_ave_score:
            # weighted average scoring
            new_fs_vsone = vt.weighted_average_scoring(new_fsv_vsone,
                                                       weight_filtxs,
                                                       nonweight_filtxs)
        else:
            # product scoring
            new_fs_vsone = product_scoring(new_fsv_vsone)
        new_score_vsone = new_fs_vsone.sum()
        qres_vsone.aid2_fsv[daid] = new_fsv_vsone
        qres_vsone.aid2_fs[daid] = new_fs_vsone
        qres_vsone.aid2_score[daid] = new_score_vsone
        # FIXME: this is not how to compute new probability
        #if qres_vsone.aid2_prob is not None:
        #    qres_vsone.aid2_prob[daid] = qres_vsone.aid2_score[daid]

    # This is how to compute new probability
    if qreq_vsone_.qparams.score_normalization:
        # FIXME: TODO: Have unsupported scores be represented as Nones
        # while score normalizer is still being trained.
        normalizer = qreq_vsone_.normalizer
        daid2_score = qres_vsone.aid2_score
        score_list = list(six.itervalues(daid2_score))
        daid_list = list(six.iterkeys(daid2_score))
        prob_list = normalizer.normalize_score_list(score_list)
        daid2_prob = dict(zip(daid_list, prob_list))
        qres_vsone.aid2_prob = daid2_prob
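
The normalization step at the end is a batch transform keyed back to the same daids. A schematic round-trip with a stand-in normalizer (normalize_score_list belongs to the loaded score normalizer and is not reimplemented here):

daid2_score = {21: 4.2, 37: 1.1}  # hypothetical annotation id -> score
score_list = list(daid2_score.values())
daid_list = list(daid2_score.keys())
prob_list = [s / sum(score_list) for s in score_list]  # stand-in only
daid2_prob = dict(zip(daid_list, prob_list))
assert set(daid2_prob) == set(daid2_score)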
Example #6
def db_to_dbdir(db, allow_newdir=False, extra_workdirs=[]):
    """
    Implicitly gets dbdir. Searches for db inside the workdir.
    """
    if ut.VERBOSE:
        logger.info('[sysres] db_to_dbdir: db=%r, allow_newdir=%r' % (db, allow_newdir))

    if db is None:
        raise ValueError('db is None')

    work_dir = get_workdir()
    dbalias_dict = get_dbalias_dict()

    workdir_list = []
    for extra_dir in extra_workdirs:
        if exists(extra_dir):
            workdir_list.append(extra_dir)
    workdir_list.append(work_dir)  # TODO: Allow multiple workdirs?

    # Check all of your work directories for the database
    for _dir in workdir_list:
        dbdir = realpath(join(_dir, db))
        # Use db aliases
        if not exists(dbdir) and db.upper() in dbalias_dict:
            dbdir = join(_dir, dbalias_dict[db.upper()])
        if exists(dbdir):
            break

    # Create the database if newdbs are allowed in the workdir
    # logger.info('allow_newdir=%r' % allow_newdir)
    if allow_newdir:
        ut.ensuredir(dbdir, verbose=True)

    # Complain if the implicit dbdir does not exist
    if not exists(dbdir):
        logger.info('!!!')
        logger.info('[sysres] WARNING: db=%r not found in work_dir=%r' % (db, work_dir))
        fname_list = os.listdir(work_dir)
        lower_list = [fname.lower() for fname in fname_list]
        index = ut.listfind(lower_list, db.lower())
        if index is not None:
            logger.info('[sysres] WARNING: db capitalization seems to be off')
            if not ut.STRICT:
                logger.info('[sysres] attempting to fix it')
                db = fname_list[index]
                dbdir = join(work_dir, db)
                logger.info('[sysres] dbdir=%r' % dbdir)
                logger.info('[sysres] db=%r' % db)
        if not exists(dbdir):
            msg = '[sysres!] ERROR: Database does not exist and allow_newdir=False'
            logger.info('<!!!>')
            logger.info(msg)
            logger.info(
                '[sysres!] Here is a list of valid dbs: '
                + ut.indentjoin(sorted(fname_list), '\n  * ')
            )
            logger.info('[sysres!] dbdir=%r' % dbdir)
            logger.info('[sysres!] db=%r' % db)
            logger.info('[sysres!] work_dir=%r' % work_dir)
            logger.info('</!!!>')
            raise AssertionError(msg)
        logger.info('!!!')
    return dbdir
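
The capitalization recovery works because ut.listfind searches a lowercased copy while fname_list keeps the original casing, so the returned index maps back to the correctly cased directory name. A small demonstration using the listfind sketch shown earlier:

fname_list = ['PZ_MTEST', 'NAUT_test']  # hypothetical workdir contents
lower_list = [fname.lower() for fname in fname_list]
index = listfind(lower_list, 'naut_test')
assert index == 1 and fname_list[index] == 'NAUT_test'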
Example #7
def get_extern_distinctiveness(qreq_, cm, **kwargs):
    r"""
    Uses the distinctiveness normalizer class (which uses predownloaded models)
    to normalize the distinctiveness of the query keypoints.


    IDEA:
        because we have database points as well we can use the distance between
        normalizer of the query point and the normalizer of the database point.
        They should have a similar normalizer if they are a correct match AND
        nondistinctive.

    Args:
        qreq_ (QueryRequest):  query request object with hyper-parameters
        cm (QueryResult):  object of feature correspondences and scores

    Returns:
        tuple: (new_fsv_list, daid_list)

    CommandLine:
        python -m ibeis.algo.hots.special_query --test-get_extern_distinctiveness

    Example:
        >>> # SLOW_DOCTEST
        >>> from ibeis.algo.hots.special_query import *  # NOQA
        >>> import ibeis
        >>> # build test data
        >>> ibs = ibeis.opendb('testdb1')
        >>> daids = ibs.get_valid_aids(species=ibeis.const.TEST_SPECIES.ZEB_PLAIN)
        >>> qaids = daids[0:1]
        >>> cfgdict = dict(codename='vsone_unnorm_dist_ratio_extern_distinctiveness')
        >>> qreq_ = ibs.new_query_request(qaids, daids, cfgdict=cfgdict)
        >>> #qreq_.lazy_load()
        >>> cm = ibs.query_chips(qreq_=qreq_, use_cache=False, save_qcache=False)[0]
        >>> # execute function
        >>> (new_fsv_list, daid_list) = get_extern_distinctiveness(qreq_, cm)
        >>> # verify results
        >>> assert all([fsv.shape[1] == 1 + len(cm.filtkey_list) for fsv in new_fsv_list])
        >>> assert all([np.all(fsv.T[-1] >= 0) for fsv in new_fsv_list])
        >>> assert all([np.all(fsv.T[-1] <= 1) for fsv in new_fsv_list])
    """
    dstcnvs_normer = qreq_.dstcnvs_normer
    assert dstcnvs_normer is not None, 'must have loaded normalizer'
    filtkey = hstypes.FiltKeys.DISTINCTIVENESS
    # make sure filter does not already exist
    scorex_vsone = ut.listfind(cm.filtkey_list, filtkey)
    assert scorex_vsone is None, 'already applied distinctiveness'
    daid_list = list(six.iterkeys(cm.aid2_fsv))
    # Find the subset of features to get the distinctiveness of
    qfxs_list = [cm.aid2_fm[daid].T[0] for daid in daid_list]
    query_vecs = qreq_.ibs.get_annot_vecs(cm.qaid, config2_=qreq_.qparams)

    # There might be duplicate feature indexes in the list of feature index
    # lists. We can use this to perform neighbor lookups more efficiently by
    # only performing a single query per unique feature index. Utool does the
    # mapping for us.
    def rowid_distinctivness(unique_flat_qfx_list,
                             dstcnvs_normer=None,
                             query_vecs=None,
                             **kwargs):
        # Take only the unique vectors
        unique_flat_subvecs = query_vecs.take(unique_flat_qfx_list, axis=0)
        unique_flat_dstcvns = dstcnvs_normer.get_distinctiveness(
            unique_flat_subvecs, **kwargs)
        return unique_flat_dstcvns[:, None]

    aug_fsv_list = ut.unflat_unique_rowid_map(rowid_distinctivness,
                                              qfxs_list,
                                              dstcnvs_normer=dstcnvs_normer,
                                              query_vecs=query_vecs,
                                              **kwargs)

    if False:
        with ut.Timer('time1'):
            aug_fsv_list = ut.unflat_unique_rowid_map(
                rowid_distinctivness,
                qfxs_list,
                dstcnvs_normer=dstcnvs_normer,
                query_vecs=query_vecs)
        with ut.Timer('time2'):
            # Less efficient way to do this
            _vecs_list = [query_vecs.take(qfxs, axis=0) for qfxs in qfxs_list]
            _aug_fsv_list = [
                dstcnvs_normer.get_distinctiveness(_vecs)[:, None]
                for _vecs in _vecs_list
            ]
        isequal_list = [
            np.all(np.equal(*tup)) for tup in zip(aug_fsv_list, _aug_fsv_list)
        ]
        assert all(isequal_list), 'utool is broken'

    # Compute the distinctiveness as the augmenting score
    # ensure the shape is (X, 1)
    # Stack the new and augmenting scores
    old_fsv_list = [cm.aid2_fsv[daid] for daid in daid_list]
    new_fsv_list = list(map(np.hstack, zip(old_fsv_list, aug_fsv_list)))

    # FURTHER HACKS TO SCORING
    #if 'fg_power' in kwargs:
    for filtkey in hstypes.WEIGHT_FILTERS:
        key = filtkey + '_power'
        if key in kwargs:
            _power = kwargs[key]
            _index = ut.listfind(cm.filtkey_list, filtkey)
            for fsv in new_fsv_list:
                fsv.T[_index] **= _power
    #new_aid2_fsv = dict(zip(daid_list, new_fsv_list))
    return new_fsv_list, daid_list
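
ut.unflat_unique_rowid_map is the workhorse here: it deduplicates ids across the nested lists, evaluates the function once over the unique ids, and scatters the results back into the original nesting. A hedged numpy sketch of that contract (not utool's actual implementation):

import numpy as np

def unflat_unique_rowid_map_sketch(func, unflat_rowids, **kwargs):
    flat = np.hstack(unflat_rowids)
    unique_ids, inverse = np.unique(flat, return_inverse=True)
    # evaluate only once per unique id
    unique_vals = np.asarray(func(unique_ids, **kwargs))
    flat_vals = unique_vals.take(inverse, axis=0)
    # scatter the flat results back into the original nesting
    out, pos = [], 0
    for rowids in unflat_rowids:
        out.append(flat_vals[pos:pos + len(rowids)])
        pos += len(rowids)
    return out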
Example #8
def total_purge_developed_repo(repodir):
    r"""
    Outputs commands to help purge a repo

    Args:
        repodir (str): path to developed repository

    CommandLine:
        python -m utool.util_sysreq total_purge_developed_repo --show

    Ignore:
        repodir = ut.truepath('~/code/Lasagne')

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_sysreq import *  # NOQA
        >>> import utool as ut
        >>> repodir = ut.get_argval('--repodir', default=None)
        >>> result = total_purge_developed_repo(repodir)
    """
    assert repodir is not None
    import utool as ut
    import os
    repo = ut.util_git.Repo(dpath=repodir)

    user = os.environ['USER']

    fmtdict = dict(
        user=user,
        modname=repo.modname,
        reponame=repo.reponame,
        dpath=repo.dpath,
        global_site_pkgs=ut.get_global_dist_packages_dir(),
        local_site_pkgs=ut.get_local_dist_packages_dir(),
        venv_site_pkgs=ut.get_site_packages_dir(),
    )

    commands = [_.format(**fmtdict) for _ in [
        'pip uninstall {modname}',
        'sudo -H pip uninstall {modname}',
        'sudo pip uninstall {modname}',
        'easy_install -m {modname}',
        'cd {dpath} && python setup.py develop --uninstall',
        # If they still exist try chowning to current user
        'sudo chown -R {user}:{user} {dpath}',
    ]]
    print('Normal uninstall commands')
    print('\n'.join(commands))

    possible_link_paths = [_.format(**fmtdict) for _ in [
        '{dpath}/{modname}.egg-info',
        '{dpath}/build',
        '{venv_site_pkgs}/{reponame}.egg-info',
        '{local_site_pkgs}/{reponame}.egg-info',
    ]]
    from os.path import exists, basename
    existing_link_paths = [path for path in possible_link_paths if exists(path)]
    print('# Delete paths and eggs')
    for path in existing_link_paths:
        if ut.get_file_info(path)['owner'] != user:
            print('sudo /bin/rm -rf {path}'.format(path=path))
        else:
            print('/bin/rm -rf {path}'.format(path=path))
        #ut.delete(path)

    print('# Make sure nothing is in the easy install paths')
    easyinstall_paths = [_.format(**fmtdict) for _ in [
        '{venv_site_pkgs}/easy-install.pth',
        '{local_site_pkgs}/easy-install.pth',
    ]]
    for path in easyinstall_paths:
        if exists(path):
            easy_install_list = ut.readfrom(path, verbose=False).strip().split('\n')
            easy_install_list_ = [basename(p) for p in easy_install_list]
            index1 = ut.listfind(easy_install_list_, repo.reponame)
            index2 = ut.listfind(easy_install_list_, repo.modname)
            if index1 is not None or index2 is not None:
                print('Found at index1=%r, index2=%r' % (index1, index2))
                if ut.get_file_info(path)['owner'] != user:
                    print('sudo gvim {path}'.format(path=path))
                else:
                    print('gvim {path}'.format(path=path))

    checkcmds = [_.format(**fmtdict) for _ in [
        'python -c "import {modname}; print({modname}.__file__)"'
    ]]
    import sys
    assert repo.modname not in sys.modules
    print("# CHECK STATUS")
    for cmd in checkcmds:
        print(cmd)
Example #9
def update_normalizer(ibs, cm, qreq_, chosen_names):
    r"""
    adds new support data to the current normalizer

    FIXME: broken

    Args:
        ibs (IBEISController):  ibeis controller object
        cm (QueryResult):  object of feature correspondences and scores
        qreq_ (QueryRequest):  query request object with hyper-parameters
        chosen_names (list):

    Returns:
        tuple: (tp_rawscore, tn_rawscore)

    CommandLine:
        python -m ibeis.algo.hots.automated_matcher --test-update_normalizer

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.automated_matcher import *  # NOQA
        >>> ibs, qaid_chunk = testdata_automatch()
        >>> exemplar_aids = ibs.get_valid_aids(is_exemplar=True)
        >>> incinfo = {}
        >>> gen = generate_subquery_steps(ibs, qaid_chunk, incinfo)
        >>> item = six.next(gen)
        >>> ibs, cm, qreq_, incinfo = item
        >>> qreq_.load_score_normalizer()
        >>> # verify results
        >>> chosen_names = ['easy']
        >>> update_normalizer(ibs, cm, qreq_, chosen_names)
    """
    # Fixme: duplicate call to get_qres_name_choices
    if qreq_.normalizer is None:
        print(
            '[update_normalizer] NOT UPDATING. qreq_ has not loaded a score normalizer'
        )
        return
    if len(chosen_names) != 1:
        print(
            '[update_normalizer] NOT UPDATING. only updates using simple matches'
        )
        return
    qaid = cm.qaid
    choicetup = automatch_suggestor.get_qres_name_choices(ibs, cm)
    (sorted_nids, sorted_nscore, sorted_rawscore, sorted_aids,
     sorted_ascores) = choicetup
    # Get new True Negative support data for score normalization
    name = chosen_names[0]
    rank = ut.listfind(ibs.get_name_texts(sorted_nids), name)
    if rank is None:
        return
    nid = sorted_nids[rank]
    tp_rawscore = sorted_rawscore[rank]
    valid_falseranks = set(range(len(sorted_rawscore))) - set([rank])
    if len(valid_falseranks) > 0:
        tn_rank = min(valid_falseranks)
        tn_rawscore = sorted_rawscore[tn_rank][0]
    else:
        tn_rawscore = None
    #return tp_rawscore, tn_rawscore
    canupdate = tp_rawscore is not None and tn_rawscore is not None
    if canupdate:
        # TODO: UPDATE SCORE NORMALIZER HERE
        print('UPDATING! NORMALIZER')
        tp_labels = [ut.deterministic_uuid((qaid, nid))]
        tn_labels = [ut.deterministic_uuid((qaid, nid))]
        print(
            'new normalization example: tp_rawscore={}, tn_rawscore={}'.format(
                tp_rawscore, tn_rawscore))
        print('new normalization example: tp_labels={}, tn_labels={}'.format(
            tp_labels, tn_labels))
        tp_scores = [tp_rawscore]
        tn_scores = [tn_rawscore]
        qreq_.normalizer.add_support(tp_scores, tn_scores, tp_labels,
                                     tn_labels)
        qreq_.normalizer.retrain()
        species_text = '_'.join(qreq_.get_unique_species())  # HACK
        # TODO: figure out where to store normalizer
        qreq_.normalizer.save(
            ibs.get_local_species_scorenorm_cachedir(species_text))
    else:
        print('NOUPDATE! cannot update score normalization')
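
The TP/TN selection above can be traced with toy data: the chosen name's rank supplies the true-positive score, and the best-ranked other name supplies the true-negative support example (values below are hypothetical):

sorted_rawscore = [[9.0], [4.0], [2.5]]  # rawscores per ranked name
rank = 1                                 # rank of the chosen name
tp_rawscore = sorted_rawscore[rank]
valid_falseranks = set(range(len(sorted_rawscore))) - set([rank])
tn_rank = min(valid_falseranks)          # -> 0
tn_rawscore = sorted_rawscore[tn_rank][0]  # -> 9.0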
Example #10
def process_batch(X_train, y_train, batch_size, theano_fn, **kwargs):
    """
    compute the loss over all training batches

    Jon, if you get to this before I do, please fix. -J

    CommandLine:
        python -m ibeis_cnn.batch_processing --test-process_batch

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis_cnn.batch_processing import *  # NOQA
        >>> from ibeis_cnn import models
        >>> model = models.DummyModel(autoinit=True)
        >>> X_train, y_train = model.make_random_testdata()
        >>> theano_fn = create_unbuffered_iter_funcs_train2(model)
        >>> kwargs = {'X_is_cv2_native': False}
        >>> batch_size = model.batch_size
        >>> (loss, accu, prob_list, albl_list, pred_list, conf_list) = process_batch(X_train, y_train, batch_size, theano_fn)
        >>> result = str((loss, accu, prob_list, albl_list, pred_list, conf_list))
        >>> print(result)

    Ignore:
        Xb, yb = batch_iter.next()
        assert Xb.shape == (8, 1, 4, 4)
        yb.shape == (8,)
    """
    batch_output_list = []  # NOQA
    output_names = [op.variable.name for op in theano_fn.outputs]  # NOQA
    albl_list = []  # [a]ugmented [l]a[b]e[l] list
    show = False
    batch_iter = batch_iterator(X_train, y_train, batch_size, **kwargs)
    for Xb, yb in batch_iter:
        # Runs a batch through the network and updates the weights. Just returns what it did
        batch_output = theano_fn(Xb, yb)
        albl_list.append(yb)
        batch_output_list.append(batch_output)

        if show:
            # Print the network output for the first batch
            print('--------------')
            print(ut.list_str(zip(output_names, batch_output)))
            print('Correct: ', yb)
            print('--------------')
            show = False
    # Convert to numpy array

    # get outputs of each type

    def concatenate_hack(sequence, axis=0):
        # Hack around a numpy limitation: concatenate cannot join 0-dim
        # arrays (e.g. scalar losses), so fall back to hstack for those
        if len(sequence) > 0 and len(sequence[0].shape) == 0:
            res = np.hstack(sequence)
        else:
            res = np.concatenate(sequence, axis=axis)
        return res

    unstacked_output_gen = ([bop[count] for bop in batch_output_list] for count, name in enumerate(output_names))
    stacked_output_list  = [concatenate_hack(_output_unstacked, axis=-1) for _output_unstacked in unstacked_output_gen]

    albl_list = np.hstack(albl_list)

    # Calculate performance
    loss = accu = None
    loss_index = ut.listfind(output_names, 'loss_train')
    if loss_index is not None:
        loss_list = stacked_output_list[loss_index]
        loss = np.mean(loss_list)

    pred_list = None
    pred_index = ut.listfind(output_names, 'prediction')
    if pred_index is not None:
        pred_list = stacked_output_list[pred_index]
        accu = np.mean(np.equal(albl_list, pred_list))

    # FIXME: prob_list and conf_list are never computed (see docstring note)
    prob_list = conf_list = None

    # Return
    return loss, accu, prob_list, albl_list, pred_list, conf_list
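
concatenate_hack exists because np.concatenate refuses 0-dim arrays (such as the scalar loss emitted per batch), while np.hstack promotes them to 1-dim first:

import numpy as np

a, b = np.array(0.5), np.array(0.25)  # 0-dim arrays, one per batch
print(np.hstack([a, b]))              # [0.5  0.25]
# np.concatenate([a, b]) raises ValueError:
# zero-dimensional arrays cannot be concatenated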
Example #11
def test_vsone_errors(ibs, daids, qaid2_qres_vsmany, qaid2_qres_vsone, incinfo):
    """
    ibs1 = ibs_gt
    ibs2 = ibs (the current test database, sorry for the backwardness)
    aid1_to_aid2 - maps annots from ibs1 to ibs2
    """
    WASH                = 'wash'
    BOTH_FAIL           = 'both_fail'
    SINGLETON           = 'singleton'
    VSMANY_OUTPERFORMED = 'vsmany_outperformed'
    VSMANY_DOMINATES    = 'vsmany_dominates'
    VSMANY_WINS         = 'vsmany_wins'
    VSONE_WINS          = 'vsone_wins'
    if 'testcases' not in incinfo:
        testcases = {}
        for case in [WASH, BOTH_FAIL, SINGLETON, VSMANY_OUTPERFORMED,
                     VSMANY_DOMINATES, VSMANY_WINS, VSONE_WINS]:
            testcases[case] = []
        incinfo['testcases'] = testcases
    testcases = incinfo['testcases']

    def append_case(case, testtup):
        print('APPENDED NEW TESTCASE: case=%r' % (case,))
        print('* testup = %r' % (testtup,))
        print('* vuuid = %r' % (ibs_gt.get_annot_visual_uuids(testtup.qaid_t),))
        if ut.get_argflag('--interupt-case') and case in [VSMANY_WINS, VSMANY_DOMINATES]:
            incinfo['interactive'] = True
            incinfo['use_oracle'] = False
            incinfo['STOP'] = True
            if ut.is_developer():
                import plottool as pt  # NOQA
                IPYTHON_COMMANDS = """
                >>> %pylab qt4
                >>> from ibeis.viz.interact import interact_matches  # NOQA
                >>> #qres_vsmany = ut.search_stack_for_localvar('qres_vsmany')
                >>> ibs        = ut.search_stack_for_localvar('ibs')
                >>> daids      = ut.search_stack_for_localvar('daids')
                >>> qnid_t     = ut.search_stack_for_localvar('qnid_t')
                >>> qres_vsone = ut.search_stack_for_localvar('qres_vsone')
                >>> all_nids_t = ut.search_stack_for_localvar('all_nids_t')
                >>> # Find index in daids of correct matches
                >>> cm = qres_vsone
                >>> correct_indices = np.where(np.array(all_nids_t) == qnid_t)[0]
                >>> correct_aids2 = ut.take(daids, correct_indices)
                >>> qaid = cm.qaid
                >>> aid = correct_aids2[0]
                >>> # Report visual uuid for inclusion or exclusion in script
                >>> print(ibs.get_annot_visual_uuids([qaid, aid]))

                >>> # Feature match things
                >>> print('cm.filtkey_list = %r' % (cm.filtkey_list,))
                >>> fm  = cm.aid2_fm[aid]
                >>> fs  = cm.aid2_fs[aid]
                >>> fsv = cm.aid2_fsv[aid]
                >>> mx = 2
                >>> qfx, dfx = fm[mx]
                >>> fsv_single = fsv[mx]
                >>> fs_single = fs[mx]
                >>> # check featweights
                >>> data_featweights = ibs.get_annot_fgweights([aid])[0]
                >>> data_featweights[dfx]
                >>> fnum = pt.next_fnum()
                >>> bad_aid = cm.get_top_aids()[0]
                >>> #match_interaction_good = interact_matches.MatchInteraction(ibs, cm, aid, annot_mode=1)
                >>> #match_interaction_bad = interact_matches.MatchInteraction(ibs, cm, bad_aid)
                >>> match_interaction_good = cm.ishow_matches(ibs, aid, annot_mode=1, fnum=1)
                >>> match_interaction_bad = cm.ishow_matches(ibs, bad_aid, annot_mode=1, fnum=2)
                >>> match_interaction = match_interaction_good
                >>> self = match_interaction
                >>> self.select_ith_match(mx)
                >>> #impossible_to_match = len(correct_indices) > 0
                """
                y = """
                >>> from os.path import exists
                >>> import vtool as vt
                >>> import vtool.patch as vtpatch
                >>> import vtool.image as vtimage  # NOQA
                >>> chip_list = ibs.get_annot_chips([aid])
                >>> kpts_list = ibs.get_annot_kpts([aid])
                >>> probchip_fpath_list = ibs.get_probchip_fpath(aid)
                >>> probchip_list = [vt.imread(fpath, grayscale=True) if exists(fpath) else None for fpath in probchip_fpath_list]
                >>> kpts  = kpts_list[0]
                >>> probchip = probchip_list[0]
                >>> kp = kpts[dfx]
                >>> patch  = vt.get_warped_patch(probchip, kp)[0].astype(np.float32) / 255.0
                >>> fnum2 = pt.next_fnum()
                >>> pt.figure(fnum2, pnum=(1, 2, 1), doclf=True, docla=True)
                >>> pt.imshow(probchip)
                >>> pt.draw_kpts2([kp])
                >>> pt.figure(fnum2, pnum=(1, 2, 2))
                >>> pt.imshow(patch * 255)
                >>> pt.update()
                >>> vt.gaussian_average_patch(patch)
                >>> cm.ishow_top(ibs, annot_mode=1)
                """
                y
                ut.set_clipboard(IPYTHON_COMMANDS)
                #ut.spawn_delayed_ipython_paste()
                ut.embed(remove_pyqt_hook=False)
                IPYTHON_COMMANDS

        testcases[case].append(testtup)

    for qaid in six.iterkeys(qaid2_qres_vsmany):
        qres_vsmany = qaid2_qres_vsmany[qaid]
        qres_vsone  = qaid2_qres_vsone[qaid]
        nscoretup_vsone  = qres_vsone.get_nscoretup()
        nscoretup_vsmany = qres_vsmany.get_nscoretup()
        metatup = incinfo['metatup']
        ibs_gt, aid1_to_aid2 = metatup
        aid2_to_aid1 = ut.invert_dict(aid1_to_aid2)

        top_aids_vsone  = ut.get_list_column(nscoretup_vsone.sorted_aids, 0)
        top_aids_vsmany = ut.get_list_column(nscoretup_vsmany.sorted_aids, 0)
        # transform to groundtruth database coordinates
        all_daids_t = ut.dict_take_list(aid2_to_aid1, daids)
        top_aids_vsone_t  = ut.dict_take_list(aid2_to_aid1, top_aids_vsone)
        top_aids_vsmany_t = ut.dict_take_list(aid2_to_aid1, top_aids_vsmany)
        qaid_t = aid2_to_aid1[qaid]

        aids_tup = (all_daids_t, top_aids_vsone_t, top_aids_vsmany_t, (qaid_t,),)
        nids_tup = ibs_gt.unflat_map(ibs_gt.get_annot_nids, aids_tup)
        (all_nids_t, top_nids_vsone_t, top_nids_vsmany_t, (qnid_t,),) = nids_tup

        vsmany_rank  = ut.listfind(top_nids_vsmany_t, qnid_t)
        vsone_rank   = ut.listfind(top_nids_vsone_t, qnid_t)
        impossible_to_match = ut.listfind(all_nids_t, qnid_t) is None

        # Sort the test case into a category
        testtup = TestTup(qaid_t, qaid, vsmany_rank, vsone_rank)
        if vsmany_rank is None and vsone_rank is None and impossible_to_match:
            append_case(SINGLETON, testtup)
        elif vsmany_rank is not None and vsone_rank is None:
            if vsmany_rank < 5:
                append_case(VSMANY_DOMINATES, testtup)
            else:
                append_case(VSMANY_OUTPERFORMED, testtup)
        elif vsmany_rank is None:
            append_case(BOTH_FAIL, testtup)
        elif vsone_rank > vsmany_rank:
            append_case(VSMANY_WINS, testtup)
        elif vsone_rank < vsmany_rank:
            append_case(VSONE_WINS, testtup)
        elif vsone_rank == vsmany_rank:
            append_case(WASH, testtup)
        else:
            raise AssertionError('unenumerated case')
        count_dict = ut.count_dict_vals(testcases)
        print('+--')
        #print(ut.dict_str(testcases))
        print('---')
        print(ut.dict_str(count_dict))
        print('L__')
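
The case analysis above uses ut.listfind as a rank lookup: the position of the ground-truth name id in each ranked nid list, or None when the correct name was never retrieved. Schematically, with the listfind sketch from earlier and hypothetical nids:

qnid_t = 3
vsmany_rank = listfind([7, 3, 9], qnid_t)  # -> 1 (correct name at rank 1)
vsone_rank = listfind([7, 9], qnid_t)      # -> None (not retrieved)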