Example #1
def load_or_make_qreq(ibs, qreq_vsmany_, qaid_chunk):
    if qreq_vsmany_ is None:
        qreq_vsmany_ = initialize_persistant_query_request(ibs, qaid_chunk)
    else:
        # set new query aids
        qreq_vsmany_.set_internal_qaids(qaid_chunk)
        # state based exemplars
        # daid_list = qreq_vsmany_.get_external_daids()
        #
        # VALID FOR MULTI_INDEXER ONLY
        # Force indexer reloading if the background process has completed;
        # we might get a shiny new indexer.
        force = neighbor_index_cache.check_background_process()
        qreq_vsmany_.load_indexer(force=force)
    return qreq_vsmany_
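
A minimal usage sketch for the helper above (not part of the original source): the driver loop, run_chunked_queries, and the chunksize default are hypothetical; ut.ichunks is assumed to be the utool chunking helper that the ibeis examples import as ut.

import utool as ut  # assumed available, as elsewhere in the ibeis examples

def run_chunked_queries(ibs, qaid_list, chunksize=32):
    # Reuse one persistent vsmany query request across all chunks.
    qreq_vsmany_ = None
    for qaid_chunk in ut.ichunks(qaid_list, chunksize):
        # First iteration builds the request; later ones update its qaids
        # and reload the indexer if a background build has finished.
        qreq_vsmany_ = load_or_make_qreq(ibs, qreq_vsmany_, qaid_chunk)
        # ... run the actual query with qreq_vsmany_ here (hypothetical) ...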
Example #2
def request_ibeis_mindexer(qreq_, index_method='multi', verbose=True):
    """

    CommandLine:
        python -m ibeis.algo.hots.multi_index --test-request_ibeis_mindexer:2

    Example0:
        >>> # SLOW_DOCTEST
        >>> from ibeis.algo.hots.multi_index import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(db='PZ_MTEST')
        >>> valid_aids = ibs.get_valid_aids()
        >>> daid_list = valid_aids[1:60]
        >>> cfgdict = dict(fg_on=False)
        >>> qreq_ = ibs.new_query_request(daid_list, daid_list, cfgdict=cfgdict)
        >>> index_method = 'multi'
        >>> mxer = request_ibeis_mindexer(qreq_, index_method)

    Example1:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.multi_index import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(db='PZ_Master0')
        >>> valid_aids = ibs.get_valid_aids()
        >>> daid_list = valid_aids[1:60]
        >>> cfgdict = dict(fg_on=False)
        >>> qreq_ = ibs.new_query_request(daid_list, daid_list, cfgdict=cfgdict)
        >>> index_method = 'multi'
        >>> mxer = request_ibeis_mindexer(qreq_, index_method)

    Example2:
        >>> # DISABLE_DOCTEST
        >>> # Test background reindex
        >>> from ibeis.algo.hots.multi_index import *  # NOQA
        >>> import ibeis
        >>> import time
        >>> ibs = ibeis.opendb(db='PZ_MTEST')
        >>> valid_aids = ibs.get_valid_aids()
        >>> # Remove all cached nnindexers
        >>> ibs.delete_flann_cachedir()
        >>> # This request should build a new nnindexer
        >>> daid_list = valid_aids[1:30]
        >>> cfgdict = dict(fg_on=False)
        >>> qreq_ = ibs.new_query_request(daid_list, daid_list, cfgdict=cfgdict)
        >>> index_method = 'multi'
        >>> mxer = request_ibeis_mindexer(qreq_, index_method)
        >>> ut.assert_eq(len(mxer.nn_indexer_list), 1, 'one subindexer')
        >>> # The next request should trigger a background process
        >>> # and build two subindexers
        >>> daid_list = valid_aids[1:60]
        >>> qreq_ = ibs.new_query_request(daid_list, daid_list, cfgdict=cfgdict)
        >>> index_method = 'multi'
        >>> mxer = request_ibeis_mindexer(qreq_, index_method)
        >>> # Do some work in the foreground to ensure that it doesn't block
        >>> # the background job
        >>> print('[FG] sleeping or doing a bit of compute')
        >>> # Takes about 15 seconds
        >>> with ut.Timer():
        ...     ut.enumerate_primes(int(9E4))
        >>> #time.sleep(10)
        >>> print('[FG] done sleeping')
        >>> ut.assert_eq(len(mxer.nn_indexer_list), 2, 'two subindexers')
        >>> # And this should build just one subindexer
        >>> daid_list = valid_aids[1:60]
        >>> qreq_ = ibs.new_query_request(daid_list, daid_list, cfgdict=cfgdict)
        >>> index_method = 'multi'
        >>> mxer = request_ibeis_mindexer(qreq_, index_method)
        >>> ut.assert_eq(len(mxer.nn_indexer_list), 1, 'one big subindexer')

    """
    min_reindex_thresh = qreq_.qparams.min_reindex_thresh
    max_subindexers = qreq_.qparams.max_subindexers

    daid_list = qreq_.get_internal_daids()
    print('[mindex] make MultiNeighborIndex over %d annots' % (len(daid_list),))
    print('[mindex] index_method=%r' % index_method)

    # Split annotations into groups according to index_method
    ibs = qreq_.ibs
    if index_method == 'name':
        # each group contains annotations with the same name
        num_indexers = 8
        aids_list, overflow_aids, num_bins = group_daids_for_indexing_by_name(
            ibs, daid_list, num_indexers, verbose)
    elif index_method == 'multi':
        neighbor_index_cache.check_background_process()
        # Use greedy set cover to get a list of nnindexers that are already built
        tup = neighbor_index_cache.group_daids_by_cached_nnindexer(
            qreq_, daid_list, min_reindex_thresh)
        uncovered_aids, covered_aids_list = tup
        # If the number of bins gets too big, do a reindex
        # in the background
        num_subindexers = len(covered_aids_list) + (len(uncovered_aids) > 1)
        if num_subindexers > max_subindexers:
            print('need to reindex something')
            if USE_FORGROUND_REINDEX:
                aids_list = [sorted(ut.flatten(covered_aids_list))]
                #ut.embed()
            else:
                neighbor_index_cache.request_background_nnindexer(qreq_, daid_list)
                aids_list = covered_aids_list
        else:
            aids_list = covered_aids_list
        if len(uncovered_aids) > 0:
            aids_list.append(uncovered_aids)
        num_bins = len(aids_list)
    else:
        raise AssertionError('unknown index_method=%r' % (index_method,))

    # Build a neighbor indexer for each
    nn_indexer_list = []
    #extra_indexes = []
    for tx, aids in enumerate(aids_list):
        print('[mindex] building forest %d/%d with %d aids' %
                (tx + 1, num_bins, len(aids)))
        if len(aids) > 0:
            # Don't bother shallow-copying qreq_ here;
            # just passing aids is enough
            nnindexer = neighbor_index_cache.request_memcached_ibeis_nnindexer(qreq_, aids)
            nn_indexer_list.append(nnindexer)
    #if len(unknown_aids) > 0:
    #    print('[mindex] building unknown forest')
    #    unknown_vecs_list = ibs.get_annot_vecs(overflow_aids, config2_=qreq_.get_internal_data_config2())
    #    unknown_index = NeighborIndex(overflow_aids, unknown_vecs_list)
    #    extra_indexes.append(unknown_index)
    ##print('[mindex] building normalizer forest')  # TODO
    #mxer.nn_indexer_list = nn_indexer_list
    #mxer.extra_indexes = extra_indexes
    #mxer.overflow_index = overflow_index
    #mxer.unknown_index = unknown_index
    mxer = MultiNeighborIndex(nn_indexer_list, min_reindex_thresh, max_subindexers)
    return mxer
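
The 'multi' branch above is the subtle part: it counts one subindexer per cached group plus one for the uncovered annotations, and either merges in the foreground or defers to a background reindex when that count exceeds max_subindexers. Below is a self-contained toy sketch of that grouping rule with illustrative names only; choose_groups is not an ibeis function.

def choose_groups(covered_aids_list, uncovered_aids, max_subindexers,
                  use_foreground_reindex):
    # One subindexer per cached (covered) group, plus one more if there
    # is more than one uncovered annotation.
    num_subindexers = len(covered_aids_list) + (len(uncovered_aids) > 1)
    if num_subindexers > max_subindexers:
        if use_foreground_reindex:
            # Merge every cached group into a single sorted group right now.
            merged = sorted(aid for grp in covered_aids_list for aid in grp)
            aids_list = [merged]
        else:
            # Keep the cached groups as-is; the real code instead requests
            # a background reindex over all daids at this point.
            aids_list = list(covered_aids_list)
    else:
        aids_list = list(covered_aids_list)
    # Uncovered annotations always get their own trailing group.
    if uncovered_aids:
        aids_list.append(list(uncovered_aids))
    return aids_list

# Three cached groups but a budget of two subindexers forces a merge:
print(choose_groups([[1, 2], [3], [4, 5]], [6, 7], 2, True))
# -> [[1, 2, 3, 4, 5], [6, 7]]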