Example #1
def request_background_nnindexer(qreq_, daid_list):
    r""" FIXME: Duplicate code

    Args:
        qreq_ (QueryRequest):  query request object with hyper-parameters
        daid_list (list):

    CommandLine:
        python -m ibeis.algo.hots.neighbor_index_cache --test-request_background_nnindexer

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index_cache import *  # NOQA
        >>> import ibeis
        >>> # build test data
        >>> ibs = ibeis.opendb('testdb1')
        >>> daid_list = ibs.get_valid_aids(species=ibeis.const.TEST_SPECIES.ZEB_PLAIN)
        >>> qreq_ = ibs.new_query_request(daid_list, daid_list)
        >>> # execute function
        >>> request_background_nnindexer(qreq_, daid_list)
        >>> # verify results
        >>> result = str(False)
        >>> print(result)
    """
    global CURRENT_THREAD
    print('Requesting background reindex')
    if not can_request_background_nnindexer():
        # Make sure this function doesn't run if it is already running
        print('REQUEST DENIED')
        return False
    print('REQUEST ACCEPTED')
    daids_hashid = qreq_.ibs.get_annot_hashid_visual_uuid(daid_list)
    cfgstr = build_nnindex_cfgstr(qreq_, daid_list)
    cachedir = qreq_.ibs.get_flann_cachedir()
    # Save inverted cache uuid mappings for later augmentation
    min_reindex_thresh = qreq_.qparams.min_reindex_thresh
    # Grab the keypoint names and image ids before query time?
    flann_params = qreq_.qparams.flann_params
    # Get annot descriptors to index
    vecs_list, fgws_list, fxs_list = get_support_data(qreq_, daid_list)
    # Don't hash rowids when given enough info in nnindex_cfgstr
    flann_params['cores'] = 2  # Only use a few cores in the background
    # Build/Load the flann index
    uuid_map_fpath = get_nnindexer_uuid_map_fpath(qreq_)
    visual_uuid_list = qreq_.ibs.get_annot_visual_uuids(daid_list)

    # set temporary attribute for when the thread finishes
    finishtup = (uuid_map_fpath, daids_hashid, visual_uuid_list,
                 min_reindex_thresh)
    CURRENT_THREAD = ut.spawn_background_process(background_flann_func,
                                                 cachedir, daid_list,
                                                 vecs_list, fgws_list,
                                                 fxs_list, flann_params,
                                                 cfgstr)

    CURRENT_THREAD.finishtup = finishtup
Example #2
def request_background_nnindexer(qreq_, daid_list):
    r""" FIXME: Duplicate code

    Args:
        qreq_ (QueryRequest):  query request object with hyper-parameters
        daid_list (list):

    CommandLine:
        python -m ibeis.algo.hots.neighbor_index_cache --test-request_background_nnindexer

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index_cache import *  # NOQA
        >>> import ibeis
        >>> # build test data
        >>> ibs = ibeis.opendb('testdb1')
        >>> daid_list = ibs.get_valid_aids(species=ibeis.const.TEST_SPECIES.ZEB_PLAIN)
        >>> qreq_ = ibs.new_query_request(daid_list, daid_list)
        >>> # execute function
        >>> request_background_nnindexer(qreq_, daid_list)
        >>> # verify results
        >>> result = str(False)
        >>> print(result)
    """
    global CURRENT_THREAD
    print('Requesting background reindex')
    if not can_request_background_nnindexer():
        # Make sure this function doesn't run if it is already running
        print('REQUEST DENIED')
        return False
    print('REQUEST ACCEPTED')
    daids_hashid = qreq_.ibs.get_annot_hashid_visual_uuid(daid_list)
    cfgstr = build_nnindex_cfgstr(qreq_, daid_list)
    cachedir = qreq_.ibs.get_flann_cachedir()
    # Save inverted cache uuid mappings for later augmentation
    min_reindex_thresh = qreq_.qparams.min_reindex_thresh
    # Grab the keypoint names and image ids before query time?
    flann_params = qreq_.qparams.flann_params
    # Get annot descriptors to index
    vecs_list, fgws_list = get_support_data(qreq_, daid_list)
    # Don't hash rowids when given enough info in nnindex_cfgstr
    flann_params['cores'] = 2  # Only use a few cores in the background
    # Build/Load the flann index
    uuid_map_fpath   = get_nnindexer_uuid_map_fpath(qreq_)
    visual_uuid_list = qreq_.ibs.get_annot_visual_uuids(daid_list)

    # set temporary attribute for when the thread finishes
    finishtup = (uuid_map_fpath, daids_hashid, visual_uuid_list, min_reindex_thresh)
    CURRENT_THREAD = ut.spawn_background_process(
        background_flann_func, cachedir, daid_list, vecs_list, fgws_list,
        flann_params, cfgstr)

    CURRENT_THREAD.finishtup = finishtup
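Both versions above gate the request on can_request_background_nnindexer() and keep a single module-level handle (CURRENT_THREAD) to the running worker. Below is a minimal standalone sketch of that single-background-worker pattern; it uses threading from the standard library instead of ut.spawn_background_process, and every name in it (CURRENT_WORKER, background_work, request_background_work) is illustrative rather than ibeis API.

import threading

# Minimal sketch of the single-background-worker pattern above (not ibeis code)
CURRENT_WORKER = None  # module-level handle, analogous to CURRENT_THREAD


def can_request_background_work():
    # A new request is allowed only when no previous worker is still running
    return CURRENT_WORKER is None or not CURRENT_WORKER.is_alive()


def background_work(cfgstr):
    # Stand-in for the expensive job (building the flann index)
    print('building index for %r' % (cfgstr,))


def request_background_work(cfgstr, finishtup):
    global CURRENT_WORKER
    if not can_request_background_work():
        print('REQUEST DENIED')
        return False
    print('REQUEST ACCEPTED')
    CURRENT_WORKER = threading.Thread(target=background_work, args=(cfgstr,))
    CURRENT_WORKER.daemon = True
    # Stash whatever the caller needs once the worker finishes,
    # mirroring CURRENT_THREAD.finishtup above
    CURRENT_WORKER.finishtup = finishtup
    CURRENT_WORKER.start()
    return True

A second call to request_background_work while the first worker is still alive prints REQUEST DENIED and returns False, which is the same re-entrancy guard the two examples implement.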
Example #3
def request_diskcached_ibeis_nnindexer(qreq_,
                                       daid_list,
                                       nnindex_cfgstr=None,
                                       verbose=True,
                                       force_rebuild=False,
                                       memtrack=None,
                                       prog_hook=None):
    r"""
    Builds a new NeighborIndexer, which will try to use a disk-cached flann
    index if available

    Args:
        qreq_ (QueryRequest):  query request object with hyper-parameters
        daid_list (list):
        nnindex_cfgstr (?):
        verbose (bool):

    Returns:
        NeighborIndexer: nnindexer

    CommandLine:
        python -m ibeis.algo.hots.neighbor_index_cache --test-request_diskcached_ibeis_nnindexer

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index_cache import *  # NOQA
        >>> import ibeis
        >>> # build test data
        >>> ibs = ibeis.opendb('testdb1')
        >>> daid_list = ibs.get_valid_aids(species=ibeis.const.TEST_SPECIES.ZEB_PLAIN)
        >>> qreq_ = ibs.new_query_request(daid_list, daid_list)
        >>> nnindex_cfgstr = build_nnindex_cfgstr(qreq_, daid_list)
        >>> verbose = True
        >>> # execute function
        >>> nnindexer = request_diskcached_ibeis_nnindexer(qreq_, daid_list, nnindex_cfgstr, verbose)
        >>> # verify results
        >>> result = str(nnindexer)
        >>> print(result)
    """
    if nnindex_cfgstr is None:
        nnindex_cfgstr = build_nnindex_cfgstr(qreq_, daid_list)
    cfgstr = nnindex_cfgstr
    cachedir = qreq_.ibs.get_flann_cachedir()
    flann_params = qreq_.qparams.flann_params
    flann_params['checks'] = qreq_.qparams.checks
    #if memtrack is not None:
    #    memtrack.report('[PRE SUPPORT]')
    # Get annot descriptors to index
    if prog_hook is not None:
        prog_hook.set_progress(1, 3, 'Loading support data for indexer')
    print('[nnindex] Loading support data for indexer')
    vecs_list, fgws_list, fxs_list = get_support_data(qreq_, daid_list)
    if memtrack is not None:
        memtrack.report('[AFTER GET SUPPORT DATA]')
    try:
        nnindexer = new_neighbor_index(daid_list,
                                       vecs_list,
                                       fgws_list,
                                       fxs_list,
                                       flann_params,
                                       cachedir,
                                       cfgstr=cfgstr,
                                       verbose=verbose,
                                       force_rebuild=force_rebuild,
                                       memtrack=memtrack,
                                       prog_hook=prog_hook)
    except Exception as ex:
        ut.printex(ex,
                   True,
                   msg_='cannot build inverted index',
                   key_list=['ibs.get_infostr()'])
        raise
    # Record these uuids in the disk based uuid map so they can be augmented if
    # needed
    min_reindex_thresh = qreq_.qparams.min_reindex_thresh
    if len(daid_list) > min_reindex_thresh:
        uuid_map_fpath = get_nnindexer_uuid_map_fpath(qreq_)
        daids_hashid = get_data_cfgstr(qreq_.ibs, daid_list)
        visual_uuid_list = qreq_.ibs.get_annot_visual_uuids(daid_list)
        UUID_MAP_CACHE.write_uuid_map_dict(uuid_map_fpath, visual_uuid_list,
                                           daids_hashid)
        if memtrack is not None:
            memtrack.report('[AFTER WRITE_UUID_MAP]')
    return nnindexer
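request_diskcached_ibeis_nnindexer keys its on-disk cache by a configuration string (cfgstr), so a later request with the same configuration reloads the index instead of rebuilding it. Below is a generic sketch of that disk-cache pattern; the cfg dictionary, build_expensive_object, and the pickle-based storage are illustrative stand-ins, not the ibeis/FLANN cache format.

import hashlib
import os
import pickle


def build_expensive_object(cfg):
    # Stand-in for building the actual flann index from descriptors
    return {'cfg': cfg, 'data': list(range(10))}


def request_diskcached_object(cfg, cachedir, force_rebuild=False, verbose=True):
    # Derive a deterministic cache key from the configuration
    cfgstr = repr(sorted(cfg.items()))
    fname = 'cache_' + hashlib.sha1(cfgstr.encode('utf-8')).hexdigest() + '.pkl'
    fpath = os.path.join(cachedir, fname)
    if not force_rebuild and os.path.exists(fpath):
        if verbose:
            print('[cache] hit %s' % (fname,))
        with open(fpath, 'rb') as file_:
            return pickle.load(file_)
    if verbose:
        print('[cache] miss, rebuilding')
    obj = build_expensive_object(cfg)
    with open(fpath, 'wb') as file_:
        pickle.dump(obj, file_)
    return obj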
Example #4
def request_augmented_ibeis_nnindexer(qreq_,
                                      daid_list,
                                      verbose=True,
                                      use_memcache=True,
                                      force_rebuild=False,
                                      memtrack=None):
    r"""
    DO NOT USE. THIS FUNCTION CAN CURRENTLY CAUSE A SEGFAULT

    Tries to give you an indexer for the requested daids using the least
    amount of computation possible, by loading and adding to a partially
    built nnindex if possible; if that fails, it falls back to
    request_memcache.

    Args:
        qreq_ (QueryRequest):  query request object with hyper-parameters
        daid_list (list):

    Returns:
        NeighborIndexer: nnindexer

    CommandLine:
        python -m ibeis.algo.hots.neighbor_index_cache --test-request_augmented_ibeis_nnindexer

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index_cache import *  # NOQA
        >>> import ibeis
        >>> # build test data
        >>> ZEB_PLAIN = ibeis.const.TEST_SPECIES.ZEB_PLAIN
        >>> ibs = ibeis.opendb('testdb1')
        >>> use_memcache, max_covers, verbose = True, None, True
        >>> daid_list = ibs.get_valid_aids(species=ZEB_PLAIN)[0:6]
        >>> qreq_ = ibs.new_query_request(daid_list, daid_list)
        >>> qreq_.qparams.min_reindex_thresh = 1
        >>> min_reindex_thresh = qreq_.qparams.min_reindex_thresh
        >>> # CLEAR CACHE for clean test
        >>> clear_uuid_cache(qreq_)
        >>> # LOAD 3 AIDS INTO CACHE
        >>> aid_list = ibs.get_valid_aids(species=ZEB_PLAIN)[0:3]
        >>> # Should fallback
        >>> nnindexer = request_augmented_ibeis_nnindexer(qreq_, aid_list)
        >>> # assert the fallback
        >>> uncovered_aids, covered_aids_list = group_daids_by_cached_nnindexer(
        ...     qreq_, daid_list, min_reindex_thresh, max_covers)
        >>> result2 = uncovered_aids, covered_aids_list
        >>> ut.assert_eq(result2, ([4, 5, 6], [[1, 2, 3]]), 'pre augment')
        >>> # Should augment
        >>> nnindexer = request_augmented_ibeis_nnindexer(qreq_, daid_list)
        >>> uncovered_aids, covered_aids_list = group_daids_by_cached_nnindexer(
        ...     qreq_, daid_list, min_reindex_thresh, max_covers)
        >>> result3 = uncovered_aids, covered_aids_list
        >>> ut.assert_eq(result3, ([], [[1, 2, 3, 4, 5, 6]]), 'post augment')
        >>> # Should fallback
        >>> nnindexer2 = request_augmented_ibeis_nnindexer(qreq_, daid_list)
        >>> assert nnindexer is nnindexer2
    """
    global NEIGHBOR_CACHE
    min_reindex_thresh = qreq_.qparams.min_reindex_thresh
    if not force_rebuild:
        new_daid_list, covered_aids_list = group_daids_by_cached_nnindexer(
            qreq_, daid_list, min_reindex_thresh, max_covers=1)
        can_augment = (len(covered_aids_list) > 0 and
                       not ut.list_set_equal(covered_aids_list[0], daid_list))
    else:
        can_augment = False
    if verbose:
        print('[aug] Requesting augmented nnindexer')
    if can_augment:
        covered_aids = covered_aids_list[0]
        if verbose:
            print('[aug] Augmenting index %r old daids with %d new daids' %
                  (len(covered_aids), len(new_daid_list)))
        # Load the base covered indexer
        # THIS SHOULD LOAD NOT REBUILD IF THE UUIDS ARE COVERED
        base_nnindexer = request_memcached_ibeis_nnindexer(
            qreq_, covered_aids, verbose=verbose, use_memcache=use_memcache)
        # Remove this indexer from the memcache because we are going to change it
        if base_nnindexer.cfgstr in NEIGHBOR_CACHE:
            print('Removing key from memcache')
            NEIGHBOR_CACHE[base_nnindexer.cfgstr] = None
            del NEIGHBOR_CACHE[base_nnindexer.cfgstr]

        support_data = get_support_data(qreq_, new_daid_list)
        (new_vecs_list, new_fgws_list, new_fxs_list) = support_data
        base_nnindexer.add_support(new_daid_list,
                                   new_vecs_list,
                                   new_fgws_list,
                                   new_fxs_list,
                                   verbose=True)
        # FIXME: pointer issues
        nnindexer = base_nnindexer
        # Change to the new cfgstr
        nnindex_cfgstr = build_nnindex_cfgstr(qreq_, daid_list)
        nnindexer.cfgstr = nnindex_cfgstr
        cachedir = qreq_.ibs.get_flann_cachedir()
        nnindexer.save(cachedir)
        # Write to inverse uuid
        if len(daid_list) > min_reindex_thresh:
            uuid_map_fpath = get_nnindexer_uuid_map_fpath(qreq_)
            daids_hashid = get_data_cfgstr(qreq_.ibs, daid_list)
            visual_uuid_list = qreq_.ibs.get_annot_visual_uuids(daid_list)
            UUID_MAP_CACHE.write_uuid_map_dict(uuid_map_fpath,
                                               visual_uuid_list, daids_hashid)
        # Write to memcache
        if ut.VERBOSE:
            print('[aug] Wrote to memcache=%r' % (nnindex_cfgstr, ))
        NEIGHBOR_CACHE[nnindex_cfgstr] = nnindexer
        return nnindexer
    else:
        #if ut.VERBOSE:
        if verbose:
            print('[aug] Nothing to augment, fallback to memcache')
        # Fallback
        nnindexer = request_memcached_ibeis_nnindexer(
            qreq_,
            daid_list,
            verbose=verbose,
            use_memcache=use_memcache,
            force_rebuild=force_rebuild,
            memtrack=memtrack)
        return nnindexer
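The augmented request above looks for a cached indexer that already covers a subset of the requested daids; if it finds one, it adds only the uncovered annotations via add_support, and otherwise falls back to the memcached builder. The sketch below reduces that decision to plain sets and dictionaries; CACHE, build_index, and add_support here are illustrative stand-ins, not ibeis API.

CACHE = {}  # frozenset of ids -> index (stand-in for NEIGHBOR_CACHE)


def build_index(ids):
    # Stand-in for a full (expensive) index build
    return {'ids': set(ids)}


def add_support(index, new_ids):
    # Stand-in for NeighborIndexer.add_support: extend the index in place
    index['ids'].update(new_ids)
    return index


def request_augmented_index(ids):
    wanted = frozenset(ids)
    if wanted in CACHE:
        # Exact hit: same request seen before, nothing to augment or build
        return CACHE[wanted]
    # Look for a cached index covering a strict subset of the request
    covered_key = next((key for key in CACHE if key < wanted), None)
    if covered_key is not None:
        # Augment: reuse the covered base, add only the uncovered ids
        base = CACHE.pop(covered_key)
        index = add_support(base, wanted - covered_key)
    else:
        # Fallback: nothing to augment, build the whole index
        index = build_index(wanted)
    CACHE[wanted] = index
    return index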
Example #5
def request_diskcached_ibeis_nnindexer(qreq_, daid_list, nnindex_cfgstr=None, verbose=True, force_rebuild=False, memtrack=None):
    r"""
    Builds a new NeighborIndexer, which will try to use a disk-cached flann
    index if available

    Args:
        qreq_ (QueryRequest):  query request object with hyper-parameters
        daid_list (list):
        nnindex_cfgstr (?):
        verbose (bool):

    Returns:
        NeighborIndexer: nnindexer

    CommandLine:
        python -m ibeis.algo.hots.neighbor_index_cache --test-request_diskcached_ibeis_nnindexer

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index_cache import *  # NOQA
        >>> import ibeis
        >>> # build test data
        >>> ibs = ibeis.opendb('testdb1')
        >>> daid_list = ibs.get_valid_aids(species=ibeis.const.TEST_SPECIES.ZEB_PLAIN)
        >>> qreq_ = ibs.new_query_request(daid_list, daid_list)
        >>> nnindex_cfgstr = build_nnindex_cfgstr(qreq_, daid_list)
        >>> verbose = True
        >>> # execute function
        >>> nnindexer = request_diskcached_ibeis_nnindexer(qreq_, daid_list, nnindex_cfgstr, verbose)
        >>> # verify results
        >>> result = str(nnindexer)
        >>> print(result)
    """
    if nnindex_cfgstr is None:
        nnindex_cfgstr = build_nnindex_cfgstr(qreq_, daid_list)
    cfgstr = nnindex_cfgstr
    cachedir     = qreq_.ibs.get_flann_cachedir()
    flann_params = qreq_.qparams.flann_params
    flann_params['checks'] = qreq_.qparams.checks
    #if memtrack is not None:
    #    memtrack.report('[PRE SUPPORT]')
    # Get annot descriptors to index
    print('[nnindex] Loading support data to build diskcached indexer')
    vecs_list, fgws_list = get_support_data(qreq_, daid_list)
    if memtrack is not None:
        memtrack.report('[AFTER GET SUPPORT DATA]')
    try:
        nnindexer = new_neighbor_index(
            daid_list, vecs_list, fgws_list, flann_params, cachedir,
            cfgstr=cfgstr, verbose=verbose, force_rebuild=force_rebuild, memtrack=memtrack)
    except Exception as ex:
        ut.printex(ex, True, msg_='cannot build inverted index',
                        key_list=['ibs.get_infostr()'])
        raise
    # Record these uuids in the disk based uuid map so they can be augmented if
    # needed
    min_reindex_thresh = qreq_.qparams.min_reindex_thresh
    if len(daid_list) > min_reindex_thresh:
        uuid_map_fpath = get_nnindexer_uuid_map_fpath(qreq_)
        daids_hashid   = get_data_cfgstr(qreq_.ibs, daid_list)
        visual_uuid_list = qreq_.ibs.get_annot_visual_uuids(daid_list)
        UUID_MAP_CACHE.write_uuid_map_dict(uuid_map_fpath, visual_uuid_list, daids_hashid)
        if memtrack is not None:
            memtrack.report('[AFTER WRITE_UUID_MAP]')
    return nnindexer
Example #6
def request_augmented_ibeis_nnindexer(qreq_, daid_list, verbose=True,
                                      use_memcache=True, force_rebuild=False, memtrack=None):
    r"""
    DO NOT USE. THIS FUNCTION CAN CURRENTLY CAUSE A SEGFAULT

    Tries to give you an indexer for the requested daids using the least
    amount of computation possible, by loading and adding to a partially
    built nnindex if possible; if that fails, it falls back to
    request_memcache.

    Args:
        qreq_ (QueryRequest):  query request object with hyper-parameters
        daid_list (list):

    Returns:
        NeighborIndexer: nnindexer

    CommandLine:
        python -m ibeis.algo.hots.neighbor_index_cache --test-request_augmented_ibeis_nnindexer

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index_cache import *  # NOQA
        >>> import ibeis
        >>> # build test data
        >>> ZEB_PLAIN = ibeis.const.TEST_SPECIES.ZEB_PLAIN
        >>> ibs = ibeis.opendb('testdb1')
        >>> use_memcache, max_covers, verbose = True, None, True
        >>> daid_list = ibs.get_valid_aids(species=ZEB_PLAIN)[0:6]
        >>> qreq_ = ibs.new_query_request(daid_list, daid_list)
        >>> qreq_.qparams.min_reindex_thresh = 1
        >>> min_reindex_thresh = qreq_.qparams.min_reindex_thresh
        >>> # CLEAR CACHE for clean test
        >>> clear_uuid_cache(qreq_)
        >>> # LOAD 3 AIDS INTO CACHE
        >>> aid_list = ibs.get_valid_aids(species=ZEB_PLAIN)[0:3]
        >>> # Should fallback
        >>> nnindexer = request_augmented_ibeis_nnindexer(qreq_, aid_list)
        >>> # assert the fallback
        >>> uncovered_aids, covered_aids_list = group_daids_by_cached_nnindexer(
        ...     qreq_, daid_list, min_reindex_thresh, max_covers)
        >>> result2 = uncovered_aids, covered_aids_list
        >>> ut.assert_eq(result2, ([4, 5, 6], [[1, 2, 3]]), 'pre augment')
        >>> # Should augment
        >>> nnindexer = request_augmented_ibeis_nnindexer(qreq_, daid_list)
        >>> uncovered_aids, covered_aids_list = group_daids_by_cached_nnindexer(
        ...     qreq_, daid_list, min_reindex_thresh, max_covers)
        >>> result3 = uncovered_aids, covered_aids_list
        >>> ut.assert_eq(result3, ([], [[1, 2, 3, 4, 5, 6]]), 'post augment')
        >>> # Should fallback
        >>> nnindexer2 = request_augmented_ibeis_nnindexer(qreq_, daid_list)
        >>> assert nnindexer is nnindexer2
    """
    global NEIGHBOR_CACHE
    min_reindex_thresh = qreq_.qparams.min_reindex_thresh
    if not force_rebuild:
        new_daid_list, covered_aids_list = group_daids_by_cached_nnindexer(
            qreq_, daid_list, min_reindex_thresh, max_covers=1)
        can_augment = (
            len(covered_aids_list) > 0 and
            not ut.list_set_equal(covered_aids_list[0], daid_list))
    else:
        can_augment = False
    if verbose:
        print('[aug] Requesting augmented nnindexer')
    if can_augment:
        covered_aids = covered_aids_list[0]
        if verbose:
            print('[aug] Augmenting index %r old daids with %d new daids' %
                  (len(covered_aids), len(new_daid_list)))
        # Load the base covered indexer
        # THIS SHOULD LOAD NOT REBUILD IF THE UUIDS ARE COVERED
        base_nnindexer = request_memcached_ibeis_nnindexer(
            qreq_, covered_aids, verbose=verbose, use_memcache=use_memcache)
        # Remove this indexer from the memcache because we are going to change it
        if base_nnindexer.cfgstr in NEIGHBOR_CACHE:
            print('Removing key from memcache')
            NEIGHBOR_CACHE[base_nnindexer.cfgstr] = None
            del NEIGHBOR_CACHE[base_nnindexer.cfgstr]

        new_vecs_list, new_fgws_list = get_support_data(qreq_, new_daid_list)
        base_nnindexer.add_support(new_daid_list, new_vecs_list, new_fgws_list, verbose=True)
        # FIXME: pointer issues
        nnindexer = base_nnindexer
        # Change to the new cfgstr
        nnindex_cfgstr = build_nnindex_cfgstr(qreq_, daid_list)
        nnindexer.cfgstr = nnindex_cfgstr
        cachedir = qreq_.ibs.get_flann_cachedir()
        nnindexer.save(cachedir)
        # Write to inverse uuid
        if len(daid_list) > min_reindex_thresh:
            uuid_map_fpath = get_nnindexer_uuid_map_fpath(qreq_)
            daids_hashid   = get_data_cfgstr(qreq_.ibs, daid_list)
            visual_uuid_list = qreq_.ibs.get_annot_visual_uuids(daid_list)
            UUID_MAP_CACHE.write_uuid_map_dict(uuid_map_fpath, visual_uuid_list, daids_hashid)
        # Write to memcache
        if ut.VERBOSE:
            print('[aug] Wrote to memcache=%r' % (nnindex_cfgstr,))
        NEIGHBOR_CACHE[nnindex_cfgstr] = nnindexer
        return nnindexer
    else:
        #if ut.VERBOSE:
        if verbose:
            print('[aug] Nothing to augment, fallback to memcache')
        # Fallback
        nnindexer = request_memcached_ibeis_nnindexer(
            qreq_, daid_list, verbose=verbose, use_memcache=use_memcache,
            force_rebuild=force_rebuild, memtrack=memtrack
        )
        return nnindexer