Example #1
def gridsearch_coverage_grid_mask():
    """
    CommandLine:
        python -m vtool.coverage_grid --test-gridsearch_coverage_grid_mask --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from vtool.coverage_grid import *  # NOQA
        >>> import plottool as pt
        >>> gridsearch_coverage_grid_mask()
        >>> pt.show_if_requested()
    """
    import plottool as pt
    cfgdict_list, cfglbl_list = get_coverage_grid_gridsearch_configs()
    kpts, chipsize, weights = coverage_kpts.testdata_coverage('easy1.png')
    gridmask_list = [
        255 * make_grid_coverage_mask(kpts, chipsize, weights, **cfgdict)
        for cfgdict in ut.ProgressIter(cfgdict_list, lbl='coverage grid')
    ]
    NORMHACK = False
    if NORMHACK:
        gridmask_list = [
            255 * (gridmask / gridmask.max()) for gridmask in gridmask_list
        ]

    fnum = 1
    ut.interact_gridsearch_result_images(
        pt.imshow, cfgdict_list, cfglbl_list,
        gridmask_list, fnum=fnum, figtitle='coverage grid', unpack=False,
        max_plots=25)

    pt.iup()
Example #2
def execute_query2(ibs, qreq_, verbose, save_qcache, batch_size=None):
    """
    Breaks up the query request into several subrequests so that it can be
    processed "more efficiently" and more safely.
    """
    qreq_.lazy_preload(verbose=verbose and ut.NOT_QUIET)
    all_qaids = qreq_.qaids
    print('len(all_qaids) = %r' % (len(all_qaids), ))
    qaid2_cm = {}
    # vsone must have a chunksize of 1
    if batch_size is None:
        if HOTS_BATCH_SIZE is None:
            hots_batch_size = ibs.cfg.other_cfg.hots_batch_size
        else:
            hots_batch_size = HOTS_BATCH_SIZE
    else:
        hots_batch_size = batch_size
    chunksize = 1 if qreq_.qparams.vsone else hots_batch_size
    # Iterate over vsone queries in chunks.
    # This minimizes lost computation if a qreq_ crashes
    nTotalChunks = ut.get_nTotalChunks(len(all_qaids), chunksize)
    qaid_chunk_iter = ut.ichunks(all_qaids, chunksize)
    _qreq_iter = (qreq_.shallowcopy(qaids=qaids) for qaids in qaid_chunk_iter)
    sub_qreq_iter = ut.ProgressIter(_qreq_iter,
                                    nTotal=nTotalChunks,
                                    freq=1,
                                    lbl='[mc4] query chunk: ',
                                    prog_hook=qreq_.prog_hook)
    for sub_qreq_ in sub_qreq_iter:
        if ut.VERBOSE:
            print('Generating vsmany chunk')
        sub_cm_list = pipeline.request_ibeis_query_L0(ibs,
                                                      sub_qreq_,
                                                      verbose=verbose)
        assert len(sub_qreq_.qaids) == len(sub_cm_list), 'not aligned'
        assert all([
            qaid == cm.qaid for qaid, cm in zip(sub_qreq_.qaids, sub_cm_list)
        ]), 'not corresponding'
        if save_qcache:
            fpath_list = qreq_.get_chipmatch_fpaths(sub_qreq_.qaids)
            _iter = zip(sub_cm_list, fpath_list)
            _iter = ut.ProgressIter(_iter,
                                    nTotal=len(sub_cm_list),
                                    lbl='saving chip matches',
                                    adjust=True,
                                    freq=1)
            for cm, fpath in _iter:
                cm.save_to_fpath(fpath, verbose=False)
        else:
            if ut.VERBOSE:
                print('[mc4] not saving vsmany chunk')
        qaid2_cm.update({cm.qaid: cm for cm in sub_cm_list})
    return qaid2_cm
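
The pattern in the example above -- splitting the query ids into fixed-size chunks so that a crash loses at most one chunk of work -- does not depend on the ibeis machinery. Below is a minimal standalone sketch of the same idea using only the standard library; the `process` callback and the batch size are hypothetical placeholders, not part of the original API.

def ichunks(items, chunksize):
    """Yield successive chunks of at most `chunksize` items (like ut.ichunks)."""
    for i in range(0, len(items), chunksize):
        yield items[i:i + chunksize]

def run_in_batches(all_ids, process, chunksize=4):
    """Process ids chunk by chunk so a crash only loses the current chunk."""
    results = {}
    for id_chunk in ichunks(all_ids, chunksize):
        # Anything computed in earlier chunks is already stored in `results`.
        for item_id, value in process(id_chunk):
            results[item_id] = value
    return results

# Toy usage: "process" just squares each id.
print(run_in_batches(list(range(10)), lambda chunk: [(i, i * i) for i in chunk]))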
Example #3
def score_chipmatch_list(qreq_, cm_list, score_method, progkw=None):
    """
    CommandLine:
        python -m wbia.algo.hots.scoring --test-score_chipmatch_list
        python -m wbia.algo.hots.scoring --test-score_chipmatch_list:1
        python -m wbia.algo.hots.scoring --test-score_chipmatch_list:0 --show

    Example0:
        >>> # SLOW_DOCTEST
        >>> # xdoctest: +SKIP
        >>> # (IMPORTANT)
        >>> from wbia.algo.hots.scoring import *  # NOQA
        >>> ibs, qreq_, cm_list = plh.testdata_pre_sver()
        >>> score_method = qreq_.qparams.prescore_method
        >>> score_chipmatch_list(qreq_, cm_list, score_method)
        >>> cm = cm_list[0]
        >>> assert cm.score_list.argmax() == 0
        >>> ut.quit_if_noshow()
        >>> cm.show_single_annotmatch(qreq_)
        >>> ut.show_if_requested()

    Example1:
        >>> # SLOW_DOCTEST
        >>> # (IMPORTANT)
        >>> from wbia.algo.hots.scoring import *  # NOQA
        >>> ibs, qreq_, cm_list = plh.testdata_post_sver()
        >>> qaid = qreq_.qaids[0]
        >>> cm = cm_list[0]
        >>> score_method = qreq_.qparams.score_method
        >>> score_chipmatch_list(qreq_, cm_list, score_method)
        >>> assert cm.score_list.argmax() == 0
        >>> ut.quit_if_noshow()
        >>> cm.show_single_annotmatch(qreq_)
        >>> ut.show_if_requested()
    """
    if progkw is None:
        progkw = dict(freq=1, time_thresh=30.0, adjust=True)
    lbl = 'scoring %s' % (score_method)
    # Choose the appropriate scoring mechanism
    logger.info('[scoring] score %d chipmatches with %s' %
                (len(cm_list), score_method))
    if score_method == 'sumamech':
        for cm in ut.ProgressIter(cm_list, lbl=lbl, **progkw):
            cm.score_name_sumamech(qreq_)
    elif score_method == 'csum':
        for cm in ut.ProgressIter(cm_list, lbl=lbl, **progkw):
            cm.score_name_maxcsum(qreq_)
    elif score_method == 'nsum':
        for cm in ut.ProgressIter(cm_list, lbl=lbl, **progkw):
            cm.score_name_nsum(qreq_)
    else:
        raise NotImplementedError('[hs] unknown scoring method:' +
                                  score_method)
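
The if/elif chain above dispatches a per-chipmatch scoring routine by name. A table-driven sketch of the same dispatch is shown below; it assumes the chipmatch objects expose the same score_name_* methods used in the example and is only an illustration of the pattern, not part of the wbia API.

def score_all(cm_list, score_method, qreq_=None):
    # Map each method name to the corresponding per-chipmatch call.
    dispatch = {
        'sumamech': lambda cm: cm.score_name_sumamech(qreq_),
        'csum': lambda cm: cm.score_name_maxcsum(qreq_),
        'nsum': lambda cm: cm.score_name_nsum(qreq_),
    }
    if score_method not in dispatch:
        raise NotImplementedError('unknown scoring method: %s' % (score_method,))
    score_one = dispatch[score_method]
    for cm in cm_list:
        score_one(cm)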
Example #4
def subindexer_time_experiment():
    """
    builds plot of number of annotations vs indexer build time.

    TODO: time experiment
    """
    import ibeis
    import utool as ut
    import pyflann
    import plottool as pt
    ibs = ibeis.opendb(db='PZ_Master0')
    daid_list = ibs.get_valid_aids()
    count_list = []
    time_list = []
    flann_params = ibs.cfg.query_cfg.flann_cfg.get_flann_params()
    for count in ut.ProgressIter(range(1, 301)):
        daids_ = daid_list[:]
        np.random.shuffle(daids_)
        daids = daids_[0:count]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        with ut.Timer(verbose=False) as t:
            flann = pyflann.FLANN()
            flann.build_index(vecs, **flann_params)
        count_list.append(count)
        time_list.append(t.ellapsed)
    count_arr = np.array(count_list)
    time_arr = np.array(time_list)
    pt.plot2(count_arr, time_arr, marker='-', equal_aspect=False,
             x_label='num_annotations', y_label='FLANN build time')
Example #5
def get_buildtime_data(**kwargs):
    flann_params = vt.get_flann_params(**kwargs)
    print('flann_params = %r' % (ut.dict_str(flann_params), ))
    data_list = []
    num = 1000
    print('-----')
    for count in ut.ProgressIter(itertools.count(),
                                 nTotal=-1,
                                 freq=1,
                                 autoadjust=False):
        num = int(num * 1.2)
        print('num = %r' % (num, ))
        #if num > 1E6:
        #    break
        data = pool.get_testdata(num)
        print('object size ' + ut.get_object_size_str(data, 'data'))
        flann = pyflann.FLANN(**flann_params)
        with ut.Timer(verbose=False) as t:
            flann.build_index(data)
        print('t.ellapsed = %r' % (t.ellapsed, ))
        if t.ellapsed > 5 or count > 1000:
            break
        data_list.append((count, num, t.ellapsed))
        print('-----')
    return data_list, flann_params
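
Both timing experiments above follow the same measure-as-you-grow loop: build an index on progressively larger inputs and record how long each build takes. A self-contained sketch of that loop with the standard-library timer (no utool or pyflann) might look like the following; the stand-in "index build" is just np.sort and is purely illustrative.

import time
import numpy as np

def time_builds(build_index, sizes, make_data):
    """Return (sizes, seconds) for building an index on datasets of each size."""
    counts, seconds = [], []
    for n in sizes:
        data = make_data(n)
        t0 = time.perf_counter()
        build_index(data)
        seconds.append(time.perf_counter() - t0)
        counts.append(n)
    return np.array(counts), np.array(seconds)

counts, secs = time_builds(np.sort, [1000, 2000, 4000],
                           lambda n: np.random.rand(n, 128).astype(np.float32))
print(list(zip(counts.tolist(), secs.tolist())))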
Example #6
def gridsearch_coverage_grid():
    """
    CommandLine:
        python -m vtool.coverage_grid --test-gridsearch_coverage_grid --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from vtool.coverage_grid import *  # NOQA
        >>> import plottool as pt
        >>> gridsearch_coverage_grid()
        >>> pt.show_if_requested()
    """
    import plottool as pt
    fname = None  # 'easy1.png'
    kpts, chipsize, weights = coverage_kpts.testdata_coverage(fname)
    if len(kpts) > 100:
        kpts = kpts[::100]
        weights = weights[::100]
    cfgdict_list, cfglbl_list = get_coverage_grid_gridsearch_configs()
    coverage_gridtup_list = [
        sparse_grid_coverage(kpts, chipsize, weights, **cfgdict)
        for cfgdict in ut.ProgressIter(cfgdict_list, lbl='coverage grid')
    ]

    fnum = 1
    with ut.Timer('plotting gridsearch'):
        ut.interact_gridsearch_result_images(
            show_coverage_grid, cfgdict_list, cfglbl_list,
            coverage_gridtup_list, fnum=fnum, figtitle='coverage grid', unpack=True,
            max_plots=25)

    pt.iup()
Example #7
def gridsearch_image_function(param_info,
                              test_func,
                              args=tuple(),
                              show_func=None):
    """
    gridsearch for a function that produces a single image
    """
    import plottool as pt
    cfgdict_list, cfglbl_list = param_info.get_gridsearch_input(
        defaultslice=slice(0, 10))
    fnum = pt.ensure_fnum(None)
    if show_func is None:
        show_func = pt.imshow
    lbl = ut.get_funcname(test_func)
    cfgresult_list = [
        test_func(*args, **cfgdict)
        for cfgdict in ut.ProgressIter(cfgdict_list, lbl=lbl)
    ]
    onclick_func = None
    ut.interact_gridsearch_result_images(show_func,
                                         cfgdict_list,
                                         cfglbl_list,
                                         cfgresult_list,
                                         fnum=fnum,
                                         figtitle=lbl,
                                         unpack=False,
                                         max_plots=25,
                                         onclick_func=onclick_func)
    pt.iup()
Example #8
def collect_ibeis_training_annotations(ibs, nDaids_basis, verbose=True):
    # load a dataset
    #dbname = 'PZ_MTEST'
    #dbname = 'GZ_ALL'
    def get_set_groundfalse(ibs, qaids):
        # get groundfalse annots relative to the entire set
        valid_nids = ibs.get_valid_nids()
        qnids = ibs.get_annot_nids(qaids)
        nid_list = list(set(valid_nids) - set(qnids))
        aids_list = ibs.get_name_aids(nid_list)
        return ut.flatten(aids_list)

    # determinism
    np.random.seed(0)
    random.seed(0)
    # TODO: USE ANNOT FILTERINGS
    import ibeis
    qaids_all = ibeis.testdata_aids(
        a='default:pername=1,mingt=2,is_known=True')
    qaids = qaids_all[::2]
    print('nQaids = %r' % len(qaids))

    def get_annot_groundtruth_sample(ibs,
                                     aid_list,
                                     per_name=1,
                                     isexemplar=True):
        r"""
        DEPRECATE
        """
        all_trues_list = ibs.get_annot_groundtruth(aid_list,
                                                   noself=True,
                                                   is_exemplar=isexemplar)

        def random_choice(aids):
            size = min(len(aids), per_name)
            return np.random.choice(aids, size, replace=False).tolist()

        sample_trues_list = [
            random_choice(aids) if len(aids) > 0 else []
            for aids in all_trues_list
        ]
        return sample_trues_list

    daids_gt_sample = ut.flatten(
        get_annot_groundtruth_sample(ibs, qaids, isexemplar=None))
    daids_gf_all = get_set_groundfalse(ibs, qaids)
    ut.assert_eq(len(daids_gt_sample), len(qaids), 'missing gt')
    daids_list = []

    for target_nDaids in ut.ProgressIter(nDaids_basis, lbl='testing dbsize'):
        print('---------------------------')
        # Sample one match from the groundtruth with padding
        daids_gf_sample = ut.random_sample(
            daids_gf_all, max(0, target_nDaids - len(daids_gt_sample)))
        daids = sorted(daids_gt_sample + daids_gf_sample)
        nDaids = len(daids)
        if target_nDaids != nDaids:
            continue
        daids_list.append(daids)
    return qaids, daids_list
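
The nested `random_choice` helper above samples at most `per_name` annotations from each ground-truth group without replacement. A standalone sketch of that per-group sampling, with made-up data and a fixed seed for determinism (mirroring the np.random.seed(0) call in the example):

import numpy as np

def sample_per_group(groups, per_group=1, seed=0):
    """Pick at most `per_group` items from each group without replacement."""
    rng = np.random.RandomState(seed)
    samples = []
    for items in groups:
        size = min(len(items), per_group)
        if size == 0:
            samples.append([])
        else:
            samples.append(rng.choice(items, size, replace=False).tolist())
    return samples

print(sample_per_group([[1, 2, 3], [], [4]], per_group=2))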
Example #9
def test_body2(count, logmode, backspace):
    ut.colorprint('\n---- count = %r -----' % (count, ), 'yellow')
    ut.colorprint('backspace = %r' % (backspace, ), 'yellow')
    ut.colorprint('logmode = %r' % (logmode, ), 'yellow')
    if logmode:
        ut.delete('test.log')
        ut.start_logging('test.log')
    print('Start main loop')
    import time
    for count in ut.ProgressIter(range(2), freq=1, backspace=backspace):
        for count in ut.ProgressIter(range(50),
                                     freq=1,
                                     backspace=backspace):
            time.sleep(.01)
    print('Done with main loop work')
    print('Exiting main body')
    if logmode:
        ut.stop_logging()
Example #10
def write_dirty_aids(ibs, dirty_probchip_fpath_list, dirty_aids, config2_,
                     species):
    if config2_ is None:
        fw_detector = ibs.cfg.featweight_cfg.fw_detector
    else:
        fw_detector = config2_.get('fw_detector')

    if fw_detector == 'rf':
        (
            extramargin_fpath_list,
            probchip_extramargin_fpath_list,
            halfoffset_cs_list,
        ) = compute_extramargin_detectchip(ibs,
                                           dirty_aids,
                                           config2_=config2_,
                                           species=species,
                                           FACTOR=4)
        #dirty_cfpath_list  = ibs.get_annot_chip_fpath(dirty_aids, ensure=True, config2_=config2_)

        config = {
            'scale_list': [1.0],
            'output_gpath_list': probchip_extramargin_fpath_list,
            'mode': 1,
        }
        probchip_generator = randomforest.detect_gpath_list_with_species(
            ibs, extramargin_fpath_list, species, **config)
        # Evaluate generator until completion
        ut.evaluate_generator(probchip_generator)
        extramargin_mask_gen = (vt.imread(fpath, grayscale=True)
                                for fpath in probchip_extramargin_fpath_list)
        # Crop the extra margin off of the new probchips
        _iter = zip(dirty_probchip_fpath_list, extramargin_mask_gen,
                    halfoffset_cs_list)
        for (probchip_fpath, extramargin_probchip, halfmargin) in _iter:
            half_w, half_h = halfmargin
            probchip = extramargin_probchip[half_h:-half_h, half_w:-half_w]
            vt.imwrite(probchip_fpath, probchip)
    elif fw_detector == 'cnn':
        # don't use extramargin here (for now)
        chip_fpath_list = ibs.get_annot_chip_fpath(dirty_aids,
                                                   config2_=config2_)
        mask_gen = ibs.generate_species_background_mask(
            chip_fpath_list, species)
        _iter = zip(dirty_probchip_fpath_list, mask_gen)
        for chunk in ut.ichunks(_iter, 64):
            for probchip_fpath, probchip in ut.ProgressIter(
                    chunk,
                    lbl='write probchip chunk',
                    adjust=True,
                    time_thresh=30.0):
                probchip = postprocess_mask(probchip)
                vt.imwrite(probchip_fpath, probchip)
    else:
        raise NotImplementedError('bad fw_detector=%r' % (fw_detector, ))
Example #11
def evaluate_training_data(ibs,
                           qaids,
                           daids_list,
                           varydict,
                           nDaids_basis,
                           verbose=True):
    nError_list = []
    nDaids_list = []
    cfgdict_list2 = []
    cfgdict_list = ut.all_dict_combinations(varydict)
    for daids in ut.ProgressIter(daids_list, lbl='testing dbsize'):
        nDaids = len(daids)
        print('\n---------------------------')
        with ut.Indenter('[nDaids=%r]' % (nDaids)):
            print('nDaids = %r' % nDaids)
            for cfgdict in ut.ProgressIter(cfgdict_list,
                                           lbl='testing cfgdict'):
                qreq_ = ibs.new_query_request(qaids,
                                              daids,
                                              cfgdict=cfgdict,
                                              verbose=verbose)
                qres_list = ibs.query_chips(qreq_=qreq_, verbose=verbose)
                gt_ranks_list = [
                    qres.get_gt_ranks(ibs=ibs) for qres in qres_list
                ]
                incorrect_list = [
                    len(gt_ranks) == 0 or min(gt_ranks) != 0
                    for gt_ranks in gt_ranks_list
                ]
                nErrors = sum(incorrect_list)
                nError_list.append(nErrors)
                nDaids_list.append(nDaids)
                cfgdict_list2.append(cfgdict.copy())

    nError_list = np.array(nError_list)
    nDaids_list = np.array(nDaids_list)
    K_list = np.array([cfgdict['K'] for cfgdict in cfgdict_list2])
    return nDaids_list, K_list, nError_list
Example #12
def test_body(count, logmode, backspace):
    ut.colorprint('\n---- count = %r -----' % (count, ), 'yellow')
    ut.colorprint('backspace = %r' % (backspace, ), 'yellow')
    ut.colorprint('logmode = %r' % (logmode, ), 'yellow')
    if logmode:
        ut.delete('test.log')
        ut.start_logging('test.log')
    print('Start main loop')
    import time
    for count in ut.ProgressIter(range(20), freq=3, backspace=backspace):
        time.sleep(.01)
    print('Done with main loop work')
    print('Exiting main body')
    if logmode:
        ut.stop_logging()
        #print('-----DONE LOGGING----')
        testlog_text = ut.readfrom('test.log')
        print(ut.indent(testlog_text.replace('\r', '\n'), '        '))
Example #13
def drive_test_script(ibs):
    r"""
    Test script where we drive around and take pictures of animals
    both in a given database and not in a given database to make sure
    the system works.

    CommandLine:
        python -m wbia.viz.viz_image --test-drive_test_script
        python -m wbia.viz.viz_image --test-drive_test_script --db PZ_MTEST --show
        python -m wbia.viz.viz_image --test-drive_test_script --db GIR_Tanya --show
        python -m wbia.viz.viz_image --test-drive_test_script --db GIR_Master0 --show
        python -m wbia.viz.viz_image --test-drive_test_script --db PZ_Master0 --show
        python -m wbia.viz.viz_image --test-drive_test_script --db PZ_FlankHack --show

        python -m wbia.viz.viz_image --test-drive_test_script --db PZ_FlankHack --show
        python -m wbia.viz.viz_image --test-drive_test_script --dbdir /raid/work2/Turk/GIR_Master --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.viz.viz_image import *  # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb()
        >>> drive_test_script(ibs)
    """
    import wbia

    aid_list = wbia.testdata_aids(a='default:pername=1')
    logger.info('Running with (annot) aid_list = %r' % (aid_list))
    gid_list = ibs.get_annot_gids(aid_list)
    logger.info('Running with (image) gid_list = %r' % (gid_list))
    avuuid_list = ibs.get_annot_visual_uuids(aid_list)
    guuid_list = ibs.get_image_uuids(gid_list)
    logger.info('Running with annot_visual_uuid_list = %s' %
                (ut.repr2(zip(aid_list, avuuid_list))))
    logger.info('Running with image_uuid_list = %s' %
                (ut.repr2(zip(gid_list, guuid_list))))
    for gid, aid in ut.ProgressIter(zip(gid_list, aid_list), lbl='progress '):
        logger.info('\ngid, aid, nid = %r, %r, %r' % (
            gid,
            aid,
            ibs.get_annot_nids(aid),
        ))
        show_image(ibs, gid, annote=False, rich_title=True)
        pt.show_if_requested()
Example #14
def query_smk_test(annots_df, invindex, qreq_):
    """
    ibeis interface
    Example:
        >>> from ibeis.algo.hots.smk import smk_match
        >>> from ibeis.algo.hots.smk import smk_debug
        >>> ibs, annots_df, daids, qaids, invindex, qreq_ = smk_debug.testdata_internals_full()
        >>> qaid2_qres_ = smk_match.query_smk(annots_df, invindex, qreq_)

    Dev::
        qres = qaid2_qres_[qaids[0]]
        fig = qres.show_top(ibs)

    """
    from ibeis.algo.hots import pipeline
    from ibeis.algo.hots.smk import smk_match  # NOQA
    qaids = qreq_.get_external_qaids()
    qaid2_chipmatch = {}
    qaid2_scores = {}
    aggregate = qreq_.qparams.aggregate
    smk_alpha = qreq_.qparams.smk_alpha
    smk_thresh = qreq_.qparams.smk_thresh
    lbl = '[smk_match] asmk query: ' if aggregate else '[smk_match] smk query: '
    withinfo = True
    for qaid in ut.ProgressIter(qaids, lbl=lbl, freq=1):
        daid2_score, daid2_chipmatch = smk_match.query_inverted_index(
            annots_df, qaid, invindex, withinfo, aggregate, smk_alpha,
            smk_thresh)
        qaid2_scores[qaid] = daid2_score
        qaid2_chipmatch[qaid] = daid2_chipmatch
    try:
        #filt2_meta = {}
        cm_list = convert_smkmatch_to_chipmatch(qaid2_chipmatch, qaid2_scores)
        #qaid2_qres_ = pipeline.chipmatch_to_resdict(qaid2_chipmatch, filt2_meta, qreq_)
        qaid2_qres_ = pipeline.chipmatch_to_resdict(qreq_, cm_list)
    except Exception as ex:
        ut.printex(ex)
        ut.qflag()
        raise
    return qaid2_qres_
Example #15
def test_average_contrast():
    import vtool as vt
    ut.get_valid_test_imgkeys()
    img_fpath_list = [
        ut.grab_test_imgpath(key) for key in ut.get_valid_test_imgkeys()
    ]
    img_list = [vt.imread(img, grayscale=True) for img in img_fpath_list]
    avecontrast_list = np.array(
        [compute_average_contrast(img) for img in img_list])
    import plottool as pt
    nCols = len(img_list)
    fnum = None
    if fnum is None:
        fnum = pt.next_fnum()
    pt.figure(fnum=fnum, pnum=(2, 1, 1))
    sortx = avecontrast_list.argsort()
    y_list = avecontrast_list[sortx]
    x_list = np.arange(0, nCols) + .5
    pt.plot(x_list, y_list, 'bo-')
    sorted_imgs = ut.take(img_list, sortx)
    for px, img in ut.ProgressIter(enumerate(sorted_imgs, start=1)):
        pt.imshow(img, fnum=fnum, pnum=(2, nCols, nCols + px))
Example #16
def gridsearch_kpts_coverage_mask():
    """
    testing function

    CommandLine:
        python -m vtool.coverage_kpts --test-gridsearch_kpts_coverage_mask --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from vtool.coverage_kpts import *  # NOQA
        >>> import plottool as pt
        >>> gridsearch_kpts_coverage_mask()
        >>> pt.show_if_requested()
    """
    import plottool as pt
    cfgdict_list, cfglbl_list = get_coverage_kpts_gridsearch_configs()
    kpts, chipsize, weights = testdata_coverage('easy1.png')
    imgmask_list = [
        255 * make_kpts_coverage_mask(
            kpts, chipsize, weights, return_patch=False, **cfgdict)
        for cfgdict in ut.ProgressIter(cfgdict_list, lbl='coverage grid')
    ]
    #NORMHACK = True
    #if NORMHACK:
    #    imgmask_list = [
    #        255 * (mask / mask.max()) for mask in imgmask_list
    #    ]
    fnum = pt.next_fnum()
    ut.interact_gridsearch_result_images(pt.imshow,
                                         cfgdict_list,
                                         cfglbl_list,
                                         imgmask_list,
                                         fnum=fnum,
                                         figtitle='coverage image',
                                         unpack=False,
                                         max_plots=25)
    pt.iup()
Example #17
def extract_desc_from_patches(patch_list):
    r"""
    Careful about the way the patches are extracted here.

    Args:
        patch_list (ndarray[ndims=3]):

    CommandLine:
        python -m pyhesaff extract_desc_from_patches  --rebuild-hesaff --no-rmbuild
        python -m pyhesaff extract_desc_from_patches  --rebuild-hesaff --no-rmbuild --show
        python -m pyhesaff extract_desc_from_patches:1 --show

    Example:
        >>> # ENABLE_DOCTEST
        >>> from pyhesaff._pyhesaff import *  # NOQA
        >>> import vtool as vt
        >>> img_fpath = ut.grab_test_imgpath(ut.get_argval('--fname', default='lena.png'))
        >>> # First extract keypoints normally
        >>> (orig_kpts_list, orig_vecs_list) = detect_feats(img_fpath)
        >>> # Take 9 keypoints
        >>> img = vt.imread(img_fpath)
        >>> kpts_list = orig_kpts_list[1::len(orig_kpts_list) // 9]
        >>> vecs_list = orig_vecs_list[1::len(orig_vecs_list) // 9]
        >>> # Extract the underlying grayscale patches (using different patch_size)
        >>> patch_list_ = np.array(vt.get_warped_patches(img, kpts_list, patch_size=64)[0])
        >>> patch_list = np.array(vt.convert_image_list_colorspace(patch_list_, 'gray'))
        >>> # Extract descriptors from the patches
        >>> vecs_array = extract_desc_from_patches(patch_list)

    Example:
        >>> # ENABLE_DOCTEST
        >>> from pyhesaff._pyhesaff import *  # NOQA
        >>> import vtool as vt
        >>> img_fpath = ut.grab_test_imgpath(ut.get_argval('--fname', default='lena.png'))
        >>> # First extract keypoints normally
        >>> (orig_kpts_list, orig_vecs_list) = detect_feats(img_fpath)
        >>> # Take 9 keypoints
        >>> img = vt.imread(img_fpath)
        >>> kpts_list = orig_kpts_list[1::len(orig_kpts_list) // 9]
        >>> vecs_list = orig_vecs_list[1::len(orig_vecs_list) // 9]
        >>> # Extract the underlying grayscale patches
        >>> #patch_list_ = np.array(vt.get_warped_patches(img, kpts_list)[0])
        >>> #patch_list = np.array(vt.convert_image_list_colorspace(patch_list_, 'gray'))
        >>> patch_list = extract_patches(img, kpts_list)
        >>> patch_list = np.round(patch_list).astype(np.uint8)
        >>> # Currently it's impossible to get the correct answer
        >>> # TODO: allow patches to be passed in as float32
        >>> # Extract descriptors from those patches
        >>> vecs_array = extract_desc_from_patches(patch_list)
        >>> # Compare to see if they are close to the original descriptors
        >>> errors = vt.L2_sift(vecs_list, vecs_array)
        >>> print('Errors: %r' % (errors,))
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ax = pt.draw_patches_and_sifts(patch_list, vecs_array, pnum=(1, 2, 1))
        >>> ax.set_title('patch extracted')
        >>> ax = pt.draw_patches_and_sifts(patch_list, vecs_list, pnum=(1, 2, 2))
        >>> ax.set_title('image extracted')
        >>> ut.show_if_requested()
    """
    ndims = len(patch_list.shape)
    if ndims == 4 and patch_list.shape[-1] == 1:
        print('[pyhesaff] warning need to reshape patch_list')
        # need to remove grayscale dimension, maybe it should be included
        patch_list = patch_list.reshape(patch_list.shape[0:3])
    elif ndims == 4 and patch_list.shape[-1] == 3:
        assert False, 'cannot handle color images yet'
    assert patch_list.flags[
        'C_CONTIGUOUS'], 'patch_list must be contiguous array'
    num_patches, patch_h, patch_w = patch_list.shape[0:3]
    assert patch_h == patch_w, 'must be square patches'
    vecs_array = alloc_vecs(num_patches)
    #vecs_array[:] = 0
    #print('vecs_array = %r' % (vecs_array,))
    # If the input array list is memmaped it is a good idea to process in chunks
    CHUNKS = isinstance(patch_list, np.memmap)
    if not CHUNKS:
        HESAFF_CLIB.extractDescFromPatches(num_patches, patch_h, patch_w,
                                           patch_list, vecs_array)
    else:
        from six.moves import range
        chunksize = 2048
        rx = 0  # ensures the tail handling below works when num_patches < chunksize
        _iter = range(num_patches // chunksize)
        _progiter = ut.ProgressIter(_iter, lbl='extracting sift chunk')
        for ix in _progiter:
            lx = ix * chunksize
            rx = (ix + 1) * chunksize
            patch_sublist = np.array(patch_list[lx:rx])
            sublist_size = rx - lx
            HESAFF_CLIB.extractDescFromPatches(sublist_size, patch_h, patch_w,
                                               patch_sublist,
                                               vecs_array[lx:rx])
        last_size = num_patches - rx
        if last_size > 0:
            lx = rx
            rx = lx + last_size
            patch_sublist = np.array(patch_list[lx:rx])
            sublist_size = rx - lx
            HESAFF_CLIB.extractDescFromPatches(sublist_size, patch_h, patch_w,
                                               patch_sublist,
                                               vecs_array[lx:rx])
    #print('vecs_array = %r' % (vecs_array,))
    return vecs_array
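
The chunked branch above exists so that a memory-mapped `patch_list` is only materialized `chunksize` patches at a time. The same pattern, reduced to a generic sketch (the `process_chunk` callback is a placeholder, and the final partial chunk is folded into the loop rather than handled separately):

import numpy as np

def process_in_chunks(arr, out, process_chunk, chunksize=2048):
    """Process a (possibly memmapped) array chunk by chunk into `out`."""
    num = arr.shape[0]
    for lx in range(0, num, chunksize):
        rx = min(lx + chunksize, num)
        chunk = np.array(arr[lx:rx])  # forces the slice into memory if memmapped
        out[lx:rx] = process_chunk(chunk)
    return out

data = np.arange(10, dtype=np.float32).reshape(5, 2)
doubled = process_in_chunks(data, np.empty_like(data), lambda c: c * 2, chunksize=2)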
Example #18
def execute_query_and_save_L1(ibs,
                              qreq_,
                              use_cache,
                              save_qcache,
                              verbose=True,
                              batch_size=None):
    """
    Args:
        ibs (ibeis.IBEISController):
        qreq_ (ibeis.QueryRequest):
        use_cache (bool):

    Returns:
        qaid2_cm

    CommandLine:
        python -m ibeis.algo.hots.match_chips4 --test-execute_query_and_save_L1:0
        python -m ibeis.algo.hots.match_chips4 --test-execute_query_and_save_L1:1
        python -m ibeis.algo.hots.match_chips4 --test-execute_query_and_save_L1:2
        python -m ibeis.algo.hots.match_chips4 --test-execute_query_and_save_L1:3


    Example0:
        >>> # SLOW_DOCTEST
        >>> from ibeis.algo.hots.match_chips4 import *  # NOQA
        >>> cfgdict1 = dict(codename='vsmany', sv_on=True)
        >>> p = 'default' + ut.get_cfg_lbl(cfgdict1)
        >>> qreq_ = ibeis.main_helpers.testdata_qreq_(p=p, qaid_override=[1, 2, 3, 4])
        >>> ibs = qreq_.ibs
        >>> use_cache, save_qcache, verbose = False, False, True
        >>> qaid2_cm = execute_query_and_save_L1(ibs, qreq_, use_cache, save_qcache, verbose)
        >>> print(qaid2_cm)

    Example1:
        >>> # SLOW_DOCTEST
        >>> from ibeis.algo.hots.match_chips4 import *  # NOQA
        >>> cfgdict1 = dict(codename='vsone', sv_on=True)
        >>> p = 'default' + ut.get_cfg_lbl(cfgdict1)
        >>> qreq_ = ibeis.main_helpers.testdata_qreq_(p=p, qaid_override=[1, 2, 3, 4])
        >>> ibs = qreq_.ibs
        >>> use_cache, save_qcache, verbose = False, False, True
        >>> qaid2_cm = execute_query_and_save_L1(ibs, qreq_, use_cache, save_qcache, verbose)
        >>> print(qaid2_cm)

    Example2:
        >>> # SLOW_DOCTEST
        >>> # TEST SAVE
        >>> from ibeis.algo.hots.match_chips4 import *  # NOQA
        >>> import ibeis
        >>> cfgdict1 = dict(codename='vsmany', sv_on=True)
        >>> p = 'default' + ut.get_cfg_lbl(cfgdict1)
        >>> qreq_ = ibeis.main_helpers.testdata_qreq_(p=p, qaid_override=[1, 2, 3, 4])
        >>> ibs = qreq_.ibs
        >>> use_cache, save_qcache, verbose = False, True, True
        >>> qaid2_cm = execute_query_and_save_L1(ibs, qreq_, use_cache, save_qcache, verbose)
        >>> print(qaid2_cm)

    Example3:
        >>> # SLOW_DOCTEST
        >>> # TEST LOAD
        >>> from ibeis.algo.hots.match_chips4 import *  # NOQA
        >>> import ibeis
        >>> cfgdict1 = dict(codename='vsmany', sv_on=True)
        >>> p = 'default' + ut.get_cfg_lbl(cfgdict1)
        >>> qreq_ = ibeis.main_helpers.testdata_qreq_(p=p, qaid_override=[1, 2, 3, 4])
        >>> ibs = qreq_.ibs
        >>> use_cache, save_qcache, verbose = True, True, True
        >>> qaid2_cm = execute_query_and_save_L1(ibs, qreq_, use_cache, save_qcache, verbose)
        >>> print(qaid2_cm)

    Example4:
        >>> # ENABLE_DOCTEST
        >>> # TEST PARTIAL HIT
        >>> from ibeis.algo.hots.match_chips4 import *  # NOQA
        >>> import ibeis
        >>> cfgdict1 = dict(codename='vsmany', sv_on=False, prescore_method='csum')
        >>> p = 'default' + ut.get_cfg_lbl(cfgdict1)
        >>> qreq_ = ibeis.main_helpers.testdata_qreq_(p=p, qaid_override=[1, 2, 3, 4, 5, 6, 7, 8, 9])
        >>> ibs = qreq_.ibs
        >>> use_cache, save_qcache, verbose = False, True, False
        >>> qaid2_cm = execute_query_and_save_L1(ibs, qreq_, use_cache, save_qcache, verbose, batch_size=3)
        >>> cm = qaid2_cm[1]
        >>> ut.delete(cm.get_fpath(qreq_))
        >>> cm = qaid2_cm[4]
        >>> ut.delete(cm.get_fpath(qreq_))
        >>> cm = qaid2_cm[5]
        >>> ut.delete(cm.get_fpath(qreq_))
        >>> cm = qaid2_cm[6]
        >>> ut.delete(cm.get_fpath(qreq_))
        >>> print('Re-execute')
        >>> qaid2_cm_ = execute_query_and_save_L1(ibs, qreq_, use_cache, save_qcache, verbose, batch_size=3)
        >>> assert all([qaid2_cm_[qaid] == qaid2_cm[qaid] for qaid in qreq_.qaids])
        >>> [ut.delete(fpath) for fpath in qreq_.get_chipmatch_fpaths(qreq_.qaids)]

    Ignore:
        other = cm_ = qaid2_cm_[qaid]
        cm = qaid2_cm[qaid]
    """
    if use_cache:
        if ut.VERBOSE:
            print('[mc4] cache-query is on')
        if ut.DEBUG2:
            # sanity check
            qreq_.assert_self(ibs)
        # Try loading as many cached results as possible
        qaid2_cm_hit = {}
        external_qaids = qreq_.qaids
        fpath_list = qreq_.get_chipmatch_fpaths(external_qaids)
        exists_flags = [exists(fpath) for fpath in fpath_list]
        qaids_hit = ut.compress(external_qaids, exists_flags)
        fpaths_hit = ut.compress(fpath_list, exists_flags)
        fpath_iter = ut.ProgressIter(fpaths_hit,
                                     nTotal=len(fpaths_hit),
                                     enabled=len(fpaths_hit) > 1,
                                     lbl='loading cache hits',
                                     adjust=True,
                                     freq=1)
        try:
            cm_hit_list = [
                chip_match.ChipMatch.load_from_fpath(fpath, verbose=False)
                for fpath in fpath_iter
            ]
            assert all([
                qaid == cm.qaid for qaid, cm in zip(qaids_hit, cm_hit_list)
            ]), 'inconsistent'
            qaid2_cm_hit = {cm.qaid: cm for cm in cm_hit_list}
        except chip_match.NeedRecomputeError:
            print('NeedRecomputeError: Some cached chips need to recompute')
            fpath_iter = ut.ProgressIter(fpaths_hit,
                                         nTotal=len(fpaths_hit),
                                         enabled=len(fpaths_hit) > 1,
                                         lbl='checking chipmatch cache',
                                         adjust=True,
                                         freq=1)
            # Recompute those that fail loading
            qaid2_cm_hit = {}
            for fpath in fpath_iter:
                try:
                    cm = chip_match.ChipMatch.load_from_fpath(fpath,
                                                              verbose=False)
                except chip_match.NeedRecomputeError:
                    pass
                else:
                    qaid2_cm_hit[cm.qaid] = cm
            print('%d / %d cached matches need to be recomputed' %
                  (len(qaids_hit) - len(qaid2_cm_hit), len(qaids_hit)))
        if len(qaid2_cm_hit) == len(external_qaids):
            return qaid2_cm_hit
        else:
            if len(qaid2_cm_hit) > 0 and not ut.QUIET:
                print('... partial cm cache hit %d/%d' %
                      (len(qaid2_cm_hit), len(external_qaids)))
        cachehit_qaids = list(qaid2_cm_hit.keys())
        # mask queries that have already been executed
        qreq_.set_external_qaid_mask(cachehit_qaids)
    else:
        if ut.VERBOSE:
            print('[mc4] cache-query is off')
        qaid2_cm_hit = {}
    qaid2_cm = execute_query2(ibs, qreq_, verbose, save_qcache, batch_size)
    if ut.DEBUG2:
        # sanity check
        qreq_.assert_self(ibs)
    # Merge cache hits with computed misses
    if len(qaid2_cm_hit) > 0:
        qaid2_cm.update(qaid2_cm_hit)
    qreq_.set_external_qaid_mask(None)  # undo state changes
    return qaid2_cm
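
The use_cache branch above implements a partial cache hit: check which per-query result files already exist on disk, load those, and only recompute (and optionally save) the misses. A generic sketch of that pattern, assuming pickle files keyed by query id (the path scheme and `compute` callback are hypothetical):

import os
import pickle

def load_or_compute(keys, key_to_path, compute, save=True):
    """Load cached results where a file exists; compute and cache the rest."""
    results, missing = {}, []
    for key in keys:
        path = key_to_path(key)
        if os.path.exists(path):
            with open(path, 'rb') as file_:
                results[key] = pickle.load(file_)
        else:
            missing.append(key)
    for key in missing:
        results[key] = compute(key)
        if save:
            with open(key_to_path(key), 'wb') as file_:
                pickle.dump(results[key], file_)
    return results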
Example #19
def extract_liberty_style_patches(ds_path, pairs):
    """
    CommandLine:
        python -m ibeis_cnn.ingest_data --test-grab_cached_liberty_data --show

    """
    from itertools import product
    import numpy as np
    from PIL import Image
    import subprocess

    patch_x = 64
    patch_y = 64
    rows = 16
    cols = 16

    def _available_patches(ds_path):
        """
        Number of patches in _dataset_ (a path).

        Only available through the line count
        in info.txt -- use unix 'wc'.

        _path_ is supposed to be a path to
        a directory with bmp patchsets.
        """
        fname = join(ds_path, "info.txt")
        p = subprocess.Popen(['wc', '-l', fname],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        result, err = p.communicate()
        if p.returncode != 0:
            raise IOError(err)
        return int(result.strip().split()[0])

    #def read_patch_ids():
    #    pass

    def matches(ds_path, pairs):
        """Return _pairs_ many match/non-match pairs for _dataset_.
        _dataset_ is one of "liberty", "yosemite", "notredame".
        Every dataset has a number of match-files, that
        have _pairs_ many matches and non-matches (always
        the same number).
        The naming of these files is confusing, e.g. if there are 500 matching
        pairs and 500 non-matching pairs the file name is
        'm50_1000_1000_0.txt' -- in total 1000 patch-ids are used for matches,
        and 1000 patch-ids for non-matches. These patch-ids need not be
        unique.

        Also returns the used patch ids in a list.

        Extract all matching and non-matching pairs from _fname_.
        Every line in the matchfile looks like:
            patchID1 3DpointID1 unused1 patchID2 3DpointID2 unused2
        'matches' have the same 3DpointID.

        Every file has the same number of matches and non-matches.
        """
        #pairs = 500
        match_fname = ''.join(
            ["m50_", str(2 * pairs), "_",
             str(2 * pairs), "_0.txt"])
        match_fpath = join(ds_path, match_fname)

        #print(pairs, "pairs each (matching/non_matching) from", match_fpath)

        with open(match_fpath) as match_file:
            # collect patches (id), and match/non-match pairs
            patch_ids, match, non_match = [], [], []

            for line in match_file:
                match_info = line.split()
                p1_id, p1_3d, p2_id, p2_3d = int(match_info[0]), int(
                    match_info[1]), int(match_info[3]), int(match_info[4])
                if p1_3d == p2_3d:
                    match.append((p1_id, p2_id))
                else:
                    non_match.append((p1_id, p2_id))
                patch_ids.append(p1_id)
                patch_ids.append(p2_id)

            patch_ids = list(set(patch_ids))
            patch_ids.sort()

            assert len(match) == len(
                non_match), "Different number of matches and non-matches."

        return match, non_match, patch_ids

    def _crop_to_numpy(patchfile, requested_indicies):
        """
        Convert _patchfile_ to a numpy array with patches per row.
        A _patchfile_ is a .bmp file.
        """
        pil_img = Image.open(patchfile)
        ptch_iter = (pil_img.crop(
            (col * patch_x, row * patch_y, (col + 1) * patch_x,
             (row + 1) * patch_y)) for index, (
                 row, col) in enumerate(product(range(rows), range(cols)))
                     if index in requested_indicies)
        patches = [np.array(ptch) for ptch in ptch_iter]
        pil_img.close()
        return patches

    num_patch_per_bmp = rows * cols
    total_num_patches = _available_patches(ds_path)
    num_bmp_files, mod = divmod(total_num_patches, num_patch_per_bmp)

    patchfile_list = [
        join(ds_path, ''.join(['patches', str(i).zfill(4), '.bmp']))
        for i in range(num_bmp_files)
    ]

    # Build matching labels
    match_pairs, non_match_pairs, all_requested_patch_ids = matches(
        ds_path, pairs)
    all_requested_patch_ids = np.array(all_requested_patch_ids)
    print('len(match_pairs) = %r' % (len(match_pairs), ))
    print('len(non_match_pairs) = %r' % (len(non_match_pairs), ))
    print('len(all_requested_patch_ids) = %r' %
          (len(all_requested_patch_ids), ))

    assert len(list(
        set(ut.flatten(match_pairs) +
            ut.flatten(non_match_pairs)))) == len(all_requested_patch_ids)
    assert max(all_requested_patch_ids) <= total_num_patches

    # Read all patches out of the bmp file store
    all_patches = {}
    for pfx, patchfile in ut.ProgressIter(list(enumerate(patchfile_list)),
                                          lbl='Reading Patches',
                                          adjust=True):
        patch_id_offset = pfx * num_patch_per_bmp
        # get local patch ids in this bmp file
        patch_ids_ = np.arange(num_patch_per_bmp) + patch_id_offset
        requested_patch_ids_ = np.intersect1d(patch_ids_,
                                              all_requested_patch_ids)
        requested_indicies = requested_patch_ids_ - patch_id_offset
        if len(requested_indicies) == 0:
            continue
        patches = _crop_to_numpy(patchfile, requested_indicies)
        for idx, patch in zip(requested_patch_ids_, patches):
            all_patches[idx] = patch

    # Read the last patches
    if mod > 0:
        pfx += 1
        patch_id_offset = pfx * num_patch_per_bmp
        patchfile = join(
            ds_path, ''.join(['patches',
                              str(num_bmp_files).zfill(4), '.bmp']))
        patch_ids_ = np.arange(mod) + patch_id_offset
        requested_patch_ids_ = np.intersect1d(patch_ids_,
                                              all_requested_patch_ids)
        requested_indicies = requested_patch_ids_ - patch_id_offset
        patches = _crop_to_numpy(patchfile, requested_indicies)
        for idx, patch in zip(requested_patch_ids_, patches):
            all_patches[idx] = patch

    print('read %d patches ' % (len(all_patches)))
    #patches_list += [patches]

    #all_patches = np.concatenate(patches_list, axis=0)

    matching_patches1 = [all_patches[idx1] for idx1, idx2 in match_pairs]
    matching_patches2 = [all_patches[idx2] for idx1, idx2 in match_pairs]
    nonmatching_patches1 = [
        all_patches[idx1] for idx1, idx2 in non_match_pairs
    ]
    nonmatching_patches2 = [
        all_patches[idx2] for idx1, idx2 in non_match_pairs
    ]

    labels = np.array(([True] * len(matching_patches1)) +
                      ([False] * len(nonmatching_patches1)))
    warped_patch1_list = matching_patches1 + nonmatching_patches1
    warped_patch2_list = matching_patches2 + nonmatching_patches2

    img_list = ut.flatten(list(zip(warped_patch1_list, warped_patch2_list)))
    data = np.array(img_list)
    del img_list
    #data_per_label = 2
    assert labels.shape[0] == data.shape[0] // 2
    return data, labels
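
The match-file format described in the `matches` docstring (one pair per line, `patchID1 3DpointID1 unused1 patchID2 3DpointID2 unused2`, where equal 3D point ids indicate a matching pair) is simple to parse on its own. A small standalone sketch with a made-up line:

def parse_match_line(line):
    """Split one match-file line into ((patch_id, point_id), (patch_id, point_id))."""
    fields = line.split()
    p1_id, p1_3d = int(fields[0]), int(fields[1])
    p2_id, p2_3d = int(fields[3]), int(fields[4])
    return (p1_id, p1_3d), (p2_id, p2_3d)

(p1, pt1), (p2, pt2) = parse_match_line('12 7 0 45 7 0')
is_match = pt1 == pt2  # same 3D point id -> the two patches match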
Example #20
def merge_datasets(dataset_list):
    """
    Merges a list of dataset objects into a single combined dataset.
    """
    def consensus_check_factory():
        """
        Returns a temporary function used to check that all incoming values
        with the same key are consistent
        """
        from collections import defaultdict
        past_values = defaultdict(lambda: None)

        def consensus_check(value, key):
            assert past_values[key] is None or past_values[key] == value, (
                'key=%r with value=%r does not agree with past_value=%r' %
                (key, value, past_values[key]))
            past_values[key] = value
            return value

        return consensus_check

    total_num_labels = 0
    total_num_data = 0

    input_alias_list = [dataset.alias_key for dataset in dataset_list]

    alias_key = 'combo_' + ut.hashstr27(repr(input_alias_list), hashlen=8)
    training_dpath = ut.ensure_app_resource_dir('ibeis_cnn', 'training',
                                                alias_key)
    data_fpath = ut.unixjoin(training_dpath, alias_key + '_data.hdf5')
    labels_fpath = ut.unixjoin(training_dpath, alias_key + '_labels.hdf5')

    try:
        # Try to short-circuit cached loading
        merged_dataset = DataSet.from_alias_key(alias_key)
        return merged_dataset
    except (Exception, AssertionError) as ex:
        ut.printex(ex,
                   'alias definitions have changed. alias_key=%r' %
                   (alias_key, ),
                   iswarning=True)

    # Build the dataset
    consensus_check = consensus_check_factory()

    for dataset in dataset_list:
        print(ut.get_file_nBytes_str(dataset.data_fpath))
        print(dataset.data_fpath_dict['full'])
        print(dataset.num_labels)
        print(dataset.data_per_label)
        total_num_labels += dataset.num_labels
        total_num_data += (dataset.data_per_label * dataset.num_labels)
        # check that all data_dims agree
        data_shape = consensus_check(dataset.data_shape, 'data_shape')
        data_per_label = consensus_check(dataset.data_per_label,
                                         'data_per_label')

    # hack record this
    import numpy as np
    data_dtype = np.uint8
    label_dtype = np.int32
    data = np.empty((total_num_data, ) + data_shape, dtype=data_dtype)
    labels = np.empty(total_num_labels, dtype=label_dtype)

    #def iterable_assignment():
    #    pass
    data_left = 0
    data_right = None
    labels_left = 0
    labels_right = None
    for dataset in ut.ProgressIter(dataset_list,
                                   lbl='combining datasets',
                                   freq=1):
        X_all, y_all = dataset.subset('full')
        labels_right = labels_left + y_all.shape[0]
        data_right = data_left + X_all.shape[0]
        data[data_left:data_right] = X_all
        labels[labels_left:labels_right] = y_all
        data_left = data_right
        labels_left = labels_right

    ut.save_data(data_fpath, data)
    ut.save_data(labels_fpath, labels)

    labels = ut.load_data(labels_fpath)
    num_labels = len(labels)

    merged_dataset = DataSet.new_training_set(
        alias_key=alias_key,
        data_fpath=data_fpath,
        labels_fpath=labels_fpath,
        metadata_fpath=None,
        training_dpath=training_dpath,
        data_shape=data_shape,
        data_per_label=data_per_label,
        output_dims=1,
        num_labels=num_labels,
    )
    return merged_dataset
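
The merge above preallocates one large array and copies each dataset into a sliding [left:right) window instead of concatenating everything at once. A minimal numpy sketch of that fill pattern:

import numpy as np

def merge_arrays(array_list):
    """Concatenate along axis 0 by preallocating once and filling slice windows."""
    total = sum(arr.shape[0] for arr in array_list)
    out = np.empty((total,) + array_list[0].shape[1:], dtype=array_list[0].dtype)
    left = 0
    for arr in array_list:
        right = left + arr.shape[0]
        out[left:right] = arr
        left = right
    return out

merged = merge_arrays([np.ones((2, 3)), np.zeros((4, 3))])
assert merged.shape == (6, 3)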
Example #21
def wildbook_signal_annot_name_changes(ibs,
                                       aid_list=None,
                                       wb_target=None,
                                       dryrun=False):
    r"""
    Args:
        aid_list (list): list of annotation ids (default = None)
        tomcat_dpath (None): (default = None)
        wb_target (None): (default = None)
        dryrun (bool): (default = False)

    CommandLine:
        python -m ibeis wildbook_signal_annot_name_changes:0 --dryrun
        python -m ibeis wildbook_signal_annot_name_changes:1 --dryrun
        python -m ibeis wildbook_signal_annot_name_changes:1
        python -m ibeis wildbook_signal_annot_name_changes:2

    Setup:
        >>> wb_target = None
        >>> dryrun = ut.get_argflag('--dryrun')

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.control.manual_wildbook_funcs import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='PZ_MTEST')
        >>> #gid_list = ibs.get_valid_gids()[0:10]
        >>> gid_list = ibs.get_valid_gids()[3:5]
        >>> aid_list = ut.flatten(ibs.get_image_aids(gid_list))
        >>> # Test case where some names change, some do not. There are no new names.
        >>> old_nid_list = ibs.get_annot_name_rowids(aid_list)
        >>> new_nid_list = ut.list_roll(old_nid_list, 1)
        >>> ibs.set_annot_name_rowids(aid_list, new_nid_list)
        >>> result = ibs.wildbook_signal_annot_name_changes(aid_list, wb_target, dryrun)
        >>> ibs.set_annot_name_rowids(aid_list, old_nid_list)

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.control.manual_wildbook_funcs import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='PZ_MTEST')
        >>> #gid_list = ibs.get_valid_gids()[0:10]
        >>> gid_list = ibs.get_valid_gids()[3:5]
        >>> aid_list = ut.flatten(ibs.get_image_aids(gid_list))
        >>> # Test case where all names change to one known name
        >>> #old_nid_list = ibs.get_annot_name_rowids(aid_list)
        >>> #new_nid_list = [old_nid_list[0]] * len(old_nid_list)
        >>> old_nid_list = [1, 2]
        >>> new_nid_list = [1, 1]
        >>> print('old_nid_list = %r' % (old_nid_list,))
        >>> print('new_nid_list = %r' % (new_nid_list,))
        >>> ibs.set_annot_name_rowids(aid_list, new_nid_list)
        >>> result = ibs.wildbook_signal_annot_name_changes(aid_list, wb_target, dryrun)
        >>> # Undo changes here (not undone in wildbook)
        >>> #ibs.set_annot_name_rowids(aid_list, old_nid_list)

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.control.manual_wildbook_funcs import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='PZ_MTEST')
        >>> gid_list = ibs.get_valid_gids()[3:5]
        >>> aid_list = ut.flatten(ibs.get_image_aids(gid_list))
        >>> old_nid_list = [1, 2]
        >>> ibs.set_annot_name_rowids(aid_list, old_nid_list)
        >>> # Signal what currently exists (should put them back to normal)
        >>> result = ibs.wildbook_signal_annot_name_changes(aid_list, wb_target, dryrun)
    """
    print(
        '[ibs.wildbook_signal_imgsetid_list] signaling annot name changes to wildbook'
    )
    wb_url = ibs.get_wildbook_base_url(wb_target)
    try:
        ibs.assert_ia_available_for_wb(wb_target)
    except Exception:
        pass
    if aid_list is None:
        aid_list = ibs.get_valid_aids(is_known=True)

    annot_uuid_list = ibs.get_annot_uuids(aid_list)
    annot_name_text_list = ibs.get_annot_name_texts(aid_list)
    grouped_uuids = ut.group_items(annot_uuid_list, annot_name_text_list)
    url = wb_url + '/ia'
    payloads = [{
        'resolver': {
            'assignNameToAnnotations': {
                'name': new_name,
                'annotationIds': ut.lmap(str, annot_uuids),
            }
        }
    } for new_name, annot_uuids in grouped_uuids.items()]
    status_list = []
    for json_payload in ut.ProgressIter(payloads, lbl='submitting URL',
                                        freq=1):
        print('[_send] URL=%r with json_payload=%r' % (url, json_payload))
        if dryrun:
            status = False
        else:
            response = requests.post(url, json=json_payload)
            status = response.status_code == 200
            if not status:
                print('Failed to push new names')
                print(response.text)
        status_list.append(status)
    return status_list
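
The payload construction above groups annotation uuids by their (new) name with ut.group_items and then sends one assignNameToAnnotations request per name. The grouping step can be sketched with only the standard library; the names and uuids below are made up, and the helper simply mirrors what ut.group_items is used for here.

from collections import defaultdict

def group_items(items, keys):
    """Group `items` by the parallel list `keys`."""
    groups = defaultdict(list)
    for item, key in zip(items, keys):
        groups[key].append(item)
    return dict(groups)

annot_uuids = ['uuid-1', 'uuid-2', 'uuid-3']
name_texts = ['zebra_a', 'zebra_b', 'zebra_a']
payloads = [{
    'resolver': {
        'assignNameToAnnotations': {
            'name': name,
            'annotationIds': [str(uuid) for uuid in uuids],
        }
    }
} for name, uuids in group_items(annot_uuids, name_texts).items()]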
Example #22
def myquery():
    r"""

    BUG::
        THERE IS A BUG SOMEWHERE: HOW IS THIS POSSIBLE?
        if everything is weighted, how did the true positive even get a score
        while the true negative did not?
        qres_copy.filtkey_list = ['ratio', 'fg', 'homogerr', 'distinctiveness']
        CORRECT STATS
        {
            'max'  : [0.832, 0.968, 0.604, 0.000],
            'min'  : [0.376, 0.524, 0.000, 0.000],
            'mean' : [0.561, 0.924, 0.217, 0.000],
            'std'  : [0.114, 0.072, 0.205, 0.000],
            'nMin' : [1, 1, 1, 51],
            'nMax' : [1, 1, 1, 1],
            'shape': (52, 4),
        }
        INCORRECT STATS
        {
            'max'  : [0.759, 0.963, 0.264, 0.000],
            'min'  : [0.379, 0.823, 0.000, 0.000],
            'mean' : [0.506, 0.915, 0.056, 0.000],
            'std'  : [0.125, 0.039, 0.078, 0.000],
            'nMin' : [1, 1, 1, 24],
            'nMax' : [1, 1, 1, 1],
            'shape': (26, 4),
        }
        #   score_diff,  tp_score,  tn_score,       p,   K,  dcvs_clip_max,  fg_power,  homogerr_power
             0.494,     0.494,     0.000,  73.000,   2,          0.500,     0.100,          10.000

    see how separability changes as we vary things

    CommandLine:
        python -m ibeis.algo.hots.devcases --test-myquery
        python -m ibeis.algo.hots.devcases --test-myquery --show --index 0
        python -m ibeis.algo.hots.devcases --test-myquery --show --index 1
        python -m ibeis.algo.hots.devcases --test-myquery --show --index 2

    References:
        http://en.wikipedia.org/wiki/Pareto_distribution <- look into

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.all_imports import *  # NOQA
        >>> from ibeis.algo.hots.devcases import *  # NOQA
        >>> ut.dev_ipython_copypaster(myquery) if ut.inIPython() else myquery()
        >>> pt.show_if_requested()
    """
    from ibeis.algo.hots import special_query  # NOQA
    from ibeis.algo.hots import distinctiveness_normalizer  # NOQA
    from ibeis import viz  # NOQA
    import plottool as pt
    index = ut.get_argval('--index', int, 0)
    ibs, aid1, aid2, tn_aid = testdata_my_exmaples(index)
    qaids = [aid1]
    daids = [aid2] + [tn_aid]
    qvuuid = ibs.get_annot_visual_uuids(aid1)

    cfgdict_vsone = dict(
        sv_on=True,
        #sv_on=False,
        #codename='vsone_unnorm_dist_ratio_extern_distinctiveness',
        codename='vsone_unnorm_ratio_extern_distinctiveness',
        sver_output_weighting=True,
    )

    use_cache = False
    save_qcache = False

    qres_list, qreq_ = ibs.query_chips(qaids,
                                       daids,
                                       cfgdict=cfgdict_vsone,
                                       return_request=True,
                                       use_cache=use_cache,
                                       save_qcache=save_qcache,
                                       verbose=True)

    qreq_.load_distinctiveness_normalizer()
    qres = qres_list[0]
    top_aids = qres.get_top_aids()  # NOQA
    qres_orig = qres  # NOQA

    def test_config(qreq_, qres_orig, cfgdict):
        """ function to grid search over """
        qres_copy = copy.deepcopy(qres_orig)
        qreq_vsone_ = qreq_
        qres_vsone = qres_copy
        filtkey = hstypes.FiltKeys.DISTINCTIVENESS
        newfsv_list, newscore_aids = special_query.get_extern_distinctiveness(
            qreq_, qres_copy, **cfgdict)
        special_query.apply_new_qres_filter_scores(qreq_vsone_, qres_vsone,
                                                   newfsv_list, newscore_aids,
                                                   filtkey)
        tp_score = qres_copy.aid2_score[aid2]
        tn_score = qres_copy.aid2_score[tn_aid]
        return qres_copy, tp_score, tn_score

    #[.01, .1, .2, .5, .6, .7, .8, .9, 1.0]),
    #FiltKeys = hstypes.FiltKeys
    # FIXME: Use other way of doing gridsearch
    grid_basis = distinctiveness_normalizer.DCVS_DEFAULT.get_grid_basis()
    gridsearch = ut.GridSearch(grid_basis, label='qvuuid=%r' % (qvuuid, ))
    print('Begin Grid Search')
    for cfgdict in ut.ProgressIter(gridsearch, lbl='GridSearch'):
        qres_copy, tp_score, tn_score = test_config(qreq_, qres_orig, cfgdict)
        gridsearch.append_result(tp_score, tn_score)
    print('Finish Grid Search')

    # Get best result
    best_cfgdict = gridsearch.get_rank_cfgdict()
    qres_copy, tp_score, tn_score = test_config(qreq_, qres_orig, best_cfgdict)

    # Examine closely what you can do with scores
    if False:
        qres_copy = copy.deepcopy(qres_orig)
        qreq_vsone_ = qreq_
        filtkey = hstypes.FiltKeys.DISTINCTIVENESS
        newfsv_list, newscore_aids = special_query.get_extern_distinctiveness(
            qreq_, qres_copy, **cfgdict)
        ut.embed()

        def make_cm_very_old_tuple(qres_copy):
            assert ut.listfind(qres_copy.filtkey_list, filtkey) is None
            weight_filters = hstypes.WEIGHT_FILTERS
            weight_filtxs, nonweight_filtxs = special_query.index_partition(
                qres_copy.filtkey_list, weight_filters)

            aid2_fsv = {}
            aid2_fs = {}
            aid2_score = {}

            for new_fsv_vsone, daid in zip(newfsv_list, newscore_aids):
                pass
                break
                #scorex_vsone  = ut.listfind(qres_copy.filtkey_list, filtkey)
                #if scorex_vsone is None:
                # TODO: add spatial verification as a filter score
                # augment the vsone scores
                # TODO: parameterize
                weighted_ave_score = True
                if weighted_ave_score:
                    # weighted average scoring
                    new_fs_vsone = special_query.weighted_average_scoring(
                        new_fsv_vsone, weight_filtxs, nonweight_filtxs)
                else:
                    # product scoring
                    new_fs_vsone = special_query.product_scoring(new_fsv_vsone)
                new_score_vsone = new_fs_vsone.sum()
                aid2_fsv[daid] = new_fsv_vsone
                aid2_fs[daid] = new_fs_vsone
                aid2_score[daid] = new_score_vsone
            return aid2_fsv, aid2_fs, aid2_score

        # Look at plot of query products
        for new_fsv_vsone, daid in zip(newfsv_list, newscore_aids):
            new_fs_vsone = special_query.product_scoring(new_fsv_vsone)
            scores_list = np.array(new_fs_vsone)[:, None].T
            pt.plot_sorted_scores(scores_list,
                                  logscale=False,
                                  figtitle=str(daid))
        pt.iup()
        special_query.apply_new_qres_filter_scores(qreq_vsone_, qres_copy,
                                                   newfsv_list, newscore_aids,
                                                   filtkey)

    # PRINT INFO
    import functools
    #ut.rrrr()
    get_stats_str = functools.partial(ut.get_stats_str,
                                      axis=0,
                                      newlines=True,
                                      precision=3)
    tp_stats_str = ut.align(get_stats_str(qres_copy.aid2_fsv[aid2]), ':')
    tn_stats_str = ut.align(get_stats_str(qres_copy.aid2_fsv[tn_aid]), ':')
    info_str_list = []
    info_str_list.append('qres_copy.filtkey_list = %r' %
                         (qres_copy.filtkey_list, ))
    info_str_list.append('CORRECT STATS')
    info_str_list.append(tp_stats_str)
    info_str_list.append('INCORRECT STATS')
    info_str_list.append(tn_stats_str)
    info_str = '\n'.join(info_str_list)
    print(info_str)

    # SHOW BEST RESULT
    #qres_copy.ishow_top(ibs, fnum=pt.next_fnum())
    #qres_orig.ishow_top(ibs, fnum=pt.next_fnum())

    # Text Information
    param_lbl = 'dcvs_power'
    param_stats_str = gridsearch.get_dimension_stats_str(param_lbl)
    print(param_stats_str)

    csvtext = gridsearch.get_csv_results(10)
    print(csvtext)

    # Parameter visualization
    fnum = pt.next_fnum()
    # plot parameter influence
    param_label_list = gridsearch.get_param_lbls()
    pnum_ = pt.get_pnum_func(2, len(param_label_list))
    for px, param_label in enumerate(param_label_list):
        gridsearch.plot_dimension(param_label, fnum=fnum, pnum=pnum_(px))
    # plot match figure
    pnum2_ = pt.get_pnum_func(2, 2)
    qres_copy.show_matches(ibs, aid2, fnum=fnum, pnum=pnum2_(2))
    qres_copy.show_matches(ibs, tn_aid, fnum=fnum, pnum=pnum2_(3))
    # Add figure labels
    figtitle = 'Effect of parameters on vsone separation for a single case'
    subtitle = 'qvuuid = %r' % (qvuuid)
    figtitle += '\n' + subtitle
    pt.set_figtitle(figtitle)
    # Save Figure
    #fig_fpath = pt.save_figure(usetitle=True)
    #print(fig_fpath)
    # Write CSV Results
    #csv_fpath = fig_fpath + '.csv.txt'
    #ut.write_to(csv_fpath, csvtext)

    #qres_copy.ishow_top(ibs)
    #from matplotlib import pyplot as plt
    #plt.show()
    #print(ut.list_str()))
    # TODO: plot max variation dims
    #import plottool as pt
    #pt.plot(p_list, diff_list)
    """
Example #23
0
    def detect(bing, input_gpath_list, **kwargs):
        '''
            Run detection with a given loaded model on a list of images

            Args:
                input_gpath_list (list of str): the list of image paths that you want
                    to test

            Kwargs:
                numPerSz (int): the number of results per size

        '''
        # Default values
        params = odict([
            ('numPerSz', 130),
            ('batch_size', None),
            ('results_val_array', None),  # This value always gets overwritten
            ('results_len_array', None),  # This value always gets overwritten
            ('RESULT_LENGTH', None),  # This value always gets overwritten
            ('serial', False),
            ('verbose', bing.verbose),
            ('quiet', bing.quiet),
        ])
        params.update(kwargs)
        params['RESULT_LENGTH'] = RESULT_LENGTH

        # Try to determine the parallel processing batch size
        if params['batch_size'] is None:
            try:
                cpu_count = multiprocessing.cpu_count()
                if not params['quiet']:
                    print('[pybing py] Detecting with %d CPUs' % (cpu_count, ))
                params['batch_size'] = cpu_count
            except Exception:
                params['batch_size'] = 8

        # Run training algorithm
        batch_size = params['batch_size']
        del params['batch_size']  # Remove this value from params
        # ceiling division so an evenly divisible list does not yield an empty trailing batch
        batch_num = (len(input_gpath_list) + batch_size - 1) // batch_size
        # Detect for each batch
        for batch in ut.ProgressIter(range(batch_num),
                                     lbl="[pybing py]",
                                     freq=1,
                                     invert_rate=True):
            begin = time.time()
            start = batch * batch_size
            end = start + batch_size
            if end > len(input_gpath_list):
                end = len(input_gpath_list)
            input_gpath_list_ = input_gpath_list[start:end]
            num_images = len(input_gpath_list_)
            # Run detection serially if there is less than half a batch to process
            if num_images < min(batch_size / 2, 8):
                params['serial'] = True
            # Allocate the output arrays that the C library will fill in
            params['results_val_array'] = np.empty(num_images,
                                                   dtype=NP_ARRAY_FLOAT)
            params['results_len_array'] = np.empty(num_images, dtype=C_INT)
            # Make the params_list
            params_list = [
                _cast_list_to_c(input_gpath_list_, C_CHAR),
                num_images,
            ] + list(params.values())  # list() keeps this Python 3 compatible (.values() is a view)
            BING_CLIB.detect(bing.detector_c_obj, *params_list)
            results_list = _extract_np_array(params['results_len_array'],
                                             params['results_val_array'],
                                             NP_ARRAY_FLOAT, NP_FLOAT32,
                                             RESULT_LENGTH)
            conclude = time.time()
            if not params['quiet']:
                print('[pybing py] Took %r seconds to compute %d images' % (
                    conclude - begin,
                    num_images,
                ))
            for input_gpath, result_list in zip(input_gpath_list_,
                                                results_list):
                result_list_ = []
                for result in result_list:
                    # Unpack result into a nice Python dictionary and return
                    temp = {}
                    temp['minx'] = int(result[0])
                    temp['miny'] = int(result[1])
                    temp['maxx'] = int(result[2])
                    temp['maxy'] = int(result[3])
                    result_list_.append(temp)
                yield (input_gpath, result_list_)
            params['results_val_array'] = None
            params['results_len_array'] = None
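A hypothetical usage sketch for this detect() generator follows. It assumes a BING detector object named bing has already been constructed with a loaded model, and the image paths are placeholders; each yielded item pairs an image path with a list of bounding-box dicts keyed by minx/miny/maxx/maxy.

# Hypothetical usage (assumes `bing` is a loaded detector; paths are placeholders)
image_paths = ['/path/to/image1.png', '/path/to/image2.png']
for gpath, bboxes in bing.detect(image_paths, numPerSz=130):
    for bbox in bboxes:
        width = bbox['maxx'] - bbox['minx']
        height = bbox['maxy'] - bbox['miny']
        print('%s: box at (%d, %d), size %dx%d' % (
            gpath, bbox['minx'], bbox['miny'], width, height))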
Example #24
0
def generate_feat_properties(ibs, cid_list, config2_=None, nInput=None):
    r"""
    Computes features and yields results asynchronously.
    TODO: Remove IBEIS from this equation; move the firewall towards the controller.

    Args:
        ibs (IBEISController):
        cid_list (list):
        nInput (None):

    Returns:
        generator : generates param tups

    SeeAlso:
        ~/code/ibeis_cnn/ibeis_cnn/_plugin.py

    CommandLine:
        python -m ibeis.algo.preproc.preproc_feat --test-generate_feat_properties:0 --show
        python -m ibeis.algo.preproc.preproc_feat --test-generate_feat_properties:1

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.preproc.preproc_feat import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb('testdb1')
        >>> config2_ = ibs.new_query_params({})
        >>> nInput = None
        >>> aid_list = ibs.get_valid_aids()[::2]
        >>> ut.assert_all_not_None(aid_list, 'aid_list')
        >>> cid_list = ibs.get_annot_chip_rowids(aid_list, config2_=config2_)
        >>> ut.assert_all_not_None(cid_list, 'cid_list')
        >>> featgen = generate_feat_properties(ibs, cid_list, config2_, nInput)
        >>> feat_list = list(featgen)
        >>> assert len(feat_list) == len(aid_list)
        >>> (nFeat, kpts, vecs) = feat_list[0]
        >>> assert nFeat == len(kpts) and nFeat == len(vecs)
        >>> assert kpts.shape[1] == 6
        >>> assert vecs.shape[1] == 128
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> chip_fpath = ibs.get_annot_chip_fpath(aid_list[0], config2_=config2_)
        >>> pt.interact_keypoints.ishow_keypoints(chip_fpath, kpts, vecs)
        >>> ut.show_if_requested()

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.preproc.preproc_feat import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='testdb1')
        >>> cfgdict = {}
        >>> cfgdict['feat_type'] = 'hesaff+siam128'
        >>> qreq_ = ibs.new_query_request([1], [1, 2, 3], cfgdict)
        >>> query_config2 = qreq_.get_external_query_config2()
        >>> data_config2 = qreq_.get_external_data_config2()
        >>> cid_list = ibs.get_annot_chip_rowids(ibs.get_valid_aids())
        >>> config2_ = query_config2
        >>> nInput = None
        >>> featgen = generate_feat_properties(ibs, cid_list, config2_, nInput)
        >>> result = list(featgen)
        >>> print(result)

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.preproc.preproc_feat import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb('PZ_MTEST')
        >>> config2_ = ibs.new_query_params({'affine_invariance': False, 'bgmethod': 'cnn'})
        >>> nInput = None
        >>> aid_list = ibs.get_valid_aids()[0:4]
        >>> ut.assert_all_not_None(aid_list, 'aid_list')
        >>> cid_list = ibs.get_annot_chip_rowids(aid_list, config2_=config2_)
        >>> ut.assert_all_not_None(cid_list, 'cid_list')
        >>> featgen = generate_feat_properties(ibs, cid_list, config2_, nInput)
        >>> feat_list = list(featgen)
        >>> assert len(feat_list) == len(aid_list)
        >>> (nFeat, kpts, vecs) = feat_list[0]
        >>> assert nFeat == len(kpts) and nFeat == len(vecs)
        >>> assert kpts.shape[1] == 6
        >>> assert vecs.shape[1] == 128
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> chip_fpath = ibs.get_annot_chip_fpath(aid_list[0], config2_=config2_)
        >>> pt.interact_keypoints.ishow_keypoints(chip_fpath, kpts, vecs)
        >>> ut.show_if_requested()

    Ignore:
        # STARTBLOCK
        import plottool as pt
        chip_fpath_list = ibs.get_chip_fpath(cid_list)
        fpath_list = list(ut.interleave((probchip_fpath_list, chip_fpath_list)))
        iteract_obj = pt.interact_multi_image.MultiImageInteraction(fpath_list, nPerPage=4)
        ut.show_if_requested()
        # ENDBLOCK
    """

    if nInput is None:
        nInput = len(cid_list)
    if config2_ is not None:
        # Get config from config2_ object
        #print('id(config2_) = ' + str(id(config2_)))
        feat_cfgstr = config2_.get('feat_cfgstr')
        hesaff_params = config2_.get('hesaff_params')
        feat_type = config2_.get('feat_type')
        bgmethod = config2_.get('bgmethod')
        assert feat_cfgstr is not None
        assert hesaff_params is not None
    else:
        # TODO: assert False here
        # Get config from IBEIS controller
        bgmethod = ibs.cfg.feat_cfg.bgmethod
        feat_type = ibs.cfg.feat_cfg.feat_type
        feat_cfgstr = ibs.cfg.feat_cfg.get_cfgstr()
        hesaff_params = ibs.cfg.feat_cfg.get_hesaff_params()

    ut.assert_all_not_None(cid_list, 'cid_list')
    chip_fpath_list = ibs.get_chip_fpath(cid_list, check_external_storage=True)

    if bgmethod is not None:
        aid_list = ibs.get_chip_aids(cid_list)
        probchip_fpath_list = ibs.get_annot_probchip_fpath(aid_list)
    else:
        probchip_fpath_list = (None for _ in range(nInput))

    if ut.NOT_QUIET:
        print('[preproc_feat] feat_cfgstr = %s' % feat_cfgstr)
        if ut.VERYVERBOSE:
            print('hesaff_params = ' + ut.dict_str(hesaff_params))

    if feat_type == 'hesaff+sift':
        if USE_OPENMP:
            # Use Avi's openmp parallelization
            assert bgmethod is None, 'not implemented'
            featgen_mp = gen_feat_openmp(cid_list, chip_fpath_list,
                                         hesaff_params)
            featgen = ut.ProgressIter(featgen_mp, lbl='openmp feat')
        else:
            # Multiprocessing parallelization
            featgen = extract_hesaff_sift_feats(chip_fpath_list,
                                                probchip_fpath_list,
                                                hesaff_params=hesaff_params,
                                                nInput=nInput,
                                                ordered=True)
    elif feat_type == 'hesaff+siam128':
        from ibeis_cnn import _plugin
        assert bgmethod is None, 'not implemented'
        featgen = _plugin.generate_siam_l2_128_feats(ibs,
                                                     cid_list,
                                                     config2_=config2_)
    else:
        raise AssertionError('unknown feat_type=%r' % (feat_type, ))

    for nFeat, kpts, vecs in featgen:
        yield (
            nFeat,
            kpts,
            vecs,
        )
def flann_add_time_experiment():
    """
    Builds a plot of the number of annotations vs. indexer build time.

    TODO: time experiment

    CommandLine:
        python -m wbia.algo.hots._neighbor_experiment --test-flann_add_time_experiment --db PZ_MTEST --show
        python -m wbia.algo.hots._neighbor_experiment --test-flann_add_time_experiment --db PZ_Master0 --show
        utprof.py -m wbia.algo.hots._neighbor_experiment --test-flann_add_time_experiment --show

        valgrind --tool=memcheck --suppressions=valgrind-python.supp python -m wbia.algo.hots._neighbor_experiment --test-flann_add_time_experiment --db PZ_MTEST --no-with-reindex

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.algo.hots._neighbor_experiment import *  # NOQA
        >>> import wbia
        >>> #ibs = wbia.opendb('PZ_MTEST')
        >>> result = flann_add_time_experiment()
        >>> # verify results
        >>> print(result)
        >>> ut.show_if_requested()

    """
    import wbia
    import utool as ut
    import numpy as np
    import wbia.plottool as pt

    def make_flann_index(vecs, flann_params):
        flann = pyflann.FLANN()
        flann.build_index(vecs, **flann_params)
        return flann

    db = ut.get_argval('--db')
    ibs = wbia.opendb(db=db)

    # Input
    if ibs.get_dbname() == 'PZ_MTEST':
        initial = 1
        reindex_stride = 16
        addition_stride = 4
        max_ceiling = 120
    elif ibs.get_dbname() == 'PZ_Master0':
        # ibs = wbia.opendb(db='GZ_ALL')
        initial = 32
        reindex_stride = 32
        addition_stride = 16
        max_ceiling = 300001
    else:
        assert False
    # max_ceiling = 32
    all_daids = ibs.get_valid_aids()
    max_num = min(max_ceiling, len(all_daids))
    flann_params = vt.get_flann_params()

    # Output
    count_list, time_list_reindex = [], []
    count_list2, time_list_addition = [], []

    # Setup
    # all_randomize_daids_ = ut.deterministic_shuffle(all_daids[:])
    all_randomize_daids_ = all_daids
    # ensure all features are computed
    ibs.get_annot_vecs(all_randomize_daids_)

    def reindex_step(count, count_list, time_list_reindex):
        daids = all_randomize_daids_[0:count]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        with ut.Timer(verbose=False) as t:
            flann = make_flann_index(vecs, flann_params)  # NOQA
        count_list.append(count)
        time_list_reindex.append(t.ellapsed)

    def addition_step(count, flann, count_list2, time_list_addition):
        daids = all_randomize_daids_[count:count + 1]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        with ut.Timer(verbose=False) as t:
            flann.add_points(vecs)
        count_list2.append(count)
        time_list_addition.append(t.ellapsed)

    def make_initial_index(initial):
        daids = all_randomize_daids_[0:initial + 1]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        flann = make_flann_index(vecs, flann_params)
        return flann

    WITH_REINDEX = not ut.get_argflag('--no-with-reindex')
    if WITH_REINDEX:
        # Reindex Part
        reindex_lbl = 'Reindexing'
        _reindex_iter = range(1, max_num, reindex_stride)
        reindex_iter = ut.ProgressIter(_reindex_iter, lbl=reindex_lbl, freq=1)
        for count in reindex_iter:
            reindex_step(count, count_list, time_list_reindex)

    # Add Part
    flann = make_initial_index(initial)
    addition_lbl = 'Addition'
    _addition_iter = range(initial + 1, max_num, addition_stride)
    addition_iter = ut.ProgressIter(_addition_iter, lbl=addition_lbl)
    for count in addition_iter:
        addition_step(count, flann, count_list2, time_list_addition)

    logger.info('---')
    logger.info('Reindexing took %.2f seconds total' %
                sum(time_list_reindex))
    logger.info('Addition took %.2f seconds total' %
                sum(time_list_addition))
    logger.info('---')
    statskw = dict(precision=2, newlines=True)
    logger.info('Reindex stats ' +
                ut.get_stats_str(time_list_reindex, **statskw))
    logger.info('Addition stats ' +
                ut.get_stats_str(time_list_addition, **statskw))

    logger.info('Plotting')

    # with pt.FigureContext:

    next_fnum = iter(range(0, 2)).__next__  # use __next__ for Python 3
    pt.figure(fnum=next_fnum())
    if WITH_REINDEX:
        pt.plot2(
            count_list,
            time_list_reindex,
            marker='-o',
            equal_aspect=False,
            x_label='num_annotations',
            label=reindex_lbl + ' Time',
            dark=False,
        )

    # pt.figure(fnum=next_fnum())
    pt.plot2(
        count_list2,
        time_list_addition,
        marker='-o',
        equal_aspect=False,
        x_label='num_annotations',
        label=addition_lbl + ' Time',
    )

    pt.legend()
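For reference, here is a minimal standalone sketch of the same rebuild-versus-addition comparison, using random descriptors instead of database features. It assumes pyflann and numpy are importable; the data sizes and FLANN parameters are illustrative only.

import time
import numpy as np
import pyflann

rng = np.random.RandomState(0)
base = rng.rand(2000, 128).astype(np.float32)
extra = rng.rand(500, 128).astype(np.float32)

# Rebuild the index from scratch on the combined data
flann = pyflann.FLANN()
tic = time.time()
flann.build_index(np.vstack([base, extra]), algorithm='kdtree', trees=4)
print('full rebuild: %.3f s' % (time.time() - tic,))

# Build on the base set once, then add the new points incrementally
flann2 = pyflann.FLANN()
flann2.build_index(base, algorithm='kdtree', trees=4)
tic = time.time()
flann2.add_points(extra)
print('incremental add: %.3f s' % (time.time() - tic,))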
def augment_nnindexer_experiment():
    """

    References:
        http://answers.opencv.org/question/44592/flann-index-training-fails-with-segfault/

    CommandLine:
        utprof.py -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment
        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment

        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_MTEST --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6
        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6 --nosave-flann --show
        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6 --nosave-flann --show


        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6 --nosave-flann --no-api-cache --nocache-uuids

        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_MTEST --show
        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --show

        # RUNS THE SEGFAULTING CASE
        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --show
        # Debug it
        gdb python
        run -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --show
        gdb python
        run -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6


    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.algo.hots._neighbor_experiment import *  # NOQA
        >>> # execute function
        >>> augment_nnindexer_experiment()
        >>> # verify results
        >>> ut.show_if_requested()

    """
    import wbia

    # build test data
    # ibs = wbia.opendb('PZ_MTEST')
    ibs = wbia.opendb(defaultdb='PZ_Master0')
    if ibs.get_dbname() == 'PZ_MTEST':
        initial = 1
        addition_stride = 4
        max_ceiling = 100
    elif ibs.get_dbname() == 'PZ_Master0':
        initial = 128
        # addition_stride = 64
        # addition_stride = 128
        addition_stride = 256
        max_ceiling = 10000
        # max_ceiling = 4000
        # max_ceiling = 2000
        # max_ceiling = 600
    else:
        assert False
    all_daids = ibs.get_valid_aids(species='zebra_plains')
    qreq_ = ibs.new_query_request(all_daids, all_daids)
    max_num = min(max_ceiling, len(all_daids))

    # Clear Caches
    ibs.delete_flann_cachedir()
    neighbor_index_cache.clear_memcache()
    neighbor_index_cache.clear_uuid_cache(qreq_)

    # Setup
    all_randomize_daids_ = ut.deterministic_shuffle(all_daids[:])
    # ensure all features are computed

    nnindexer_list = []
    addition_lbl = 'Addition'
    _addition_iter = list(range(initial + 1, max_num, addition_stride))
    addition_iter = iter(
        ut.ProgressIter(_addition_iter,
                        lbl=addition_lbl,
                        freq=1,
                        autoadjust=False))
    time_list_addition = []
    # time_list_reindex = []
    addition_count_list = []
    tmp_cfgstr_list = []

    # for _ in range(80):
    #    next(addition_iter)
    try:
        memtrack = ut.MemoryTracker(disable=False)
        for count in addition_iter:
            aid_list_ = all_randomize_daids_[0:count]
            # Request an indexer which could be an augmented version of an existing indexer.
            with ut.Timer(verbose=False) as t:
                memtrack.report('BEFORE AUGMENT')
                nnindexer_ = neighbor_index_cache.request_augmented_wbia_nnindexer(
                    qreq_, aid_list_)
                memtrack.report('AFTER AUGMENT')
            nnindexer_list.append(nnindexer_)
            addition_count_list.append(count)
            time_list_addition.append(t.ellapsed)
            tmp_cfgstr_list.append(nnindexer_.cfgstr)
            logger.info('===============\n\n')
        logger.info(ut.repr2(time_list_addition))
        logger.info(ut.repr2(list(map(id, nnindexer_list))))
        logger.info(ut.repr2(tmp_cfgstr_list))
        logger.info(
            ut.repr2(list([nnindxer.cfgstr for nnindxer in nnindexer_list])))

        IS_SMALL = False

        if IS_SMALL:
            nnindexer_list = []
        reindex_label = 'Reindex'
        # go backwards for reindex
        _reindex_iter = list(range(initial + 1, max_num,
                                   addition_stride))[::-1]
        reindex_iter = ut.ProgressIter(_reindex_iter, lbl=reindex_label)
        time_list_reindex = []
        # time_list_reindex = []
        reindex_count_list = []

        for count in reindex_iter:
            logger.info('\n+===PREDONE====================\n')
            # check only a single size for memory leaks
            # count = max_num // 16 + ((x % 6) * 1)
            # x += 1

            aid_list_ = all_randomize_daids_[0:count]
            # Call the same code, but force rebuilds
            memtrack.report('BEFORE REINDEX')
            with ut.Timer(verbose=False) as t:
                nnindexer_ = neighbor_index_cache.request_augmented_wbia_nnindexer(
                    qreq_, aid_list_, force_rebuild=True, memtrack=memtrack)
            memtrack.report('AFTER REINDEX')
            ibs.print_cachestats_str()
            logger.info('[nnindex.MEMCACHE] size(NEIGHBOR_CACHE) = %s' %
                        (ut.get_object_size_str(
                            neighbor_index_cache.NEIGHBOR_CACHE.items()), ))
            logger.info('[nnindex.MEMCACHE] len(NEIGHBOR_CACHE) = %s' %
                        (len(neighbor_index_cache.NEIGHBOR_CACHE.items()), ))
            logger.info('[nnindex.MEMCACHE] size(UUID_MAP_CACHE) = %s' %
                        (ut.get_object_size_str(
                            neighbor_index_cache.UUID_MAP_CACHE), ))
            logger.info('totalsize(nnindexer) = ' +
                        ut.get_object_size_str(nnindexer_))
            memtrack.report_type(neighbor_index_cache.NeighborIndex)
            ut.print_object_size_tree(nnindexer_, lbl='nnindexer_')
            if IS_SMALL:
                nnindexer_list.append(nnindexer_)
            reindex_count_list.append(count)
            time_list_reindex.append(t.ellapsed)
            # import cv2
            # import matplotlib as mpl
            # logger.info(mem_top.mem_top(limit=30, width=120,
            #                      #exclude_refs=[cv2.__dict__, mpl.__dict__]
            #     ))
            logger.info('L___________________\n\n\n')
        logger.info(ut.repr2(time_list_reindex))
        if IS_SMALL:
            logger.info(ut.repr2(list(map(id, nnindexer_list))))
            logger.info(
                ut.repr2(list([nnindxer.cfgstr
                               for nnindxer in nnindexer_list])))
    except KeyboardInterrupt:
        logger.info('\n[train] Caught CTRL+C')
        resolution = ''
        from six.moves import input

        while not (resolution.isdigit()):
            logger.info('\n[train] What do you want to do?')
            logger.info('[train]     0 - Continue')
            logger.info('[train]     1 - Embed')
            logger.info('[train]  ELSE - Stop network training')
            resolution = input('[train] Resolution: ')
        resolution = int(resolution)
        # We have a resolution
        if resolution == 0:
            logger.info('resuming training...')
        elif resolution == 1:
            ut.embed()

    import wbia.plottool as pt

    next_fnum = iter(range(0, 1)).__next__  # use __next__ for Python 3
    pt.figure(fnum=next_fnum())
    if len(addition_count_list) > 0:
        pt.plot2(
            addition_count_list,
            time_list_addition,
            marker='-o',
            equal_aspect=False,
            x_label='num_annotations',
            label=addition_lbl + ' Time',
        )

    if len(reindex_count_list) > 0:
        pt.plot2(
            reindex_count_list,
            time_list_reindex,
            marker='-o',
            equal_aspect=False,
            x_label='num_annotations',
            label=reindex_label + ' Time',
        )

    pt.set_figtitle('Augmented indexer experiment')

    pt.legend()
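The experiments above time each step with utool's Timer context manager; a small sketch of that pattern in isolation is below.

import utool as ut

# time an arbitrary block and read the elapsed seconds afterwards
# (note utool spells the attribute `ellapsed`)
with ut.Timer(verbose=False) as t:
    total = sum(range(10 ** 6))
print('block took %.4f seconds (total=%d)' % (t.ellapsed, total))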
Example #27
0
def process_batch(model, X, y, theano_fn, fix_output=False, buffered=False,
                  show=False, spatial=False, showprog=True, **kwargs):
    """
    Computes the loss over all training batches.
    Passes data to a function that splits it into batches and appropriately
    preprocesses them, sends each batch to theano, and packages the results
    up nicely before returning.

    CommandLine:
        python -m ibeis_cnn --tf process_batch --verbose
        python -m ibeis_cnn --tf process_batch:0 --verbose
        python -m ibeis_cnn --tf process_batch:1 --verbose

    Example0:
        >>> # ENABLE_DOCTEST
        >>> from ibeis_cnn.batch_processing import *  # NOQA
        >>> from ibeis_cnn import models
        >>> model = models.DummyModel(batch_size=128)
        >>> X, y = model.make_random_testdata(num=2000, seed=None)
        >>> model.init_arch()
        >>> theano_fn = model.build_predict_func()
        >>> kwargs = {'X_is_cv2_native': False, 'showprog': True,
        ...           'randomize_batch_order': True}
        >>> outputs_ = process_batch(model, X, y, theano_fn, **kwargs)
        >>> result = ut.dict_str(outputs_)
        >>> print(result)

    Example1:
        >>> # ENABLE_DOCTEST
        >>> from ibeis_cnn.batch_processing import *  # NOQA
        >>> from ibeis_cnn import models
        >>> model = models.SiameseL2(batch_size=128, data_shape=(32, 32, 1),
        ...                          strict_batch_size=True)
        >>> X, y = model.make_random_testdata(num=2000, seed=None)
        >>> model.init_arch()
        >>> theano_fn = model.build_predict_func()
        >>> kwargs = {'X_is_cv2_native': False, 'showprog': True,
        ...           'randomize_batch_order': True}
        >>> outputs_ = process_batch(model, X, y, theano_fn, **kwargs)
        >>> result = ut.dict_str(outputs_)
        >>> print(result)

    Ignore:
        Xb, yb = batch_iter.next()
        assert Xb.shape == (8, 1, 4, 4)
        assert yb.shape == (8,)

    Ignore:
        X, y = model.make_random_testdata(num=2000, seed=None)
        kwargs = {'X_is_cv2_native': False, 'showprog': True,
                  'randomize_batch_order': True, 'time_thresh': .5,
                  }

        print('Testing Unbuffered')
        batch_iter = batch_iterator(model, X, y, lbl=theano_fn.name, **kwargs)
        for Xb, yb in ut.ProgressIter(batch_iter, lbl=':EXEC FG'):
            [ut.is_prime(346373) for _ in range(2)]

        # Notice how the progress iters are not interlaced like
        # they are in the unbuffered version
        import sys
        sys.stdout.flush()
        print('Testing Buffered')
        sys.stdout.flush()
        batch_iter2 = batch_iterator(model, X, y, lbl=theano_fn.name, **kwargs)
        batch_iter2 = ut.buffered_generator(batch_iter2, buffer_size=4)
        print('Iterating')
        for Xb, yb in ut.ProgressIter(batch_iter2, lbl=':EXEC FG'):
            [ut.is_prime(346373) for _ in range(2)]
    """
    import vtool as vt
    batch_output_list = []
    output_names = [
        str(outexpr.variable)
        if outexpr.variable.name is None else
        outexpr.variable.name
        for outexpr in theano_fn.outputs
    ]
    # augmented label list
    batch_target_list = []
    show = VERBOSE_BATCH or show

    # Break data into generated batches
    # generated data with explicit iteration
    batch_iter = batch_iterator(model, X, y, **kwargs)
    if buffered:
        batch_iter = ut.buffered_generator(batch_iter)

    if showprog:
        bs = VERBOSE_BATCH < 1
        num_batches = (X.shape[0] + model.batch_size - 1) // model.batch_size
        # progress iterator should be outside of this function
        batch_iter = ut.ProgressIter(batch_iter, nTotal=num_batches, lbl=theano_fn.name,
                                     freq=10, bs=bs, adjust=True)
    if y is None:
        # Labels are not known, only one argument
        for Xb, yb in batch_iter:
            batch_output = theano_fn(Xb)
            batch_output_list.append(batch_output)
    else:
        # TODO: sliced batches
        for Xb, yb in batch_iter:
            # Runs a batch through the network and updates the weights. Just
            # returns what it did
            batch_output = theano_fn(Xb, yb)
            batch_output_list.append(batch_output)
            batch_target_list.append(yb)

            if show:
                # Print the network output for the first batch
                print('--------------')
                print(ut.list_str(zip(output_names, batch_output)))
                print('Correct: ', yb)
                print('--------------')
                show = False

    # get outputs of each type
    unstacked_output_gen = ([bop[count] for bop in batch_output_list]
                            for count, name in enumerate(output_names))

    if spatial:
        unstacked_output_gen = list(unstacked_output_gen)
        stacked_output_list = [[] for _ in range(len(unstacked_output_gen))]
        for index, output in enumerate(unstacked_output_gen):
            output = np.vstack(output)
            stacked_output_list[index] = output
    else:
        stacked_output_list  = [
            vt.safe_cat(_output_unstacked, axis=0)
            # concatenate_hack(_output_unstacked, axis=0)
            for _output_unstacked in unstacked_output_gen
        ]

    outputs_ = dict(zip(output_names, stacked_output_list))

    if y  is not None:
        auglbl_list = np.hstack(batch_target_list)
        outputs_['auglbl_list'] = auglbl_list

    if fix_output:
        # batch iteration may wrap-around returned data. slice off the padding
        # integer division so the slice below receives an int under Python 3
        num_inputs = X.shape[0] // model.data_per_label_input
        num_outputs = num_inputs * model.data_per_label_output
        for key in six.iterkeys(outputs_):
            outputs_[key] = outputs_[key][0:num_outputs]

    encoder = getattr(model, 'encoder', None)
    if encoder is not None and 'predictions' in outputs_:
        pred = outputs_['predictions']
        outputs_['labeled_predictions'] = encoder.inverse_transform(pred)
    return outputs_
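A minimal numpy-only sketch of the batch-then-stack pattern process_batch implements (run a function over fixed-size batches, collect per-batch outputs, then stack them back into whole-dataset arrays). The toy "network" here is a hypothetical stand-in for theano_fn.

import numpy as np


def process_in_batches(func, X, batch_size=128):
    outputs = []
    for start in range(0, X.shape[0], batch_size):
        Xb = X[start:start + batch_size]
        outputs.append(func(Xb))
    # stack per-batch results back into a single array
    return np.vstack(outputs)


# toy "network": per-row L2 norm, kept 2-D so vstack works
X = np.random.rand(1000, 32).astype(np.float32)
norms = process_in_batches(lambda Xb: np.linalg.norm(Xb, axis=1, keepdims=True), X)
assert norms.shape == (1000, 1)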
Example #28
0
File: harness.py Project: whaozl/ibeis
def make_single_testres(ibs,
                        qaids,
                        daids,
                        pipecfg_list,
                        cfgx2_lbl,
                        cfgdict_list,
                        lbl,
                        testnameid,
                        use_cache=None,
                        subindexer_partial=ut.ProgressIter):
    """
    CommandLine:
        python -m ibeis.expt.harness --exec-run_test_configurations2
    """
    cfgslice = None
    if cfgslice is not None:
        pipecfg_list = pipecfg_list[cfgslice]

    dbname = ibs.get_dbname()

    if ut.NOT_QUIET:
        print('[harn] Make single testres')

    cfgx2_qreq_ = [
        ibs.new_query_request(qaids, daids, verbose=False, query_cfg=pipe_cfg)
        for pipe_cfg in ut.ProgressIter(
            pipecfg_list, lbl='Building qreq_', enabled=False)
    ]

    if use_cache is None:
        use_cache = USE_BIG_TEST_CACHE

    if use_cache:
        try:
            cachetup = get_big_test_cache_info(ibs, cfgx2_qreq_)
            testres = ut.load_cache(*cachetup)
            testres.cfgdict_list = cfgdict_list
            testres.cfgx2_lbl = cfgx2_lbl  # hack override
        except IOError:
            pass
        else:
            if ut.NOT_QUIET:
                ut.colorprint('[harn] single testres cache hit... returning',
                              'turquoise')
            return testres

    if ibs.table_cache:
        # HACK
        prev_feat_cfgstr = None

    cfgx2_cfgresinfo = []
    #nPipeCfg = len(pipecfg_list)
    cfgiter = subindexer_partial(range(len(cfgx2_qreq_)),
                                 lbl='query config',
                                 freq=1,
                                 adjust=False,
                                 separate=True)
    # Run each pipeline configuration
    for cfgx in cfgiter:
        qreq_ = cfgx2_qreq_[cfgx]

        ut.colorprint('testnameid=%r' % (testnameid, ), 'green')
        ut.colorprint(
            'annot_cfgstr = %s' %
            (qreq_.get_cfgstr(with_input=True, with_pipe=False), ), 'yellow')
        ut.colorprint(
            'pipe_cfgstr= %s' % (qreq_.get_cfgstr(with_data=False), ),
            'turquoise')
        ut.colorprint('pipe_hashstr = %s' % (qreq_.get_pipe_hashid(), ),
                      'teal')
        if DRY_RUN:
            continue

        indent_prefix = '[%s cfg %d/%d]' % (
            dbname,
            # cfgiter.count (doesn't work when quiet)
            (cfgiter.parent_index * cfgiter.nTotal) + cfgx,
            cfgiter.nTotal * cfgiter.parent_nTotal)

        with ut.Indenter(indent_prefix):
            # Run the test / read cache
            _need_compute = True
            if use_cache:
                # smaller cache for individual configuration runs
                st_cfgstr = qreq_.get_cfgstr(with_input=True)
                bt_cachedir = cachetup[0]
                st_cachedir = ut.unixjoin(bt_cachedir, 'small_tests')
                st_cachename = 'smalltest'
                ut.ensuredir(st_cachedir)
                try:
                    cfgres_info = ut.load_cache(st_cachedir, st_cachename,
                                                st_cfgstr)
                except IOError:
                    _need_compute = True
                else:
                    _need_compute = False
            if _need_compute:
                assert not ibs.table_cache
                if ibs.table_cache:
                    if (prev_feat_cfgstr is not None and
                            prev_feat_cfgstr != qreq_.qparams.feat_cfgstr):
                        # Clear features to preserve memory
                        ibs.clear_table_cache()
                        #qreq_.ibs.print_cachestats_str()
                cfgres_info = get_query_result_info(qreq_)
                # record previous feature configuration
                if ibs.table_cache:
                    prev_feat_cfgstr = qreq_.qparams.feat_cfgstr
                if use_cache:
                    ut.save_cache(st_cachedir, st_cachename, st_cfgstr,
                                  cfgres_info)
        if not NOMEMORY:
            # Store the results
            cfgx2_cfgresinfo.append(cfgres_info)
        else:
            cfgx2_qreq_[cfgx] = None
    if ut.NOT_QUIET:
        ut.colorprint('[harn] Completed running test configurations', 'white')
    if DRY_RUN:
        print('ran tests in dryrun mode.')
        return
    if NOMEMORY:
        print('ran tests in memory savings mode. Cannot Print. exiting')
        return
    # Store all pipeline config results in a test result object
    testres = test_result.TestResult(pipecfg_list, cfgx2_lbl, cfgx2_cfgresinfo,
                                     cfgx2_qreq_)
    testres.testnameid = testnameid
    testres.lbl = lbl
    testres.cfgdict_list = cfgdict_list
    testres.aidcfg = None
    if use_cache:
        try:
            ut.save_cache(*tuple(list(cachetup) + [testres]))
        except Exception as ex:
            ut.printex(ex, 'error saving testres cache', iswarning=True)
            if ut.SUPER_STRICT:
                raise
    return testres
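The try-load / compute / save caching used above follows a small reusable pattern; a sketch with utool's load_cache/save_cache is below. The directory, cache name, cfgstr, and the expensive_computation callable are all illustrative.

import utool as ut


def cached_result(cachedir, cachename, cfgstr, expensive_computation):
    ut.ensuredir(cachedir)
    try:
        # cache hit: key is (cachedir, cachename, cfgstr)
        data = ut.load_cache(cachedir, cachename, cfgstr)
    except IOError:
        # cache miss: compute and write back
        data = expensive_computation()
        ut.save_cache(cachedir, cachename, cfgstr, data)
    return data


result = cached_result('./_cache', 'smalltest', 'cfgstr=example',
                       lambda: sum(range(1000000)))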
Example #29
0
File: harness.py Project: whaozl/ibeis
def run_test_configurations2(ibs,
                             acfg_name_list,
                             test_cfg_name_list,
                             use_cache=None,
                             qaid_override=None,
                             daid_override=None,
                             initial_aids=None):
    """
    Loops over annot configs.

    Try to use this function as a starting point to clean up this module;
    the code is becoming untenable.

    CommandLine:
        python -m ibeis.expt.harness --exec-run_test_configurations2

    Example:
        >>> # SLOW_DOCTEST
        >>> from ibeis.expt.harness import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='PZ_MTEST')
        >>> default_acfgstrs = ['controlled:qsize=20,dpername=1,dsize=10', 'controlled:qsize=20,dpername=10,dsize=100']
        >>> acfg_name_list = ut.get_argval(('--aidcfg', '--acfg', '-a'), type_=list, default=default_acfgstrs)
        >>> test_cfg_name_list = ut.get_argval(('-t', '-p'), type_=list, default=['custom', 'custom:fg_on=False'])
        >>> use_cache = False
        >>> testres_list = run_test_configurations2(ibs, acfg_name_list, test_cfg_name_list, use_cache)
    """
    print('[harn] run_test_configurations2')
    # Generate list of database annotation configurations
    if len(acfg_name_list) == 0:
        raise ValueError('must give acfg name list')

    acfg_list, expanded_aids_list = experiment_helpers.get_annotcfg_list(
        ibs,
        acfg_name_list,
        qaid_override=qaid_override,
        daid_override=daid_override,
        initial_aids=initial_aids,
        use_cache=use_cache)

    # Generate list of query pipeline param configs
    cfgdict_list, pipecfg_list = experiment_helpers.get_pipecfg_list(
        test_cfg_name_list, ibs=ibs)

    cfgx2_lbl = experiment_helpers.get_varied_pipecfg_lbls(cfgdict_list)
    # NOTE: Can specify --pcfginfo or --acfginfo

    if ut.NOT_QUIET:
        ut.colorprint(
            textwrap.dedent("""

        [harn]================
        [harn] harness.test_configurations2()""").strip(), 'white')
        msg = '[harn] Running %s using %s and %s' % (
            ut.quantstr('test',
                        len(acfg_list) * len(pipecfg_list)),
            ut.quantstr('pipeline config', len(pipecfg_list)),
            ut.quantstr('annot config', len(acfg_list)),
        )
        ut.colorprint(msg, 'white')

    testres_list = []

    nAcfg = len(acfg_list)

    testnameid = ibs.get_dbname() + ' ' + str(test_cfg_name_list) + str(
        acfg_name_list)
    lbl = '[harn] TEST_CFG ' + str(test_cfg_name_list) + str(acfg_name_list)
    expanded_aids_iter = ut.ProgressIter(expanded_aids_list,
                                         lbl='annot config',
                                         freq=1,
                                         autoadjust=False,
                                         enabled=ut.NOT_QUIET)

    for acfgx, (qaids, daids) in enumerate(expanded_aids_iter):
        assert len(qaids) != 0, ('[harness] No query annotations specified')
        assert len(daids) != 0, ('[harness] No database annotations specified')
        acfg = acfg_list[acfgx]
        if ut.NOT_QUIET:
            ut.colorprint('\n---Annot config testnameid=%r' % (testnameid, ),
                          'turquoise')
        subindexer_partial = partial(ut.ProgressIter,
                                     parent_index=acfgx,
                                     parent_nTotal=nAcfg,
                                     enabled=ut.NOT_QUIET)
        testres = make_single_testres(ibs,
                                      qaids,
                                      daids,
                                      pipecfg_list,
                                      cfgx2_lbl,
                                      cfgdict_list,
                                      lbl,
                                      testnameid,
                                      use_cache=use_cache,
                                      subindexer_partial=subindexer_partial)
        if DRY_RUN:
            continue
        testres.acfg = acfg
        testres.test_cfg_name_list = test_cfg_name_list
        testres_list.append(testres)
    if DRY_RUN:
        print('DRYRUN: Cannot continue past run_test_configurations2')
        sys.exit(0)

    return testres_list
Example #30
0
def test_progress():
    """
    CommandLine:
        python -m utool.util_progress --test-test_progress

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_progress import *  # NOQA
        >>> test_progress()
    """
    import utool as ut
    #import time
    #ut.rrrr()
    print('_________________')
    #numiter = 50
    #sleeptime = 1E-4
    #sleeptime2 = 1E-2
    numiter = 20
    sleeptime = 1E-7
    sleeptime2 = 1E-7
    with ut.Timer():
        for x in ut.ProgressIter(range(0, numiter), freq=8, adjust=True):
            time.sleep(sleeptime)
    print('_________________')
    numiter = 50
    sleeptime = 1E-4
    with ut.Timer():
        for x in ut.ProgressIter(range(0, numiter), freq=8, adjust=True):
            time.sleep(sleeptime)
    print('_________________')
    print('No frequency run:')
    with ut.Timer():
        for x in range(0, numiter):
            time.sleep(sleeptime)
    print('_________________')
    numiter = 500
    sleeptime = 8E-7
    with ut.Timer():
        for x in ut.ProgressIter(range(0, numiter), freq=8, adjust=True):
            time.sleep(sleeptime)
    print('_________________')
    with ut.Timer():
        for x in ut.ProgressIter(range(0, numiter), freq=200):
            time.sleep(sleeptime)
    print('_________________')
    print('No frequency run:')
    with ut.Timer():
        for x in range(0, numiter):
            time.sleep(sleeptime)
    print('_________________')
    # Test nested iter
    # progiter1 = ut.ProgressIter(range(0, 10), lbl='prog1', freq=1, adjust=False)
    # for count1 in progiter1:
    #     progiter_partials = progiter1.get_subindexers(1)
    #     progiter2 = progiter_partials[0](range(0, 7), lbl='sub_prog1', freq=1, adjust=False)
    #     for count2 in progiter2:
    #         pass
    for x in ut.ProgressIter(zip(range(10), range(10)), freq=8, adjust=True):
        time.sleep(sleeptime)
        #progiter3 = progiter_partials[1](range(0, 3), lbl='sub_prog2', freq=1, adjust=False)
        #for count3 in progiter3:
        #    pass
    print('Double backspace progress 1')
    progiter1 = ut.ProgressIter(range(0, 10), lbl='prog1', freq=1, adjust=False, backspace=False)
    for count1 in progiter1:
        progiter2 = ut.ProgressIter(range(0, 10), lbl='prog2', freq=1, adjust=False, backspace=True)
        for count2 in progiter2:
            time.sleep(sleeptime2)

    print('Double backspace progress 2')
    progiter1 = ut.ProgressIter(range(0, 10), lbl='prog1', freq=1, adjust=False, backspace=True)
    for count1 in progiter1:
        progiter2 = ut.ProgressIter(range(0, 10), lbl='prog2', freq=1, adjust=False, backspace=True)
        for count2 in progiter2:
            time.sleep(sleeptime2)