Example #1
import utool as ut

# ``qreq_`` is a closure variable supplied by the enclosing scope.
def make_cacher(name, cfgstr=None):
    if cfgstr is None:
        cfgstr = ut.hashstr27(qreq_.get_cfgstr())
    if False and ut.is_developer():
        # Developer-only on-disk cache (deliberately disabled here)
        return ut.Cacher(fname=name + '_' + qreq_.ibs.get_dbname(),
                         cfgstr=cfgstr,
                         cache_dir=ut.ensuredir(
                             ut.truepath('~/Desktop/smkcache')))
    else:
        # Pass-through stub: ``ensure`` just calls the function, so call
        # sites work the same whether caching is enabled or not
        wrp = ut.DynStruct()

        def ensure(func):
            return func()

        wrp.ensure = ensure
        return wrp
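
A minimal usage sketch, assuming utool's ``Cacher.ensure`` loads a cached value or computes and saves it via the supplied function (the stub wrapper above mimics that interface); ``compute_assignments`` is a hypothetical placeholder:

def compute_assignments():
    # Hypothetical expensive computation whose result should be cached
    return {'scores': [0.1, 0.9]}

cacher = make_cacher('smk_assign')
# The same call works for both branches: real Cacher or pass-through stub
assignments = cacher.ensure(compute_assignments)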
Example #2
def get_big_cacher(qreq_):
    # Build a Cacher keyed by the query request's big-cache info
    bc_dpath, bc_fname, bc_cfgstr = qreq_.get_bigcache_info()
    cacher = ut.Cacher(bc_fname, bc_cfgstr, cache_dir=bc_dpath)
    return cacher
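
Usage might look like the following sketch; ``tryload`` returns None on a cache miss, after which ``qreq_.execute()`` (used below in ``bigcache_vsone``) recomputes the match list:

cacher = get_big_cacher(qreq_)
cm_list = cacher.tryload()
if cm_list is None:
    cm_list = qreq_.execute()  # recompute on a cache miss
    cacher.save(cm_list)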
def bigcache_vsone(qreq_, hyper_params):
    """
    Cached output of one-vs-one matches

        >>> from wbia.scripts.script_vsone import *  # NOQA
        >>> self = OneVsOneProblem()
        >>> qreq_ = self.qreq_
        >>> hyper_params = self.hyper_params
    """
    import numpy as np
    import utool as ut
    import vtool as vt
    import wbia

    # Get a set of training pairs
    ibs = qreq_.ibs
    cm_list = qreq_.execute()
    infr = wbia.AnnotInference.from_qreq_(qreq_, cm_list, autoinit=True)

    # Per query choose a set of correct, incorrect, and random training pairs
    aid_pairs_ = infr._cm_training_pairs(
        rng=np.random.RandomState(42), **hyper_params.pair_sample
    )

    aid_pairs_ = vt.unique_rows(np.array(aid_pairs_), directed=False).tolist()

    # photobomb_samples is assumed to be defined alongside this function
    pb_aid_pairs_ = photobomb_samples(ibs)

    # TODO: try to add in more non-comparable samples
    aid_pairs_ = pb_aid_pairs_ + aid_pairs_
    aid_pairs_ = vt.unique_rows(np.array(aid_pairs_))

    # ======================================
    # Compute one-vs-one scores and local_measures
    # ======================================

    # Prepare lazy attributes for annotations
    qreq_ = infr.qreq_
    ibs = qreq_.ibs
    qconfig2_ = qreq_.extern_query_config2
    dconfig2_ = qreq_.extern_data_config2
    qannot_cfg = ibs.depc.stacked_config(None, 'featweight', qconfig2_)
    dannot_cfg = ibs.depc.stacked_config(None, 'featweight', dconfig2_)

    # Remove any pairs missing features
    if dannot_cfg == qannot_cfg:
        unique_annots = ibs.annots(np.unique(np.array(aid_pairs_)), config=dannot_cfg)
        # Note: the original ``~np.array(...) > 0`` parses as ``(~arr) > 0``
        # and never fires; test for zero features directly instead
        bad_aids = set(unique_annots.compress(np.array(unique_annots.num_feats) == 0).aids)
    else:
        annots1_ = ibs.annots(ut.unique(ut.take_column(aid_pairs_, 0)), config=qannot_cfg)
        annots2_ = ibs.annots(ut.unique(ut.take_column(aid_pairs_, 1)), config=dannot_cfg)
        bad_aids1 = annots1_.compress(np.array(annots1_.num_feats) == 0).aids
        bad_aids2 = annots2_.compress(np.array(annots2_.num_feats) == 0).aids
        bad_aids = set(bad_aids1 + bad_aids2)
    subset_idxs = np.where(
        [not (a1 in bad_aids or a2 in bad_aids) for a1, a2 in aid_pairs_]
    )[0]
    # Keep only a random subset
    if hyper_params.subsample:
        rng = np.random.RandomState(3104855634)
        num_max = hyper_params.subsample
        if num_max < len(subset_idxs):
            subset_idxs = rng.choice(subset_idxs, size=num_max, replace=False)
            subset_idxs = sorted(subset_idxs)

    # Take the current selection
    aid_pairs = ut.take(aid_pairs_, subset_idxs)

    if True:
        # NEW WAY: run vsone directly on the selected subset
        config = hyper_params.vsone_assign
        # TODO: ensure annot props like chips and features can be
        # set appropriately via the qreq_ config
        matches = infr.exec_vsone_subset(aid_pairs, config=config)
    else:
        query_aids = ut.take_column(aid_pairs, 0)
        data_aids = ut.take_column(aid_pairs, 1)
        # OLD WAY
        # Determine a unique set of annots per config
        configured_aids = ut.ddict(set)
        configured_aids[qannot_cfg].update(query_aids)
        configured_aids[dannot_cfg].update(data_aids)

        # Make efficient annot-object representation
        configured_obj_annots = {}
        for config, aids in configured_aids.items():
            annots = ibs.annots(sorted(list(aids)), config=config)
            configured_obj_annots[config] = annots

        annots1 = configured_obj_annots[qannot_cfg].loc(query_aids)
        annots2 = configured_obj_annots[dannot_cfg].loc(data_aids)

        # Get a hash based on the visual appearance of each annotation pair,
        # as well as the algorithm configurations used to compute those properties
        qvuuids = annots1.visual_uuids
        dvuuids = annots2.visual_uuids
        qcfgstr = annots1._config.get_cfgstr()
        dcfgstr = annots2._config.get_cfgstr()
        annots_cfgstr = ut.hashstr27(qcfgstr) + ut.hashstr27(dcfgstr)
        vsone_uuids = [
            ut.combine_uuids(uuids, salt=annots_cfgstr)
            for uuids in ut.ProgIter(
                zip(qvuuids, dvuuids), length=len(qvuuids), label='hashing ids'
            )
        ]

        # Combine into a big cache for the entire 1-v-1 matching run
        big_uuid = ut.hashstr_arr27(vsone_uuids, '', pathsafe=True)
        cacher = ut.Cacher('vsone_v7', cfgstr=str(big_uuid), appname='vsone_rf_train')

        cached_data = cacher.tryload()
        if cached_data is not None:
            # Caching is not fully reliable for PairwiseMatch objects, so some
            # postprocessing is needed
            configured_lazy_annots = ut.ddict(dict)
            for config, annots in configured_obj_annots.items():
                annot_dict = configured_lazy_annots[config]
                for _annot in ut.ProgIter(annots.scalars(), label='make lazy dict'):
                    annot_dict[_annot.aid] = _annot._make_lazy_dict()

            # Extract pairs of annot objects (with shared caches)
            lazy_annots1 = ut.take(configured_lazy_annots[qannot_cfg], query_aids)
            lazy_annots2 = ut.take(configured_lazy_annots[dannot_cfg], data_aids)

            # Create a set of PairwiseMatches with the correct annot properties
            matches = [
                vt.PairwiseMatch(annot1, annot2)
                for annot1, annot2 in zip(lazy_annots1, lazy_annots2)
            ]

            # Updating a fresh set of matches ensures the annot1/annot2
            # properties are set correctly
            for key, cached_matches in list(cached_data.items()):
                fixed_matches = [match.copy() for match in matches]
                for fixed, internal in zip(fixed_matches, cached_matches):
                    dict_ = internal.__dict__
                    ut.delete_dict_keys(dict_, ['annot1', 'annot2'])
                    fixed.__dict__.update(dict_)
                cached_data[key] = fixed_matches
        else:
            cached_data = vsone_(
                qreq_,
                query_aids,
                data_aids,
                qannot_cfg,
                dannot_cfg,
                configured_obj_annots,
                hyper_params,
            )
            cacher.save(cached_data)
        # Select which cached measure set to use ('SV_LNBNN' is the alternative)
        key_ = 'RAT_SV'
        matches = cached_data[key_]
    return matches, infr
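
Following the doctest in the docstring, the function might be driven like this sketch (the final print is illustrative only):

from wbia.scripts.script_vsone import *  # NOQA

self = OneVsOneProblem()
matches, infr = bigcache_vsone(self.qreq_, self.hyper_params)
print('computed %d one-vs-one matches' % len(matches))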
Example #4
def monkeypatch_encounters(ibs, aids, cache=None, **kwargs):
    """
    Hacks in a temporary custom definition of encounters for this controller

    50 days for PZ_MTEST
    kwargs = dict(days=50)

    if False:
        name_mindeltas = []
        for name in annots.group_items(annots.nids).values():
            times = name.image_unixtimes_asfloat
            deltas = [ut.unixtime_to_timedelta(np.abs(t1 - t2))
                      for t1, t2 in ut.combinations(times, 2)]
            if deltas:
                name_mindeltas.append(min(deltas))
        print(ut.repr3(ut.lmap(ut.get_timedelta_str,
                               sorted(name_mindeltas))))
    """
    from ibeis.algo.preproc.occurrence_blackbox import cluster_timespace_sec
    import datetime
    import numpy as np
    import utool as ut
    if len(aids) == 0:
        return
    annots = ibs.annots(sorted(set(aids)))
    thresh_sec = datetime.timedelta(**kwargs).total_seconds()
    # thresh_sec = datetime.timedelta(minutes=30).seconds

    if cache is None:
        cache = True
        # cache = len(aids) > 200
    cfgstr = str(ut.combine_uuids(annots.visual_uuids)) + str(thresh_sec)
    cacher = ut.Cacher('occurrence_labels', cfgstr=cfgstr, enabled=cache)
    data = cacher.tryload()
    if data is None:
        print('Computing occurrences for monkey patch for %d aids' %
              (len(aids)))
        posixtimes = annots.image_unixtimes_asfloat
        latlons = annots.gps
        data = cluster_timespace_sec(posixtimes,
                                     latlons,
                                     thresh_sec=thresh_sec,
                                     km_per_sec=.002)
        cacher.save(data)
    occurrence_ids = data
    if occurrence_ids is None:
        # Fall back to treating each annot as its own occurrence
        occurrence_ids = list(range(len(annots)))

    # +1 guards against log10(0) when the largest occurrence id is 0
    ndec = int(np.ceil(np.log10(max(occurrence_ids) + 1)))
    suffmt = '-monkey-occur%0' + str(ndec) + 'd'
    encounter_labels = [
        n + suffmt % (o, ) for o, n in zip(occurrence_ids, annots.names)
    ]
    occurrence_labels = [suffmt[1:] % (o, ) for o in occurrence_ids]
    enc_lookup = ut.dzip(annots.aids, encounter_labels)
    occur_lookup = ut.dzip(annots.aids, occurrence_labels)

    # annots_per_enc = ut.dict_hist(encounter_labels, ordered=True)
    # ut.get_stats(list(annots_per_enc.values()))

    # encounters = ibs._annot_groups(annots.group(encounter_labels)[1])
    # enc_names = ut.take_column(encounters.nids, 0)
    # name_to_encounters = ut.group_items(encounters, enc_names)

    # print('name_to_encounters = %s' % (ut.repr3(name_to_encounters)),)
    # print('Names to num encounters')
    # name_to_num_enc = ut.dict_hist(
    #     ut.map_dict_vals(len, name_to_encounters).values())

    # monkey patch to override encounter info
    def _monkey_get_annot_occurrence_text(ibs, aids):
        return ut.dict_take(occur_lookup, aids)

    def _monkey_get_annot_encounter_text(ibs, aids):
        return ut.dict_take(enc_lookup, aids)

    ut.inject_func_as_method(ibs,
                             _monkey_get_annot_encounter_text,
                             'get_annot_encounter_text',
                             force=True)
    ut.inject_func_as_method(ibs,
                             _monkey_get_annot_occurrence_text,
                             'get_annot_occurrence_text',
                             force=True)
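
A sketch of the intended call pattern, mirroring the "50 days for PZ_MTEST" note in the docstring; opening the database via ``wbia.opendb`` is an assumption:

import wbia

ibs = wbia.opendb('PZ_MTEST')  # assumption: the demo database is available
aids = ibs.get_valid_aids()
monkeypatch_encounters(ibs, aids, days=50)
# The controller now reports the temporary encounter labels
print(ibs.get_annot_encounter_text(aids[:5]))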