def compute_agg_rvecs(invassign, wx):
    """
    Sums and normalizes all rvecs that belong to the same word and the same
    annotation id
    """
    rvecs_list, error_flags = invassign.compute_nonagg_rvecs(wx)
    ax_list = invassign.wx2_axs[wx]
    maw_list = invassign.wx2_maws[wx]
    # Group members of each word by aid; we will collapse these groups
    unique_ax, groupxs = vt.group_indices(ax_list)
    # (weighted aggregation with multi-assign-weights)
    grouped_maws = vt.apply_grouping(maw_list, groupxs)
    grouped_rvecs = vt.apply_grouping(rvecs_list, groupxs)
    grouped_flags = vt.apply_grouping(~error_flags, groupxs)
    grouped_rvecs2_ = vt.zipcompress(grouped_rvecs, grouped_flags, axis=0)
    grouped_maws2_ = vt.zipcompress(grouped_maws, grouped_flags)
    is_good = [len(rvecs) > 0 for rvecs in grouped_rvecs2_]
    aggvecs = [
        aggregate_rvecs(rvecs, maws)[0]
        for rvecs, maws in zip(grouped_rvecs2_, grouped_maws2_)
    ]
    unique_ax2_ = unique_ax.compress(is_good)
    ax2_aggvec = dict(zip(unique_ax2_, aggvecs))
    # Need to recompute flags for consistency
    # flag is true when aggvec is all zeros
    return ax2_aggvec
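# Illustrative sketch (not part of the original module): the aggregation above
# groups residual vectors by annotation id and collapses each group with a
# maw-weighted sum followed by L2 normalization. This standalone numpy version
# assumes that is the intended semantics; the helper name is hypothetical.
def _demo_group_and_aggregate():
    import numpy as np

    ax_list = np.array([0, 1, 0, 2, 1])             # annotation id per residual vector
    rvecs = np.random.rand(5, 4).astype(np.float32)
    maws = np.ones(5, dtype=np.float32)             # multi-assign weights

    ax2_aggvec = {}
    for ax in np.unique(ax_list):
        sel = ax_list == ax
        agg = (rvecs[sel] * maws[sel, None]).sum(axis=0)  # weighted sum
        norm = np.linalg.norm(agg)
        ax2_aggvec[ax] = agg / norm if norm > 0 else agg
    return ax2_aggvec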
def print_confusion_stats():
    """
    CommandLine:
        python dev.py --allgt --print-scorediff-mat-stats --print-confusion-stats -t rrvsone_grid
    """
    # Prints nextbest ranks
    print('-------------')
    print('ScoreDiffMatStats: %s' % testnameid)
    print('column_lbls = %r' % (column_lbls,))
    # cfgx2_gt_rawscores = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_gt_raw_score')
    # cfgx2_gf_rawscores = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_gf_raw_score')
    gt_rawscores_mat = ut.replace_nones(cfgx2_gt_rawscores, np.nan)
    gf_rawscores_mat = ut.replace_nones(cfgx2_gf_rawscores, np.nan)
    tp_rawscores = vt.zipcompress(gt_rawscores_mat, istrue_list)
    fp_rawscores = vt.zipcompress(gt_rawscores_mat, isfalse_list)
    tn_rawscores = vt.zipcompress(gf_rawscores_mat, istrue_list)
    fn_rawscores = vt.zipcompress(gf_rawscores_mat, isfalse_list)
    tp_rawscores_str, tp_rawscore_statstr = jagged_stats_info(tp_rawscores, 'tp_rawscores', cfgx2_lbl)
    fp_rawscores_str, fp_rawscore_statstr = jagged_stats_info(fp_rawscores, 'fp_rawscores', cfgx2_lbl)
    tn_rawscores_str, tn_rawscore_statstr = jagged_stats_info(tn_rawscores, 'tn_rawscores', cfgx2_lbl)
    fn_rawscores_str, fn_rawscore_statstr = jagged_stats_info(fn_rawscores, 'fn_rawscores', cfgx2_lbl)
    # print(tp_rawscores_str)
    # print(fp_rawscores_str)
    # print(tn_rawscores_str)
    # print(fn_rawscores_str)
    print(tp_rawscore_statstr)
    print(fp_rawscore_statstr)
    print(tn_rawscore_statstr)
    print(fn_rawscore_statstr)
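# Illustrative sketch (not from the original codebase) of the row-wise
# compression that vt.zipcompress is assumed to perform above: each row of
# scores is filtered by its corresponding row of boolean flags, so groundtruth
# scores on rows flagged true become the "tp" scores, and so on. The helper
# name and the example call below are hypothetical.
def _demo_zipcompress(score_rows, flag_rows):
    import numpy as np

    return [np.asarray(scores)[np.asarray(flags, dtype=bool)]
            for scores, flags in zip(score_rows, flag_rows)]

# e.g. tp_rawscores = _demo_zipcompress(gt_rawscores_mat, istrue_list)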
def compute_nsum_score2(cm, qreq_=None):
    r"""
    Example3:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.name_scoring import *  # NOQA
        >>> #ibs, qreq_, cm_list = plh.testdata_pre_sver('testdb1', qaid_list=[1])
        >>> ibs, qreq_, cm_list = plh.testdata_post_sver('testdb1', qaid_list=[1], cfgdict=dict(fg_on=False, augment_queryside_hack=True))
        >>> cm = cm_list[0]
        >>> cm.evaluate_dnids(qreq_.ibs)
        >>> nsum_nid_list1, nsum_score_list1, featflag_list1 = compute_nsum_score2(cm, qreq_)
        >>> nsum_nid_list2, nsum_score_list2 = compute_nsum_score(cm, qreq_)
        >>> ut.quit_if_noshow()
        >>> cm.show_ranked_matches(qreq_, ori=True)
    """
    featflag_list2 = get_chipmatch_namescore_nonvoting_feature_flags(cm, qreq_)
    fs_list = cm.get_fsv_prod_list()
    name_groupxs2 = cm.name_groupxs
    nsum_nid_list2 = cm.unique_nids
    # --
    valid_fs_list2 = vt.zipcompress(fs_list, featflag_list2)
    name_grouped_valid_fs_list2 = vt.apply_grouping_(valid_fs_list2, name_groupxs2)
    nsum_score_list2 = np.array([
        sum(list(map(np.sum, valid_fs_group)))
        for valid_fs_group in name_grouped_valid_fs_list2
    ])
    if False:
        nsum_score_list3 = np.array([  # NOQA
            np.sum([fs_group.sum() for fs_group in valid_fs_group])
            for valid_fs_group in name_grouped_valid_fs_list2
        ])
    return nsum_nid_list2, nsum_score_list2, featflag_list2
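# Minimal numpy sketch (not part of the original API) of the name-score
# accumulation above: drop feature scores flagged as non-voting, group the
# surviving per-annotation score arrays by name, and sum each group. The
# helper name is hypothetical and the grouping indices are assumed to index
# into fs_list.
def _demo_nsum(fs_list, featflag_list, name_groupxs):
    import numpy as np

    valid_fs_list = [fs[flags] for fs, flags in zip(fs_list, featflag_list)]
    return np.array([
        sum(valid_fs_list[idx].sum() for idx in groupxs)
        for groupxs in name_groupxs
    ])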
def run_asmk_script():
    with ut.embed_on_exception_context:  # NOQA
        """
        >>> from wbia.algo.smk.script_smk import *
        """  # NOQA

        # ==============================================
        # PREPROCESSING CONFIGURATION
        # ==============================================
        config = {
            # 'data_year': 2013,
            'data_year': None,
            'dtype': 'float32',
            # 'root_sift': True,
            'root_sift': False,
            # 'centering': True,
            'centering': False,
            'num_words': 2**16,
            # 'num_words': 1E6
            # 'num_words': 8000,
            'kmeans_impl': 'sklearn.mini',
            'extern_words': False,
            'extern_assign': False,
            'assign_algo': 'kdtree',
            'checks': 1024,
            'int_rvec': True,
            'only_xy': False,
        }
        # Define which params are relevant for which operations
        relevance = {}
        relevance['feats'] = ['dtype', 'root_sift', 'centering', 'data_year']
        relevance['words'] = relevance['feats'] + ['num_words', 'extern_words', 'kmeans_impl']
        relevance['assign'] = relevance['words'] + ['checks', 'extern_assign', 'assign_algo']
        # relevance['ydata'] = relevance['assign'] + ['int_rvec']
        # relevance['xdata'] = relevance['assign'] + ['only_xy', 'int_rvec']

        nAssign = 1

        class SMKCacher(ut.Cacher):
            def __init__(self, fname, ext='.cPkl'):
                relevant_params = relevance[fname]
                relevant_cfg = ut.dict_subset(config, relevant_params)
                cfgstr = ut.get_cfg_lbl(relevant_cfg)
                dbdir = ut.truepath('/raid/work/Oxford/')
                super(SMKCacher, self).__init__(fname, cfgstr, cache_dir=dbdir, ext=ext)

        # ==============================================
        # LOAD DATASET, EXTRACT AND POSTPROCESS FEATURES
        # ==============================================
        if config['data_year'] == 2007:
            data = load_oxford_2007()
        elif config['data_year'] == 2013:
            data = load_oxford_2013()
        elif config['data_year'] is None:
            data = load_oxford_wbia()

        offset_list = data['offset_list']
        all_kpts = data['all_kpts']
        raw_vecs = data['all_vecs']
        query_uri_order = data['query_uri_order']
        data_uri_order = data['data_uri_order']
        # del data

        # ================
        # PRE-PROCESS
        # ================
        import vtool as vt

        # Alias names to avoid errors in interactive sessions
        proc_vecs = raw_vecs
        del raw_vecs

        feats_cacher = SMKCacher('feats', ext='.npy')
        all_vecs = feats_cacher.tryload()
        if all_vecs is None:
            if config['dtype'] == 'float32':
                logger.info('Converting vecs to float32')
                proc_vecs = proc_vecs.astype(np.float32)
            else:
                proc_vecs = proc_vecs
                raise NotImplementedError('other dtype')

            if config['root_sift']:
                with ut.Timer('Apply root sift'):
                    np.sqrt(proc_vecs, out=proc_vecs)
                    vt.normalize(proc_vecs, ord=2, axis=1, out=proc_vecs)

            if config['centering']:
                with ut.Timer('Apply centering'):
                    mean_vec = np.mean(proc_vecs, axis=0)
                    # Center and then re-normalize
                    np.subtract(proc_vecs, mean_vec[None, :], out=proc_vecs)
                    vt.normalize(proc_vecs, ord=2, axis=1, out=proc_vecs)

            if config['dtype'] == 'int8':
                smk_funcs

            all_vecs = proc_vecs
            feats_cacher.save(all_vecs)
        del proc_vecs

        # =====================================
        # BUILD VISUAL VOCABULARY
        # =====================================
        if config['extern_words']:
            words = data['words']
            assert config['num_words'] is None or len(words) == config['num_words']
        else:
            word_cacher = SMKCacher('words')
            words = word_cacher.tryload()
            if words is None:
                with ut.embed_on_exception_context:
                    if config['kmeans_impl'] == 'sklearn.mini':
                        import sklearn.cluster

                        rng = np.random.RandomState(13421421)
                        # init_size = int(config['num_words'] * 8)
                        init_size = int(config['num_words'] * 4)
                        # converged after 26043 iterations
                        clusterer = sklearn.cluster.MiniBatchKMeans(
                            config['num_words'],
                            init_size=init_size,
                            batch_size=1000,
                            compute_labels=False,
                            max_iter=20,
                            random_state=rng,
                            n_init=1,
                            verbose=1,
                        )
                        clusterer.fit(all_vecs)
                        words = clusterer.cluster_centers_
                    elif config['kmeans_impl'] == 'yael':
                        from yael import ynumpy

                        centroids, qerr, dis, assign, nassign = ynumpy.kmeans(
                            all_vecs,
                            config['num_words'],
                            init='kmeans++',
                            verbose=True,
                            output='all',
                        )
                        words = centroids
                word_cacher.save(words)

        # =====================================
        # ASSIGN EACH VECTOR TO ITS NEAREST WORD
        # =====================================
        if config['extern_assign']:
            assert config['extern_words'], 'need extern cluster to extern assign'
            idx_to_wxs = vt.atleast_nd(data['idx_to_wx'], 2)
            idx_to_maws = np.ones(idx_to_wxs.shape, dtype=np.float32)
            idx_to_wxs = np.ma.array(idx_to_wxs)
            idx_to_maws = np.ma.array(idx_to_maws)
        else:
            from wbia.algo.smk import vocab_indexer

            vocab = vocab_indexer.VisualVocab(words)
            dassign_cacher = SMKCacher('assign')
            assign_tup = dassign_cacher.tryload()
            if assign_tup is None:
                vocab.flann_params['algorithm'] = config['assign_algo']
                vocab.build()
                # Takes 12 minutes to assign Jegou vecs to 2**16 vocab
                with ut.Timer('assign vocab neighbors'):
                    _idx_to_wx, _idx_to_wdist = vocab.nn_index(
                        all_vecs, nAssign, checks=config['checks'])
                    if nAssign > 1:
                        idx_to_wxs, idx_to_maws = smk_funcs.weight_multi_assigns(
                            _idx_to_wx,
                            _idx_to_wdist,
                            massign_alpha=1.2,
                            massign_sigma=80.0,
                            massign_equal_weights=True,
                        )
                    else:
                        idx_to_wxs = np.ma.masked_array(_idx_to_wx, fill_value=-1)
                        idx_to_maws = np.ma.ones(
                            idx_to_wxs.shape, fill_value=-1, dtype=np.float32)
                        idx_to_maws.mask = idx_to_wxs.mask
                assign_tup = (idx_to_wxs, idx_to_maws)
                dassign_cacher.save(assign_tup)
            idx_to_wxs, idx_to_maws = assign_tup

        # Breakup vectors, keypoints, and word assignments by annotation
        wx_lists = [idx_to_wxs[left:right] for left, right in ut.itertwo(offset_list)]
        maw_lists = [idx_to_maws[left:right] for left, right in ut.itertwo(offset_list)]
        vecs_list = [all_vecs[left:right] for left, right in ut.itertwo(offset_list)]
        kpts_list = [all_kpts[left:right] for left, right in ut.itertwo(offset_list)]

        # =======================
        # FIND QUERY SUBREGIONS
        # =======================
        ibs, query_annots, data_annots, qx_to_dx = load_ordered_annots(
            data_uri_order, query_uri_order)
        daids = data_annots.aids
        qaids = query_annots.aids

        query_super_kpts = ut.take(kpts_list, qx_to_dx)
        query_super_vecs = ut.take(vecs_list, qx_to_dx)
        query_super_wxs = ut.take(wx_lists, qx_to_dx)
        query_super_maws = ut.take(maw_lists, qx_to_dx)

        # Mark which keypoints are within the bbox of the query
        query_flags_list = []
        only_xy = config['only_xy']
        for kpts_, bbox in zip(query_super_kpts, query_annots.bboxes):
            flags = kpts_inside_bbox(kpts_, bbox, only_xy=only_xy)
            query_flags_list.append(flags)

        logger.info('Queries are crops of existing database images.')
        logger.info('Looking at average percents')
        percent_list = [flags_.sum() / flags_.shape[0] for flags_ in query_flags_list]
        percent_stats = ut.get_stats(percent_list)
        logger.info('percent_stats = %s' % (ut.repr4(percent_stats),))

        import vtool as vt

        query_kpts = vt.zipcompress(query_super_kpts, query_flags_list, axis=0)
        query_vecs = vt.zipcompress(query_super_vecs, query_flags_list, axis=0)
        query_wxs = vt.zipcompress(query_super_wxs, query_flags_list, axis=0)
        query_maws = vt.zipcompress(query_super_maws, query_flags_list, axis=0)

        # =======================
        # CONSTRUCT QUERY / DATABASE REPR
        # =======================
        # int_rvec = not config['dtype'].startswith('float')
        int_rvec = config['int_rvec']

        X_list = []
        _prog = ut.ProgPartial(length=len(qaids), label='new X', bs=True, adjust=True)
        for aid, fx_to_wxs, fx_to_maws in _prog(zip(qaids, query_wxs, query_maws)):
            X = new_external_annot(aid, fx_to_wxs, fx_to_maws, int_rvec)
            X_list.append(X)

        # ydata_cacher = SMKCacher('ydata')
        # Y_list = ydata_cacher.tryload()
        # if Y_list is None:
        Y_list = []
        _prog = ut.ProgPartial(length=len(daids), label='new Y', bs=True, adjust=True)
        for aid, fx_to_wxs, fx_to_maws in _prog(zip(daids, wx_lists, maw_lists)):
            Y = new_external_annot(aid, fx_to_wxs, fx_to_maws, int_rvec)
            Y_list.append(Y)
        # ydata_cacher.save(Y_list)

        # ======================
        # Add in some groundtruth
        logger.info('Add in some groundtruth')
        for Y, nid in zip(Y_list, ibs.get_annot_nids(daids)):
            Y.nid = nid
        for X, nid in zip(X_list, ibs.get_annot_nids(qaids)):
            X.nid = nid
        for Y, qual in zip(Y_list, ibs.get_annot_quality_texts(daids)):
            Y.qual = qual

        # ======================
        # Add in other properties
        for Y, vecs, kpts in zip(Y_list, vecs_list, kpts_list):
            Y.vecs = vecs
            Y.kpts = kpts

        imgdir = ut.truepath('/raid/work/Oxford/oxbuild_images')
        for Y, imgid in zip(Y_list, data_uri_order):
            gpath = ut.unixjoin(imgdir, imgid + '.jpg')
            Y.gpath = gpath

        for X, vecs, kpts in zip(X_list, query_vecs, query_kpts):
            X.kpts = kpts
            X.vecs = vecs

        # ======================
        logger.info('Building inverted list')
        daids = [Y.aid for Y in Y_list]
        # wx_list = sorted(ut.list_union(*[Y.wx_list for Y in Y_list]))
        wx_list = sorted(set.union(*[Y.wx_set for Y in Y_list]))
        assert daids == data_annots.aids
        assert len(wx_list) <= config['num_words']

        wx_to_aids = smk_funcs.invert_lists(
            daids, [Y.wx_list for Y in Y_list], all_wxs=wx_list)

        # Compute IDF weights
        logger.info('Compute IDF weights')
        ndocs_total = len(daids)
        # Use only the unique number of words
        ndocs_per_word = np.array([len(set(wx_to_aids[wx])) for wx in wx_list])
        logger.info('ndocs_perword stats: ' + ut.repr4(ut.get_stats(ndocs_per_word)))
        idf_per_word = smk_funcs.inv_doc_freq(ndocs_total, ndocs_per_word)
        wx_to_weight = dict(zip(wx_list, idf_per_word))
        logger.info('idf stats: ' + ut.repr4(ut.get_stats(wx_to_weight.values())))

        # Filter junk
        Y_list_ = [Y for Y in Y_list if Y.qual != 'junk']

        # =======================
        # CHOOSE QUERY KERNEL
        # =======================
        params = {
            'asmk': dict(alpha=3.0, thresh=0.0),
            'bow': dict(),
            'bow2': dict(),
        }
        # method = 'bow'
        method = 'bow2'
        method = 'asmk'
        smk = SMK(wx_to_weight, method=method, **params[method])

        # Specific info for the type of query
        if method == 'asmk':
            # Make residual vectors
            if True:
                # The stacked way is 50x faster
                # TODO: extend for multi-assignment and record fxs
                flat_query_vecs = np.vstack(query_vecs)
                flat_query_wxs = np.vstack(query_wxs)
                flat_query_offsets = np.array([0] + ut.cumsum(ut.lmap(len, query_wxs)))

                flat_wxs_assign = flat_query_wxs
                flat_offsets = flat_query_offsets
                flat_vecs = flat_query_vecs
                tup = smk_funcs.compute_stacked_agg_rvecs(
                    words, flat_wxs_assign, flat_vecs, flat_offsets)
                all_agg_vecs, all_error_flags, agg_offset_list = tup
                if int_rvec:
                    all_agg_vecs = smk_funcs.cast_residual_integer(all_agg_vecs)
                agg_rvecs_list = [
                    all_agg_vecs[left:right]
                    for left, right in ut.itertwo(agg_offset_list)
                ]
                agg_flags_list = [
                    all_error_flags[left:right]
                    for left, right in ut.itertwo(agg_offset_list)
                ]
                for X, agg_rvecs, agg_flags in zip(X_list, agg_rvecs_list, agg_flags_list):
                    X.agg_rvecs = agg_rvecs
                    X.agg_flags = agg_flags[:, None]

                flat_wxs_assign = idx_to_wxs
                flat_offsets = offset_list
                flat_vecs = all_vecs
                tup = smk_funcs.compute_stacked_agg_rvecs(
                    words, flat_wxs_assign, flat_vecs, flat_offsets)
                all_agg_vecs, all_error_flags, agg_offset_list = tup
                if int_rvec:
                    all_agg_vecs = smk_funcs.cast_residual_integer(all_agg_vecs)
                agg_rvecs_list = [
                    all_agg_vecs[left:right]
                    for left, right in ut.itertwo(agg_offset_list)
                ]
                agg_flags_list = [
                    all_error_flags[left:right]
                    for left, right in ut.itertwo(agg_offset_list)
                ]
                for Y, agg_rvecs, agg_flags in zip(Y_list, agg_rvecs_list, agg_flags_list):
                    Y.agg_rvecs = agg_rvecs
                    Y.agg_flags = agg_flags[:, None]
            else:
                # This non-stacked way is about 500x slower
                _prog = ut.ProgPartial(label='agg Y rvecs', bs=True, adjust=True)
                for Y in _prog(Y_list_):
                    make_agg_vecs(Y, words, Y.vecs)
                _prog = ut.ProgPartial(label='agg X rvecs', bs=True, adjust=True)
                for X in _prog(X_list):
                    make_agg_vecs(X, words, X.vecs)
        elif method == 'bow2':
            # Hack for orig tf-idf bow vector
            nwords = len(words)
            for X in ut.ProgIter(X_list, label='make bow vector'):
                ensure_tf(X)
                bow_vector(X, wx_to_weight, nwords)
            for Y in ut.ProgIter(Y_list_, label='make bow vector'):
                ensure_tf(Y)
                bow_vector(Y, wx_to_weight, nwords)

        if method != 'bow2':
            for X in ut.ProgIter(X_list, 'compute X gamma'):
                X.gamma = smk.gamma(X)
            for Y in ut.ProgIter(Y_list_, 'compute Y gamma'):
                Y.gamma = smk.gamma(Y)

        # Execute matches (could go faster by enumerating candidates)
        scores_list = []
        for X in ut.ProgIter(X_list, label='query %s' % (smk,)):
            scores = [smk.kernel(X, Y) for Y in Y_list_]
            scores = np.array(scores)
            scores = np.nan_to_num(scores)
            scores_list.append(scores)

        import sklearn.metrics

        avep_list = []
        _iter = list(zip(scores_list, X_list))
        _iter = ut.ProgIter(_iter, label='evaluate %s' % (smk,))
        for scores, X in _iter:
            truth = [X.nid == Y.nid for Y in Y_list_]
            avep = sklearn.metrics.average_precision_score(truth, scores)
            avep_list.append(avep)
        avep_list = np.array(avep_list)
        mAP = np.mean(avep_list)
        logger.info('mAP = %r' % (mAP,))
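# Standalone sketch of the final evaluation step in run_asmk_script: mean
# average precision over queries using sklearn.metrics.average_precision_score,
# the same call the script makes. The toy scores and labels below are invented
# purely for illustration.
def _demo_mean_average_precision():
    import numpy as np
    import sklearn.metrics

    scores_list = [np.array([0.9, 0.2, 0.4]), np.array([0.1, 0.8, 0.3])]
    truth_list = [np.array([1, 0, 1]), np.array([0, 1, 0])]
    avep_list = [
        sklearn.metrics.average_precision_score(truth, scores)
        for truth, scores in zip(truth_list, scores_list)
    ]
    return np.mean(avep_list)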
def get_support_data(qreq_, daid_list):
    """
    CommandLine:
        python -m wbia.algo.hots.neighbor_index get_support_data --show

    Example:
        >>> # xdoctest: +REQUIRES(module:wbia_cnn)
        >>> from wbia.algo.hots.neighbor_index import *  # NOQA
        >>> import wbia
        >>> qreq_ = wbia.testdata_qreq_(defaultdb='PZ_MTEST', p=':fgw_thresh=.9,maxscale_thresh=10', a=':size=2')
        >>> daid_list = qreq_.daids
        >>> tup = get_support_data(qreq_, daid_list)
        >>> vecs_list, fgws_list, fxs_list = tup
        >>> assert all([np.all(fgws > .9) for fgws in fgws_list])
        >>> result = ('depth_profile = %r' % (ut.depth_profile(tup),))
        >>> print(result)
        depth_profile = [[(128, 128), (174, 128)], [128, 174], [128, 174]]

        I can't figure out why this test isn't deterministic all the time and
        I can't get it to reproduce the non-determinism. This could be due to
        theano.

        depth_profile = [[(39, 128), (22, 128)], [39, 22], [39, 22]]
        depth_profile = [[(35, 128), (24, 128)], [35, 24], [35, 24]]
        depth_profile = [[(34, 128), (31, 128)], [34, 31], [34, 31]]
        depth_profile = [[(83, 128), (129, 128)], [83, 129], [83, 129]]
        depth_profile = [[(13, 128), (104, 128)], [13, 104], [13, 104]]
    """
    config2_ = qreq_.get_internal_data_config2()
    vecs_list = qreq_.ibs.get_annot_vecs(daid_list, config2_=config2_)
    # Create corresponding feature indices
    fxs_list = [np.arange(len(vecs)) for vecs in vecs_list]
    # <HACK:featweight>
    # hack to get feature weights. returns None if feature weights are turned
    # off in config settings
    if config2_.minscale_thresh is not None or config2_.maxscale_thresh is not None:
        min_ = -np.inf if config2_.minscale_thresh is None else config2_.minscale_thresh
        max_ = np.inf if config2_.maxscale_thresh is None else config2_.maxscale_thresh
        kpts_list = qreq_.ibs.get_annot_kpts(daid_list, config2_=config2_)
        # kpts_list = vt.ziptake(kpts_list, fxs_list, axis=0)  # not needed for first filter
        scales_list = [vt.get_scales(kpts) for kpts in kpts_list]
        # Remove data under the threshold
        flags_list = [
            np.logical_and(scales >= min_, scales <= max_) for scales in scales_list
        ]
        vecs_list = vt.zipcompress(vecs_list, flags_list, axis=0)
        fxs_list = vt.zipcompress(fxs_list, flags_list, axis=0)

    if qreq_.qparams.fg_on:
        # I've found that the call to get_annot_fgweights is different on
        # different machines. Something must be configured differently.
        fgws_list = qreq_.ibs.get_annot_fgweights(daid_list, config2_=config2_, ensure=True)
        fgws_list = vt.ziptake(fgws_list, fxs_list, axis=0)
        # assert list(map(len, fgws_list)) == list(map(len, vecs_list)), 'bad corresponding vecs'
        if config2_.fgw_thresh is not None and config2_.fgw_thresh > 0:
            flags_list = [fgws > config2_.fgw_thresh for fgws in fgws_list]
            # Remove data under the threshold
            fgws_list = vt.zipcompress(fgws_list, flags_list, axis=0)
            vecs_list = vt.zipcompress(vecs_list, flags_list, axis=0)
            fxs_list = vt.zipcompress(fxs_list, flags_list, axis=0)
    else:
        fgws_list = None
    # </HACK:featweight>
    return vecs_list, fgws_list, fxs_list
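# Minimal sketch (not from the original module) of the threshold filtering used
# in get_support_data: build one boolean flag array per annotation from its
# keypoint scales and compress every parallel list with those flags, which is
# the behavior assumed for vt.zipcompress. The helper name is hypothetical.
def _demo_scale_filter(scales_list, vecs_list, fxs_list, min_=None, max_=None):
    import numpy as np

    min_ = -np.inf if min_ is None else min_
    max_ = np.inf if max_ is None else max_
    flags_list = [np.logical_and(s >= min_, s <= max_) for s in scales_list]
    vecs_list = [vecs[flags] for vecs, flags in zip(vecs_list, flags_list)]
    fxs_list = [fxs[flags] for fxs, flags in zip(fxs_list, flags_list)]
    return vecs_list, fxs_list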