def exec_(script):
    import utool as ut
    print('+**** exec %s script *******' % (script.type_))
    print('repo = %r' % (repo,))
    with ut.ChdirContext(repo.dpath):
        if script.is_fpath_valid():
            normbuild_flag = '--no-rmbuild'
            if ut.get_argflag(normbuild_flag):
                ut.cmd(script.fpath + ' ' + normbuild_flag)
            else:
                ut.cmd(script.fpath)
        else:
            if script.text is not None:
                print('ABOUT TO EXECUTE')
                ut.print_code(script.text, 'bash')
                if ut.are_you_sure('execute above script?'):
                    from os.path import join
                    scriptdir = ut.ensure_app_resource_dir('utool', 'build_scripts')
                    script_path = join(scriptdir,
                                       'script_' + script.type_ + '_' +
                                       ut.hashstr27(script.text) + '.sh')
                    ut.writeto(script_path, script.text)
                    _ = ut.cmd('bash ', script_path)  # NOQA
            else:
                print("CANT QUITE EXECUTE THIS YET")
                ut.print_code(script.text, 'bash')
    #os.system(scriptname)
    print('L**** exec %s script *******' % (script.type_))

def get_match_thumb_fname(cm, daid, qreq_, view_orientation='vertical',
                          draw_matches=True):
    """
    CommandLine:
        python -m ibeis.gui.id_review_api --exec-get_match_thumb_fname

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.gui.id_review_api import *  # NOQA
        >>> import ibeis
        >>> cm, qreq_ = ibeis.testdata_cm('PZ_MTEST')
        >>> thumbsize = (128, 128)
        >>> daid = cm.get_top_aids()[0]
        >>> match_thumb_fname = get_match_thumb_fname(cm, daid, qreq_)
        >>> result = match_thumb_fname
        >>> print(result)
        match_aids=1,1_cfgstr=ubpzwu5k54h6xbnr.jpg
    """
    # Make thumbnail name
    config_hash = ut.hashstr27(qreq_.get_cfgstr())
    qaid = cm.qaid
    args = (
        qaid,
        daid,
        config_hash,
        draw_matches,
        view_orientation,
    )
    match_thumb_fname = 'match_aids=%d,%d_cfgstr=%s_draw=%s_orientation=%s.jpg' % args
    return match_thumb_fname

def exec_(script):
    import utool as ut
    print("+**** exec %s script *******" % (script.type_))
    print("repo = %r" % (repo,))
    with ut.ChdirContext(repo.dpath):
        if script.is_fpath_valid():
            normbuild_flag = "--no-rmbuild"
            if ut.get_argflag(normbuild_flag):
                ut.cmd(script.fpath + " " + normbuild_flag)
            else:
                ut.cmd(script.fpath)
        else:
            if script.text is not None:
                print("ABOUT TO EXECUTE")
                ut.print_code(script.text, "bash")
                if ut.are_you_sure("execute above script?"):
                    from os.path import join
                    scriptdir = ut.ensure_app_resource_dir("utool", "build_scripts")
                    script_path = join(
                        scriptdir,
                        "script_" + script.type_ + "_" + ut.hashstr27(script.text) + ".sh"
                    )
                    ut.writeto(script_path, script.text)
                    _ = ut.cmd("bash ", script_path)  # NOQA
            else:
                print("CANT QUITE EXECUTE THIS YET")
                ut.print_code(script.text, "bash")
    # os.system(scriptname)
    print("L**** exec %s script *******" % (script.type_))

def make_cacher(name, cfgstr=None):
    if cfgstr is None:
        cfgstr = ut.hashstr27(qreq_.get_cfgstr())
    if False and ut.is_developer():
        return ut.Cacher(fname=name + '_' + qreq_.ibs.get_dbname(),
                         cfgstr=cfgstr,
                         cache_dir=ut.ensuredir(
                             ut.truepath('~/Desktop/smkcache')))
    else:
        wrp = ut.DynStruct()

        def ensure(func):
            return func()

        wrp.ensure = ensure
        return wrp

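# Hedged usage sketch (not part of the original source): make_cacher closes over
# qreq_, so it is only meaningful inside a function such as ensure_data (shown
# later in this collection).  The wrapper it returns is used like this, where
# compute_inverted_index is a hypothetical stand-in for the expensive step:
#
#   dinva_cacher = make_cacher('inva', dinva_cfgstr)
#   dinva = dinva_cacher.ensure(lambda: compute_inverted_index(qreq_))
#
# With the developer branch disabled, ensure() simply calls the lambda every
# time; swapping in a real ut.Cacher lets the same call site reuse a previously
# saved result keyed by cfgstr.
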
def get_cfgstr(qreq_, with_input=False, with_data=True, with_pipe=True,
               hash_pipe=False):
    cfgstr_list = []
    if with_input:
        cfgstr_list.append(qreq_.get_query_hashid())
    if with_data:
        cfgstr_list.append(qreq_.get_data_hashid())
    if with_pipe:
        pipe_cfgstr = qreq_.get_pipe_cfgstr()
        if hash_pipe:
            pipe_cfgstr = ut.hashstr27(pipe_cfgstr)
        cfgstr_list.append(pipe_cfgstr)
    cfgstr = ''.join(cfgstr_list)
    return cfgstr

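# Hedged illustration (not part of the original source): get_cfgstr concatenates
# up to three pieces of the query request identifier.  Roughly:
#
#   qreq_.get_cfgstr()                 -> data_hashid + pipe_cfgstr
#   qreq_.get_cfgstr(with_input=True)  -> query_hashid + data_hashid + pipe_cfgstr
#   qreq_.get_cfgstr(hash_pipe=True)   -> data_hashid + ut.hashstr27(pipe_cfgstr)
#
# hash_pipe trades the readable pipeline string for a short digest, which helps
# when the cfgstr ends up embedded in a filename (as in get_match_thumb_fname).
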
def vim_grep_project(pat, hashid=None):
    import vim
    import utool as ut
    ut.ENABLE_COLORS = False
    ut.util_str.ENABLE_COLORS = False
    if hashid is None:
        hashid = ut.hashstr27(pat)
    print('Grepping for pattern = %r' % (pat,))
    msg_list = ut.grep_projects([pat], verbose=False, colored=False)
    fname = 'tmp_grep_' + hashid + '.txt'
    dpath = ut.get_app_resource_dir('utool')
    fpath = ut.unixjoin(dpath, fname)
    #pyvim_funcs.vim_fpath_cmd('split', fpath)
    vim_fpath_cmd('new', fpath)
    text = '\n'.join(msg_list)
    overwrite_text(text)
    vim.command(":exec ':w'")

def load_feat_scores(qreq_, qaids):
    import wbia  # NOQA
    from os.path import dirname, join  # NOQA

    # HACKY CACHE
    cfgstr = qreq_.get_cfgstr(with_input=True)
    cache_dir = join(dirname(dirname(wbia.__file__)), 'TMP_FEATSCORE_CACHE')
    namemode = ut.get_argval('--namemode', default=True)
    fsvx = ut.get_argval('--fsvx', type_='fuzzy_subset',
                         default=slice(None, None, None))
    threshx = ut.get_argval('--threshx', type_=int, default=None)
    thresh = ut.get_argval('--thresh', type_=float, default=0.9)
    num = ut.get_argval('--num', type_=int, default=1)
    cfg_components = [cfgstr, disttype, namemode, fsvx, threshx, thresh, f, num]
    cache_cfgstr = ','.join(ut.lmap(six.text_type, cfg_components))
    cache_hashid = ut.hashstr27(cache_cfgstr + '_v1')
    cache_name = 'get_cfgx_feat_scores_' + cache_hashid

    @ut.cached_func(cache_name, cache_dir=cache_dir, key_argx=[], use_cache=True)
    def get_cfgx_feat_scores(qreq_, qaids):
        from wbia.algo.hots import scorenorm
        cm_list = qreq_.execute(qaids)
        # logger.info('Done loading cached chipmatches')
        tup = scorenorm.get_training_featscores(qreq_, cm_list, disttype,
                                                namemode, fsvx, threshx,
                                                thresh, num=num)
        # logger.info(ut.depth_profile(tup))
        tp_scores, tn_scores, scorecfg = tup
        return tp_scores, tn_scores, scorecfg

    tp_scores, tn_scores, scorecfg = get_cfgx_feat_scores(qreq_, qaids)
    return tp_scores, tn_scores, scorecfg

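# Note (added): disttype, f, and six above are free variables that this snippet
# does not define; it is written to run in an enclosing scope (or module) that
# provides them, presumably one that sets disttype from a --disttype argument
# and imports six for text_type.
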
def infodict2(player):
    infodict = ut.odict(
        [
            # ('id_', player.id_),
            ("turn", player.turn),
            ("life", player.life),
            ("library_hash", ut.hashstr27(str(player.deck.library))),
            ("library_size", len(player.deck)),
            ("hand_size", len(player.hand)),
            ("bfield_size", len(player.bfield)),
            ("graveyard_size", len(player.graveyard)),
            ("exiled_size", len(player.exiled)),
            ("hand", mtgobjs.CardGroup(player.hand).infohist),
            ("bfield", mtgobjs.CardGroup(player.bfield).infohist),
            ("graveyard", mtgobjs.CardGroup(player.graveyard).infohist),
        ]
    )
    return infodict

def get_config_rowid(table, config=None):
    #config_hashid = config.get('feat_cfgstr')
    #assert config_hashid is not None
    # TODO store config_rowid in qparams
    #else:
    #    config_hashid = db.cfg.feat_cfg.get_cfgstr()
    if config is not None:
        try:
            #config_hashid = 'none'
            config_hashid = config.get(table.tablename + '_hashid')
        except KeyError:
            try:
                subconfig = config.get(table.tablename + '_config')
                config_hashid = ut.hashstr27(ut.to_json(subconfig))
            except KeyError:
                print('Warning: Config must either contain a string '
                      '<tablename>_hashid or a dict <tablename>_config')
                raise
    else:
        config_hashid = 'none'
    config_rowid = table.add_config(config_hashid)
    return config_rowid

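# Hedged illustration (not part of the original source): for a hypothetical
# table named 'chip', the incoming config is expected to carry either a
# precomputed hash id or a sub-config that gets hashed:
#
#   config = {'chip_hashid': 'precomputedhashid'}     # used as-is
#   config = {'chip_config': {'dim_size': 450}}       # hashed with
#                                                     # ut.hashstr27(ut.to_json(...))
#
# The except KeyError branches only fire if config.get raises, which a plain
# dict never does, so config is presumably a config object with a strict get().
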
def get_hashid(cfg):
    return ut.hashstr27(cfg.get_cfgstr())

def get_pipe_hashid(qreq_):
    # this change invalidates match_chip4 bibcaches generated before
    # august 24 2015
    #pipe_hashstr = ut.hashstr(qreq_.get_pipe_cfgstr())
    pipe_hashstr = ut.hashstr27(qreq_.get_pipe_cfgstr())
    return pipe_hashstr

def get_pipe_hashid(self):
    return ut.hashstr27(self.get_pipe_cfgstr())

def distinct_colors(N, brightness=.878, randomize=True, hue_range=(0.0, 1.0),
                    cmap_seed=None):
    r"""
    Args:
        N (int):
        brightness (float):

    Returns:
        list: RGB_tuples

    CommandLine:
        python -m plottool.color_funcs --test-distinct_colors --N 2 --show --hue-range=0.05,.95
        python -m plottool.color_funcs --test-distinct_colors --N 3 --show --hue-range=0.05,.95
        python -m plottool.color_funcs --test-distinct_colors --N 4 --show --hue-range=0.05,.95
        python -m plottool.color_funcs --test-distinct_colors --N 3 --show --no-randomize
        python -m plottool.color_funcs --test-distinct_colors --N 4 --show --no-randomize
        python -m plottool.color_funcs --test-distinct_colors --N 20 --show

    References:
        http://blog.jianhuashao.com/2011/09/generate-n-distinct-colors.html

    CommandLine:
        python -m plottool.color_funcs --exec-distinct_colors --show
        python -m plottool.color_funcs --exec-distinct_colors --show --no-randomize --N 50
        python -m plottool.color_funcs --exec-distinct_colors --show --cmap_seed=foobar

    Example:
        >>> # ENABLE_DOCTEST
        >>> from plottool.color_funcs import *  # NOQA
        >>> # build test data
        >>> N = ut.get_argval('--N', int, 2)
        >>> randomize = not ut.get_argflag('--no-randomize')
        >>> brightness = 0.878
        >>> # execute function
        >>> cmap_seed = ut.get_argval('--cmap_seed', str, default=None)
        >>> hue_range = ut.get_argval('--hue-range', list, default=(0.00, 1.0))
        >>> RGB_tuples = distinct_colors(N, brightness, randomize, hue_range, cmap_seed=cmap_seed)
        >>> # verify results
        >>> assert len(RGB_tuples) == N
        >>> result = str(RGB_tuples)
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> color_list = RGB_tuples
        >>> testshow_colors(color_list)
        >>> ut.show_if_requested()
    """
    # TODO: Add sin wave modulation to the sat and value
    #import plottool as pt
    if True:
        import plottool as pt
        # HACK for white figures
        remove_yellow = not pt.is_default_dark_bg()
        #if not pt.is_default_dark_bg():
        #    brightness = .8
    use_jet = False
    if use_jet:
        import plottool as pt
        cmap = pt.plt.cm.jet
        RGB_tuples = list(map(tuple, cmap(np.linspace(0, 1, N))))
    elif cmap_seed is not None:
        # Randomized map based on a seed
        #cmap_ = 'Set1'
        #cmap_ = 'Dark2'
        choices = [
            #'Set1',
            'Dark2',
            'jet',
            #'gist_rainbow',
            #'rainbow',
            #'gnuplot',
            #'Accent'
        ]
        cmap_hack = ut.get_argval('--cmap-hack', type_=str, default=None)
        ncolor_hack = ut.get_argval('--ncolor-hack', type_=int, default=None)
        if cmap_hack is not None:
            choices = [cmap_hack]
        if ncolor_hack is not None:
            N = ncolor_hack
            N_ = N
        seed = sum(list(map(ord, ut.hashstr27(cmap_seed))))
        rng = np.random.RandomState(seed + 48930)
        cmap_str = rng.choice(choices, 1)[0]
        #print('cmap_str = %r' % (cmap_str,))
        cmap = pt.plt.cm.get_cmap(cmap_str)
        #ut.hashstr27(cmap_seed)
        #cmap_seed = 0
        #pass
        jitter = (rng.randn(N) / (rng.randn(100).max() / 2)).clip(-1, 1) * ((1 / (N ** 2)))
        range_ = np.linspace(0, 1, N, endpoint=False)
        #print('range_ = %r' % (range_,))
        range_ = range_ + jitter
        #print('range_ = %r' % (range_,))
        while not (np.all(range_ >= 0) and np.all(range_ <= 1)):
            range_[range_ < 0] = np.abs(range_[range_ < 0])
            range_[range_ > 1] = 2 - range_[range_ > 1]
        #print('range_ = %r' % (range_,))
        shift = rng.rand()
        range_ = (range_ + shift) % 1
        #print('jitter = %r' % (jitter,))
        #print('shift = %r' % (shift,))
        #print('range_ = %r' % (range_,))
        if ncolor_hack is not None:
            range_ = range_[0:N_]
        RGB_tuples = list(map(tuple, cmap(range_)))
    else:
        sat = brightness
        val = brightness
        hmin, hmax = hue_range
        if remove_yellow:
            hue_skips = [(.13, .24)]
        else:
            hue_skips = []
        hue_skip_ranges = [_[1] - _[0] for _ in hue_skips]
        total_skip = sum(hue_skip_ranges)
        hmax_ = hmax - total_skip
        hue_list = np.linspace(hmin, hmax_, N, endpoint=False, dtype=np.float)
        # Remove colors (like hard to see yellows) in specified ranges
        for skip, range_ in zip(hue_skips, hue_skip_ranges):
            hue_list = [hue if hue <= skip[0] else hue + range_ for hue in hue_list]
        HSV_tuples = [(hue, sat, val) for hue in hue_list]
        RGB_tuples = [colorsys.hsv_to_rgb(*x) for x in HSV_tuples]
    if randomize:
        ut.deterministic_shuffle(RGB_tuples)
    return RGB_tuples

def unsorted_list_hash(list_):
    return ut.hashstr27(str(sorted(list_)))

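# Hedged illustration (not part of the original source): sorting before hashing
# makes the digest order-invariant, because both calls hash the same string:
#
#   unsorted_list_hash([3, 1, 2]) == unsorted_list_hash([1, 2, 3])   # both hash '[1, 2, 3]'
#
# The list elements must be mutually comparable for sorted() to be well defined.
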
def learn_featscore_normalizer(qreq_, datakw={}, learnkw={}):
    r"""
    Takes the result of queries and trains a score encoder

    Args:
        qreq_ (ibeis.QueryRequest): query request object with hyper-parameters

    Returns:
        vtool.ScoreNormalizer: encoder

    CommandLine:
        python -m ibeis --tf learn_featscore_normalizer --show -t default:
        python -m ibeis --tf learn_featscore_normalizer --show --fsvx=0 --threshx=1 --show
        python -m ibeis --tf learn_featscore_normalizer --show -a default:size=40 -t default:fg_on=False,lnbnn_on=False,ratio_thresh=1.0,K=1,Knorm=6,sv_on=False,normalizer_rule=name --fsvx=0 --threshx=1 --show
        python -m ibeis --tf learn_featscore_normalizer --show --disttype=ratio
        python -m ibeis --tf learn_featscore_normalizer --show --disttype=lnbnn
        python -m ibeis --tf learn_featscore_normalizer --show --disttype=L2_sift -t default:K=1
        python -m ibeis --tf learn_featscore_normalizer --show --disttype=L2_sift -a timectrl -t default:K=1 --db PZ_Master1
        python -m ibeis --tf learn_featscore_normalizer --show --disttype=ratio -a timectrl -t default:K=1 --db PZ_Master1
        python -m ibeis --tf learn_featscore_normalizer --show --disttype=lnbnn -a timectrl -t default:K=1 --db PZ_Master1
        # LOOK AT THIS
        python -m ibeis --tf learn_featscore_normalizer --show --disttype=normdist -a timectrl -t default:K=1 --db PZ_Master1
        #python -m ibeis --tf learn_featscore_normalizer --show --disttype=parzen -a timectrl -t default:K=1 --db PZ_Master1
        #python -m ibeis --tf learn_featscore_normalizer --show --disttype=norm_parzen -a timectrl -t default:K=1 --db PZ_Master1
        python -m ibeis --tf learn_featscore_normalizer --show --disttype=lnbnn --db PZ_Master1 -a timectrl -t best

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.scorenorm import *  # NOQA
        >>> import ibeis
        >>> learnkw = {}
        >>> datakw = NormFeatScoreConfig.from_argv_dict()
        >>> qreq_ = ibeis.testdata_qreq_(
        >>>     defaultdb='PZ_MTEST', a=['default'], p=['default'])
        >>> encoder = learn_featscore_normalizer(qreq_, datakw, learnkw)
        >>> ut.quit_if_noshow()
        >>> encoder.visualize(figtitle=encoder.get_cfgstr())
        >>> ut.show_if_requested()
    """
    cm_list = qreq_.execute()
    print('learning scorenorm')
    print('datakw = %s' % ut.repr3(datakw))
    tp_scores, tn_scores, scorecfg = get_training_featscores(
        qreq_, cm_list, **datakw)
    _learnkw = dict(monotonize=True, adjust=2)
    _learnkw.update(learnkw)
    encoder = vt.ScoreNormalizer(**_learnkw)
    encoder.fit_partitioned(tp_scores, tn_scores, verbose=False)
    # ut.hashstr27(qreq_.get_cfgstr())
    # Maintain regen command info: TODO: generalize and integrate
    encoder._regen_info = {
        'cmd': 'python -m ibeis --tf learn_featscore_normalizer',
        'scorecfg': scorecfg,
        'learnkw': learnkw,
        'datakw': datakw,
        'qaids': qreq_.qaids,
        'daids': qreq_.daids,
        'qreq_cfg': qreq_.get_full_cfgstr(),
        'qreq_regen_info': getattr(qreq_, '_regen_info', {}),
    }
    # 'timestamp': ut.get_printable_timestamp(),
    scorecfg_safe = scorecfg
    scorecfg_safe = re.sub('[' + re.escape('()= ') + ']', '', scorecfg_safe)
    scorecfg_safe = re.sub('[' + re.escape('+*<>[]') + ']', '_', scorecfg_safe)
    hashid = ut.hashstr27(ut.to_json(encoder._regen_info))
    naidinfo = ('q%s_d%s' % (len(qreq_.qaids), len(qreq_.daids)))
    cfgstr = 'featscore_{}_{}_{}_{}'.format(scorecfg_safe, qreq_.ibs.get_dbname(),
                                            naidinfo, hashid)
    encoder.cfgstr = cfgstr
    return encoder

def hashid(dataset):
    if dataset.cfgstr is None:
        return ''
    else:
        return ut.hashstr27(dataset.cfgstr, hashlen=8)

def __hash__(self):
    return hash(ut.hashstr27(self.astuple()))

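# Hedged illustration (not part of the original source): any class that exposes
# an astuple() of its defining fields can reuse this __hash__ pattern; instances
# with equal tuples then hash (and, with a matching __eq__, compare) equal.  The
# class name below is hypothetical.
#
#   class HashableConfig(object):
#       def __init__(self, K, thresh):
#           self.K = K
#           self.thresh = thresh
#
#       def astuple(self):
#           return (self.K, self.thresh)
#
#       def __eq__(self, other):
#           return self.astuple() == other.astuple()
#
#       def __hash__(self):
#           return hash(ut.hashstr27(self.astuple()))
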
def get_pipe_hashid(request):
    return ut.hashstr27(request.get_pipe_cfgstr())

def merge_datasets(dataset_list):
    """
    Merges a list of dataset objects into a single combined dataset.
    """

    def consensus_check_factory():
        """
        Returns a temporary function used to check that all incoming values
        with the same key are consistent
        """
        from collections import defaultdict
        past_values = defaultdict(lambda: None)

        def consensus_check(value, key):
            assert past_values[key] is None or past_values[key] == value, (
                'key=%r with value=%r does not agree with past_value=%r' %
                (key, value, past_values[key]))
            past_values[key] = value
            return value
        return consensus_check

    total_num_labels = 0
    total_num_data = 0

    input_alias_list = [dataset.alias_key for dataset in dataset_list]

    alias_key = 'combo_' + ut.hashstr27(repr(input_alias_list), hashlen=8)
    training_dpath = ut.ensure_app_resource_dir('ibeis_cnn', 'training', alias_key)
    data_fpath = ut.unixjoin(training_dpath, alias_key + '_data.hdf5')
    labels_fpath = ut.unixjoin(training_dpath, alias_key + '_labels.hdf5')

    try:
        # Try and short circuit cached loading
        merged_dataset = DataSet.from_alias_key(alias_key)
        return merged_dataset
    except (Exception, AssertionError) as ex:
        ut.printex(ex, 'alias definitions have changed. alias_key=%r' %
                   (alias_key,), iswarning=True)

    # Build the dataset
    consensus_check = consensus_check_factory()

    for dataset in dataset_list:
        print(ut.get_file_nBytes_str(dataset.data_fpath))
        print(dataset.data_fpath_dict['full'])
        print(dataset.num_labels)
        print(dataset.data_per_label)

        total_num_labels += dataset.num_labels
        total_num_data += (dataset.data_per_label * dataset.num_labels)
        # check that all data_dims agree
        data_shape = consensus_check(dataset.data_shape, 'data_shape')
        data_per_label = consensus_check(dataset.data_per_label, 'data_per_label')

    # hack record this
    import numpy as np
    data_dtype = np.uint8
    label_dtype = np.int32

    data = np.empty((total_num_data,) + data_shape, dtype=data_dtype)
    labels = np.empty(total_num_labels, dtype=label_dtype)

    #def iterable_assignment():
    #    pass
    data_left = 0
    data_right = None
    labels_left = 0
    labels_right = None
    for dataset in ut.ProgressIter(dataset_list, lbl='combining datasets', freq=1):
        X_all, y_all = dataset.subset('full')
        labels_right = labels_left + y_all.shape[0]
        data_right = data_left + X_all.shape[0]
        data[data_left:data_right] = X_all
        labels[labels_left:labels_right] = y_all
        data_left = data_right
        labels_left = labels_right

    ut.save_data(data_fpath, data)
    ut.save_data(labels_fpath, labels)
    labels = ut.load_data(labels_fpath)
    num_labels = len(labels)

    merged_dataset = DataSet.new_training_set(
        alias_key=alias_key,
        data_fpath=data_fpath,
        labels_fpath=labels_fpath,
        metadata_fpath=None,
        training_dpath=training_dpath,
        data_shape=data_shape,
        data_per_label=data_per_label,
        output_dims=1,
        num_labels=num_labels,
    )
    return merged_dataset

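# Hedged illustration (not part of the original source): consensus_check_factory
# (local to merge_datasets) returns a checker that records the first value seen
# for each key and asserts that every later dataset agrees with it, e.g.
#
#   check = consensus_check_factory()
#   check((64, 64, 3), 'data_shape')   # first value is recorded
#   check((64, 64, 3), 'data_shape')   # ok, agrees with the recorded value
#   check((32, 32, 3), 'data_shape')   # AssertionError: does not agree
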
def get_pipe_hashid(qreq_):
    return ut.hashstr27(str(qreq_.stack_config))

def bigcache_vsone(qreq_, hyper_params):
    """
    Cached output of one-vs-one matches

    >>> from wbia.scripts.script_vsone import *  # NOQA
    >>> self = OneVsOneProblem()
    >>> qreq_ = self.qreq_
    >>> hyper_params = self.hyper_params
    """
    import vtool as vt
    import wbia

    # Get a set of training pairs
    ibs = qreq_.ibs
    cm_list = qreq_.execute()
    infr = wbia.AnnotInference.from_qreq_(qreq_, cm_list, autoinit=True)

    # Per query choose a set of correct, incorrect, and random training pairs
    aid_pairs_ = infr._cm_training_pairs(
        rng=np.random.RandomState(42), **hyper_params.pair_sample
    )

    aid_pairs_ = vt.unique_rows(np.array(aid_pairs_), directed=False).tolist()
    pb_aid_pairs_ = photobomb_samples(ibs)

    # TODO: try to add in more non-comparable samples
    aid_pairs_ = pb_aid_pairs_ + aid_pairs_
    aid_pairs_ = vt.unique_rows(np.array(aid_pairs_))

    # ======================================
    # Compute one-vs-one scores and local_measures
    # ======================================

    # Prepare lazy attributes for annotations
    qreq_ = infr.qreq_
    ibs = qreq_.ibs
    qconfig2_ = qreq_.extern_query_config2
    dconfig2_ = qreq_.extern_data_config2
    qannot_cfg = ibs.depc.stacked_config(None, 'featweight', qconfig2_)
    dannot_cfg = ibs.depc.stacked_config(None, 'featweight', dconfig2_)

    # Remove any pairs missing features
    if dannot_cfg == qannot_cfg:
        unique_annots = ibs.annots(np.unique(np.array(aid_pairs_)), config=dannot_cfg)
        bad_aids = unique_annots.compress(~np.array(unique_annots.num_feats) > 0).aids
        bad_aids = set(bad_aids)
    else:
        annots1_ = ibs.annots(ut.unique(ut.take_column(aid_pairs_, 0)), config=qannot_cfg)
        annots2_ = ibs.annots(ut.unique(ut.take_column(aid_pairs_, 1)), config=dannot_cfg)
        bad_aids1 = annots1_.compress(~np.array(annots1_.num_feats) > 0).aids
        bad_aids2 = annots2_.compress(~np.array(annots2_.num_feats) > 0).aids
        bad_aids = set(bad_aids1 + bad_aids2)
    subset_idxs = np.where(
        [not (a1 in bad_aids or a2 in bad_aids) for a1, a2 in aid_pairs_]
    )[0]
    # Keep only a random subset
    if hyper_params.subsample:
        rng = np.random.RandomState(3104855634)
        num_max = hyper_params.subsample
        if num_max < len(subset_idxs):
            subset_idxs = rng.choice(subset_idxs, size=num_max, replace=False)
            subset_idxs = sorted(subset_idxs)

    # Take the current selection
    aid_pairs = ut.take(aid_pairs_, subset_idxs)

    if True:
        # NEW WAY
        config = hyper_params.vsone_assign
        # TODO: ensure annot probs like chips and features can be appropriately
        # set via qreq_ config or whatever
        matches = infr.exec_vsone_subset(aid_pairs, config=config)
    else:
        query_aids = ut.take_column(aid_pairs, 0)
        data_aids = ut.take_column(aid_pairs, 1)
        # OLD WAY
        # Determine a unique set of annots per config
        configured_aids = ut.ddict(set)
        configured_aids[qannot_cfg].update(query_aids)
        configured_aids[dannot_cfg].update(data_aids)

        # Make efficient annot-object representation
        configured_obj_annots = {}
        for config, aids in configured_aids.items():
            annots = ibs.annots(sorted(list(aids)), config=config)
            configured_obj_annots[config] = annots

        annots1 = configured_obj_annots[qannot_cfg].loc(query_aids)
        annots2 = configured_obj_annots[dannot_cfg].loc(data_aids)

        # Get hash based on visual annotation appearance of each pair
        # as well as algorithm configurations used to compute those properties
        qvuuids = annots1.visual_uuids
        dvuuids = annots2.visual_uuids
        qcfgstr = annots1._config.get_cfgstr()
        dcfgstr = annots2._config.get_cfgstr()
        annots_cfgstr = ut.hashstr27(qcfgstr) + ut.hashstr27(dcfgstr)
        vsone_uuids = [
            ut.combine_uuids(uuids, salt=annots_cfgstr)
            for uuids in ut.ProgIter(
                zip(qvuuids, dvuuids), length=len(qvuuids), label='hashing ids'
            )
        ]

        # Combine into a big cache for the entire 1-v-1 matching run
        big_uuid = ut.hashstr_arr27(vsone_uuids, '', pathsafe=True)
        cacher = ut.Cacher('vsone_v7', cfgstr=str(big_uuid), appname='vsone_rf_train')

        cached_data = cacher.tryload()
        if cached_data is not None:
            # Caching doesn't work 100% for PairwiseMatch object, so we need to do
            # some postprocessing
            configured_lazy_annots = ut.ddict(dict)
            for config, annots in configured_obj_annots.items():
                annot_dict = configured_lazy_annots[config]
                for _annot in ut.ProgIter(annots.scalars(), label='make lazy dict'):
                    annot_dict[_annot.aid] = _annot._make_lazy_dict()

            # Extract pairs of annot objects (with shared caches)
            lazy_annots1 = ut.take(configured_lazy_annots[qannot_cfg], query_aids)
            lazy_annots2 = ut.take(configured_lazy_annots[dannot_cfg], data_aids)

            # Create a set of PairwiseMatches with the correct annot properties
            matches = [
                vt.PairwiseMatch(annot1, annot2)
                for annot1, annot2 in zip(lazy_annots1, lazy_annots2)
            ]

            # Updating a new matches dictionary ensures the annot1/annot2 properties
            # are set correctly
            for key, cached_matches in list(cached_data.items()):
                fixed_matches = [match.copy() for match in matches]
                for fixed, internal in zip(fixed_matches, cached_matches):
                    dict_ = internal.__dict__
                    ut.delete_dict_keys(dict_, ['annot1', 'annot2'])
                    fixed.__dict__.update(dict_)
                cached_data[key] = fixed_matches
        else:
            cached_data = vsone_(
                qreq_,
                query_aids,
                data_aids,
                qannot_cfg,
                dannot_cfg,
                configured_obj_annots,
                hyper_params,
            )
            cacher.save(cached_data)
        # key_ = 'SV_LNBNN'
        key_ = 'RAT_SV'
        # for key in list(cached_data.keys()):
        #     if key != 'SV_LNBNN':
        #         del cached_data[key]
        matches = cached_data[key_]
    return matches, infr

def distinct_colors(N, brightness=0.878, randomize=True, hue_range=(0.0, 1.0),
                    cmap_seed=None):
    r"""
    Args:
        N (int):
        brightness (float):

    Returns:
        list: RGB_tuples

    CommandLine:
        python -m wbia.plottool.color_funcs --test-distinct_colors --N 2 --show --hue-range=0.05,.95
        python -m wbia.plottool.color_funcs --test-distinct_colors --N 3 --show --hue-range=0.05,.95
        python -m wbia.plottool.color_funcs --test-distinct_colors --N 4 --show --hue-range=0.05,.95
        python -m wbia.plottool.color_funcs --test-distinct_colors --N 3 --show --no-randomize
        python -m wbia.plottool.color_funcs --test-distinct_colors --N 4 --show --no-randomize
        python -m wbia.plottool.color_funcs --test-distinct_colors --N 6 --show --no-randomize
        python -m wbia.plottool.color_funcs --test-distinct_colors --N 20 --show

    References:
        http://blog.jianhuashao.com/2011/09/generate-n-distinct-colors.html

    CommandLine:
        python -m wbia.plottool.color_funcs --exec-distinct_colors --show
        python -m wbia.plottool.color_funcs --exec-distinct_colors --show --no-randomize --N 50
        python -m wbia.plottool.color_funcs --exec-distinct_colors --show --cmap_seed=foobar

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.plottool.color_funcs import *  # NOQA
        >>> # build test data
        >>> N = ut.get_argval('--N', int, 2)
        >>> randomize = not ut.get_argflag('--no-randomize')
        >>> brightness = 0.878
        >>> # execute function
        >>> cmap_seed = ut.get_argval('--cmap_seed', str, default=None)
        >>> hue_range = ut.get_argval('--hue-range', list, default=(0.00, 1.0))
        >>> RGB_tuples = distinct_colors(N, brightness, randomize, hue_range, cmap_seed=cmap_seed)
        >>> # verify results
        >>> assert len(RGB_tuples) == N
        >>> result = str(RGB_tuples)
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> color_list = RGB_tuples
        >>> testshow_colors(color_list)
        >>> import wbia.plottool as pt
        >>> pt.show_if_requested()
    """
    # TODO: Add sin wave modulation to the sat and value
    # import wbia.plottool as pt
    if True:
        import wbia.plottool as pt

        # HACK for white figures
        remove_yellow = not pt.is_default_dark_bg()
        # if not pt.is_default_dark_bg():
        #     brightness = .8
    use_jet = False
    if use_jet:
        import wbia.plottool as pt

        cmap = pt.plt.cm.jet
        RGB_tuples = list(map(tuple, cmap(np.linspace(0, 1, N))))
    elif cmap_seed is not None:
        # Randomized map based on a seed
        # cmap_ = 'Set1'
        # cmap_ = 'Dark2'
        choices = [
            # 'Set1',
            'Dark2',
            'jet',
            # 'gist_rainbow',
            # 'rainbow',
            # 'gnuplot',
            # 'Accent'
        ]
        cmap_hack = ut.get_argval('--cmap-hack', type_=str, default=None)
        ncolor_hack = ut.get_argval('--ncolor-hack', type_=int, default=None)
        if cmap_hack is not None:
            choices = [cmap_hack]
        if ncolor_hack is not None:
            N = ncolor_hack
            N_ = N
        seed = sum(list(map(ord, ut.hashstr27(cmap_seed))))
        rng = np.random.RandomState(seed + 48930)
        cmap_str = rng.choice(choices, 1)[0]
        # print('cmap_str = %r' % (cmap_str,))
        cmap = pt.plt.cm.get_cmap(cmap_str)
        # ut.hashstr27(cmap_seed)
        # cmap_seed = 0
        # pass
        jitter = (rng.randn(N) / (rng.randn(100).max() / 2)).clip(-1, 1) * ((1 / (N**2)))
        range_ = np.linspace(0, 1, N, endpoint=False)
        # print('range_ = %r' % (range_,))
        range_ = range_ + jitter
        # print('range_ = %r' % (range_,))
        while not (np.all(range_ >= 0) and np.all(range_ <= 1)):
            range_[range_ < 0] = np.abs(range_[range_ < 0])
            range_[range_ > 1] = 2 - range_[range_ > 1]
        # print('range_ = %r' % (range_,))
        shift = rng.rand()
        range_ = (range_ + shift) % 1
        # print('jitter = %r' % (jitter,))
        # print('shift = %r' % (shift,))
        # print('range_ = %r' % (range_,))
        if ncolor_hack is not None:
            range_ = range_[0:N_]
        RGB_tuples = list(map(tuple, cmap(range_)))
    else:
        sat = brightness
        val = brightness
        hmin, hmax = hue_range
        if remove_yellow:
            hue_skips = [(0.13, 0.24)]
        else:
            hue_skips = []
        hue_skip_ranges = [_[1] - _[0] for _ in hue_skips]
        total_skip = sum(hue_skip_ranges)
        hmax_ = hmax - total_skip
        hue_list = np.linspace(hmin, hmax_, N, endpoint=False, dtype=np.float)
        # Remove colors (like hard to see yellows) in specified ranges
        for skip, range_ in zip(hue_skips, hue_skip_ranges):
            hue_list = [
                hue if hue <= skip[0] else hue + range_ for hue in hue_list
            ]
        HSV_tuples = [(hue, sat, val) for hue in hue_list]
        RGB_tuples = [colorsys.hsv_to_rgb(*x) for x in HSV_tuples]
    if randomize:
        ut.deterministic_shuffle(RGB_tuples)
    return RGB_tuples

def get_pipe_hashid(qreq_):
    pipe_hashstr = ut.hashstr27(qreq_.get_pipe_cfgstr())
    return pipe_hashstr

def ensure_data(qreq_):
    """
    >>> import wbia
    qreq_ = wbia.testdata_qreq_(
        defaultdb='Oxford', a='oxford',
        p='default:proot=smk,nAssign=1,num_words=64000,SV=False,can_match_sameimg=True,dim_size=None')
    """
    logger.info('Ensure data for %s' % (qreq_,))

    # qreq_.cachedir = ut.ensuredir((ibs.cachedir, 'smk'))
    qreq_.ensure_nids()

    def make_cacher(name, cfgstr=None):
        if cfgstr is None:
            cfgstr = ut.hashstr27(qreq_.get_cfgstr())
        if False and ut.is_developer():
            return ut.Cacher(
                fname=name + '_' + qreq_.ibs.get_dbname(),
                cfgstr=cfgstr,
                cache_dir=ut.ensuredir(ut.truepath('~/Desktop/smkcache')),
            )
        else:
            wrp = ut.DynStruct()

            def ensure(func):
                return func()

            wrp.ensure = ensure
            return wrp

    import copy

    dconfig = copy.deepcopy(qreq_.qparams)
    qconfig = qreq_.qparams
    if qreq_.qparams['data_ma']:
        # Disable database-side multi-assignment
        dconfig['nAssign'] = 1
    wwm = qreq_.qparams['word_weight_method']

    depc = qreq_.ibs.depc
    vocab_aids = qreq_.daids

    cheat = False
    if cheat:
        import wbia

        ut.cprint('CHEATING', 'red')
        vocab_aids = wbia.init.filter_annots.sample_annots_wrt_ref(
            qreq_.ibs,
            qreq_.daids,
            {'exclude_ref_contact': True},
            qreq_.qaids,
            verbose=1,
        )
        vocab_rowid = depc.get_rowids('vocab', (vocab_aids,), config=dconfig,
                                      ensure=False)[0]
        assert vocab_rowid is not None

    depc = qreq_.ibs.depc
    dinva_pcfgstr = depc.stacked_config(None, 'inverted_agg_assign',
                                        config=dconfig).get_cfgstr()
    qinva_pcfgstr = depc.stacked_config(None, 'inverted_agg_assign',
                                        config=qconfig).get_cfgstr()
    dannot_vuuid = qreq_.ibs.get_annot_hashid_visual_uuid(qreq_.daids).strip('_')
    qannot_vuuid = qreq_.ibs.get_annot_hashid_visual_uuid(qreq_.qaids).strip('_')
    tannot_vuuid = dannot_vuuid
    dannot_suuid = qreq_.ibs.get_annot_hashid_semantic_uuid(qreq_.daids).strip('_')
    qannot_suuid = qreq_.ibs.get_annot_hashid_semantic_uuid(qreq_.qaids).strip('_')
    dinva_phashid = ut.hashstr27(dinva_pcfgstr + tannot_vuuid)
    qinva_phashid = ut.hashstr27(qinva_pcfgstr + tannot_vuuid)
    dinva_cfgstr = '_'.join([dannot_vuuid, dinva_phashid])
    qinva_cfgstr = '_'.join([qannot_vuuid, qinva_phashid])

    # vocab = inverted_index.new_load_vocab(ibs, qreq_.daids, config)
    dinva_cacher = make_cacher('inva', dinva_cfgstr)
    qinva_cacher = make_cacher('inva', qinva_cfgstr)
    dwwm_cacher = make_cacher('word_weight', wwm + dinva_cfgstr)

    gamma_phashid = ut.hashstr27(qreq_.get_pipe_cfgstr() + tannot_vuuid)
    dgamma_cfgstr = '_'.join([dannot_suuid, gamma_phashid])
    qgamma_cfgstr = '_'.join([qannot_suuid, gamma_phashid])
    dgamma_cacher = make_cacher('dgamma', cfgstr=dgamma_cfgstr)
    qgamma_cacher = make_cacher('qgamma', cfgstr=qgamma_cfgstr)

    dinva = dinva_cacher.ensure(
        lambda: inverted_index.InvertedAnnots.from_depc(
            depc, qreq_.daids, vocab_aids, dconfig))
    qinva = qinva_cacher.ensure(
        lambda: inverted_index.InvertedAnnots.from_depc(
            depc, qreq_.qaids, vocab_aids, qconfig))

    dinva.wx_to_aids = dinva.compute_inverted_list()

    wx_to_weight = dwwm_cacher.ensure(
        lambda: dinva.compute_word_weights(wwm))
    dinva.wx_to_weight = wx_to_weight
    qinva.wx_to_weight = wx_to_weight

    thresh = qreq_.qparams['smk_thresh']
    alpha = qreq_.qparams['smk_alpha']

    dinva.gamma_list = dgamma_cacher.ensure(
        lambda: dinva.compute_gammas(alpha, thresh))
    qinva.gamma_list = qgamma_cacher.ensure(
        lambda: qinva.compute_gammas(alpha, thresh))

    qreq_.qinva = qinva
    qreq_.dinva = dinva

    logger.info('loading keypoints')
    if qreq_.qparams.sv_on:
        qreq_.data_kpts = qreq_.ibs.get_annot_kpts(
            qreq_.daids, config2_=qreq_.extern_data_config2)

    logger.info('building aid index')
    qreq_.daid_to_didx = ut.make_index_lookup(qreq_.daids)