def setup_incremental_test(ibs_gt, clear_names=True, aid_order='shuffle'):
    r"""
    CommandLine:
        python -m ibeis.algo.hots.automated_helpers --test-setup_incremental_test:0
        python dev.py -t custom --cfg codename:vsone_unnorm --db PZ_MTEST --allgt --vf --va
        python dev.py -t custom --cfg codename:vsone_unnorm --db PZ_MTEST --allgt --vf --va --index 0 4 8 --verbose

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.automated_helpers import *  # NOQA
        >>> import ibeis  # NOQA
        >>> ibs_gt = ibeis.opendb('PZ_MTEST')
        >>> ibs2, aid_list1, aid1_to_aid2 = setup_incremental_test(ibs_gt)

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.automated_helpers import *  # NOQA
        >>> import ibeis  # NOQA
        >>> ibs_gt = ibeis.opendb('GZ_ALL')
        >>> ibs2, aid_list1, aid1_to_aid2 = setup_incremental_test(ibs_gt)
    """
    print('\n\n---- SETUP INCREMENTAL TEST ---\n\n')
    # Take a known database and create an empty database to test in
    ONLY_GT = True
    if ONLY_GT:
        # use only annotations that will have matches in the test
        aid_list1_ = ibs_gt.get_aids_with_groundtruth()
    else:
        # use every annotation in the test
        aid_list1_ = ibs_gt.get_valid_aids()

    if ut.get_argflag('--gzdev'):
        # Use a custom selection of GZ_ALL
        from ibeis.algo.hots import devcases
        assert ibs_gt.get_dbname() == 'GZ_ALL', 'not gzall'
        vuuid_list, ignore_vuuids = devcases.get_gzall_small_test()
        # TODO: include all names of these annots too
        aid_list = ibs_gt.get_annot_aids_from_visual_uuid(vuuid_list)
        ignore_aid_list = ibs_gt.get_annot_aids_from_visual_uuid(ignore_vuuids)
        ignore_nid_list = ibs_gt.get_annot_nids(ignore_aid_list)
        ut.assert_all_not_None(aid_list)
        other_aids = ut.flatten(ibs_gt.get_annot_groundtruth(aid_list))
        aid_list.extend(other_aids)
        aid_list = sorted(set(aid_list))
        nid_list = ibs_gt.get_annot_nids(aid_list)
        isinvalid_list = [nid in ignore_nid_list for nid in nid_list]
        print('Filtering %r annots specified to ignore' % (sum(isinvalid_list),))
        aid_list = ut.filterfalse_items(aid_list, isinvalid_list)
        aid_list1_ = aid_list

    # Add aids in a randomized order
    VALID_ORDERS = ['shuffle', 'stagger', 'same']
    aid_order = ut.get_argval('--aid-order', default=aid_order)
    # NOTE: list.index raises ValueError instead of returning -1,
    # so a membership test is the correct check here
    assert aid_order in VALID_ORDERS, 'invalid aid_order=%r' % (aid_order,)
    if aid_order == 'shuffle':
        aid_list1 = ut.deterministic_shuffle(aid_list1_[:])
    elif aid_order == 'stagger':
        # Interleave annotations across names so consecutive aids tend to
        # belong to different individuals (see the sketch after this function)
        from six.moves import zip_longest, filter
        aid_groups, unique_nid_list = ibs_gt.group_annots_by_name(aid_list1_)

        def stagger_group(list_):
            return ut.filter_Nones(ut.iflatten(zip_longest(*list_)))

        aid_multiton_group = list(filter(lambda aids: len(aids) > 1, aid_groups))
        aid_list1 = stagger_group(aid_multiton_group)
    elif aid_order == 'same':
        aid_list1 = aid_list1_

    # If reset is true the test database is started completely from scratch
    reset = ut.get_argflag('--reset')

    aid1_to_aid2 = {}  # annotation mapping
    ibs2 = make_incremental_test_database(ibs_gt, aid_list1, reset)

    # Pre-add all annotations to the test database
    aids_chunk1 = aid_list1
    aid_list2 = add_annot_chunk(ibs_gt, ibs2, aids_chunk1, aid1_to_aid2)

    # Assert annotation visual uuids are in agreement
    if ut.DEBUG2:
        annot_testdb_consistency_checks(ibs_gt, ibs2, aid_list1, aid_list2)

    # Remove names and exemplar information from the test database
    if clear_names:
        ensure_testdb_clean_data(ibs_gt, ibs2, aid_list1, aid_list2)

    # Preprocess features before testing
    ibs2.ensure_annotation_data(aid_list2, featweights=True)

    return ibs2, aid_list1, aid1_to_aid2
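
# Standalone illustration (toy data, no database required; the helper name
# below is hypothetical and not part of the pipeline):
def _demo_stagger_order():
    """
    Minimal sketch of the 'stagger' aid ordering used in
    setup_incremental_test, applied to toy aid groups. The real code also
    drops singleton names first; this sketch keeps one to show how
    zip_longest pads the shorter groups.

    Example:
        >>> _demo_stagger_order()
        [1, 4, 6, 2, 5, 3]
    """
    from six.moves import zip_longest
    # Three names with 3, 2, and 1 annotations respectively
    aid_groups = [[1, 2, 3], [4, 5], [6]]
    # Take the first aid of every name, then the second, etc., dropping pads
    return [aid for tup in zip_longest(*aid_groups)
            for aid in tup if aid is not None]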
def augment_nnindexer_experiment():
    """
    References:
        http://answers.opencv.org/question/44592/flann-index-training-fails-with-segfault/

    CommandLine:
        utprof.py -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment
        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment
        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_MTEST --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6
        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6 --nosave-flann --show
        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6 --nosave-flann --no-api-cache --nocache-uuids
        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_MTEST --show

        # RUNS THE SEGFAULTING CASE
        python -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --show
        # Debug it
        gdb python
        run -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --show
        run -m wbia.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.algo.hots._neighbor_experiment import *  # NOQA
        >>> # execute function
        >>> augment_nnindexer_experiment()
        >>> # verify results
        >>> ut.show_if_requested()
    """
    import wbia

    # build test data
    # ibs = wbia.opendb('PZ_MTEST')
    ibs = wbia.opendb(defaultdb='PZ_Master0')
    if ibs.get_dbname() == 'PZ_MTEST':
        initial = 1
        addition_stride = 4
        max_ceiling = 100
    elif ibs.get_dbname() == 'PZ_Master0':
        initial = 128
        # addition_stride = 64
        # addition_stride = 128
        addition_stride = 256
        max_ceiling = 10000
        # max_ceiling = 4000
        # max_ceiling = 2000
        # max_ceiling = 600
    else:
        raise AssertionError('unhandled database %r' % (ibs.get_dbname(),))
    all_daids = ibs.get_valid_aids(species='zebra_plains')
    qreq_ = ibs.new_query_request(all_daids, all_daids)
    max_num = min(max_ceiling, len(all_daids))

    # Clear caches
    ibs.delete_flann_cachedir()
    neighbor_index_cache.clear_memcache()
    neighbor_index_cache.clear_uuid_cache(qreq_)

    # Setup
    all_randomize_daids_ = ut.deterministic_shuffle(all_daids[:])
    # ensure all features are computed

    nnindexer_list = []
    addition_lbl = 'Addition'
    _addition_iter = list(range(initial + 1, max_num, addition_stride))
    addition_iter = iter(
        ut.ProgressIter(_addition_iter, lbl=addition_lbl, freq=1, autoadjust=False)
    )
    time_list_addition = []
    addition_count_list = []
    tmp_cfgstr_list = []
    # Initialize the reindex lists here as well, so the plotting code at the
    # end still works if the experiment is interrupted during the addition loop
    time_list_reindex = []
    reindex_count_list = []
    reindex_label = 'Reindex'

    try:
        memtrack = ut.MemoryTracker(disable=False)
        for count in addition_iter:
            aid_list_ = all_randomize_daids_[0:count]
            # Request an indexer which may be an augmented version of an existing indexer.
            with ut.Timer(verbose=False) as t:
                memtrack.report('BEFORE AUGMENT')
                nnindexer_ = neighbor_index_cache.request_augmented_wbia_nnindexer(
                    qreq_, aid_list_
                )
                memtrack.report('AFTER AUGMENT')
            nnindexer_list.append(nnindexer_)
            addition_count_list.append(count)
            time_list_addition.append(t.ellapsed)
            tmp_cfgstr_list.append(nnindexer_.cfgstr)
            logger.info('===============\n\n')
        logger.info(ut.repr2(time_list_addition))
        logger.info(ut.repr2(list(map(id, nnindexer_list))))
        logger.info(ut.repr2(tmp_cfgstr_list))
        logger.info(ut.repr2([indexer.cfgstr for indexer in nnindexer_list]))

        IS_SMALL = False
        if IS_SMALL:
            nnindexer_list = []
        # go backwards for reindex
        _reindex_iter = list(range(initial + 1, max_num, addition_stride))[::-1]
        reindex_iter = ut.ProgressIter(_reindex_iter, lbl=reindex_label)

        for count in reindex_iter:
            logger.info('\n+===PREDONE====================\n')
            # check only a single size for memory leaks
            # count = max_num // 16 + ((x % 6) * 1)
            # x += 1

            aid_list_ = all_randomize_daids_[0:count]
            # Call the same code, but force rebuilds
            memtrack.report('BEFORE REINDEX')
            with ut.Timer(verbose=False) as t:
                nnindexer_ = neighbor_index_cache.request_augmented_wbia_nnindexer(
                    qreq_, aid_list_, force_rebuild=True, memtrack=memtrack
                )
            memtrack.report('AFTER REINDEX')
            ibs.print_cachestats_str()
            logger.info(
                '[nnindex.MEMCACHE] size(NEIGHBOR_CACHE) = %s'
                % (ut.get_object_size_str(neighbor_index_cache.NEIGHBOR_CACHE.items()),)
            )
            logger.info(
                '[nnindex.MEMCACHE] len(NEIGHBOR_CACHE) = %s'
                % (len(neighbor_index_cache.NEIGHBOR_CACHE.items()),)
            )
            logger.info(
                '[nnindex.MEMCACHE] size(UUID_MAP_CACHE) = %s'
                % (ut.get_object_size_str(neighbor_index_cache.UUID_MAP_CACHE),)
            )
            logger.info('totalsize(nnindexer) = ' + ut.get_object_size_str(nnindexer_))
            memtrack.report_type(neighbor_index_cache.NeighborIndex)
            ut.print_object_size_tree(nnindexer_, lbl='nnindexer_')
            if IS_SMALL:
                nnindexer_list.append(nnindexer_)
            reindex_count_list.append(count)
            time_list_reindex.append(t.ellapsed)
            logger.info('L___________________\n\n\n')
        logger.info(ut.repr2(time_list_reindex))
        if IS_SMALL:
            logger.info(ut.repr2(list(map(id, nnindexer_list))))
            logger.info(ut.repr2([indexer.cfgstr for indexer in nnindexer_list]))
    except KeyboardInterrupt:
        logger.info('\n[train] Caught CTRL+C')
        resolution = ''
        from six.moves import input

        while not resolution.isdigit():
            logger.info('\n[train] What do you want to do?')
            logger.info('[train]     0 - Continue')
            logger.info('[train]     1 - Embed')
            logger.info('[train]  ELSE - Stop network training')
            resolution = input('[train] Resolution: ')
        resolution = int(resolution)
        # We have a resolution
        if resolution == 0:
            logger.info('resuming training...')
        elif resolution == 1:
            ut.embed()

    import wbia.plottool as pt

    # Python 3: iterators expose __next__, not a .next method
    next_fnum = iter(range(0, 1)).__next__
    pt.figure(fnum=next_fnum())
    if len(addition_count_list) > 0:
        pt.plot2(
            addition_count_list,
            time_list_addition,
            marker='-o',
            equal_aspect=False,
            x_label='num_annotations',
            label=addition_lbl + ' Time',
        )
    if len(reindex_count_list) > 0:
        pt.plot2(
            reindex_count_list,
            time_list_reindex,
            marker='-o',
            equal_aspect=False,
            x_label='num_annotations',
            label=reindex_label + ' Time',
        )
    pt.set_figtitle('Augmented indexer experiment')
    pt.legend()
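
# Standalone illustration (no wbia or FLANN required; the helper name below
# is hypothetical and the index operations are toy stand-ins):
def _demo_augment_vs_reindex_timing():
    """
    Dependency-light sketch of the measurement pattern in
    augment_nnindexer_experiment: at each size, time growing an index
    incrementally versus rebuilding it from scratch. np.concatenate stands
    in for FLANN point addition and sorting a fresh slice stands in for a
    full reindex; only the relative shape of the two timing curves matters.
    """
    import time
    import numpy as np
    rng = np.random.RandomState(0)
    all_vecs = rng.rand(20000, 8).astype(np.float32)

    index = all_vecs[:128].copy()
    counts, augment_times, reindex_times = [], [], []
    for count in range(128, len(all_vecs), 4096):
        chunk = all_vecs[len(index):count]
        t0 = time.perf_counter()
        index = np.concatenate([index, chunk])      # "augment": reuse prior work
        augment_times.append(time.perf_counter() - t0)

        t0 = time.perf_counter()
        np.sort(all_vecs[:count], axis=0)           # "reindex": redo everything
        reindex_times.append(time.perf_counter() - t0)
        counts.append(count)

    for n, ta, tr in zip(counts, augment_times, reindex_times):
        print('n=%5d  augment=%.5fs  reindex=%.5fs' % (n, ta, tr))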
def distinct_colors(N, brightness=0.878, randomize=True, hue_range=(0.0, 1.0),
                    cmap_seed=None):
    r"""
    Args:
        N (int): number of distinct colors to generate
        brightness (float): saturation and value of the generated colors

    Returns:
        list: RGB_tuples

    CommandLine:
        python -m plottool.color_funcs --test-distinct_colors --N 2 --show --hue-range=0.05,.95
        python -m plottool.color_funcs --test-distinct_colors --N 3 --show --hue-range=0.05,.95
        python -m plottool.color_funcs --test-distinct_colors --N 4 --show --hue-range=0.05,.95
        python -m plottool.color_funcs --test-distinct_colors --N 3 --show --no-randomize
        python -m plottool.color_funcs --test-distinct_colors --N 4 --show --no-randomize
        python -m plottool.color_funcs --test-distinct_colors --N 20 --show
        python -m plottool.color_funcs --exec-distinct_colors --show
        python -m plottool.color_funcs --exec-distinct_colors --show --no-randomize --N 50
        python -m plottool.color_funcs --exec-distinct_colors --show --cmap_seed=foobar

    References:
        http://blog.jianhuashao.com/2011/09/generate-n-distinct-colors.html

    Example:
        >>> # ENABLE_DOCTEST
        >>> from plottool.color_funcs import *  # NOQA
        >>> # build test data
        >>> N = ut.get_argval('--N', int, 2)
        >>> randomize = not ut.get_argflag('--no-randomize')
        >>> brightness = 0.878
        >>> # execute function
        >>> cmap_seed = ut.get_argval('--cmap_seed', str, default=None)
        >>> hue_range = ut.get_argval('--hue-range', list, default=(0.00, 1.0))
        >>> RGB_tuples = distinct_colors(N, brightness, randomize, hue_range, cmap_seed=cmap_seed)
        >>> # verify results
        >>> assert len(RGB_tuples) == N
        >>> result = str(RGB_tuples)
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> color_list = RGB_tuples
        >>> testshow_colors(color_list)
        >>> ut.show_if_requested()
    """
    # TODO: Add sin wave modulation to the sat and value
    import plottool as pt
    # HACK for white figures: skip hard-to-see hues on light backgrounds
    remove_yellow = not pt.is_default_dark_bg()

    use_jet = False
    if use_jet:
        cmap = pt.plt.cm.jet
        RGB_tuples = list(map(tuple, cmap(np.linspace(0, 1, N))))
    elif cmap_seed is not None:
        # Randomized map based on a seed
        choices = [
            # 'Set1',
            'Dark2',
            'jet',
            # 'gist_rainbow',
            # 'rainbow',
            # 'gnuplot',
            # 'Accent',
        ]
        cmap_hack = ut.get_argval('--cmap-hack', type_=str, default=None)
        ncolor_hack = ut.get_argval('--ncolor-hack', type_=int, default=None)
        if cmap_hack is not None:
            choices = [cmap_hack]
        if ncolor_hack is not None:
            N = ncolor_hack
            N_ = N
        seed = sum(map(ord, ut.hashstr27(cmap_seed)))
        rng = np.random.RandomState(seed + 48930)
        cmap_str = rng.choice(choices, 1)[0]
        cmap = pt.plt.cm.get_cmap(cmap_str)
        # Jitter the evenly spaced samples, then reflect back into [0, 1]
        jitter = (rng.randn(N) / (rng.randn(100).max() / 2)).clip(-1, 1) * (1 / (N ** 2))
        range_ = np.linspace(0, 1, N, endpoint=False) + jitter
        while not (np.all(range_ >= 0) and np.all(range_ <= 1)):
            range_[range_ < 0] = np.abs(range_[range_ < 0])
            range_[range_ > 1] = 2 - range_[range_ > 1]
        shift = rng.rand()
        range_ = (range_ + shift) % 1
        if ncolor_hack is not None:
            range_ = range_[0:N_]
        RGB_tuples = list(map(tuple, cmap(range_)))
    else:
        # Evenly space hues around the HSV color wheel
        sat = brightness
        val = brightness
        hmin, hmax = hue_range
        if remove_yellow:
            hue_skips = [(0.13, 0.24)]
        else:
            hue_skips = []
        hue_skip_ranges = [_[1] - _[0] for _ in hue_skips]
        total_skip = sum(hue_skip_ranges)
        hmax_ = hmax - total_skip
        hue_list = np.linspace(hmin, hmax_, N, endpoint=False, dtype=float)
        # Remove colors (like hard-to-see yellows) in the specified ranges
        for skip, skip_width in zip(hue_skips, hue_skip_ranges):
            hue_list = [hue if hue <= skip[0] else hue + skip_width
                        for hue in hue_list]
        HSV_tuples = [(hue, sat, val) for hue in hue_list]
        RGB_tuples = [colorsys.hsv_to_rgb(*x) for x in HSV_tuples]
    if randomize:
        ut.deterministic_shuffle(RGB_tuples)
    return RGB_tuples
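
# Standalone illustration (stdlib only; the helper name below is hypothetical):
def _demo_evenly_spaced_hues(N=3, sat=0.878, val=0.878):
    """
    Sketch of the core idea in distinct_colors: space N hues evenly around
    the HSV wheel and convert to RGB, without the yellow-skip and shuffle
    machinery.

    Example:
        >>> [tuple(round(c, 3) for c in rgb) for rgb in _demo_evenly_spaced_hues(2)]
        [(0.878, 0.107, 0.107), (0.107, 0.878, 0.878)]
    """
    import colorsys
    # endpoint-exclusive spacing so hue 0.0 and hue 1.0 (both red) don't collide
    return [colorsys.hsv_to_rgb(i / float(N), sat, val) for i in range(N)]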