def show_power_law_plots():
    """
    CommandLine:
        python -m ibeis.algo.hots.devcases --test-show_power_law_plots --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> #%pylab qt4
        >>> from ibeis.all_imports import *  # NOQA
        >>> from ibeis.algo.hots.devcases import *  # NOQA
        >>> show_power_law_plots()
        >>> pt.show_if_requested()
    """
    import numpy as np
    import plottool as pt
    xdata = np.linspace(0, 1, 1000)
    ydata = xdata
    fnum = 1
    powers = [.01, .1, .5, 1, 2, 30, 70, 100, 1000]
    nRows, nCols = pt.get_square_row_cols(len(powers), fix=True)
    pnum_next = pt.make_pnum_nextgen(nRows, nCols)
    for p in powers:
        plotkw = dict(fnum=fnum, marker='g-', linewidth=2, pnum=pnum_next(),
                      title='p=%r' % (p,))
        ydata_ = ydata ** p
        pt.plot2(xdata, ydata_, **plotkw)
    pt.set_figtitle('power laws y = x ** p')
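# Minimal standalone sketch of the same power-law grid using plain matplotlib
# (assumed available); handy when plottool is not installed.  The function
# name and the hard-coded 3x3 subplot shape are illustrative assumptions,
# replacing pt.get_square_row_cols.
def show_power_law_plots_mpl():
    import numpy as np
    import matplotlib.pyplot as plt
    xdata = np.linspace(0, 1, 1000)
    powers = [.01, .1, .5, 1, 2, 30, 70, 100, 1000]
    fig, axes = plt.subplots(3, 3, figsize=(9, 9))
    for ax, p in zip(axes.ravel(), powers):
        ax.plot(xdata, xdata ** p, 'g-', linewidth=2)
        ax.set_title('p=%r' % (p,))
    fig.suptitle('power laws y = x ** p')
    plt.show()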
def subindexer_time_experiment():
    """
    Builds a plot of number of annotations vs. indexer build time.

    TODO: time experiment
    """
    import ibeis
    import utool as ut
    import numpy as np
    import pyflann
    import plottool as pt
    ibs = ibeis.opendb(db='PZ_Master0')
    daid_list = ibs.get_valid_aids()
    count_list = []
    time_list = []
    flann_params = ibs.cfg.query_cfg.flann_cfg.get_flann_params()
    for count in ut.ProgressIter(range(1, 301)):
        daids_ = daid_list[:]
        np.random.shuffle(daids_)
        daids = daids_[0:count]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        with ut.Timer(verbose=False) as t:
            flann = pyflann.FLANN()
            flann.build_index(vecs, **flann_params)
        count_list.append(count)
        time_list.append(t.ellapsed)
    count_arr = np.array(count_list)
    time_arr = np.array(time_list)
    pt.plot2(count_arr, time_arr, marker='-',
             equal_aspect=False,
             x_label='num_annotations',
             y_label='FLANN build time')
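# Standalone sketch of the same timing idea that does not need an ibeis
# database: measure FLANN kdtree build time as the number of random
# SIFT-like vectors grows.  The function name, vector counts, and kdtree
# parameters below are illustrative assumptions, not the settings ibeis
# actually uses.
def subindexer_time_experiment_random():
    import time
    import numpy as np
    import pyflann
    import plottool as pt
    rng = np.random.RandomState(0)
    count_list, time_list = [], []
    for count in [1000, 5000, 10000, 50000]:
        vecs = rng.rand(count, 128).astype(np.float32)
        tt = time.time()
        flann = pyflann.FLANN()
        flann.build_index(vecs, algorithm='kdtree', trees=4)
        count_list.append(count)
        time_list.append(time.time() - tt)
    pt.plot2(np.array(count_list), np.array(time_list), marker='-o',
             equal_aspect=False, x_label='num_vectors',
             y_label='FLANN build time')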
def draw_precision_recall_curve(recall_domain, p_interp, title_pref=None,
                                fnum=1, pnum=None, color=None):
    import numpy as np
    import plottool as pt
    if color is None:
        color = (0.4, 1.0, 0.4) if pt.is_default_dark_bg() else (0.1, 0.4, 0.4)
    if recall_domain is None:
        recall_domain = np.array([])
        p_interp = np.array([])
    if p_interp.size == 0:
        # no data; avoid dividing by zero
        ave_p = -1.0  # np.nan
    else:
        ave_p = p_interp.sum() / p_interp.size
    pt.plot2(recall_domain, p_interp, marker='o--',
             x_label='recall', y_label='precision', unitbox=True,
             flipx=False, color=color, fnum=fnum, pnum=pnum,
             title='Interpolated Precision vs Recall\n' + 'avep = %.3f' % ave_p)
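# Hypothetical usage sketch for draw_precision_recall_curve.  The helper name
# and the synthetic values below are illustrative only; in ibeis the
# interpolated precision would come from real query measurements.
def _demo_draw_precision_recall_curve():
    import numpy as np
    recall_domain = np.linspace(0, 1, 11)
    p_interp = np.linspace(1.0, 0.3, 11)  # monotonically decreasing precision
    draw_precision_recall_curve(recall_domain, p_interp, fnum=1)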
def plotdata(data_list):
    import utool as ut
    import plottool as pt
    count_arr = ut.get_list_column(data_list, 1)
    time_arr = ut.get_list_column(data_list, 2)
    pt.plot2(count_arr, time_arr, marker='-o',
             equal_aspect=False,
             x_label='num_vectors',
             y_label='FLANN build time')
def _plotpts(data, px, color=pt.BLUE, label='', marker='.', **kwargs):
    #pt.figure(9003, docla=True, pnum=(1, 1, px))
    pt.plot2(data.T[0], data.T[1], marker, '', color=color, label=label,
             **kwargs)
def draw_precision_recall_curve_(recall_range_, p_interp_curve,
                                 title_pref=None, fnum=1):
    import numpy as np
    import plottool as pt
    if recall_range_ is None:
        recall_range_ = np.array([])
        p_interp_curve = np.array([])
    fig = pt.figure(fnum=fnum, docla=True, doclf=True)  # NOQA
    if p_interp_curve.size == 0:
        # no data; avoid dividing by zero
        ave_p = np.nan
    else:
        ave_p = p_interp_curve.sum() / p_interp_curve.size
    pt.plot2(recall_range_, p_interp_curve, marker='o--',
             x_label='recall', y_label='precision', unitbox=True,
             flipx=False, color='r',
             title='Interpolated Precision vs Recall\n' + 'avep = %r' % ave_p)
    print('Interpolated Precision')
    print(ut.repr2(list(zip(recall_range_, p_interp_curve))))
def flann_add_time_experiment():
    """
    Builds a plot of number of annotations vs. indexer build time.

    TODO: time experiment

    CommandLine:
        python -m ibeis.algo.hots._neighbor_experiment --test-flann_add_time_experiment --db PZ_MTEST --show
        python -m ibeis.algo.hots._neighbor_experiment --test-flann_add_time_experiment --db PZ_Master0 --show
        utprof.py -m ibeis.algo.hots._neighbor_experiment --test-flann_add_time_experiment --show

        valgrind --tool=memcheck --suppressions=valgrind-python.supp python -m ibeis.algo.hots._neighbor_experiment --test-flann_add_time_experiment --db PZ_MTEST --no-with-reindex

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots._neighbor_experiment import *  # NOQA
        >>> import ibeis
        >>> #ibs = ibeis.opendb('PZ_MTEST')
        >>> result = flann_add_time_experiment()
        >>> # verify results
        >>> print(result)
        >>> ut.show_if_requested()
    """
    import ibeis
    import utool as ut
    import numpy as np
    import pyflann
    import plottool as pt

    def make_flann_index(vecs, flann_params):
        flann = pyflann.FLANN()
        flann.build_index(vecs, **flann_params)
        return flann

    db = ut.get_argval('--db')
    ibs = ibeis.opendb(db=db)

    # Input
    if ibs.get_dbname() == 'PZ_MTEST':
        initial = 1
        reindex_stride = 16
        addition_stride = 4
        max_ceiling = 120
    elif ibs.get_dbname() == 'PZ_Master0':
        #ibs = ibeis.opendb(db='GZ_ALL')
        initial = 32
        reindex_stride = 32
        addition_stride = 16
        max_ceiling = 300001
    else:
        assert False
    #max_ceiling = 32
    all_daids = ibs.get_valid_aids()
    max_num = min(max_ceiling, len(all_daids))
    flann_params = ibs.cfg.query_cfg.flann_cfg.get_flann_params()

    # Output
    count_list, time_list_reindex = [], []
    count_list2, time_list_addition = [], []

    # Setup
    #all_randomize_daids_ = ut.deterministic_shuffle(all_daids[:])
    all_randomize_daids_ = all_daids
    # ensure all features are computed
    ibs.get_annot_vecs(all_randomize_daids_)

    def reindex_step(count, count_list, time_list_reindex):
        daids = all_randomize_daids_[0:count]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        with ut.Timer(verbose=False) as t:
            flann = make_flann_index(vecs, flann_params)  # NOQA
        count_list.append(count)
        time_list_reindex.append(t.ellapsed)

    def addition_step(count, flann, count_list2, time_list_addition):
        daids = all_randomize_daids_[count:count + 1]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        with ut.Timer(verbose=False) as t:
            flann.add_points(vecs)
        count_list2.append(count)
        time_list_addition.append(t.ellapsed)

    def make_initial_index(initial):
        daids = all_randomize_daids_[0:initial + 1]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        flann = make_flann_index(vecs, flann_params)
        return flann

    WITH_REINDEX = not ut.get_argflag('--no-with-reindex')
    if WITH_REINDEX:
        # Reindex Part
        reindex_lbl = 'Reindexing'
        _reindex_iter = range(1, max_num, reindex_stride)
        reindex_iter = ut.ProgressIter(_reindex_iter, lbl=reindex_lbl, freq=1)
        for count in reindex_iter:
            reindex_step(count, count_list, time_list_reindex)

    # Add Part
    flann = make_initial_index(initial)
    addition_lbl = 'Addition'
    _addition_iter = range(initial + 1, max_num, addition_stride)
    addition_iter = ut.ProgressIter(_addition_iter, lbl=addition_lbl)
    for count in addition_iter:
        addition_step(count, flann, count_list2, time_list_addition)

    print('---')
    print('Reindex took %.2f seconds' % sum(time_list_reindex))
    print('Addition took %.2f seconds' % sum(time_list_addition))
    print('---')
    statskw = dict(precision=2, newlines=True)
    print('Reindex stats ' + ut.get_stats_str(time_list_reindex, **statskw))
    print('Addition stats ' + ut.get_stats_str(time_list_addition, **statskw))

    print('Plotting')
    #with pt.FigureContext:
    next_fnum = iter(range(0, 2)).__next__  # PY3-compatible
    pt.figure(fnum=next_fnum())
    if WITH_REINDEX:
        pt.plot2(count_list, time_list_reindex, marker='-o',
                 equal_aspect=False, x_label='num_annotations',
                 label=reindex_lbl + ' Time', dark=False)
        #pt.figure(fnum=next_fnum())
    pt.plot2(count_list2, time_list_addition, marker='-o',
             equal_aspect=False, x_label='num_annotations',
             label=addition_lbl + ' Time')
    pt.legend()
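# Standalone sketch of the rebuild-vs-addition comparison on random float32
# vectors (no ibeis database needed).  The function name, vector counts, and
# kdtree parameters are illustrative assumptions; only pyflann's build_index
# and add_points calls (used above) are relied on.
def flann_add_time_sketch():
    import time
    import numpy as np
    import pyflann
    rng = np.random.RandomState(42)
    vecs = rng.rand(20000, 128).astype(np.float32)
    # Time a full rebuild over the whole set
    flann = pyflann.FLANN()
    tt = time.time()
    flann.build_index(vecs, algorithm='kdtree', trees=4)
    print('full build:  %.3fs' % (time.time() - tt,))
    # Time incremental addition on top of a smaller initial index
    flann2 = pyflann.FLANN()
    flann2.build_index(vecs[:10000], algorithm='kdtree', trees=4)
    tt = time.time()
    flann2.add_points(vecs[10000:])
    print('add_points:  %.3fs' % (time.time() - tt,))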
def augment_nnindexer_experiment():
    """
    References:
        http://answers.opencv.org/question/44592/flann-index-training-fails-with-segfault/

    CommandLine:
        utprof.py -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment
        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment
        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_MTEST --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6
        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6 --nosave-flann --show
        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6 --nosave-flann --no-api-cache --nocache-uuids
        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_MTEST --show
        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --show

        # RUNS THE SEGFAULTING CASE
        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --show
        # Debug it
        gdb python
        run -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --show
        gdb python
        run -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots._neighbor_experiment import *  # NOQA
        >>> # execute function
        >>> augment_nnindexer_experiment()
        >>> # verify results
        >>> ut.show_if_requested()
    """
    import ibeis
    # build test data
    #ibs = ibeis.opendb('PZ_MTEST')
    ibs = ibeis.opendb(defaultdb='PZ_Master0')
    if ibs.get_dbname() == 'PZ_MTEST':
        initial = 1
        addition_stride = 4
        max_ceiling = 100
    elif ibs.get_dbname() == 'PZ_Master0':
        initial = 128
        #addition_stride = 64
        #addition_stride = 128
        addition_stride = 256
        max_ceiling = 10000
        #max_ceiling = 4000
        #max_ceiling = 2000
        #max_ceiling = 600
    else:
        assert False
    all_daids = ibs.get_valid_aids(species='zebra_plains')
    qreq_ = ibs.new_query_request(all_daids, all_daids)
    max_num = min(max_ceiling, len(all_daids))

    # Clear Caches
    ibs.delete_flann_cachedir()
    neighbor_index_cache.clear_memcache()
    neighbor_index_cache.clear_uuid_cache(qreq_)

    # Setup
    all_randomize_daids_ = ut.deterministic_shuffle(all_daids[:])
    # ensure all features are computed
    #ibs.get_annot_vecs(all_randomize_daids_, ensure=True)
    #ibs.get_annot_fgweights(all_randomize_daids_, ensure=True)

    nnindexer_list = []
    addition_lbl = 'Addition'
    _addition_iter = list(range(initial + 1, max_num, addition_stride))
    addition_iter = iter(ut.ProgressIter(_addition_iter, lbl=addition_lbl,
                                         freq=1, autoadjust=False))
    time_list_addition = []
    #time_list_reindex = []
    addition_count_list = []
    tmp_cfgstr_list = []
    # Initialize the reindex lists up front so the plotting code at the end
    # does not raise a NameError if the experiment is interrupted early.
    time_list_reindex = []
    reindex_count_list = []
    reindex_label = 'Reindex'

    #for _ in range(80):
    #    next(addition_iter)

    try:
        memtrack = ut.MemoryTracker(disable=False)
        for count in addition_iter:
            aid_list_ = all_randomize_daids_[0:count]
            # Request an indexer which could be an augmented version of an
            # existing indexer.
            with ut.Timer(verbose=False) as t:
                memtrack.report('BEFORE AUGMENT')
                nnindexer_ = neighbor_index_cache.request_augmented_ibeis_nnindexer(
                    qreq_, aid_list_)
                memtrack.report('AFTER AUGMENT')
            nnindexer_list.append(nnindexer_)
            addition_count_list.append(count)
            time_list_addition.append(t.ellapsed)
            tmp_cfgstr_list.append(nnindexer_.cfgstr)
            print('===============\n\n')
        print(ut.list_str(time_list_addition))
        print(ut.list_str(list(map(id, nnindexer_list))))
        print(ut.list_str(tmp_cfgstr_list))
        print(ut.list_str(list([nnindxer.cfgstr for nnindxer in nnindexer_list])))

        IS_SMALL = False

        if IS_SMALL:
            nnindexer_list = []
        # go backwards for reindex
        _reindex_iter = list(range(initial + 1, max_num, addition_stride))[::-1]
        reindex_iter = ut.ProgressIter(_reindex_iter, lbl=reindex_label)
        for count in reindex_iter:
            print('\n+===PREDONE====================\n')
            # check only a single size for memory leaks
            #count = max_num // 16 + ((x % 6) * 1)
            #x += 1

            aid_list_ = all_randomize_daids_[0:count]
            # Call the same code, but force rebuilds
            memtrack.report('BEFORE REINDEX')
            with ut.Timer(verbose=False) as t:
                nnindexer_ = neighbor_index_cache.request_augmented_ibeis_nnindexer(
                    qreq_, aid_list_, force_rebuild=True, memtrack=memtrack)
            memtrack.report('AFTER REINDEX')
            ibs.print_cachestats_str()
            print('[nnindex.MEMCACHE] size(NEIGHBOR_CACHE) = %s' % (
                ut.get_object_size_str(neighbor_index_cache.NEIGHBOR_CACHE.items()),))
            print('[nnindex.MEMCACHE] len(NEIGHBOR_CACHE) = %s' % (
                len(neighbor_index_cache.NEIGHBOR_CACHE.items()),))
            print('[nnindex.MEMCACHE] size(UUID_MAP_CACHE) = %s' % (
                ut.get_object_size_str(neighbor_index_cache.UUID_MAP_CACHE),))
            print('totalsize(nnindexer) = ' + ut.get_object_size_str(nnindexer_))
            memtrack.report_type(neighbor_index_cache.NeighborIndex)
            ut.print_object_size_tree(nnindexer_, lbl='nnindexer_')
            if IS_SMALL:
                nnindexer_list.append(nnindexer_)
            reindex_count_list.append(count)
            time_list_reindex.append(t.ellapsed)
            #import cv2
            #import matplotlib as mpl
            #print(mem_top.mem_top(limit=30, width=120,
            #                      #exclude_refs=[cv2.__dict__, mpl.__dict__]
            #                      ))
            print('L___________________\n\n\n')
        print(ut.list_str(time_list_reindex))
        if IS_SMALL:
            print(ut.list_str(list(map(id, nnindexer_list))))
            print(ut.list_str(list([nnindxer.cfgstr for nnindxer in nnindexer_list])))
    except KeyboardInterrupt:
        print('\n[train] Caught CTRL+C')
        resolution = ''
        from six.moves import input
        while not resolution.isdigit():
            print('\n[train] What do you want to do?')
            print('[train]     0 - Continue')
            print('[train]     1 - Embed')
            print('[train]  ELSE - Stop network training')
            resolution = input('[train] Resolution: ')
        resolution = int(resolution)
        # We have a resolution
        if resolution == 0:
            print('resuming training...')
        elif resolution == 1:
            ut.embed()

    import plottool as pt

    next_fnum = iter(range(0, 1)).__next__  # PY3-compatible
    pt.figure(fnum=next_fnum())
    if len(addition_count_list) > 0:
        pt.plot2(addition_count_list, time_list_addition, marker='-o',
                 equal_aspect=False, x_label='num_annotations',
                 label=addition_lbl + ' Time')
    if len(reindex_count_list) > 0:
        pt.plot2(reindex_count_list, time_list_reindex, marker='-o',
                 equal_aspect=False, x_label='num_annotations',
                 label=reindex_label + ' Time')
    pt.set_figtitle('Augmented indexer experiment')

    pt.legend()
def draw_roc_curve(fpr, tpr, fnum=None, pnum=None, marker='-',
                   target_tpr=None, target_fpr=None, thresholds=None,
                   color=None, show_operating_point=False):
    r"""
    Args:
        fpr (?):
        tpr (?):
        fnum (int): figure number (default = None)
        pnum (tuple): plot number (default = None)
        marker (str): (default = '-')
        target_tpr (None): (default = None)
        target_fpr (None): (default = None)
        thresholds (None): (default = None)
        color (None): (default = None)
        show_operating_point (bool): (default = False)

    CommandLine:
        python -m vtool.confusion --exec-draw_roc_curve --show --lightbg

    Example:
        >>> # DISABLE_DOCTEST
        >>> from vtool.confusion import *  # NOQA
        >>> scores, labels = testdata_scores_labels()
        >>> confusions = get_confusion_metrics(scores, labels)
        >>> fpr = confusions.fpr
        >>> tpr = confusions.tpr
        >>> thresholds = confusions.thresholds
        >>> fnum = None
        >>> pnum = None
        >>> marker = '-x'
        >>> target_tpr = .85
        >>> target_fpr = None
        >>> color = None
        >>> show_operating_point = True
        >>> draw_roc_curve(fpr, tpr, fnum, pnum, marker, target_tpr, target_fpr,
        >>>                thresholds, color, show_operating_point)
        >>> ut.show_if_requested()
    """
    import plottool as pt
    if fnum is None:
        fnum = pt.next_fnum()
    if color is None:
        color = (0.4, 1.0, 0.4) if pt.is_default_dark_bg() else (0.1, 0.4, 0.4)

    roc_auc = sklearn.metrics.auc(fpr, tpr)

    title_suffix = ''

    if target_fpr is not None:
        #func = scipy.interpolate.interp1d(fpr, tpr, kind='linear', assume_sorted=False)
        #func = scipy.interpolate.interp1d(xdata, ydata, kind='nearest', assume_sorted=False)
        #interp_vals[interp_mask] = func(pt[interp_mask])
        target_fpr = np.clip(target_fpr, 0, 1)
        interp_tpr = interpolate_replbounds(fpr, tpr, target_fpr)
        choice_tpr = interp_tpr
        choice_fpr = target_fpr
    elif target_tpr is not None:
        target_tpr = np.clip(target_tpr, 0, 1)
        interp_fpr = interpolate_replbounds(tpr, fpr, target_tpr)
        choice_tpr = target_tpr
        choice_fpr = interp_fpr
    else:
        choice_tpr = None
        choice_fpr = None

    if choice_fpr is not None:
        choice_thresh = 0
        if thresholds is not None:
            try:
                index = np.nonzero(tpr >= choice_tpr)[0][0]
            except IndexError:
                index = len(thresholds) - 1
            choice_thresh = thresholds[index]
        #percent = ut.scalar_str(choice_tpr * 100).split('.')[0]
        #title_suffix = ', FPR%s=%05.2f%%' % (percent, choice_fpr)
        if show_operating_point:
            title_suffix = ', fpr=%.2f, tpr=%.2f, thresh=%.2f' % (
                choice_fpr, choice_tpr, choice_thresh)

    title = 'Receiver operating characteristic\n' + 'AUC=%.3f' % (roc_auc,)
    title += title_suffix

    pt.plot2(fpr, tpr, marker=marker,
             x_label='False Positive Rate',
             y_label='True Positive Rate',
             unitbox=True, flipx=False, color=color, fnum=fnum, pnum=pnum,
             title=title)
    if False:
        # Interp does not work right because of duplicate values
        # in xdomain
        line_ = np.linspace(.11, .9, 20)
        #np.append([np.inf], np.diff(fpr)) > 0
        #np.append([np.inf], np.diff(tpr)) > 0
        unique_tpr_idxs = np.nonzero(np.append([np.inf], np.diff(tpr)) > 0)[0]
        unique_fpr_idxs = np.nonzero(np.append([np.inf], np.diff(fpr)) > 0)[0]
        pt.plt.plot(
            line_,
            interpolate_replbounds(fpr[unique_fpr_idxs], tpr[unique_fpr_idxs], line_),
            'b-x')
        pt.plt.plot(
            interpolate_replbounds(tpr[unique_tpr_idxs], fpr[unique_tpr_idxs], line_),
            line_, 'r-x')
    if choice_fpr is not None:
        pt.plot(choice_fpr, choice_tpr, 'o', color=pt.PINK)
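# Hypothetical usage sketch for draw_roc_curve.  The helper name and the
# synthetic scores/labels are illustrative only; the doctest above uses
# vtool's own testdata_scores_labels instead.  Assumes this sits alongside
# draw_roc_curve so sklearn and plottool are importable.
def _demo_draw_roc_curve():
    import numpy as np
    import sklearn.metrics
    import plottool as pt
    rng = np.random.RandomState(0)
    labels = rng.rand(200) > 0.5
    # positives get a score boost so the curve is non-trivial
    scores = labels * 0.5 + rng.rand(200) * 0.7
    fpr, tpr, thresholds = sklearn.metrics.roc_curve(labels, scores)
    draw_roc_curve(fpr, tpr, thresholds=thresholds, target_tpr=.85,
                   show_operating_point=True)
    pt.show_if_requested()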