def done_part(cand, num_neighbs):
    # Find the first `num_neighbs` complete columns in each row
    rowxs, colxs = np.where(cand.validflags)
    unique_rows, groupxs = vt.group_indices(rowxs, assume_sorted=True)
    first_k_groupxs = [groupx[0:num_neighbs] for groupx in groupxs]
    if DEBUG_REQUERY:
        assert all(ut.issorted(groupx) for groupx in groupxs)
        assert all([len(group) == num_neighbs for group in first_k_groupxs])
    # np.int_ is used here; the old np.int alias was removed from numpy
    chosen_xs = np.array(ut.flatten(first_k_groupxs), dtype=np.int_)
    # chosen_xs = np.hstack(first_k_groupxs)
    # then convert these to multi-indices
    done_rows = rowxs.take(chosen_xs)
    done_cols = colxs.take(chosen_xs)
    multi_index = (done_rows, done_cols)
    # done_shape = (cand.validflags.shape[0], num_neighbs)
    # flat_xs = np.ravel_multi_index(multi_index, done_shape)
    flat_xs = np.ravel_multi_index(multi_index, cand.idxs.shape)
    _shape = (-1, num_neighbs)
    idxs = cand.idxs.take(flat_xs).reshape(_shape)
    dists = cand.dists.take(flat_xs).reshape(_shape)
    trueks = colxs.take(chosen_xs).reshape(_shape)
    if DEBUG_REQUERY:
        # dists2 = dists.copy()
        for count, (row, cols) in enumerate(zip(unique_rows, groupxs)):
            pass
        assert np.all(np.diff(dists, axis=1) >= 0)
        valid = cand.validflags.take(flat_xs).reshape(_shape)
        assert np.all(valid)
    return idxs, dists, trueks
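
def _demo_done_part_indexing():
    # Hedged sketch, not part of the original module: the helper name and the toy
    # data below are made up for illustration. It restates the indexing trick
    # done_part relies on using plain numpy (no vt/ut helpers): for each row of a
    # boolean validity mask, take the first `num_neighbs` True columns, convert
    # the (row, col) pairs to flat indices, and gather from a same-shaped array.
    # Assumes every row has at least `num_neighbs` valid columns, which done_part
    # asserts in its DEBUG_REQUERY branch.
    import numpy as np

    num_neighbs = 2
    validflags = np.array([
        [True, False, True, True],
        [False, True, True, False],
    ])
    idxs = np.arange(validflags.size).reshape(validflags.shape)

    rowxs, colxs = np.where(validflags)
    # keep the first `num_neighbs` hit columns per row
    first_k_cols = np.concatenate(
        [colxs[rowxs == r][:num_neighbs] for r in np.unique(rowxs)]
    )
    done_rows = np.repeat(np.unique(rowxs), num_neighbs)
    flat_xs = np.ravel_multi_index((done_rows, first_k_cols), idxs.shape)
    # gathers [[0, 2], [5, 6]] for the toy data above
    return idxs.take(flat_xs).reshape(-1, num_neighbs)
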
def trytest_multiple_add_removes():
    r"""
    CommandLine:
        python -m wbia.algo.hots._neighbor_experiment --exec-test_multiple_add_removes

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.algo.hots._neighbor_experiment import *  # NOQA
        >>> result = test_multiple_add_removes()
        >>> print(result)
    """
    from wbia.algo.hots.neighbor_index_cache import test_nnindexer

    K = 4
    nnindexer, qreq_, ibs = test_nnindexer('PZ_MTEST', use_memcache=False)

    assert len(nnindexer.get_removed_idxs()) == 0
    logger.info('\n\n --- got nnindex testdata --- ')
    logger.info('')

    @ut.tracefunc_xml
    def print_nnindexer(nnindexer):
        logger.info('nnindexer.get_indexed_aids() = %r' % (nnindexer.get_indexed_aids(),))
        logger.info('nnindexer.num_indexed_vecs() = %r' % (nnindexer.num_indexed_vecs(),))
        logger.info(
            'nnindexer.get_removed_idxs().shape = %r'
            % (nnindexer.get_removed_idxs().shape,)
        )

    logger.info('INITIALIZE TEST')
    print_nnindexer(nnindexer)

    config2_ = qreq_.get_internal_query_config2()
    qaid = 1
    qfx2_vec = ibs.get_annot_vecs(qaid, config2_=config2_)
    (qfx2_idx1, qfx2_dist1) = nnindexer.knn(qfx2_vec, K)
    aids1 = set(nnindexer.get_nn_aids(qfx2_idx1).ravel())
    logger.info('aids1 = %r' % (aids1,))

    logger.info('')
    logger.info('TESTING ADD')
    add_first_daids = [17, 22]
    nnindexer.add_wbia_support(qreq_, add_first_daids)
    print_nnindexer(nnindexer)
    (qfx2_idx0, qfx2_dist0) = nnindexer.knn(qfx2_vec, K)
    assert np.any(qfx2_idx0 != qfx2_idx1), 'some should change'
    aids0 = set(nnindexer.get_nn_aids(qfx2_idx0).ravel())
    logger.info('aids0 = %r' % (aids0,))

    # execute test function
    logger.info('')
    logger.info('TESTING REMOVE')
    remove_daid_list = [8, 10, 11]
    nnindexer.remove_wbia_support(qreq_, remove_daid_list)
    print_nnindexer(nnindexer)
    # test after modification
    (qfx2_idx2, qfx2_dist2) = nnindexer.knn(qfx2_vec, K)
    aids2 = set(nnindexer.get_nn_aids(qfx2_idx2).ravel())
    logger.info('aids2 = %r' % (aids2,))
    assert len(aids2.intersection(remove_daid_list)) == 0

    __removed_ids = nnindexer.flann._FLANN__removed_ids
    invalid_idxs = nnindexer.get_removed_idxs()
    assert len(np.intersect1d(invalid_idxs, __removed_ids)) == len(__removed_ids)

    logger.info('')
    logger.info('TESTING DUPLICATE REMOVE')
    nnindexer.remove_wbia_support(qreq_, remove_daid_list)
    print_nnindexer(nnindexer)
    # test after modification
    (qfx2_idx2_, qfx2_dist2_) = nnindexer.knn(qfx2_vec, K)
    assert np.all(qfx2_idx2_ == qfx2_idx2)
    assert np.all(qfx2_dist2_ == qfx2_dist2)

    logger.info('')
    logger.info('TESTING ADD AFTER REMOVE')
    # Is the error here happening because added points seem to
    # get the ids of the removed points?
    new_daid_list = [8, 10]
    nnindexer.add_wbia_support(qreq_, new_daid_list)
    print_nnindexer(nnindexer)
    # test after modification
    (qfx2_idx3, qfx2_dist3) = nnindexer.knn(qfx2_vec, K)
    qfx2_aid3 = nnindexer.get_nn_aids(qfx2_idx3)
    found_removed_idxs = np.intersect1d(qfx2_idx3, nnindexer.get_removed_idxs())
    if len(found_removed_idxs) != 0:
        logger.info('found_removed_idxs.max() = %r' % (found_removed_idxs.max(),))
        logger.info('found_removed_idxs.min() = %r' % (found_removed_idxs.min(),))
        raise AssertionError(
            'found_removed_idxs.shape = %r' % (found_removed_idxs.shape,)
        )
    aids3 = set(qfx2_aid3.ravel())
    assert aids3.intersection(remove_daid_list) == set(new_daid_list).intersection(
        remove_daid_list
    )

    logger.info('TESTING DUPLICATE ADD')
    new_daid_list = [8, 10]
    nnindexer.add_wbia_support(qreq_, new_daid_list)
    # test after modification
    print_nnindexer(nnindexer)
    (qfx2_idx3_, qfx2_dist3_) = nnindexer.knn(qfx2_vec, K)
    qfx2_aid3_ = nnindexer.get_nn_aids(qfx2_idx3_)
    assert np.all(qfx2_aid3 == qfx2_aid3_)

    logger.info('TESTING ADD QUERY TO DATABASE')
    add_daid_list1 = [qaid]
    nnindexer.add_wbia_support(qreq_, add_daid_list1)
    print_nnindexer(nnindexer)
    (qfx2_idx4_, qfx2_dist4_) = nnindexer.knn(qfx2_vec, K)
    qfx2_aid4_ = nnindexer.get_nn_aids(qfx2_idx4_)
    qfx2_fx4_ = nnindexer.get_nn_featxs(qfx2_idx4_)
    assert np.all(qfx2_aid4_.T[0] == qaid), 'should find self'
    assert ut.issorted(qfx2_fx4_.T[0]), 'should be in order'

    logger.info('TESTING REMOVE QUERY POINTS')
    add_daid_list1 = [qaid]
    nnindexer.remove_wbia_support(qreq_, add_daid_list1)
    print_nnindexer(nnindexer)
    (qfx2_idx5_, qfx2_dist5_) = nnindexer.knn(qfx2_vec, K)
    issame = qfx2_idx5_ == qfx2_idx3_
    percentsame = issame.sum() / issame.size
    logger.info('percentsame = %r' % (percentsame,))
    assert percentsame > 0.85, 'a large majority of the feature idxs should remain the same'

    print_nnindexer(nnindexer)
    # Do this multiple times
    for _ in range(10):
        add_daid_list1 = [qaid]
        nnindexer.add_wbia_support(qreq_, add_daid_list1, verbose=False)
        nnindexer.remove_wbia_support(qreq_, add_daid_list1, verbose=False)
        (qfx2_idxX_, qfx2_distX_) = nnindexer.knn(qfx2_vec, K)
        issame = qfx2_idxX_ == qfx2_idx3_
        percentsame = issame.sum() / issame.size
        logger.info('percentsame = %r' % (percentsame,))
        assert percentsame > 0.85, 'a large majority of the feature idxs should remain the same'

    # Test again with more data
    logger.info('testing remove query points with more data')
    nnindexer.add_wbia_support(qreq_, ibs.get_valid_aids())
    (qfx2_idx6_, qfx2_dist6_) = nnindexer.knn(qfx2_vec, K)
    qfx2_aid6_ = nnindexer.get_nn_aids(qfx2_idx6_)
    assert np.all(qfx2_aid6_.T[0] == qaid), 'should be same'
    nnindexer.remove_wbia_support(qreq_, add_daid_list1)
    print_nnindexer(nnindexer)
    (qfx2_idx7_, qfx2_dist6_) = nnindexer.knn(qfx2_vec, K)
    qfx2_aid7_ = nnindexer.get_nn_aids(qfx2_idx7_)
    assert np.all(qfx2_aid7_.T[0] != qaid), 'should not be same'

    # Do this multiple times
    for _ in range(10):
        add_daid_list1 = [qaid]
        nnindexer.add_wbia_support(qreq_, add_daid_list1, verbose=True)
        nnindexer.remove_wbia_support(qreq_, add_daid_list1, verbose=True)
        # weird that all seem to work here
        (qfx2_idxX_, qfx2_distX_) = nnindexer.knn(qfx2_vec, K)
        issame = qfx2_idxX_ == qfx2_idx7_
        percentsame = issame.sum() / issame.size
        logger.info('percentsame = %r' % (percentsame,))
        print_nnindexer(nnindexer)
        assert percentsame > 0.85, 'a large majority of the feature idxs should remain the same'

    nnindexer, qreq_, ibs = test_nnindexer('PZ_MTEST', use_memcache=False)
    big_set = ibs.get_valid_aids()[5:]
    remove_later = big_set[10:14]
    nnindexer.add_wbia_support(qreq_, big_set)

    # Try again where remove is not the last operation
    logger.info('testing remove query points with more op')
    extra_data = np.setdiff1d(ibs.get_valid_aids()[0:5], add_daid_list1)
    nnindexer.remove_wbia_support(qreq_, extra_data)

    nnindexer.add_wbia_support(qreq_, add_daid_list1)
    nnindexer.add_wbia_support(qreq_, extra_data)
    (qfx2_idx8_, qfx2_dist8_) = nnindexer.knn(qfx2_vec, K)
    qfx2_aid8_ = nnindexer.get_nn_aids(qfx2_idx8_)
    assert np.all(qfx2_aid8_.T[0] == qaid), 'should be same'

    nnindexer.remove_wbia_support(qreq_, extra_data)
    (qfx2_idx9_, qfx2_dist9_) = nnindexer.knn(qfx2_vec, K)
    qfx2_aid9_ = nnindexer.get_nn_aids(qfx2_idx9_)
    assert np.all(qfx2_aid9_.T[0] == qaid), 'should be same'

    nnindexer.remove_wbia_support(qreq_, add_daid_list1)
    nnindexer.add_wbia_support(qreq_, add_daid_list1)
    nnindexer.add_wbia_support(qreq_, extra_data)
    nnindexer.remove_wbia_support(qreq_, remove_later)
    logger.info(nnindexer.ax2_aid)

    aid_list = nnindexer.get_indexed_aids()  # NOQA
    nnindexer.flann.save_index('test.flann')

    idx2_vec_masked = nnindexer.idx2_vec
    idx2_vec_compressed = nnindexer.get_indexed_vecs()

    from vtool._pyflann_backend import pyflann as pyflann

    flann1 = pyflann.FLANN()
    flann1.load_index('test.flann', idx2_vec_masked)

    from vtool._pyflann_backend import pyflann as pyflann

    flann2 = pyflann.FLANN()
    flann2.load_index('test.flann', idx2_vec_compressed)

    # NOW WE NEED TO TEST THAT WE CAN SAVE AND LOAD THIS DATA
    #
    # ax2_nvecs = ut.dict_take(ut.dict_hist(nnindexer.idx2_ax), range(len(nnindexer.ax2_aid)))
    pass
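
def _check_reloaded_flann_index(nnindexer, reloaded_flann, qfx2_vec, K=4):
    # Hedged sketch of the save/load check the note above asks for; this helper
    # and its name are not part of the original module. The idea: a FLANN index
    # reloaded from disk should return the same neighbors as the in-memory index
    # for the same query vectors, assuming it was loaded against the same data
    # layout the index was built on (e.g. flann1 with idx2_vec_masked above).
    idx_mem, dist_mem = nnindexer.flann.nn_index(qfx2_vec, K)
    idx_disk, dist_disk = reloaded_flann.nn_index(qfx2_vec, K)
    assert np.all(idx_mem == idx_disk), 'reloaded index should reproduce neighbor idxs'
    assert np.allclose(dist_mem, dist_disk), 'reloaded index should reproduce distances'
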
def interpolate_replbounds(xdata, ydata, pt):
    """
    xdata = np.array([.1, .2, .3, .4, .5])
    ydata = np.array([.1, .2, .3, .4, .5])
    pt = .35

    FIXME:
        if duplicate xdata is given bad things happen.
    BUG:
        in scipy.interpolate.interp1d
        If there is a duplicate xdata, then assume_sorted=False will sort ydata
        by xdata, but xdata should retain its initial ordering in places of
        ambiguity. Currently it does not.

    Args:
        xdata (ndarray):
        ydata (ndarray):
        pt (ndarray):

    Returns:
        float: interp_vals

    CommandLine:
        python -m vtool.confusion --exec-interpolate_replbounds

    Example:
        >>> # DISABLE_DOCTEST
        >>> from vtool.confusion import *  # NOQA
        >>> xdata = np.array([0.7, 0.8, 0.8, 0.9, 0.9, 0.9])
        >>> ydata = np.array([34, 26, 23, 22, 19, 17])
        >>> pt = np.array([.85, 1.0, -1.0])
        >>> interp_vals = interpolate_replbounds(xdata, ydata, pt)
        >>> result = ('interp_vals = %s' % (str(interp_vals),))
        >>> print(result)
        interp_vals = [ 22.5  17.   34. ]
    """
    if not ut.issorted(xdata):
        raise AssertionError("need to sort xdata and ydata in function")
    # stable sort by xdata (ties keep their original relative order)
    sortx = np.lexsort(np.vstack([np.arange(len(xdata)), xdata]))
    xdata = xdata.take(sortx, axis=0)
    ydata = ydata.take(sortx, axis=0)

    is_scalar = not ut.isiterable(pt)
    # print('----')
    # print('xdata = %r' % (xdata,))
    # print('ydata = %r' % (ydata,))
    if is_scalar:
        pt = np.array([pt])  # ut.ensure_iterable(pt)
    minval = xdata.min()
    maxval = xdata.max()
    argx_min_list = np.argwhere(xdata == minval)
    argx_max_list = np.argwhere(xdata == maxval)
    argx_min = argx_min_list.min()
    argx_max = argx_max_list.max()
    lower_mask = pt < xdata[argx_min]
    upper_mask = pt > xdata[argx_max]
    interp_mask = ~np.logical_or(lower_mask, upper_mask)
    # if isinstance(pt, np.ndarray):
    dtype = np.result_type(np.float32, ydata.dtype)
    interp_vals = np.empty(pt.shape, dtype=dtype)
    interp_vals[lower_mask] = ydata[argx_min]
    interp_vals[upper_mask] = ydata[argx_max]
    if np.any(interp_mask):
        # FIXME: allow assume_sorted = False
        func = scipy.interpolate.interp1d(
            xdata, ydata, kind="linear", assume_sorted=True
        )
        interp_vals[interp_mask] = func(pt[interp_mask])
    if is_scalar:
        interp_vals = interp_vals[0]
    # interpolate to target recall
    # right_index = indicies[0]
    # right_recall = self.recall[right_index]
    # left_index = right_index - 1
    # left_recall = self.recall[left_index]
    # stepsize = right_recall - left_recall
    # alpha = (target_recall - left_recall) / stepsize
    # left_fpr = self.fpr[left_index]
    # right_fpr = self.fpr[right_index]
    # interp_fpp = (left_fpr * (1 - alpha)) + (right_fpr * (alpha))
    return interp_vals
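
def _demo_interpolate_replbounds():
    # Hedged usage sketch; this demo helper is not part of the original module
    # and simply restates the doctest above. Query points below xdata.min() clamp
    # to ydata at the first occurrence of the minimum, points above xdata.max()
    # clamp to ydata at the last occurrence of the maximum, and in-range points
    # are linearly interpolated.
    xdata = np.array([0.7, 0.8, 0.8, 0.9, 0.9, 0.9])
    ydata = np.array([34, 26, 23, 22, 19, 17])
    pt = np.array([0.85, 1.0, -1.0])
    interp_vals = interpolate_replbounds(xdata, ydata, pt)
    # expected (from the doctest): [22.5, 17.0, 34.0]
    # scalar input returns a scalar: -1.0 is below the range, so it clamps to 34
    assert interpolate_replbounds(xdata, ydata, -1.0) == 34
    return interp_vals
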