def test_pca():
    bags = [np.random.normal(5, 3, size=(np.random.randint(10, 100), 20))
            for _ in xrange(50)]
    feats = Features(bags, stack=True)

    pca = BagPCA(k=3)
    pca.fit(bags)
    pcaed = pca.transform(bags)
    assert pcaed.dim == 3

    BagPCA(varfrac=.3).fit_transform(bags)

    pca2 = BagPCA(k=20)
    pcaed2 = pca2.fit_transform(bags)
    orig = pca2.inverse_transform(pcaed2)
    orig.make_stacked()
    assert np.allclose(feats.stacked_features, orig.stacked_features)

    assert BagPCA(k=5, randomize=True).fit_transform(bags).dim == 5

    assert_raises(TypeError, lambda: BagPCA(randomize=True))
    assert_raises(TypeError, lambda: BagPCA(mle_components=True, k=12))
    assert BagPCA(mle_components=True)
def test_bagofwords_basic():
    n_codewords = 10
    dim = 5
    kmeans = KMeans(n_clusters=n_codewords, max_iter=100, n_init=3,
                    random_state=47)
    bow = BagOfWords(kmeans)

    np.random.seed(42)
    bags = [np.random.randn(np.random.randint(30, 100), dim)
            for _ in xrange(50)]

    bowed = bow.fit_transform(bags)
    assert bowed.shape == (len(bags), n_codewords)
    assert bow.codewords_.shape == (n_codewords, dim)
    assert np.all(bowed >= 0)
    assert np.all(np.sum(bowed, 1) == [b.shape[0] for b in bags])

    bow.fit(Features(bags))
    bowed2 = bow.transform(bags)
    assert np.all(bowed == bowed2)
    assert bow.codewords_.shape == (n_codewords, dim)

    minikmeans = MiniBatchKMeans(n_clusters=n_codewords, max_iter=100,
                                 random_state=47)
    minibow = BagOfWords(minikmeans)
    assert_raises(AttributeError, lambda: minibow.transform(bags))
    minibowed = minibow.fit_transform(bags)
    assert minibowed.shape == bowed.shape
    assert np.all(bowed >= 0)
    assert np.all(np.sum(bowed, 1) == [b.shape[0] for b in bags])
def test_knn_memory():
    if not have_flann:
        raise SkipTest("No flann, so skipping knn tests.")

    dim = 3
    n = 20
    np.random.seed(47)
    bags = Features([np.random.randn(np.random.randint(30, 100), dim)
                     for _ in xrange(n)])

    tdir = tempfile.mkdtemp()
    div_funcs = ('kl', 'js', 'renyi:.9', 'l2', 'tsallis:.8')
    Ks = (3, 4)
    est = KNNDivergenceEstimator(div_funcs=div_funcs, Ks=Ks, memory=tdir)
    res1 = est.fit_transform(bags)

    with LogCapture('skl_groups.divergences.knn', level=logging.INFO) as l:
        res2 = est.transform(bags)
    assert len(l.records) == 0
    assert np.all(res1 == res2)

    with LogCapture('skl_groups.divergences.knn', level=logging.INFO) as l:
        res3 = est.fit_transform(bags)
    for r in l.records:
        assert not r.message.startswith("Getting divergences")
    assert np.all(res1 == res3)
def distribution_divergence(X_s, X_l, k=10):
    """
    Computes the L2 and JS divergences between samples of two distributions.

    The implementation uses `skl-groups`, which provides non-parametric
    estimators of divergences.

    Args:
        + X_s: a numpy array containing the point cloud in state space
        + X_l: a numpy array containing the point cloud in latent space
    """
    # We discard cases with too large dimensions
    if X_s.shape[1] > 50:
        return {'l2_divergence': -1., 'js_divergence': -1.}
    # We instantiate the divergence estimator
    div = KNNDivergenceEstimator(div_funcs=['l2', 'js'], Ks=[k],
                                 n_jobs=4, clamp=True)
    # We cast both arrays to float32
    X_s = X_s.astype(np.float32)
    X_l = X_l.astype(np.float32)
    # We wrap the arrays as Features
    f_s = Features(X_s, n_pts=[X_s.shape[0]])
    f_l = Features(X_l, n_pts=[X_l.shape[0]])
    # We build the knn graph
    div.fit(X=f_s)
    # We compute the divergences
    l2, js = div.transform(X=f_l).squeeze()
    # We construct the returned dictionary
    output = {'l2_divergence': l2, 'js_divergence': js}

    return output
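def _example_distribution_divergence():
    # Hypothetical usage sketch for distribution_divergence (not from the
    # original code). It assumes numpy is imported as np at module level,
    # as in the functions above; the shapes and k below are arbitrary choices.
    X_state = np.random.randn(500, 10)   # stand-in state-space point cloud
    X_latent = np.random.randn(500, 10)  # stand-in latent-space point cloud
    divs = distribution_divergence(X_state, X_latent, k=10)
    # divs is a dict: {'l2_divergence': ..., 'js_divergence': ...}
    return divs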
def kNNdiv_Kernel(X_white, kernel, Knn=3, div_func='renyi:.5', Nref=None,
                  compwise=True, njobs=1, W_ica_inv=None):
    ''' `div_func` kNN divergence estimate between some data X_white and
    a distribution specified by kernel.
    '''
    if isinstance(Knn, int):
        Knns = [Knn]
    elif isinstance(Knn, list):
        Knns = Knn

    # if component-wise, there should be one kernel per column of X_white
    if compwise:
        if X_white.shape[1] != len(kernel):
            raise ValueError

    # construct reference "bag"
    if compwise:
        ref_dist = np.zeros((Nref, X_white.shape[1]))
        for icomp in range(X_white.shape[1]):
            samp = kernel[icomp].sample(Nref)
            if isinstance(samp, tuple):
                ref_dist[:, icomp] = samp[0].flatten()
            else:
                ref_dist[:, icomp] = samp.flatten()
    else:
        samp = kernel.sample(Nref)
        if isinstance(samp, tuple):
            ref_dist = samp[0]
        else:
            ref_dist = samp
    if W_ica_inv is not None:
        ref_dist = np.dot(ref_dist, W_ica_inv.T)

    # estimate divergence
    kNN = KNNDivergenceEstimator(div_funcs=[div_func], Ks=Knns,
                                 version='slow', clamp=False, n_jobs=njobs)
    feat = Features([X_white, ref_dist])
    div_knn = kNN.fit_transform(feat)
    if len(Knns) == 1:
        return div_knn[0][0][0][1]
    div_knns = np.zeros(len(Knns))
    for i in range(len(Knns)):
        div_knns[i] = div_knn[0][i][0][1]
    return div_knns
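def _example_kNNdiv_Kernel():
    # Hypothetical usage sketch for kNNdiv_Kernel (not from the original
    # code), assuming scikit-learn is available. One KernelDensity model is
    # fit per column of X_white, since compwise=True expects one kernel per
    # component; all sizes and bandwidths below are illustrative.
    from sklearn.neighbors import KernelDensity
    X_white = np.random.randn(1000, 3)
    kernels = [KernelDensity(bandwidth=0.3).fit(X_white[:, [i]])
               for i in range(X_white.shape[1])]
    return kNNdiv_Kernel(X_white, kernels, Knn=3, div_func='renyi:.5',
                         Nref=1000, compwise=True)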
def test_basic():
    bags = [np.random.normal(5, 3, size=(np.random.randint(10, 100), 20))
            for _ in xrange(50)]
    feats = Features(bags, stack=True)

    stder = BagStandardizer()
    stdized = stder.fit_transform(bags)
    stdized.make_stacked()

    assert np.allclose(np.mean(stdized.stacked_features), 0)
    assert np.allclose(np.std(stdized.stacked_features), 1)

    first_five = stder.transform(bags[:5])
    assert first_five == stdized[:5]

    minmaxer = BagMinMaxScaler([3, 7])
    minmaxed = minmaxer.fit_transform(feats)
    minmaxed.make_stacked()
    assert np.allclose(np.min(minmaxed.stacked_features, 0), 3)
    assert np.allclose(np.max(minmaxed.stacked_features, 0), 7)

    normer = BagNormalizer('l1')
    normed = normer.fit_transform(Features(bags))
    normed.make_stacked()
    assert np.allclose(np.sum(np.abs(normed.stacked_features), 1), 1)

    class GetMean(BaseEstimator, TransformerMixin):
        def fit(self, X, y=None):
            return self

        def transform(self, X):
            return X.mean(axis=1)[None, :]

    m = BagPreprocesser(GetMean())
    assert_raises(ValueError, lambda: m.transform(bags))
def test_knn_sanity_slow():
    if not have_flann:
        raise SkipTest("No flann, so skipping knn tests.")

    dim = 3
    n = 20
    np.random.seed(47)
    bags = Features([np.random.randn(np.random.randint(30, 100), dim)
                     for _ in xrange(n)])

    # just make sure it runs
    div_funcs = ('kl', 'js', 'renyi:.9', 'l2', 'tsallis:.8')
    Ks = (3, 4)
    est = KNNDivergenceEstimator(div_funcs=div_funcs, Ks=Ks)
    res = est.fit_transform(bags)
    assert res.shape == (len(div_funcs), len(Ks), n, n)
    assert np.all(np.isfinite(res))

    # test that JS blows up when there's a huge difference in bag sizes
    # (so that K is too low)
    assert_raises(
        ValueError,
        partial(est.fit_transform, bags + [np.random.randn(1000, dim)]))

    # test fit() and then transform() with JS, with different-sized test bags
    est = KNNDivergenceEstimator(div_funcs=('js',), Ks=(5,))
    est.fit(bags, get_rhos=True)
    with LogCapture('skl_groups.divergences.knn', level=logging.WARNING) as l:
        res = est.transform([np.random.randn(300, dim)])
    assert res.shape == (1, 1, 1, len(bags))
    assert len(l.records) == 1
    assert l.records[0].message.startswith('Y_rhos had a lower max_K')

    # test that passing a div func more than once raises
    def blah(df):
        est = KNNDivergenceEstimator(div_funcs=[df, df])
        return est.fit(bags)
    assert_raises(ValueError, lambda: blah('kl'))
    assert_raises(ValueError, lambda: blah('renyi:.8'))
    assert_raises(ValueError, lambda: blah('l2'))
def kNNdiv_general(X, Y, Knn=3, div_func='kl', alpha=None, njobs=1):
    """ kNN divergence estimate for samples drawn from any two arbitrary
    distributions. If `alpha` is given, it is appended to `div_func` as an
    order parameter (e.g. 'renyi:.5').
    """
    if Y.shape[1] != X.shape[1]:
        raise ValueError('dimensions of X and Y do not match')

    if isinstance(Knn, int):
        Knns = [Knn]
    elif isinstance(Knn, list):
        Knns = Knn

    if alpha is not None:
        div_func = div_func + ':%s' % alpha

    kNN = KNNDivergenceEstimator(div_funcs=[div_func], Ks=Knns,
                                 version='slow', clamp=False, n_jobs=njobs)
    feat = Features([X, Y])
    div_knn = kNN.fit_transform(feat)
    if len(Knns) == 1:
        return div_knn[0][0][0][1]
    div_knns = np.zeros(len(Knns))
    for i in range(len(Knns)):
        div_knns[i] = div_knn[0][i][0][1]
    return div_knns
def kNNdiv_gauss(X_white, cov_X, Knn=3, div_func='renyi:.5', gauss=None,
                 Nref=None, njobs=1):
    ''' `div_func` kNN divergence estimate between X_white and a reference
    Gaussian with covariance matrix cov_X.
    '''
    if gauss is None:
        if Nref is None:
            raise ValueError
        # Gaussian reference distribution
        gauss = np.random.multivariate_normal(
            np.zeros(X_white.shape[1]), cov_X, size=Nref)
    if gauss.shape[1] != X_white.shape[1]:
        raise ValueError('dimensions of X_white and the Gaussian reference '
                         'distribution do not match')

    if isinstance(Knn, int):
        Knns = [Knn]
    elif isinstance(Knn, list):
        Knns = Knn

    kNN = KNNDivergenceEstimator(div_funcs=[div_func], Ks=Knns,
                                 version='slow', clamp=False, n_jobs=njobs)
    feat = Features([X_white, gauss])
    div_knn = kNN.fit_transform(feat)
    if len(Knns) == 1:
        return div_knn[0][0][0][1]
    div_knns = np.zeros(len(Knns))
    for i in range(len(Knns)):
        div_knns[i] = div_knn[0][i][0][1]
    return div_knns
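def _example_kNNdiv_gauss():
    # Hypothetical usage sketch for kNNdiv_gauss (not from the original
    # code): compare a whitened sample against a Gaussian reference drawn
    # with the same covariance. Sizes and the divergence choice are
    # illustrative assumptions.
    X_white = np.random.randn(1000, 5)
    cov_X = np.cov(X_white.T)
    return kNNdiv_gauss(X_white, cov_X, Knn=3, div_func='renyi:.5',
                        Nref=1000)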
def computePairwiseSimilarities2(patients, y):
    """
    Compute the pairwise similarity between bags using Dougal's code.

    Inputs:
    - patients: the collection of patient features
    - y: labels (number of abnormal nodes) for each patient. Used to fit
      the KNNDivergenceEstimator
    Returns:
    - sims: the pairwise similarities between each patient
    * Note: sims is a NxN symmetric matrix, where N is the number of patients
    """
    # pass the features and labels to scikit-learn Features
    feats = Features(patients, labels=y)  # directly from Dougal
    # note: learning methods won't use the labels; this is for convenience

    # estimate the distances between the bags (patients) using KNNDivergenceEstimator
    # details: use the KL divergence, find 3 nearest neighbors
    # not sure what the PairwisePicker line does?
    # rbf and projectPSD help ensure the data is separable?
    distEstModel = Pipeline([
        # div_funcs=['kl'], rewrite this to actually use PairwisePicker correctly next time
        ('divs', KNNDivergenceEstimator(div_funcs=['kl'], Ks=[3],
                                        n_jobs=-1, version='fast')),
        ('pick', PairwisePicker((0, 0))),
        ('symmetrize', Symmetrize()),
        ('rbf', RBFize(gamma=1, scale_by_median=True)),
        ('project', ProjectPSD()),
    ])

    # return the pairwise similarities between the bags (patients)
    sims = distEstModel.fit_transform(feats)
    return sims
def test_knn_version_consistency():
    if not have_flann:
        raise SkipTest("No flann, so skipping knn tests.")
    if not have_accel:
        raise SkipTest("No skl-groups-accel, so skipping version consistency.")

    n = 20
    for dim in [1, 7]:
        np.random.seed(47)
        bags = Features([np.random.randn(np.random.randint(30, 100), dim)
                         for _ in xrange(n)])

        div_funcs = ('kl', 'js', 'renyi:.9', 'l2', 'tsallis:.8')
        Ks = (3, 4)
        get_est = partial(KNNDivergenceEstimator, div_funcs=div_funcs, Ks=Ks)
        results = {}
        for version in ('fast', 'slow', 'best'):
            est = get_est(version=version)
            results[version] = res = est.fit_transform(bags)
            assert res.shape == (len(div_funcs), len(Ks), n, n)
            assert np.all(np.isfinite(res))

        for df, fast, slow in zip(div_funcs, results['fast'], results['slow']):
            assert_array_almost_equal(
                fast, slow, decimal=1 if df == 'js' else 5,
                err_msg="({}, dim {})".format(df, dim))
            # TODO: debug JS differences

        est = get_est(version='fast', n_jobs=-1)
        res = est.fit_transform(bags)
        assert np.all(results['fast'] == res)

        est = get_est(version='slow', n_jobs=-1)
        res = est.fit_transform(bags)
        assert np.all(results['slow'] == res)
def computeSubjSubjKernel(subjects, div='KL', numNeighbors=3):
    """
    Start by computing the pairwise similarities between subjects using
    Dougal's code. Then, for HE and KL, symmetrize, RBFize, and project the
    similarities onto a positive semi-definite space.

    Inputs:
    - subjects: the collection of patient features
    - div: which divergence to use. Options are
        - 'KL': Kullback-Leibler divergence, 'kl' in the function (default)
        - 'HE': Hellinger divergence, 'hellinger' in the function
        - 'MMD': Maximum Mean Discrepancy, calls another function
    - numNeighbors: how many neighbors to look at. Default is 3

    Returns:
    - kernel: the kernel calculated using the pairwise similarities between
      each subject
    * Note: kernel is a NxN symmetric matrix, where N is the number of subjects
    """
    # pass the features to scikit-learn Features
    feats = Features(subjects)  # directly from Dougal

    # specify the divergence to use
    if div == 'KL':
        # estimate the distances between the bags (patients) using KNNDivergenceEstimator
        # details: use the KL divergence, find numNeighbors nearest neighbors
        # not sure what the PairwisePicker line does?
        # rbf and projectPSD help ensure the data is separable?
        distEstModel = Pipeline([
            # div_funcs=['kl'], rewrite this to actually use PairwisePicker correctly next time
            ('divs', KNNDivergenceEstimator(div_funcs=['kl'],
                                            Ks=[numNeighbors],
                                            n_jobs=-1, version='fast')),
            ('pick', PairwisePicker((0, 0))),
            ('symmetrize', Symmetrize())
            # ('rbf', RBFize(gamma=1, scale_by_median=True)),
            # ('project', ProjectPSD())
        ])

        # compute the pairwise similarities between the bags (patients)
        sims = distEstModel.fit_transform(feats)

        # Great, we have the similarities and they're symmetric
        # Now RBFize them, but do the scale-by-median by hand
        rbf = RBFize(gamma=1, scale_by_median=False)
        simsMedian = np.median(sims[np.triu_indices_from(sims)])
        medianScaledSims = sims / simsMedian
        rbfedSims = rbf.fit_transform(medianScaledSims)

        # Final step in building the kernel: project the rbf'ed similarities
        # onto a positive semi-definite space
        psd = ProjectPSD()
        kernel = psd.fit_transform(rbfedSims)

    elif div == 'HE':
        # estimate the distances between the bags (patients) using KNNDivergenceEstimator
        # details: use the hellinger divergence, find numNeighbors nearest neighbors
        distEstModel = Pipeline([
            ('divs', KNNDivergenceEstimator(div_funcs=['hellinger'],
                                            Ks=[numNeighbors],
                                            n_jobs=-1, version='fast')),
            ('pick', PairwisePicker((0, 0))),
            ('symmetrize', Symmetrize())
            # ('rbf', RBFize(gamma=1, scale_by_median=True)),
            # ('project', ProjectPSD())
        ])

        # compute the pairwise similarities between the bags (patients)
        sims = distEstModel.fit_transform(feats)

        # Great, we have the similarities and they're symmetric
        # Now RBFize them; here the median scaling is left out
        rbf = RBFize(gamma=1, scale_by_median=False)
        simsMedian = np.median(sims[np.triu_indices_from(sims)])
        # medianScaledSims = sims / simsMedian
        # rbfedSims = rbf.fit_transform(medianScaledSims)
        rbfedSims = rbf.fit_transform(sims)

        # Final step in building the kernel: project the rbf'ed similarities
        # onto a positive semi-definite space
        psd = ProjectPSD()
        kernel = psd.fit_transform(rbfedSims)

    elif div == 'MMD':
        # start by getting the median pairwise squared distance between subjects,
        # used as a heuristic for choosing the bandwidth of the inner RBF kernel
        subset = np.vstack(feats)
        subset = subset[np.random.choice(subset.shape[0],
                                         min(2000, subset.shape[0]),
                                         replace=False)]
        subsetSquaredDists = euclidean_distances(subset, squared=True)
        featsMedianSquaredDist = np.median(
            subsetSquaredDists[np.triu_indices_from(subsetSquaredDists,
                                                    k=numNeighbors)],
            overwrite_input=True)

        # now we need to determine gamma (scaling factor, inverse of sigma)
        # This was initially done in the library, but Kayhan believes there's
        # a multiplication instead of a division, so it's being done by hand
        firstGamma = 1 / featsMedianSquaredDist

        # calculate the mmds
        mmds, mmkDiagonals = mmd.rbf_mmd(feats, gammas=firstGamma,
                                         squared=True, ret_X_diag=True)

        # now let's turn the squared MMD distances into a kernel
        # symmetrize it
        sym = Symmetrize()
        mmds = sym.fit_transform(mmds)
        # get the median squared MMD distance
        mmdMedianSquaredDist = np.median(
            mmds[np.triu_indices_from(mmds, k=numNeighbors)])
        kernel = np.exp(np.multiply(mmds, -1 / mmdMedianSquaredDist))

    else:
        print("Error: divergence entered is not valid.")
        return -1

    return kernel
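def _example_computeSubjSubjKernel():
    # Hypothetical usage sketch for computeSubjSubjKernel (not from the
    # original code): each "subject" is a bag of feature vectors, here a
    # random (n_i x 4) array. The bag sizes, dimension, and divergence
    # choice are illustrative assumptions.
    subjects = [np.random.randn(np.random.randint(30, 60), 4)
                for _ in range(10)]
    kernel = computeSubjSubjKernel(subjects, div='KL', numNeighbors=3)
    # kernel should be a 10 x 10 symmetric positive semi-definite matrix
    return kernel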