def ICDoneKernelOneTCGA(tcga, kernel, kernel_args, rank):
    """Fit an Incomplete Cholesky Decomposition (ICD) to one TCGA data matrix.

    Builds a Kinterface over the data with the given kernel, fits an ICD of
    the requested rank, and prints the Frobenius reconstruction error of the
    low-rank factor.

    :param tcga: input data (array-like); converted to a numpy array.
    :param kernel: kernel function passed to Kinterface (e.g. an RBF kernel).
    :param kernel_args: dict of keyword arguments for the kernel
        (e.g. {"sigma": 110}).
    :param rank: target rank of the ICD approximation.
    :return: the fitted ICD model; ``model.G`` holds the (n x rank) factor.
    """
    K = Kinterface(data=np.array(tcga), kernel=kernel, kernel_args=kernel_args)
    model = ICD(rank=rank)
    model.fit(K)
    G_icd = model.G
    # Reconstruction quality of the factorization: ||K - G G^T||_F
    print("G shape:", G_icd.shape,
          "Error:", np.linalg.norm(K[:, :] - G_icd.dot(G_icd.T)))
    return model
def get_kernel_matrix(dframe, n_dim=15):
    r"""
    This returns a Kernel Transformation Matrix $\Theta$

    It uses kernel approximation offered by the MKlaren package
    For the sake of completeness (and for my peace of mind, I use the best
    possible approx.)

    :param dframe: input data as a pandas dataframe.
    :param n_dim: Number of dimensions for the kernel matrix (default=15)
    :return: $\Theta$ matrix
    """
    # Raw docstring (r"") is required: "\T" in $\Theta$ is an invalid escape
    # sequence in a normal string (SyntaxWarning on Python 3.12+).
    ker = Kinterface(data=dframe.values, kernel=linear_kernel)
    model = ICD(rank=n_dim)
    model.fit(ker)
    g_nystrom = model.G
    return g_nystrom
def testPoly(self):
    """Verify that ICD reconstruction error strictly decreases with rank
    for polynomial kernels of degree 1 through 5, ending near zero."""
    for degree in range(1, 6):
        gram = poly_kernel(self.X, self.X, degree=degree)
        model = ICD(rank=self.n)
        model.fit(gram)
        residuals = []
        for rank in range(1, self.n + 1):
            partial = model.G[:, :rank]
            residuals.append(np.linalg.norm(gram - partial.dot(partial.T)))
        # Each additional pivot column must strictly shrink the error.
        self.assertTrue(all(prev > nxt
                            for prev, nxt in zip(residuals, residuals[1:])))
        # At full rank the reconstruction should be (numerically) exact.
        self.assertAlmostEqual(residuals[-1], 0, delta=3)
def testPolySum(self):
    """Verify ICD reconstruction properties on an iteratively summed set of
    row-normalized polynomial kernels (degrees 1 through 5)."""
    total = np.zeros((self.n, self.n))
    for degree in range(1, 6):
        ki = Kinterface(data=self.X,
                        kernel=poly_kernel,
                        kernel_args={"degree": degree},
                        row_normalize=True)
        total += ki[:, :]
    model = ICD(rank=self.n)
    model.fit(total)
    residuals = []
    for rank in range(1, self.n + 1):
        partial = model.G[:, :rank]
        residuals.append(np.linalg.norm(total - partial.dot(partial.T)))
    # Error must strictly decrease as columns are added ...
    self.assertTrue(all(prev > nxt
                        for prev, nxt in zip(residuals, residuals[1:])))
    # ... and be (numerically) zero at full rank.
    self.assertAlmostEqual(residuals[-1], 0, delta=3)