def GPDM_solver(self):
    # stack the selected genes into a (samples, genes) training matrix
    train_data = []
    print(self.gene_data)
    for key_gene in self.select_gene:
        train_data.append(self.gene_data[key_gene])
    self.train_data = np.array(train_data).T
    # fit a GPLVM to the expression matrix, initialising the latent
    # space with PCA
    output = GPLVM(self.train_data, self.latent_dim, init='PCA')
    output.optimize(messages=True, max_iters=20)
    return output
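# A self-contained sketch of what GPDM_solver boils down to, assuming the
# GPLVM above is GPy's GPy.models.GPLVM; the gene names and data here are
# made up purely for illustration.
import numpy as np
from GPy.models import GPLVM

gene_data = {g: np.random.rand(40) for g in ('gene_a', 'gene_b', 'gene_c')}
select_gene = ['gene_a', 'gene_c']

train = np.array([gene_data[g] for g in select_gene]).T  # (40 samples, 2 genes)
model = GPLVM(train, 2, init='PCA')
model.optimize(messages=True, max_iters=20)
print(model.X[:5])  # first few optimized latent coordinates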
def plot_latent(self, *args, **kwargs):
    # draw the base latent-space plot, then mark the inducing inputs Z
    # with white triangles
    input_1, input_2 = GPLVM.plot_latent(self, *args, **kwargs)
    pb.plot(self.Z[:, input_1], self.Z[:, input_2], '^w')
def plot(self):
    GPLVM.plot(self)
    # passing Z without a small amount of jitter will induce the white
    # kernel where we don't want it!
    mu, var, upper, lower = SparseGPRegression.predict(
        self, self.Z + np.random.randn(*self.Z.shape) * 0.0001)
    pb.plot(mu[:, 0], mu[:, 1], 'ko')
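# The two plotting methods above call GPLVM and SparseGPRegression unbound
# on self, which suggests they belong to a sparse-GPLVM class in the style
# of old GPy. A minimal sketch of that assumed skeleton follows; the class
# name, base class, and docstring are guesses, not the original source.
import pylab as pb  # `pb` in the methods above is the old pylab alias
from GPy.models import GPLVM, SparseGPRegression

class SparseGPLVM(SparseGPRegression):
    """Sparse GPLVM: the latent inputs self.X are optimized together
    with the inducing inputs self.Z inherited from the sparse GP."""

    def plot_latent(self, *args, **kwargs):
        ...  # as defined above

    def plot(self):
        ...  # as defined above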
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image


def img_save(data, f_name):
    # NOTE: the signature and scatter call are reconstructed; the
    # helper's opening lines are missing from this excerpt.
    plt.figure()
    plt.scatter(data[:, 0], data[:, 1], s=1)
    # strip every axis decoration before saving
    plt.tick_params(bottom=False, left=False, right=False, top=False)
    for side in ('right', 'top', 'left', 'bottom'):
        plt.gca().spines[side].set_visible(False)
    plt.savefig(f_name)


if __name__ == "__main__":
    np.random.seed(3)
    image = np.asarray(Image.open('./data/shobon.png').convert('L'))

    # binarization: keep the dark pixels as a 2-D point cloud,
    # flipping y so the image is upright
    a = np.where(image < 240)
    shobon = np.c_[a[1], np.flipud(a[0])]
    img_save(shobon, f_name='shobon.png')

    # high dimension: embed the points in 100-D with a random linear
    # map, then add Gaussian noise
    dim = 100
    w = np.random.normal(size=(2, dim))
    high = shobon @ w
    high = high + np.random.normal(size=high.shape)

    # latent: recover a 2-D representation with the GPLVM defined earlier
    gplvm = GPLVM(high, 2)
    latent = gplvm.X
    img_save(latent, f_name='latent.png')
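# Since `high` is generated by a random linear map plus noise, plain PCA
# makes a natural sanity check against the GPLVM result. This is a
# hypothetical extension of the demo above; the scikit-learn dependency
# and the output filename are assumptions.
from sklearn.decomposition import PCA

pca_latent = PCA(n_components=2).fit_transform(high)
img_save(pca_latent, f_name='latent_pca.png')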