def test_glmpca_pre_initialized(self):
    """Smoke test: glmpca accepts user-supplied starting factors and loadings."""
    init_factors = np.random.randn(5, 2) / 10
    init_loadings = np.random.randn(10, 2) / 10
    fit = glmpca.glmpca(
        self.Y,
        2,
        fam="poi",
        init={"factors": init_factors, "loadings": init_loadings},
    )
def run(self):
    """Fit GLM-PCA on the dataset counts, store the embedding and the
    reconstructed (normalized) matrix, then dump the result to h5ad."""
    counts = self.data.X.todense().T
    fit = glmpca(counts, self.n_latent, fam=self.likelihood)
    # Normalized matrix reconstructed from fitted factors x loadings^T.
    self.data.obsm['X_norm'] = np.dot(fit['factors'], fit['loadings'].T)
    self.data.obsm['X_emb'] = fit['factors']
    self.dump_to_h5ad(f"glmpca_{self.likelihood}")
def run_glmpca(counts, fam, theta=100, penalty=1, optimize_nb_theta=True,
               maxIter=1000, eps=0.0001, n_PCs=50, seed=42, dataset_label=''):
    '''Wrapper around GLM PCA by Will Townes: applies GLM PCA with the given
    settings and saves the result dict as a pickle.

    Parameters
    ----------
    counts : array-like, genes x cells (transposed before fitting)
    fam : str, glmpca likelihood family ('poi', 'nb', ...)
    theta : overdispersion passed as nb_theta (only in the filename when fixed)
    penalty : L2 penalty passed through to glmpca
    optimize_nb_theta : whether glmpca optimizes theta (ctl['optimizeTheta'])
    maxIter, eps : glmpca control parameters
    n_PCs : number of latent dimensions
    seed : numpy RNG seed for reproducibility
    dataset_label : prefix for the output filename
    '''
    np.random.seed(seed)
    ctl = {"maxIter": maxIter, "eps": eps, "optimizeTheta": optimize_nb_theta}
    # Encode control settings in the filename only when they differ from defaults.
    if maxIter == 1000 and eps == 0.0001:
        ctl_str = ''
    else:
        ctl_str = '_maxIter%u_eps%s' % (maxIter, eps)
    starttime = str(datetime.now())
    res = glmpca.glmpca(counts.T, n_PCs, fam=fam, nb_theta=theta,
                        verbose=True, penalty=penalty, ctl=ctl)
    endtime = str(datetime.now())
    res['starttime'] = starttime
    res['endtime'] = endtime
    # Drop the family object before pickling (not needed downstream).
    _ = res.pop('glmpca_family')
    # FIX: the original 'poi' branch and the optimize_nb_theta branch built
    # byte-identical paths; merged them. Theta appears in the filename only
    # when it is held fixed for a non-Poisson family (same behavior as before).
    if fam != 'poi' and not optimize_nb_theta:
        path = 'glmpca_results/%sglmpca-py_%s_fixedTheta%u_penalty%u%s.pickle' % (
            dataset_label, fam, theta, penalty, ctl_str)
    else:
        path = 'glmpca_results/%sglmpca-py_%s_penalty%u%s.pickle' % (
            dataset_label, fam, penalty, ctl_str)
    print('Saving at', path)
    with open(path, 'wb') as f:
        pickle.dump(res, f)
def test_glmpca_fixed_overdispersion(self):
    """With optimizeTheta off, the fitted NB family keeps the supplied theta."""
    theta_in = 123
    control = {"maxIter": 1000, "eps": 1e-4, "optimizeTheta": False}
    fit = glmpca.glmpca(self.Y, 2, fam="nb", nb_theta=theta_in, ctl=control)
    self.assertEqual(theta_in, fit['glmpca_family'].nb_theta)
def test_glmpca_extra_args(self):
    """Smoke test: verbose and penalty keyword arguments are accepted."""
    fit = glmpca.glmpca(self.Y, 2, fam="poi", verbose=True, penalty=10)
def test_glmpca_covariates(self):
    """Smoke test: column (X) and row (Z) covariates are accepted."""
    col_cov = np.array(range(1, 6))
    row_cov = np.array(range(1, 11))
    fit = glmpca.glmpca(self.Y, 2, fam="poi",
                        X=col_cov[:, None], Z=row_cov[:, None])
def test_glmpca_dims_L1(self):
    """Smoke test: fitting a single latent dimension works."""
    fit = glmpca.glmpca(self.Y, 1, fam="poi")
def test_glmpca_bern_likelihood(self):
    """Smoke test: Bernoulli likelihood on the binarized matrix."""
    fit = glmpca.glmpca(self.Ybin, 2, fam="bern", penalty=10)
def test_glmpca_mult_likelihood(self):
    """Smoke test: multinomial likelihood with explicit size factors."""
    size_factors = np.array(range(1, 6))
    fit = glmpca.glmpca(self.Y, 2, fam="mult", sz=size_factors)
def test_glmpca_nb_likelihood(self):
    """Smoke test: negative binomial likelihood."""
    fit = glmpca.glmpca(self.Y, 2, fam="nb")
def setUp(self):
    """Create a small NB count matrix, its binarized copy, and a baseline fit."""
    self.Y = np.random.negative_binomial(4, .8, size=(10, 5))
    binary = self.Y.copy()
    binary[binary > 0] = 1
    self.Ybin = binary
    self.g1 = glmpca.glmpca(self.Y, 2, fam="poi")
def run_glmpca(counts, n_latent=10, likelihood='poi'):
    """Fit GLM-PCA and return the reconstructed (normalized) count matrix.

    The returned matrix is the product of the fitted factors and the
    transposed loadings, i.e. the model's low-rank reconstruction.
    """
    fit = glmpca(counts, n_latent, fam=likelihood)
    return np.dot(fit['factors'], fit['loadings'].T)