def simulate(self, cterms=('intrinsic', 'environmental', 'interactions'), interactions_size=None):
    """Simulate a phenotype vector from a GP trained on this object's data.

    A ``Model1`` GP is trained on ``(self.Y, self.X)`` with the requested
    covariance terms, then one multivariate-normal sample is drawn from the
    fitted covariance with the interactions term removed. A cross-talk
    (interactions) term of a chosen relative size can be added back manually.

    Parameters
    ----------
    cterms : sequence of str
        Covariance terms used when training the GP.
        (Default changed from a shared mutable list to an equivalent tuple.)
    interactions_size : float or None
        If given, target fraction of variance (strictly between 0 and 1)
        contributed by the interactions term.

    Returns
    -------
    numpy.ndarray
        One simulated sample of length ``k.shape[0]``.

    Raises
    ------
    ValueError
        If ``interactions_size`` is not strictly between 0 and 1.
    """
    # Train GP with the requested terms. Pass a list to keep the argument
    # type Model1 originally received.
    model = Model1(self.Y, self.X, norm='quantile', oos_predictions=0.,
                   cov_terms=list(cterms), kin_from=self.kin_from)
    model.reset_params()
    model.train_gp(grid_size=10)

    # Simulate from the GP after removing the interactions term, rescaling
    # the summed covariance to unit average variance.
    k = model.covar_terms['intrinsic'].K() \
        + model.covar_terms['environmental'].K() \
        + model.covar_terms['noise'].K()
    k *= covar_rescaling_factor_efficient(k)

    # Manually add a cross-talk term of the requested relative size.
    if interactions_size is not None:
        # Proper validation instead of assert (asserts vanish under -O).
        if not 0. < interactions_size < 1.:
            raise ValueError('interactions size must be between 0 and 1 ')
        tmp = model.covar_terms['interactions'].K()
        tmp *= covar_rescaling_factor_efficient(tmp)
        # size/(1-size) makes the term contribute `interactions_size` of
        # the total variance once added to the unit-variance k.
        tmp *= (interactions_size / (1. - interactions_size))
        k += tmp

    res = np.random.multivariate_normal([0.] * k.shape[0], k)
    return res
def simulate_env(self):
    """Add a rescaled environmental (ZKZ) covariance term to ``self.covar``."""
    # Kinship from the expression matrix, normalised to unit average variance.
    kinship = self.exp.dot(self.exp.transpose())
    kinship *= covar_rescaling_factor_efficient(kinship)

    # ZKZ covariance over X with the environmental length scale l2.
    zkz = ZKZCov(self.X, kinship)
    zkz.length = self.l2

    env_k = zkz.K()
    env_k *= covar_rescaling_factor_efficient(env_k)
    self.covar += env_k
def reset_params(self):
    """Reset covariance parameters to a balanced starting point.

    Either restores parameters from a previous fit (when
    ``self.init_from_previous`` is set), or resets every term's scale to 1
    and rescales each covariance so the n terms contribute equal variance
    on average. Terms listed in ``self.scale_down`` are optionally shrunk.
    """
    if self.init_from_previous:
        self.reset_from_previous()
        return

    self.intrinsic_cov.scale = 1.
    self.interactions_cov.scale = 1.
    self.environmental_cov.scale = 1.
    self.noise_cov.scale = 1.

    # Materialise the view once: we need both its length and its items.
    used_covar = list(self.covar_terms.values())
    n = len(used_covar)
    for cov in used_covar:
        # Divide by n so the sum of the rescaled terms has unit variance.
        k = covar_rescaling_factor_efficient(cov.K()) / n
        # TODO: not so clean, would be good to have setInterParams ?
        new_params = np.log(cov.getInterParams() * k)
        cov.setParams(new_params)

    if self.use_scale_down:
        for term in self.scale_down:
            self.covar_terms[term].scale *= 1e-6
def simulate_local(self):
    """Add a rescaled squared-exponential (local) covariance to ``self.covar``."""
    se_cov = SQExpCov(self.X)
    se_cov.length = self.l2

    local_k = se_cov.K()
    local_k *= covar_rescaling_factor_efficient(local_k)
    self.covar += local_k
def build_Kinship(self):
    """Compute ``self.Kin`` from cell types or from a cell-state matrix.

    When ``self.kin_from`` is absent, the kinship comes from the cell-type
    vector; otherwise it is the (rescaled) outer product of the
    column-centred cell-state matrix.
    """
    if self.kin_from is None:
        assert self.types is not None, 'Provide a vector of cell types or a cell state matrix to compute kinship'
        self.Kin = utils.build_cell_type_kinship(self.types)
        return

    # Centre each column of the cell-state matrix in place.
    # NOTE: columns are deliberately not scaled to unit std here.
    self.kin_from -= self.kin_from.mean(axis=0)
    self.Kin = self.kin_from.dot(self.kin_from.transpose())
    self.Kin *= covar_rescaling_factor_efficient(self.Kin)
def simulate_crowding(self):
    """Add a rescaled crowding covariance (ZKZ with all-ones kinship) to ``self.covar``."""
    # All-ones "kinship": every sample related to every other.
    ones_kin = np.ones([self.n_samples, self.n_samples])

    zkz = ZKZCov(self.X, ones_kin)
    zkz.length = self.l2

    crowd_k = zkz.K()
    crowd_k *= covar_rescaling_factor_efficient(crowd_k)
    self.covar += crowd_k
def reset_from_previous(self):
    """Initialise covariance parameters from a previous fit, then rebuild the GP.

    Terms with stored parameters in ``self.init_covs`` are restored directly;
    the rest are rescaled to contribute half-unit variance. Terms listed in
    ``self.scale_down`` are optionally shrunk before the GP is rebuilt.
    """
    for name, cov in self.covar_terms.items():
        if name in self.init_covs:
            cov.setParams(self.init_covs[name])
        else:
            # No stored parameters: normalise, then halve the scale.
            cov.scale = 1.
            rescale = covar_rescaling_factor_efficient(cov.K())
            cov.scale = 0.5 * rescale

    if self.use_scale_down:
        for term in self.scale_down:
            self.covar_terms[term].scale *= 1e-10

    self.covar = SumCov(*self.covar_terms.values())
    self.build_gp()
def simulate_intrinsic(self):
    """Add a rescaled intrinsic kinship (exp . exp^T) covariance to ``self.covar``."""
    intrinsic_k = self.exp.dot(self.exp.transpose())
    intrinsic_k *= covar_rescaling_factor_efficient(intrinsic_k)
    self.covar += intrinsic_k
def _variance_or_nan(K):
    """Total variance of covariance matrix K (1 / rescaling factor), NaN on failure.

    Replaces the original bare ``except:`` blocks: any failure in the
    rescaling computation maps to NaN instead of silently swallowing
    unrelated errors (KeyboardInterrupt, SystemExit, ...).
    """
    try:
        return 1. / covar_rescaling_factor_efficient(K)
    except Exception:
        return np.nan


def write_results(trained_model, output_dir, file_prefix, n_types, cell_types,
                  cell_types_names, by_effective_type=True):
    """Write fitted covariance scales and length scales to a text file.

    Parameters
    ----------
    trained_model : mapping
        Must provide 'intrinsic_cov', 'noise_covs', 'local_noise_cov' and
        'env_covs' (per-type, or per type-pair when ``by_effective_type``).
    output_dir, file_prefix : str
        Output path components; the file is ``output_dir + '/' + file_prefix``.
    n_types : int
        Number of distinct cell types (determines the column count).
    cell_types : array-like
        Per-cell type labels; ``np.unique`` order defines column order.
    cell_types_names : mapping
        Type label -> human-readable name used in the header.
    by_effective_type : bool
        If True, environmental terms are indexed by (type1, type2) pairs.
    """
    # Extract covariance terms from the trained model.
    intrinsic_cov = trained_model['intrinsic_cov']
    noise_covs = trained_model['noise_covs']
    local_noise_cov = trained_model['local_noise_cov']
    env_covs = trained_model['env_covs']

    # Column count: intrinsic + local-noise scale/length + per-type noise
    # + env scales and lengths (per type pair when by_effective_type).
    if by_effective_type:
        n_cols = int(2 * n_types**2.0 + n_types + 2 + 1)
    else:
        n_cols = int(2 * n_types + n_types + 2 + 1)
    parameters = np.zeros([1, n_cols])

    parameters[0, 0] = _variance_or_nan(intrinsic_cov.K())
    parameters[0, 1] = _variance_or_nan(local_noise_cov.K())
    parameters[0, 2] = local_noise_cov.length

    count = 3
    unique_types = np.unique(cell_types)

    # Per-type noise variances (noise must be block-diagonal per type).
    for type1 in unique_types:
        cell_filter = (cell_types == type1)
        K_full = noise_covs[type1].K()
        assert np.all(K_full[:, ~cell_filter][~cell_filter, :] == 0), 'problem cell filter'
        K_tmp = K_full[:, cell_filter][cell_filter, :]
        parameters[0, count] = _variance_or_nan(K_tmp)
        count += 1

    # Environmental variances, per type (or per type pair).
    for type1 in unique_types:
        cell_filter = (cell_types == type1)
        if by_effective_type:
            for type2 in unique_types:
                K_full = env_covs[type1][type2].K()
                assert np.all(K_full[:, ~cell_filter][~cell_filter, :] == 0), 'problem cell filter'
                K_tmp = K_full[:, cell_filter][cell_filter, :]
                parameters[0, count] = _variance_or_nan(K_tmp)
                count += 1
        else:
            K_full = env_covs[type1].K()
            assert np.all(K_full[:, ~cell_filter][~cell_filter, :] == 0), 'problem cell filter'
            K_tmp = K_full[:, cell_filter][cell_filter, :]
            parameters[0, count] = _variance_or_nan(K_tmp)
            count += 1

    # Environmental length scales, same ordering as the variances above.
    for type1 in unique_types:
        if by_effective_type:
            for type2 in unique_types:
                parameters[0, count] = env_covs[type1][type2].length
                count += 1
        else:
            parameters[0, count] = env_covs[type1].length
            count += 1

    # Putting header together (one name per column, same order as parameters).
    ####################################################################
    result_header_array = [None] * n_cols
    # NOTE(review): 'inrtinsic' typo kept byte-identical for backward
    # compatibility with downstream parsers of existing result files.
    result_header_array[0] = 'inrtinsic'
    result_header_array[1] = 'local_noise_scale'
    result_header_array[2] = 'local_noise_length'
    count = 3
    for type1 in unique_types:
        result_header_array[count] = 'noise_' + cell_types_names[type1]
        count += 1
    for type1 in unique_types:
        if by_effective_type:
            for type2 in unique_types:
                result_header_array[count] = 'env_' + cell_types_names[type1] + '_' + cell_types_names[type2]
                count += 1
        else:
            result_header_array[count] = 'env_' + cell_types_names[type1]
            count += 1
    for type1 in unique_types:
        if by_effective_type:
            for type2 in unique_types:
                result_header_array[count] = 'env_length_' + cell_types_names[type1] + '_' + cell_types_names[type2]
                count += 1
        else:
            result_header_array[count] = 'env_length_' + cell_types_names[type1]
            count += 1
    result_header = ' '.join(result_header_array)

    # Write file.
    ####################################################################
    output_file = output_dir + '/' + file_prefix
    with open(output_file, 'w') as f:
        np.savetxt(f, parameters, delimiter=' ', header=result_header, fmt='%s', comments='')