# Commented-out earlier variant: exports latents per split by building a
# DataLoader over the subset of samples that have the requested omics
# combination (omic_combn). Kept for reference; note it iterates BOTH
# 'train' and 'test' — the active loop below should do the same.
# for dt_str, sample_idx in zip(['train', 'test'], [train_idx, test_idx]):
#     sample_idx = np.intersect1d(multi_dt.get_comb_idx(omic_combn), sample_idx)
#     dl = DataLoader(
#         dataset=Subset(multi_dt, sample_idx),
#         batch_size=128,
#         collate_fn=lambda x: collate_wrapper(x, omic_combn),
#         shuffle=False)
#     latent_test = []
#     for i, x in enumerate(dl):
#         x = [[x_i.to(device) if x_i is not None else None for x_i in y]
#              for y in x]
#         latent_test += [model.get_latent(x, elbo_bool=omic_combn)]
#     res = np.concatenate(latent_test)
#     np.savetxt(output_dir + file_sub +
#                "_".join([str(x) for x in omic_combn]) +
#                "_" + dt_str + "_latent.csv", res, delimiter=",")
#     np.savetxt(output_dir + file_sub +
#                "_".join([str(x) for x in omic_combn]) +
#                "_" + dt_str + "_barcode.csv", barcode[sample_idx], fmt="%s")
#     plt.close()
# # === save feature
# for i, x in enumerate(model.get_beta()):
# --- Training step: annealed ELBO objective and parameter update. ---
# `annealing_factor` scales the KL/latent term (KL annealing); the total
# loss is latent + reconstruction. All of annealing_factor, latent_loss,
# recon_loss, optimizer, model, history, train_loader, test_loader,
# device, output_dir and file_sub are defined earlier in this script.
loss = annealing_factor * latent_loss + recon_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()

# Accumulate per-batch scalars for this epoch's loss bookkeeping.
latent_loss_all += [latent_loss.item()]
recon_loss_all += [recon_loss.item()]
history['loss'] += [
    np.array(latent_loss_all) + np.array(recon_loss_all)
]
# Abort early if training has diverged (NaN loss would silently poison
# every subsequent checkpoint/export).
if np.isnan(history['loss'][-1].mean()):
    raise ValueError("training diverged: NaN in epoch loss")
# print([x.mean() / 128 for x in history['loss']])

# --- Export latent representations for every modality combination. ---
# elbo_bool masks which omics views feed the encoder: [True, False] and
# [False, True] are single-modality encodings, [True, True] uses both.
with torch.no_grad():
    for test_elbo_comb in [[True, False], [False, True], [True, True]]:
        # BUG FIX: the label list previously contained only 'train', so
        # zip() truncated and test_loader was never exported. Iterate
        # both splits, matching the (commented-out) earlier variant.
        for dt_str, dl in zip(['train', 'test'],
                              [train_loader, test_loader]):
            topic_prop_test = []
            for x in dl:
                topic_prop_test.append(
                    model.get_latent([y.to(device) for y in x],
                                     elbo_bool=test_elbo_comb))
            res = np.concatenate(topic_prop_test)
            np.savetxt(os.path.join(
                output_dir,
                file_sub + "_".join([str(x) for x in test_elbo_comb]) + \
                "_" + dt_str + "_latent.csv"),
                res, delimiter=",")