def main():
    """Evaluate a trained InfoGAN discriminator as a fixed feature extractor.

    Loads the discriminator weights from ``opt.disc_path``, switches the model
    to eval mode, and hands its ``extract_features`` method to ``xgboost_test``
    so an XGBoost classifier is trained/scored on the extracted features.
    """
    # Imported lazily so this module does not hard-depend on train_infogan
    # unless main() is actually invoked.
    from train_infogan import parse_args

    opt = parse_args()
    train_set, test_set = get_merged_common_dataset(opt)

    # Feature dimensionality comes straight from the merged training matrix.
    n_features = train_set.features.shape[1]

    disc = Discriminator(opt, n_features, train_set.binary)
    disc.load_state_dict(torch.load(opt.disc_path))
    disc.eval()  # inference only: freeze dropout/batch-norm behaviour

    xgboost_test(disc.extract_features, opt)
# Read the experiment configuration (YAML) and expose it as attribute access.
with open(args.config, 'r') as f:
    y = yaml.load(f, Loader=yaml.SafeLoader)
opt = argparse.Namespace(**y)

# InfoGAN loss terms: LSGAN-style adversarial loss plus the two mutual-
# information surrogates (categorical code + continuous code).
adversarial_loss = torch.nn.MSELoss()
categorical_loss = torch.nn.CrossEntropyLoss()
continuous_loss = torch.nn.MSELoss()

# Relative weights of the information losses in the generator objective.
lambda_cat = 1
lambda_con = 0.1

# Build the two networks from the parsed config.
generator = Generator(opt)
discriminator = Discriminator(opt)

if cuda:
    # Move models and criteria to the GPU in one sweep.
    for module in (generator, discriminator,
                   adversarial_loss, categorical_loss, continuous_loss):
        module.cuda()

# DCGAN-style weight initialisation on both networks.
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)

# test_split=0.0: this unsupervised model trains on the full dataset.
data = CuratedBreastCancerData(opt.batch_size, test_split=0.0)
# Size of the categorical latent code: explicit n_classes from the config,
# otherwise fall back to one category per study in the training set.
categorical_size = train_config.n_classes if train_config.n_classes else train_dataset.n_studies

# Checkpoint directory name encodes code_dim, categorical size, and the
# basename (without extension) of the expression CSV used for training.
load_checkpoints = 's{0:02d}_c{1:02d}_{2}'.format(
    train_config.code_dim, categorical_size,
    os.path.split(train_config.gexs_csv)[-1].split('.')[0])
load_path = os.path.join('checkpoints', load_checkpoints)

# No need to load generator
# generator = Generator(
#     train_config.latent_dim,
#     categorical_size,
#     train_config.code_dim,
#     train_dataset.n_genes
# )
# generator_checkpoint = os.path.join(load_path, 'generator.pth')
# generator.load_state_dict(torch.load(generator_checkpoint))

# Only the discriminator is needed for inference/feature extraction.
discriminator = Discriminator(categorical_size, train_config.code_dim, train_dataset.n_genes)
discriminator_checkpoint = os.path.join(load_path, 'discriminator.pth')
discriminator.load_state_dict(torch.load(discriminator_checkpoint))

if cuda:
    #generator = generator.cuda().eval()
    discriminator = discriminator.cuda().eval()
    print('CUDA is here')
else:
    print('CPU using')

# Accumulators for the evaluation pass (filled further down in this script —
# the loop body continues past this excerpt).
codes, hidden, clusters, tags, valids = [], [], [], [], []
for gex in tqdm(test_dataset.traverse_gexs()):
    with torch.no_grad():
        # NOTE(review): FloatTensor presumably aliases torch.cuda.FloatTensor
        # when cuda is set — confirm against the surrounding script.
        batch_gex = FloatTensor(gex)
        # Discriminator heads: validity score, cluster logits, continuous code.
        valid, cluster, pred_code = discriminator(batch_gex)
import matplotlib.pyplot as plt #from mpl_toolkits.axes_grid1 import ImageGrid #from sklearn.decomposition import PCA #from sklearn.linear_model import LogisticRegression #from scipy import stats # load config with open('config1.yml', 'r') as f: y = yaml.load(f, Loader=yaml.SafeLoader) opt = argparse.Namespace(**y) data = CuratedBreastCancerData(opt.batch_size, test_split=0.0) generator = Generator(opt) generator.load_state_dict(torch.load('generator.pth')) discriminator = Discriminator(opt) discriminator.load_state_dict(torch.load('discriminator.pth')) if cuda: generator = generator.cuda().eval() discriminator = discriminator.cuda().eval() print('CUDA is here') else: print('CPU using') codes, hidden, clusters, tags, valids = [], [], [], [], [] for gex in data.traverse_gex_study(): with torch.no_grad(): batch_gex = FloatTensor(gex) valid, cluster, pred_code = discriminator(batch_gex) hi_z = discriminator.hidden(batch_gex)
# Read the experiment configuration (YAML) and expose it via attribute access.
with open(args.config, 'r') as f:
    y = yaml.load(f, Loader=yaml.SafeLoader)
opt = argparse.Namespace(**y)

# Loss functions: LSGAN-style adversarial loss plus the InfoGAN mutual-
# information surrogates for the categorical and continuous codes.
adversarial_loss = torch.nn.MSELoss()
categorical_loss = torch.nn.CrossEntropyLoss()
continuous_loss = torch.nn.MSELoss()

# Loss weights for the information terms in the generator objective.
lambda_cat = 1
lambda_con = 0.1

# Initialize generator and discriminator
generator = Generator(opt)
discriminator = Discriminator(opt)

if cuda:
    generator.cuda()
    discriminator.cuda()
    adversarial_loss.cuda()
    categorical_loss.cuda()
    continuous_loss.cuda()

# Initialize weights (DCGAN-style normal init).
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)

# Configure data loader (call continues past this excerpt).
data = CuratedBreastCancerData(
    opt.batch_size,
# Loss functions: LSGAN-style adversarial loss plus the InfoGAN mutual-
# information surrogates for the categorical and continuous codes.
adversarial_loss = torch.nn.MSELoss()
categorical_loss = torch.nn.CrossEntropyLoss()
continuous_loss = torch.nn.MSELoss()

# Loss weights for the information terms in the generator objective.
lambda_cat = 1
lambda_con = 0.1

# Initialize generator and discriminator.  Categorical code size: explicit
# n_classes from the config, otherwise one category per study in the dataset.
categorical_size = config.n_classes if config.n_classes else dataset.n_studies
print('categorical_size:', categorical_size)
generator = Generator(config.latent_dim, categorical_size, config.code_dim, dataset.n_genes)
discriminator = Discriminator(categorical_size, config.code_dim, dataset.n_genes)

if cuda:
    generator.cuda()
    discriminator.cuda()
    adversarial_loss.cuda()
    categorical_loss.cuda()
    continuous_loss.cuda()

# Initialize weights (DCGAN-style normal init).
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)

# Optimizers (call continues past this excerpt).
optimizer_G = torch.optim.Adam(generator.parameters(), lr=config.lr,
encoder = Encoder(c_size, z_size)


def weights_init(m):
    """DCGAN-style init: N(0, 0.02) conv weights; N(1, 0.02) batch-norm
    weights with zeroed bias. Applied recursively via ``Module.apply``."""
    layer_name = m.__class__.__name__
    if 'Conv' in layer_name:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in layer_name:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


# Construct each network and initialise it immediately (this interleaving
# keeps the RNG consumption order stable).
g = Generator(c_size + z_size)
g.apply(weights_init)
d = Discriminator()
d.apply(weights_init)
q = Q(c_size)
q.apply(weights_init)
dq = D_Q_commonlayer()
dq.apply(weights_init)

# Move everything to the GPU.
for net in (dq, d, q, g, encoder):
    net.cuda()

trainer = Trainer(g, dq, d, q, encoder,
                  batch_size, img_size, c_size, z_size,
                  dataloader, version,
                  c_loss_weight, RF_loss_weight, generator_loss_weight,
                  epoch)
trainer.train()