Example 1
#static_code = FloatTensor(np.zeros((opt.n_classes ** 2, opt.code_dim)))

# ----------
#  Training
# ----------

for epoch in range(opt.n_epochs):
    i = 0
    for batch_gexs in data.get_unsupervised_batch():
        i += 1

        batch_size = batch_gexs.shape[0]

        # Adversarial ground truths
        valid = Variable(  # noisy labels
            FloatTensor(batch_size, 1).fill_(1.0) - FloatTensor(np.random.uniform(0.0, 0.05, (batch_size, 1))), requires_grad=False
        )
        fake = Variable(
            FloatTensor(batch_size, 1).fill_(0.0) + FloatTensor(np.random.uniform(0.0, 0.05, (batch_size, 1))), requires_grad=False
        )

        # Configure input
        #real_imgs = Variable(batch_gex.type(FloatTensor)).view(-1, 1024)
        real_gexs = FloatTensor(batch_gexs)
        #labels = to_categorical(labels.numpy(), num_columns=opt.n_classes)

        # -----------------
        #  Train Generator
        # -----------------

        optimizer_G.zero_grad()
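# The example breaks off at the start of the generator step. What follows is a
# minimal sketch of how an InfoGAN-style generator update could continue from
# here; the generator/discriminator call signatures, adversarial_loss and
# to_categorical are assumptions in the style of common reference code, not
# taken from the original.
z = Variable(FloatTensor(np.random.normal(0, 1, (batch_size, opt.latent_dim))))
label_input = to_categorical(np.random.randint(0, opt.n_classes, batch_size),
                             num_columns=opt.n_classes)
code_input = Variable(FloatTensor(np.random.uniform(-1, 1, (batch_size, opt.code_dim))))

# Generate fake expression profiles and score them with the discriminator
gen_gexs = generator(z, label_input, code_input)
validity, _, _ = discriminator(gen_gexs)

# The generator is rewarded for making the discriminator output "valid"
g_loss = adversarial_loss(validity, valid)
g_loss.backward()
optimizer_G.step()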
Example 2
    test_split=0.0)  # train this unsupervised model on all data

# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(),
                               lr=opt.lr,
                               betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(),
                               lr=opt.lr,
                               betas=(opt.b1, opt.b2))
optimizer_info = torch.optim.Adam(itertools.chain(generator.parameters(),
                                                  discriminator.parameters()),
                                  lr=opt.lr,
                                  betas=(opt.b1, opt.b2))

# Static generator inputs for sampling
static_z = Variable(FloatTensor(np.zeros((opt.n_classes**2, opt.latent_dim))))
static_label = to_categorical(np.array(
    [num for _ in range(opt.n_classes) for num in range(opt.n_classes)]),
                              num_columns=opt.n_classes)
static_code = Variable(FloatTensor(np.zeros((opt.n_classes**2, opt.code_dim))))
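# These frozen inputs are typically used to draw the same n_classes x n_classes
# grid of samples at fixed intervals so that training progress can be compared
# across epochs. A minimal sketch of such a sampling step; the sample_gexs
# helper and the output path are assumptions, not part of the original code.
def sample_gexs(epoch):
    """Generate expression profiles from the fixed inputs and save them (sketch)."""
    with torch.no_grad():
        gen_gexs = generator(static_z, static_label, static_code)
    np.save('samples/epoch_{0:04d}.npy'.format(epoch),
            gen_gexs.detach().cpu().numpy())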

# ----------
#  Training
# ----------

for epoch in range(opt.n_epochs):
    i = 0
    for batch_gexs in data.get_unsupervised_batch():
        i += 1

        batch_size = batch_gexs.shape[0]
Example 3
discriminator = Discriminator(categorical_size, train_config.code_dim,
                              train_dataset.n_genes)
discriminator_checkpoint = os.path.join(load_path, 'discriminator.pth')
discriminator.load_state_dict(torch.load(discriminator_checkpoint))

if cuda:
    #generator = generator.cuda().eval()
    discriminator = discriminator.cuda().eval()
    print('Using CUDA')
else:
    print('Using CPU')

codes, hidden, clusters, tags, valids = [], [], [], [], []
for gex in tqdm(test_dataset.traverse_gexs()):
    with torch.no_grad():
        batch_gex = FloatTensor(gex)
        valid, cluster, pred_code = discriminator(batch_gex)
        #hi_z = discriminator.hidden(batch_gex)
    #valids.append(valid.detach().cpu().numpy())
    codes.append(pred_code.detach().cpu().numpy())
    #clusters.append(cluster.detach().cpu().numpy().argmax(1))
    #hidden.append(hi_z.detach().cpu().numpy())
codes = np.vstack(codes)
#valids = np.vstack(valids)
#hidden = np.vstack(hidden)
#clusters = np.hstack(clusters)

patient_idxs = test_dataset.gexs.loc[
    np.invert(test_dataset.gexs['sample_name'].isnull().values),
    ['sample_name']].values.reshape(-1)
#patient_idxs.shape
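# A short sketch of how the inferred codes could be paired with the sample
# names for downstream analysis. It assumes codes and patient_idxs are aligned
# and of equal length; the pandas import, DataFrame layout and output path are
# not from the original code.
import pandas as pd

code_df = pd.DataFrame(codes,
                       columns=['code_{0}'.format(d) for d in range(codes.shape[1])])
code_df['sample_name'] = patient_idxs
code_df.to_csv(os.path.join(load_path, 'predicted_codes.csv'), index=False)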
Example 4
# ----------
#  Training
# ----------
summary = SummaryWriter('summary/s{0:02d}_c{1:02d}_{2}'.format(
    config.code_dim, categorical_size,
    os.path.split(config.gexs_csv)[-1].split('.')[0]))
for epoch in range(config.n_epochs):
    print('\repoch {0}'.format(epoch + 1), end='', flush=True)
    i = 0
    for batch_gexs in dataset.get_gexs_batch(config.batch_size):
        i += 1

        batch_size = batch_gexs.shape[0]

        # Adversarial ground truths
        valid = Variable(  # noisy labels
            FloatTensor(batch_size, 1).fill_(1.0) -
            FloatTensor(np.random.uniform(0.0, 0.05, (batch_size, 1))),
            requires_grad=False)
        fake = Variable(
            FloatTensor(batch_size, 1).fill_(0.0) +
            FloatTensor(np.random.uniform(0.0, 0.05, (batch_size, 1))),
            requires_grad=False)

        # Configure input
        real_gexs = FloatTensor(batch_gexs)

        # -----------------
        #  Train Generator
        # -----------------

        optimizer_G.zero_grad()
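# The SummaryWriter created above is not used in the visible part of this
# example. A minimal sketch of how losses could be logged at each iteration of
# the batch loop once they have been computed; g_loss, d_loss, info_loss and
# n_batches are assumed names, not taken from the original.
global_step = epoch * n_batches + i
summary.add_scalar('loss/generator', g_loss.item(), global_step)
summary.add_scalar('loss/discriminator', d_loss.item(), global_step)
summary.add_scalar('loss/info', info_loss.item(), global_step)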