# Beispiel #1 (paste artifact: example marker, not executable code)
# 0
        from qelos_core.scripts.convai.preproc import run as run_preproc
        run_preproc()
    else:
        tt = q.ticktock("script")
        tt.tick("loading data")
        train_dataset, valid_dataset = load_datasets()
        tt.tock("loaded data")
        print(
            "{} unique words, {} training examples, {} valid examples".format(
                len(train_dataset.D), len(train_dataset), len(valid_dataset)))
        trainloader = q.dataload(train_dataset,
                                 shuffle=True,
                                 batch_size=batsize)
        validloader = q.dataload(valid_dataset,
                                 shuffle=True,
                                 batch_size=batsize)
        # test
        if test:
            testexample = train_dataset[10]
            trainloader_iter = iter(trainloader)
            tt.tick("getting 1000 batches")
            for i in range(1000):
                batch = next(iter(trainloader))
            tt.tock("got 1000 batches")

        print("done")


if __name__ == "__main__":
    q.argprun(run)
    # NOTE(review): everything below the q.argprun(run) call references names
    # (`model`, `data`, `valid_data`, `lr`, `epochs`, `device`, `sm`, `D`)
    # that are not defined at module scope in this chunk — it looks pasted
    # from the body of a run function; confirm the intended location.
    # region train
    optimizer = torch.optim.Adam(q.params_of(model), lr=lr)
    # Cross-entropy training with accuracy tracking and gradient-norm
    # clipping at 5.0; validation tracks accuracy only.
    trainer = q.trainer(model).on(data).loss(torch.nn.CrossEntropyLoss(), q.Accuracy())\
        .optimizer(optimizer).hook(q.ClipGradNorm(5.)).device(device)
    validator = q.tester(model).on(valid_data).loss(
        q.Accuracy()).device(device)
    q.train(trainer, validator).run(epochs=epochs)
    # endregion

    # region check attention    #TODO
    # feed a batch of ten examples (rows 400..409 of the source matrix)
    inpd = torch.tensor(sm.matrix[400:410])
    outd, att = model(inpd, with_att=True)
    # argmax over dim 1 -> predicted class ids, moved to numpy for printing
    outd = torch.max(outd, 1)[1].cpu().detach().numpy()
    inpd = inpd.cpu().detach().numpy()
    att = att.cpu().detach().numpy()
    # Invert the token and label dictionaries (id -> string).
    rD = {v: k for k, v in sm.D.items()}
    roD = {v: k for k, v in D.items()}
    for i in range(len(att)):
        inpdi = "   ".join([rD[x] for x in inpd[i]])
        outdi = roD[outd[i]]
        print("input:     {}\nprediction: {}\nattention: {}".format(
            inpdi, outdi, " ".join(["{:.1f}".format(x) for x in att[i]])))

    # endregion


# Entry point: parse CLI arguments with qelos and dispatch to run_classify.
if __name__ == '__main__':
    q.argprun(run_classify)
from qelos_core.scripts.query.wikisql_clean import run_seq2seq_oracle_df as runf
import qelos_core as q

if __name__ == "__main__":
    q.argprun(runf)
    # NOTE(review): the code below references names (`usebase`, `SeqGAN_Base`,
    # `SeqGAN_DCL`, `discriminator`, `generator`, `disc_data`, `gen_data`,
    # `lr`, `epochs`, `noaccumulate`, `vocab`, `z_dim`) that are not defined
    # at module scope in this chunk — it looks pasted from the body of a run
    # function; confirm the intended location.
    # Pick the SeqGAN variant, then wrap the same discriminator/generator
    # pair twice: once in discriminator-training mode, once in
    # generator-training mode.
    SeqGAN = SeqGAN_Base if usebase else SeqGAN_DCL

    disc_model = SeqGAN(discriminator, generator, gan_mode=q.gan.GAN.DISC_TRAIN, accumulate=not noaccumulate)
    gen_model = SeqGAN(discriminator, generator, gan_mode=q.gan.GAN.GEN_TRAIN, accumulate=not noaccumulate)

    # One Adam optimizer per network, same learning rate.
    disc_optim = torch.optim.Adam(q.params_of(discriminator), lr=lr)
    gen_optim = torch.optim.Adam(q.params_of(generator), lr=lr)

    # q.no_losses(2): placeholder loss slots — presumably the GAN models
    # compute their own losses internally; confirm against the qelos API.
    disc_trainer = q.trainer(disc_model).on(disc_data).optimizer(disc_optim).loss(q.no_losses(2))
    gen_trainer = q.trainer(gen_model).on(gen_data).optimizer(gen_optim).loss(q.no_losses(2))

    gan_trainer = q.gan.GANTrainer(disc_trainer, gen_trainer)

    # Alternating schedule: 5 discriminator iterations per generator
    # iteration, with a 500-iteration burn-in.
    gan_trainer.run(epochs, disciters=5, geniters=1, burnin=500)

    # print some predictions: sample 50 latent vectors and decode each
    # generated id sequence to a string via the inverted vocabulary.
    with torch.no_grad():
        rvocab = {v: k for k, v in vocab.items()}
        q.batch_reset(generator)
        eval_z = torch.randn(50, z_dim)
        eval_y, _ = generator(eval_z)
        for i in range(len(eval_y)):
            prow = "".join([rvocab[mij] for mij in eval_y[i].numpy()])
            print(prow)

    print("done")


# Entry point: parse CLI arguments with qelos and dispatch to run_cond_toy.
if __name__ == "__main__":
    q.argprun(run_cond_toy)
# Beispiel #5 (paste artifact: example marker, not executable code)
# 0
            # Tail of IgnoreLabelDataset.__getitem__ (its `def` is above this
            # chunk): return only the first element of the wrapped item —
            # presumably the image of an (image, label) pair; the label is
            # discarded.
            return self.orig[index][0]

        def __len__(self):
            # Same length as the wrapped dataset.
            return len(self.orig)

    cifar = dset.CIFAR10(root='../datasets/cifar/',
                         download=True,
                         train=True,
                         transform=transforms.Compose([
                             transforms.Scale(32),
                             transforms.ToTensor(),
                             transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                                  std=[0.5, 0.5, 0.5])
                         ]))
    device = torch.device("cpu") if not cuda else torch.device("cuda", gpu)
    print(device, cuda)
    cifar = IgnoreLabelDataset(cifar)
    cifar_loader = q.dataload(cifar, batch_size=batsize)
    scorer = q.gan.FIDandIS(device=device)
    print(scorer.inception.training)
    scorer.set_real_stats_with(cifar_loader)

    print("Calculating FID and IS ... ")

    scores = scorer.get_scores(cifar_loader)
    print(scores)


if __name__ == '__main__':
    q.argprun(tst_inception_cifar10)
            self.fc2 = nn.Linear(50, 10)

        def forward(self, x):
            """Classic LeNet-style pass: two conv + max-pool stages with
            ReLU, flatten to 320 features, two fully-connected layers with
            dropout in between, and a log-softmax over the 10 classes."""
            h = self.conv1(x)
            h = F.relu(F.max_pool2d(h, 2))
            h = self.conv2_drop(self.conv2(h))
            h = F.relu(F.max_pool2d(h, 2))
            flat = h.view(-1, 320)
            flat = F.relu(self.fc1(flat))
            flat = F.dropout(flat, training=self.training)
            logits = self.fc2(flat)
            return F.log_softmax(logits, dim=1)

    model = Net()

    # SGD with momentum; `lr` and `momentum` are presumably parameters of
    # the enclosing run function (not visible in this chunk) — confirm.
    optim = torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum)

    # NLL loss matches the log_softmax output of Net.forward; accuracy is
    # tracked alongside the loss for both training and validation.
    trainer = q.trainer(model).on(train_loader)\
        .loss(torch.nn.NLLLoss(), q.Accuracy())\
        .optimizer(optim).cuda(cuda)
    validator = q.tester(model).on(test_loader)\
        .loss(torch.nn.NLLLoss(), q.Accuracy())\
        .cuda(cuda)

    # Record per-phase losses under separate logger keys.
    logger.loglosses(trainer, "train.losses")
    logger.loglosses(validator, "valid.losses")

    q.train(trainer, validator).run(epochs)


if __name__ == "__main__":
    q.argprun(run_sound)
            # Tail of a decoder-cell forward method (its `def` is above this
            # chunk): run the core module, then the output projection.
            core_out = self.core(core_inp)
            out = self.out(core_out)
            return out

    # Teacher-forcing decoder wrapped around the cell defined above.
    decoder_cell = DecoderCell(decoder_emb, decoder_lstm, decoder_outlin)
    decoder = q.TFDecoder(decoder_cell)
    # endregion

    likelihood = Likelihood()

    vae = SeqVAE(encoder, decoder, likelihood)

    # Smoke-test: a single forward pass on one random token batch
    # (the result `ys` is not used afterwards).
    x = torch.randint(0, vocsize, (batsize, seqlen), dtype=torch.int64)
    ys = vae(x)

    optim = torch.optim.Adam(q.params_of(vae), lr=lr)

    # Synthetic training data: 100 batches worth of random token sequences
    # (note: `x` is rebound, shadowing the smoke-test batch above).
    x = torch.randint(0, vocsize, (batsize * 100, seqlen), dtype=torch.int64)
    dataloader = q.dataload(x, batch_size=batsize, shuffle=True)

    # NOTE(review): `.loss(4)` — presumably the number of loss slots in the
    # qelos trainer API; confirm against q.trainer documentation.
    trainer = q.trainer(vae).on(dataloader).optimizer(optim).loss(4).epochs(
        epochs)
    trainer.run()

    print("done \n\n")


if __name__ == "__main__":
    # q.argprun(run_seqvae_toy)
    q.argprun(run_normal_seqvae_toy)