Example no. 1
0
        if glove_vocab.getIndex(word):
            emb[vocab.getIndex(word)] = glove_emb[glove_vocab.getIndex(word)]
    torch.save(emb, emb_file)
# Plug the pretrained embedding matrix built above into the model's embedding
# layer in place (copy_ mutates the weight tensor directly, so the layer keeps
# its existing Parameter object). Assumes `emb` has the same shape as
# model.emb.weight — TODO confirm vocab sizes match.
model.emb.weight.data.copy_(emb)

# +
### For changing embeddings
# -





# +
# Move the model and loss criterion to the target device (CPU/GPU).
model.to(device)
criterion.to(device)

# Only optimize parameters that require gradients, so frozen layers
# (e.g. a frozen embedding matrix) are excluded from updates.
_trainable_params = [p for p in model.parameters() if p.requires_grad]

# Map the CLI choice to its optimizer class instead of an if/elif chain.
_OPTIMIZERS = {
    'adam': optim.Adam,
    'adagrad': optim.Adagrad,
    'sgd': optim.SGD,
    'rmsprop': optim.RMSprop,
}

# Fail fast on an unknown name: previously an unrecognized --optim silently
# left `optimizer` undefined, causing a confusing NameError later on.
if args.optim not in _OPTIMIZERS:
    raise ValueError(f"unsupported optimizer: {args.optim!r}")

optimizer = _OPTIMIZERS[args.optim](
    _trainable_params, lr=args.lr, weight_decay=args.wd)
# -
Example no. 2
0
        cfg.mem_dim(),
        cfg.hidden_dim(),
        cfg.num_classes(),
        cfg.sparse(),
        cfg.freeze_embed())

    criterion = nn.KLDivLoss()

    cfg.logger.info("model:\n" + str(model))

    emb = get_embd(cfg, D.vocab)

    # plug these into embedding matrix inside model
    model.emb.weight.data.copy_(emb)

    model.to(cfg.device()), criterion.to(cfg.device())

    metrics = Metrics(cfg.num_classes())

    # create trainer object for training and testing


    trainer = Trainer(cfg, model, criterion, cfg.optimizer(model), cfg.device())

    best = -float('inf')
    for epoch in range(cfg.num_epochs()):
        train_loss = trainer.train(train_dataset)

        train_loss, train_pred = trainer.test(train_dataset)
        test_loss, test_pred = trainer.test(test_dataset)