Example #1
parser.add_argument('--alpha', type=float,
                    default=0.,
                    help='y control parameter <alpha> of UnICORNN')

args = parser.parse_args()
print(args)

## set up data iterators and dictionary:
train_iterator, valid_iterator, test_iterator, text_field = utils.get_data(
    args.batch, args.emb_dim)

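## input size = vocabulary size, a single output logit for binary sentiment, and the padding index: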
ninp = len(text_field.vocab)
nout = 1
pad_idx = text_field.vocab.stoi[text_field.pad_token]

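## build the UnICORNN sentiment classifier (embedding + stacked recurrent layers) on the GPU: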
model = network.UnICORNN(ninp, args.emb_dim, args.nhid, nout, pad_idx, args.dt,
                         args.alpha, args.nlayers, args.drop,
                         args.drop_emb).cuda()

## zero embedding for <unk_token> and <padding_token>:
utils.zero_words_in_embedding(model, args.emb_dim, text_field, pad_idx)

optimizer = optim.Adam(model.parameters(), lr=args.lr)
criterion = nn.BCEWithLogitsLoss()
print('done building')


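## fraction of examples whose thresholded sigmoid prediction matches the binary label: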
def binary_accuracy(preds, y):
    rounded_preds = torch.round(torch.sigmoid(preds))
    correct = (rounded_preds == y).float()
    acc = correct.sum() / len(correct)
    return acc
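
## A minimal sketch (not part of the original listing) of how the pieces above
## might be combined into one training epoch. It assumes, which the snippet does
## not show, that train_iterator yields batches with .text (token indices already
## on the GPU) and .label (0/1 float targets), and that model(text) returns one
## logit per example.
def train_epoch():
    model.train()
    epoch_loss, epoch_acc = 0., 0.
    for batch in train_iterator:
        optimizer.zero_grad()
        predictions = model(batch.text).squeeze(1)
        loss = criterion(predictions, batch.label)
        acc = binary_accuracy(predictions, batch.label)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
        epoch_acc += acc.item()
    return epoch_loss / len(train_iterator), epoch_acc / len(train_iterator)
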
Example #2
parser.add_argument('--alpha', type=float,
                    default=13.0,
                    help='y control parameter <alpha> of UnICORNN')

args = parser.parse_args()
print(args)

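## per-step input size (96 features), number of classes (10) and test batch size: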
ninp = 96
nout = 10
bs_test = 1000

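## make cuDNN deterministic and fix the random seeds for reproducibility: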
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(12345)
np.random.seed(12345)

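## build the UnICORNN classifier on the GPU and set up the train/validation/test loaders: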
model = network.UnICORNN(ninp, args.nhid, nout, args.dt, args.alpha,
                         args.nlayers).cuda()
train_loader, valid_loader, test_loader = utils.get_data(args.batch, bs_test)

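## fixed random-noise sequences (1000 - 32 time steps of 96 features each),
## shared across examples and replicated per training/test batch element: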
rands = torch.randn(1, 1000 - 32, 96)
rand_train = rands.repeat(args.batch, 1, 1).cuda()
rand_test = rands.repeat(bs_test, 1, 1).cuda()
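## (assumption, not shown in this snippet) the noise is presumably concatenated
## with each 32-step input along the time dimension, e.g.
## torch.cat((inputs, rand_train), dim=1), padding every sequence to 1000 steps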

## Define the loss
objective = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=args.lr)


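## evaluate classification accuracy of the current model on the given data loader: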
def test(data_loader):
    model.eval()
    correct = 0
    with torch.no_grad():