# ---------------------------------------------------------------------------
# Batch the corpus splits. Training uses the configured batch size; both
# evaluation splits use a fixed size of 64.
# NOTE(review): this chunk was collapsed onto a single physical line in the
# source (unparseable as-is); statements reconstructed in original order.
# ---------------------------------------------------------------------------
eval_batch_size = 64
train_data = batchify(corpus.train, args.batch_size)
val_data = batchify(corpus.valid, eval_batch_size)
test_data = batchify(corpus.test, eval_batch_size)
print('INFO: Data loaded.')

###############################################################################
# Build the model
###############################################################################

ntokens = len(corpus.dictionary)
model = PRPN(ntokens, args.emsize, args.nhid, args.nlayers, args.nslots,
             args.nlookback, args.resolution, args.dropout, args.idropout,
             args.rdropout, args.tied, args.hard, args.res)
if args.cuda:
    model.cuda()

# A plain nn.CrossEntropyLoss() is not used because padding positions must be
# excluded from the average; the custom criterion below masks them out.
# criterion = nn.CrossEntropyLoss()


def criterion(input, targets, targets_mask):
    """Masked negative log-likelihood loss over a batch of token logits.

    Args:
        input: logits, reshaped internally to ``(-1, ntokens)``.
        targets: gold token indices, flat, aligned with the reshaped logits.
        targets_mask: per-position weights (flattened via ``view(-1)``);
            positions with weight 0 contribute nothing to the loss.

    Returns:
        Scalar tensor: sum of masked NLL divided by the mask sum, i.e. the
        mean NLL over unmasked positions.
    """
    targets_mask = targets_mask.view(-1)
    input = input.view(-1, ntokens)
    input = F.log_softmax(input, dim=-1)
    # Gather the log-probability assigned to each gold token, zero out the
    # masked positions, and normalise by the number of unmasked tokens.
    loss = torch.gather(input, 1, targets[:, None]).view(-1)
    loss = (-loss * targets_mask).sum() / targets_mask.sum()
    return loss
return data_batched eval_batch_size = 64 train_data = batchify(corpus.train, args.batch_size) val_data = batchify(corpus.valid, eval_batch_size) test_data = batchify(corpus.test, eval_batch_size) ############################################################################### # Build the model ############################################################################### ntokens = len(corpus.dictionary) model = PRPN(ntokens, args.emsize, args.nhid, args.nlayers, args.nslots, args.nlookback, args.resolution, args.dropout, args.idropout, args.rdropout, args.tied, args.hard, args.res) if args.cuda: model.cuda() # criterion = nn.CrossEntropyLoss() def criterion(input, targets, targets_mask): targets_mask = targets_mask.view(-1) input = input.view(-1, ntokens) input = F.log_softmax(input, dim=-1) loss = torch.gather(input, 1, targets[:, None]).view(-1) loss = (-loss * targets_mask).sum() / targets_mask.sum() return loss