def test(model, dataloader, params):
    """Evaluate `model` on the 'test' split served by `dataloader`.

    Iterates the test batches under `torch.no_grad()`, accumulates
    token-level predictions into a `Metrics` object and tracks a running
    average of the loss.

    Args:
        model: a model returning `(loss, logits)` when called with
            `(input_ids, attention_mask=..., labels=...)`.
        dataloader: project data loader exposing `data_iterator(...)` and
            `size()`.
        params: config object providing `batch_size` and `device`.

    Returns:
        Metrics: aggregated metrics with `metrics.loss` set to the mean loss.
    """
    # NOTE(review): `size()[0]` is assumed to be the test-split size — confirm
    # against the loader; an off split index would only skew the progress bar.
    val_data = tqdm(
        dataloader.data_iterator(data_type='test', batch_size=params.batch_size),
        total=(dataloader.size()[0] // params.batch_size))
    metrics = Metrics()
    loss_avg = RunningAverage()
    # Fix: set eval mode once, outside the loop (it was re-applied on every
    # batch — redundant, and inconsistent with `validate`).
    model.eval()
    with torch.no_grad():
        for data, labels in val_data:
            data = torch.tensor(data, dtype=torch.long).to(params.device)
            labels = torch.tensor(labels, dtype=torch.long).to(params.device)
            # Token id 0 is treated as padding — TODO confirm with the tokenizer.
            batch_masks = data != 0
            loss, logits = model(data, attention_mask=batch_masks, labels=labels)
            # Argmax over the label dimension (dim=2) -> predicted tag ids.
            predicted = logits.max(2)[1]
            metrics.update(batch_pred=predicted.cpu().numpy(),
                           batch_true=labels.cpu().numpy(),
                           batch_mask=batch_masks.cpu().numpy())
            # `torch.mean` handles multi-GPU per-device loss vectors as well
            # as scalar losses.
            loss_avg.update(torch.mean(loss).item())
            val_data.set_postfix(type='VAL', loss='{:05.3f}'.format(loss_avg()))
    metrics.loss = loss_avg()
    return metrics
def validate(model, val_set, params):
    """Run one evaluation pass over `val_set` and return aggregated metrics.

    Batches are produced by a `DataLoader` using
    `KeyphraseData.collate_fn`; evaluation happens under `torch.no_grad()`
    with the model in eval mode. The running mean loss is stored on the
    returned `Metrics` object as `metrics.loss`.
    """
    loader = DataLoader(val_set, batch_size=params.batch_size,
                        collate_fn=KeyphraseData.collate_fn)
    progress = tqdm(loader, total=(len(val_set) // params.batch_size))
    metrics = Metrics()
    loss_avg = RunningAverage()
    with torch.no_grad():
        model.eval()
        for tokens, tags, pad_mask in progress:
            # Move the whole batch to the target device in one sweep.
            tokens = tokens.to(params.device)
            tags = tags.to(params.device)
            pad_mask = pad_mask.to(params.device)
            loss, logits = model(tokens, attention_mask=pad_mask, labels=tags)
            # Predicted tag ids = argmax over the label dimension.
            preds = logits.max(2)[1]
            metrics.update(batch_pred=preds.cpu().numpy(),
                           batch_true=tags.cpu().numpy(),
                           batch_mask=pad_mask.cpu().numpy())
            loss_avg.update(torch.mean(loss).item())
            progress.set_postfix(type='VAL', loss='{:05.3f}'.format(loss_avg()))
    metrics.loss = loss_avg()
    return metrics