Example #1
0
def run_a_train_epoch(args, model, data_loader, criterion, optimizer):
    """Run a single training epoch over ``data_loader``.

    Parameters
    ----------
    args : dict
        Configuration; reads 'train_mean', 'train_std', 'tasks', 'device'.
    model : nn.Module
        Model to train (set to train mode here).
    data_loader : DataLoader
        Yields ``(ids, bg, labels, masks)`` batches; `bg` is the batched
        graph consumed by ``regress`` — presumably a DGL batched graph
        (it exposes ``batch_size``), TODO confirm.
    criterion : callable
        Element-wise loss (must NOT reduce, since masking happens after).
    optimizer : Optimizer
        Optimizer stepped once per batch.

    Returns
    -------
    tuple
        (per-task mean loss as a list, pearson r2, mae) for the epoch.
    """
    model.train()
    train_meter = Meter(args['train_mean'], args['train_std'])
    epoch_loss = torch.zeros(len(args['tasks']))
    for batch_data in data_loader:
        _, bg, labels, masks = batch_data
        labels, masks = labels.to(args['device']), masks.to(args['device'])
        prediction = regress(args, model, bg)
        # Normalize the labels so that the scale of labels will be similar
        loss = criterion(prediction,
                         (labels - args['train_mean']) / args['train_std'])
        # Mask non-existing labels, then sum over the batch dimension
        loss = (loss * (masks != 0).float()).sum(0)
        # Accumulate per-task loss. Use detach()+cpu() instead of the
        # deprecated `.data` attribute.
        epoch_loss = epoch_loss + loss.detach().cpu()
        # Average the loss over batch before backprop
        loss = loss.sum() / bg.batch_size
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_meter.update(prediction, labels, masks)
    # Mean per-sample loss; epoch_loss is already a detached CPU tensor,
    # so no extra .cpu().detach() round-trip is needed.
    epoch_loss = (epoch_loss / len(data_loader.dataset)).tolist()

    return epoch_loss, train_meter.pearson_r2(), train_meter.mae()
Example #2
0
def run_an_eval_epoch(args, model, data_loader):
    """Evaluate ``model`` over ``data_loader`` without gradient tracking.

    Parameters
    ----------
    args : dict
        Configuration; reads 'train_mean' and 'train_std' for the meter.
    model : nn.Module
        Model to evaluate (set to eval mode here).
    data_loader : DataLoader
        Yields ``(ids, bg, labels, masks)`` batches.

    Returns
    -------
    tuple
        (pearson r2, mae) computed by the meter over the whole epoch.

    NOTE(review): unlike the training loop, labels/masks are not moved to
    args['device'] here — presumably Meter works on CPU tensors; confirm.
    """
    model.eval()
    eval_meter = Meter(args['train_mean'], args['train_std'])
    with torch.no_grad():
        for batch_data in data_loader:
            _, bg, labels, masks = batch_data
            prediction = regress(args, model, bg)
            eval_meter.update(prediction, labels, masks)

    return eval_meter.pearson_r2(), eval_meter.mae()