import torch
from tqdm import tqdm
from sklearn.metrics import fbeta_score

import utils

# Note: `args` (parsed command-line options, e.g. args.cuda) is expected to be
# provided at module level by the surrounding script; it is not defined here.


def train(net, loader, criterion, optimizer, verbose=False):
    net.train()
    running_loss = 0
    running_accuracy = 0

    for i, (X, y) in enumerate(loader):
        # Move the batch to the GPU when requested. The Variable wrapper from
        # the original code is unnecessary in PyTorch >= 0.4 and has been dropped.
        if args.cuda:
            X, y = X.cuda(), y.cuda()

        output = net(X)
        loss = criterion(output, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        acc = utils.get_multilabel_accuracy(output, y)
        running_accuracy += acc
        if i % 400 == 0 and verbose:
            pct = float(i + 1) / len(loader)
            curr_loss = running_loss / (i + 1)
            curr_acc = running_accuracy / (i + 1)
            print('Complete: {:.2f}, Loss: {:.2f}, Accuracy: {:.4f}'.format(
                pct * 100, curr_loss, curr_acc))
    return running_loss / len(loader), running_accuracy / len(loader)
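

# utils.get_multilabel_accuracy is not included in this snippet. Below is a
# minimal sketch of what such a helper could look like, assuming the network
# emits per-label scores that a sigmoid maps to probabilities and that a label
# counts as correct when the 0.5-thresholded prediction matches the binary
# target; the real implementation in utils.py may differ.
def get_multilabel_accuracy_sketch(output, y, threshold=0.5):
    preds = (torch.sigmoid(output) > threshold).float()
    return (preds == y).float().mean().item()
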
@torch.no_grad()  # inference only; replaces the deprecated Variable(..., volatile=True)
def validate(net, loader, criterion):
    net.eval()
    running_loss = 0
    running_accuracy = 0
    # Accumulate targets and raw model outputs (17 labels) for the F2 score below.
    targets = torch.empty(0, 17)
    predictions = torch.empty(0, 17)
    for i, (X, y) in enumerate(tqdm(loader, desc='Val')):
        if args.cuda:
            X, y = X.cuda(), y.cuda()
        output = net(X)
        loss = criterion(output, y)
        acc = utils.get_multilabel_accuracy(output, y)
        targets = torch.cat((targets, y.cpu()), 0)
        predictions = torch.cat((predictions, output.cpu()), 0)
        running_loss += loss.item()
        running_accuracy += acc
    # Sample-averaged F2 score, thresholding the accumulated outputs at 0.23.
    fscore = fbeta_score(targets.numpy(),
                         predictions.numpy() > 0.23,
                         beta=2,
                         average='samples')
    return running_loss / len(loader), running_accuracy / len(loader), fscore
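

# A minimal driver sketch, assuming the caller builds the model, the two
# DataLoaders, and a multilabel criterion elsewhere in the script; the Adam
# optimizer, learning rate, and epoch count used here are illustrative
# assumptions, not values taken from the code above.
def run_training_sketch(net, train_loader, val_loader, criterion, n_epochs=10, lr=1e-3):
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    for epoch in range(n_epochs):
        train_loss, train_acc = train(net, train_loader, criterion, optimizer, verbose=True)
        val_loss, val_acc, val_f2 = validate(net, val_loader, criterion)
        print('Epoch {}: train loss {:.4f} acc {:.4f} | '
              'val loss {:.4f} acc {:.4f} F2 {:.4f}'.format(
                  epoch, train_loss, train_acc, val_loss, val_acc, val_f2))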