Example no. 1
def validate(val_loader,
             model,
             criterion,
             epoch,
             print_freq,
             save_history=True,
             ngpu=1):
    device = torch.device("cuda:0" if (
        torch.cuda.is_available() and ngpu > 0) else "cpu")
    batch_time = metrics.AverageMeter()
    losses = metrics.AverageMeter()
    top1 = metrics.AverageMeter()
    top5 = metrics.AverageMeter()
    history = {'epoch': [], 'loss': [], 'acc_topk1': [], 'acc_topk5': []}

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        end = time.time()
        for i, (input, target) in enumerate(val_loader):

            input = input.to(device)
            target = target.to(device)

            # compute output
            output = model(input)
            loss = criterion(output, target)

            # measure accuracy and record loss
            acc1, acc5 = metrics.accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(acc1[0], input.size(0))
            top5.update(acc5[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % print_freq == 0:
                history['epoch'].append(epoch)
                history['loss'].append(losses.avg)
                history['acc_topk1'].append(top1.avg)
                history['acc_topk5'].append(top5.avg)
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                          i,
                          len(val_loader),
                          batch_time=batch_time,
                          loss=losses,
                          top1=top1,
                          top5=top5))

        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(top1=top1,
                                                                    top5=top5))

    return history
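
Both validate above and the train function in Example no. 4 rely on metrics.AverageMeter and metrics.accuracy, which are not shown on this page. For reference, the sketch below shows what such helpers conventionally look like (following the pattern of the official PyTorch ImageNet example); the project's own metrics module may differ, so treat the names and behaviour here as assumptions.

import torch


class AverageMeter:
    """Tracks the latest value and a running average (assumed interface: val, avg, update)."""

    def __init__(self):
        self.val = 0.0
        self.avg = 0.0
        self.sum = 0.0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def accuracy(output, target, topk=(1,)):
    """Top-k accuracy (in percent) for classification logits; returns one tensor per k."""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        # indices of the k highest-scoring classes per sample, shape (maxk, batch)
        _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
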
Example no. 2
def val(model, loader, loss_criterion, label_names, cuda=True):
    model.eval()
    loader = tqdm(loader, ncols=100)
    losses = metrics.AverageMeter()
    cm = metrics.ConfusionMatrixMeter(label_names, cmap='Blues')

    with torch.no_grad():
        for i, batch in enumerate(loader, start=0):

            if cuda: batch = batch.to('cuda:0')

            outputs = model(batch)
            out_device = outputs.device
            gt = batch.y.to(out_device)

            loss = loss_criterion(outputs, gt)

            batch_size = len(torch.unique(batch.batch))

            losses.update(loss.item(), batch_size)

            cm.add(gt.cpu().data.numpy(), outputs.cpu().data.numpy())

            loader.set_postfix({"loss": loss.item()})
            torch.cuda.empty_cache()

    return losses.avg, cm
Example no. 3
def train(model,
          loader,
          loss_criterion,
          optimizer,
          label_names,
          batch_parts=1,
          cuda=True):
    model.train()
    numIt = len(loader)
    loader = tqdm(loader, ncols=100)
    acc = metrics.AverageMeter()
    losses = metrics.AverageMeter()
    cm = metrics.ConfusionMatrixMeter(label_names, cmap='Oranges')
    prev = -1
    optimizer.zero_grad()

    for i, batch in enumerate(loader, start=0):
        if cuda: batch = batch.to('cuda:0')

        outputs = model(batch)

        out_device = outputs.device
        gt = batch.y.to(out_device)
        loss = loss_criterion(outputs, gt)
        loss.backward()

        batch_size = len(torch.unique(batch.batch))

        losses.update(loss.item(), batch_size)
        cm.add(gt.cpu().data.numpy(), outputs.cpu().data.numpy())

        # gradient accumulation: step the optimizer only every `batch_parts`
        # batches (or at the very end of the epoch)
        if (i + 1) % batch_parts == 0 or (i + 1) == numIt:
            if batch_parts > 1:
                # average the gradients accumulated since the last optimizer step
                accum = i - prev
                prev = i
                for p in model.parameters():
                    p.grad.div_(accum)
            optimizer.step()
            optimizer.zero_grad()

        torch.cuda.empty_cache()
    return losses.avg, cm
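
The val and train functions in Examples no. 2 and 3 expect PyTorch Geometric style batches, i.e. objects carrying node features, a per-node graph index batch.batch and graph-level targets batch.y. The sketch below shows one way they might be driven together, with batch_parts controlling gradient accumulation; the model factory, datasets and label names are placeholders for whatever the surrounding project defines, and the metrics module used above must be importable.

import torch
from torch_geometric.loader import DataLoader  # assumed loader; any iterable yielding Batch objects works

use_cuda = torch.cuda.is_available()

# Placeholders: a graph-level classifier and two datasets of torch_geometric.data.Data objects.
model = build_model().to('cuda:0' if use_cuda else 'cpu')              # hypothetical factory
train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)   # placeholder dataset
val_loader = DataLoader(val_dataset, batch_size=8, shuffle=False)      # placeholder dataset
label_names = ['class_a', 'class_b']                                   # placeholder labels

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

for epoch in range(20):
    # batch_parts=4 makes train() accumulate gradients over 4 loader batches,
    # average them, and only then call optimizer.step()
    train_loss, train_cm = train(model, train_loader, criterion, optimizer,
                                 label_names, batch_parts=4, cuda=use_cuda)
    val_loss, val_cm = val(model, val_loader, criterion, label_names, cuda=use_cuda)
    print(f"epoch {epoch}: train loss {train_loss:.4f}, val loss {val_loss:.4f}")
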
Example no. 4
def train(train_loader,
          model,
          criterion,
          optimizer,
          epoch,
          print_freq,
          save_history=True,
          ngpu=1):
    device = torch.device("cuda:0" if (
        torch.cuda.is_available() and ngpu > 0) else "cpu")
    batch_time = metrics.AverageMeter()
    data_time = metrics.AverageMeter()
    losses = metrics.AverageMeter()
    top1 = metrics.AverageMeter()
    top5 = metrics.AverageMeter()
    history = {'epoch': [], 'loss': [], 'acc_topk1': [], 'acc_topk5': []}

    # switch to train mode
    model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        input = input.to(device)
        target = target.to(device)

        optimizer.zero_grad()

        # compute output
        output = model(input)
        loss = criterion(output, target)

        # measure accuracy and record loss
        acc1, acc5 = metrics.accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(acc1[0], input.size(0))
        top5.update(acc5[0], input.size(0))

        # compute gradient and do SGD step
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % print_freq == 0:
            history['epoch'].append(epoch)
            history['loss'].append(losses.avg)
            history['acc_topk1'].append(top1.avg)
            history['acc_topk5'].append(top5.avg)
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                      epoch,
                      i,
                      len(train_loader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      top1=top1,
                      top5=top5))
    return history
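
Finally, a minimal driver loop tying the train function above to the validate function from Example no. 1, assuming both live in the same module together with the metrics helpers. The dataset paths, model choice and hyperparameters are illustrative placeholders, not taken from the original project.

import torch
import torchvision
import torchvision.transforms as T

# Placeholder data pipeline: any (image, label) classification dataset works.
train_tf = T.Compose([T.RandomResizedCrop(224), T.RandomHorizontalFlip(), T.ToTensor()])
val_tf = T.Compose([T.Resize(256), T.CenterCrop(224), T.ToTensor()])
train_set = torchvision.datasets.ImageFolder('data/train', transform=train_tf)  # placeholder path
val_set = torchvision.datasets.ImageFolder('data/val', transform=val_tf)        # placeholder path
train_loader = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True, num_workers=4)
val_loader = torch.utils.data.DataLoader(val_set, batch_size=64, shuffle=False, num_workers=4)

ngpu = 1
device = torch.device("cuda:0" if (torch.cuda.is_available() and ngpu > 0) else "cpu")
model = torchvision.models.resnet18(num_classes=len(train_set.classes)).to(device)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4)

history = {'train': [], 'val': []}
for epoch in range(90):
    history['train'].append(train(train_loader, model, criterion, optimizer,
                                  epoch, print_freq=100, ngpu=ngpu))
    history['val'].append(validate(val_loader, model, criterion,
                                   epoch, print_freq=100, ngpu=ngpu))
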