Example #1
def validate(val_loader, model, criterion, epoch, key, evaluator):
    '''
        Run evaluation
    '''

    # Switch to evaluate mode
    model.eval()
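    # Gradients are still tracked in this loop; in current PyTorch it would
    # typically be wrapped in torch.no_grad() to save memory during evaluation.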

    for i, (img, gt) in enumerate(val_loader):

        # Process the network inputs and outputs
        img = utils.normalize(img, torch.Tensor([0.295, 0.204, 0.197]), torch.Tensor([0.221, 0.188, 0.182]))
        gt_temp = gt * 255
        label = utils.generateLabel4CE(gt_temp, key)
        oneHotGT = utils.generateOneHot(gt_temp, key)

        img, label = Variable(img), Variable(label)

        if use_gpu:
            img = img.cuda()
            label = label.cuda()

        # Compute output
        seg = model(img)
        loss = model.dice_loss(seg, label)

        print('[%d/%d][%d/%d] Loss: %.4f'
              % (epoch, args.epochs-1, i, len(val_loader)-1, loss.mean().item()))

        utils.displaySamples(img, seg, gt, use_gpu, key, args.saveTest, epoch,
                             i, args.save_dir)
        evaluator.addBatch(seg, oneHotGT)
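
The utils helpers and the colour key are not shown in these excerpts. Below is a minimal sketch of what they might look like, assuming key maps each class index to the RGB colour used in the ground-truth masks; all signatures here are hypothetical, not the project's actual implementation.

import torch
import torch.nn.functional as F

def normalize(img, mean, std):
    # Hypothetical: per-channel (x - mean) / std on an (N, 3, H, W) batch.
    return (img - mean.view(1, 3, 1, 1)) / std.view(1, 3, 1, 1)

def generateLabel4CE(gt, key):
    # Hypothetical: convert an RGB-coded mask (N, 3, H, W) with values in [0, 255]
    # into an (N, H, W) LongTensor of class indices for cross-entropy / Dice loss.
    label = torch.zeros(gt.size(0), gt.size(2), gt.size(3), dtype=torch.long)
    for cls_idx, colour in key.items():
        colour = torch.tensor(colour, dtype=gt.dtype).view(1, 3, 1, 1)
        label[(gt == colour).all(dim=1)] = cls_idx
    return label

def generateOneHot(gt, key):
    # Hypothetical: one-hot (N, C, H, W) version of the same label map.
    label = generateLabel4CE(gt, key)
    return F.one_hot(label, num_classes=len(key)).permute(0, 3, 1, 2).float()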
Example #2
def train(train_loader, model, criterion, optimizer, scheduler, epoch, key):
    '''
        Run one training epoch
    '''

    # Switch to train mode
    model.train()
    epoch_loss = 0
    for i, (img, gt) in enumerate(train_loader):

        # For TenCrop Data Augmentation
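        # Assuming torchvision-style TenCrop, each image arrives as 10 crops;
        # the crop dimension is folded into the batch dimension for both the
        # image and the RGB ground-truth mask below.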
        img = img.view(-1, 3, args.resizedImageSize, args.resizedImageSize)
        img = utils.normalize(img, torch.Tensor([0.295, 0.204, 0.197]), torch.Tensor([0.221, 0.188, 0.182]))
        gt = gt.view(-1, 3, args.resizedImageSize, args.resizedImageSize)

        # Process the network inputs and outputs
        gt_temp = gt * 255
        label = utils.generateLabel4CE(gt_temp, key)
        oneHotGT = utils.generateOneHot(gt_temp, key)

        img, label = Variable(img), Variable(label)

        if use_gpu:
            img = img.cuda()
            label = label.cuda()

        # Compute output
        seg = model(img)
        loss = model.dice_loss(seg, label)

        # Compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        scheduler.step(loss.mean().item())
        epoch_loss += loss.mean().item()

        print('[%d/%d][%d/%d] Loss: %.4f'
              % (epoch, args.epochs-1, i, len(train_loader)-1, loss.mean().item()))
        utils.displaySamples(img, seg, gt, use_gpu, key, False, epoch,
                             i, args.save_dir)

    writer.add_scalar('Train Epoch Loss', epoch_loss / (i+1), epoch)
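
These loops rely on module-level globals (args, use_gpu, key, writer) and a driver that is not part of the excerpt. The following is a hypothetical wiring, assuming an SGD optimizer, a ReduceLROnPlateau scheduler (consistent with scheduler.step(loss) above), and a TensorBoard SummaryWriter; model, loaders, key, evaluator and args are assumed to be built elsewhere.

import torch
from torch.utils.tensorboard import SummaryWriter

use_gpu = torch.cuda.is_available()
writer = SummaryWriter(log_dir=args.save_dir)

if use_gpu:
    model = model.cuda()

criterion = torch.nn.BCEWithLogitsLoss()  # assumption; only example #3 actually uses it
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)  # args.lr assumed
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min')

for epoch in range(args.epochs):
    train(train_loader, model, criterion, optimizer, scheduler, epoch, key)
    validate(val_loader, model, criterion, epoch, key, evaluator)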
Example #3
def validate(val_loader, model, criterion, epoch, key, evaluator):
    '''
        Run evaluation
    '''

    # Switch to evaluate mode
    model.eval()

    for i, (img, seg_gt, class_gt) in enumerate(val_loader):

        # Process the network inputs and outputs
        img = utils.normalize(img, torch.Tensor([0.295, 0.204, 0.197]),
                              torch.Tensor([0.221, 0.188, 0.182]))
        gt_temp = seg_gt * 255
        seg_label = utils.generateLabel4CE(gt_temp, key)
        oneHotGT = utils.generateOneHot(gt_temp, key)

        img, seg_label = Variable(img), Variable(seg_label)
        class_label = Variable(class_gt).float()

        if use_gpu:
            img = img.cuda()
            seg_label = seg_label.cuda()
            class_label = class_label.cuda()

        # Compute output
        classified, segmented = model(img)
        seg_loss = model.dice_loss(segmented, seg_label)
        class_loss = criterion(classified, class_label)
        total_loss = seg_loss + class_loss

        print(
            '[{:d}/{:d}][{:d}/{:d}] Total Loss: {:.4f}, Segmentation Loss: {:.4f}, Classification Loss: {:.4f}'
            .format(epoch, args.epochs - 1, i,
                    len(val_loader) - 1,
                    total_loss.mean().item(),
                    seg_loss.mean().item(),
                    class_loss.mean().item()))

        utils.displaySamples(img, segmented, seg_gt, use_gpu, key,
                             args.saveTest, epoch, i, args.save_dir)
        evaluator.addBatch(segmented, oneHotGT)
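
In example #3 the model returns a (classification, segmentation) pair and exposes a dice_loss method. The real architecture is not shown in the excerpt; the sketch below is a hypothetical two-head network with a standard soft Dice loss, just to make the interface concrete (num_classes and num_tools are assumed parameters).

import torch
import torch.nn as nn
import torch.nn.functional as F

class TwoHeadNet(nn.Module):
    # Hypothetical stand-in: a shared encoder with a segmentation head
    # (per-pixel class scores) and an image-level classification head.
    def __init__(self, num_classes, num_tools):
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(16, 16, 3, padding=1), nn.ReLU(inplace=True),
        )
        self.seg_head = nn.Conv2d(16, num_classes, 1)
        self.cls_head = nn.Linear(16, num_tools)

    def forward(self, x):
        feat = self.encoder(x)
        segmented = self.seg_head(feat)                    # (N, C, H, W) logits
        classified = self.cls_head(feat.mean(dim=(2, 3)))  # (N, num_tools) logits
        return classified, segmented

    def dice_loss(self, seg, label, eps=1e-6):
        # Soft Dice loss: 1 - 2|A∩B| / (|A| + |B|), averaged over classes;
        # label is an (N, H, W) LongTensor of class indices.
        probs = F.softmax(seg, dim=1)
        one_hot = F.one_hot(label, probs.size(1)).permute(0, 3, 1, 2).float()
        inter = (probs * one_hot).sum(dim=(0, 2, 3))
        union = probs.sum(dim=(0, 2, 3)) + one_hot.sum(dim=(0, 2, 3))
        return 1.0 - ((2.0 * inter + eps) / (union + eps)).mean()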