# Example #1 (score: 0)
def validate(val_loader, model, criterion):
    """Run one validation epoch and return averaged [loss, pixel_acc, iou].

    Args:
        val_loader: iterable yielding (input, weight_map, target) batches.
        model: network to evaluate; switched to eval mode here.
        criterion: per-pixel loss on log-probabilities (appears to use
            reduction='none' so the map can be weighted per pixel — TODO
            confirm against the training setup).

    Returns:
        results.avg: list of averaged [loss, pixel_accuracy, iou].

    NOTE(review): relies on module-level names not visible in this block:
    utils, opt, torch, F, np, measure, criterion_var, logger.
    """
    # list to store the losses and accuracies: [loss, pixel_acc, iou ]
    results = utils.AverageMeter(3)

    # switch to evaluate mode
    model.eval()

    for i, sample in enumerate(val_loader):
        input, weight_map, target = sample
        # NOTE(review): weight maps appear to be stored pre-scaled by 20 —
        # confirm against the data-preparation code.
        weight_map = weight_map.float().div(20)
        if weight_map.dim() == 4:
            weight_map = weight_map.squeeze(1)  # (B,1,H,W) -> (B,H,W)
        weight_map_var = weight_map.cuda()

        # for b in range(input.size(0)):
        #     utils.show_figures((input[b, 0, :, :].numpy(), target[b,0,:,:].numpy(), weight_map[b, :, :]))

        # Normalize 0/255 masks to 0/1.
        # NOTE(review): on integer tensors `/ 255` is true division in
        # recent PyTorch (yields float) — presumably targets are already
        # long or converted downstream; verify.
        if torch.max(target) == 255:
            target = target / 255
        if target.dim() == 4:
            target = target.squeeze(1)  # (B,1,H,W) -> (B,H,W)

        target_var = target.cuda()

        # Tiled (patch-wise) forward pass to fit large images in memory.
        size = opt.train['input_size']
        overlap = opt.train['val_overlap']
        output = utils.split_forward(model, input, size, overlap, opt.model['out_c'])

        # Weighted cross-entropy over the pixel map.
        log_prob_maps = F.log_softmax(output, dim=1)
        loss_map = criterion(log_prob_maps, target_var)
        loss_map *= weight_map_var
        loss_CE = loss_map.mean()

        # Optional variance/instance regularization term weighted by alpha.
        if opt.train['alpha'] != 0:
            prob_maps = F.softmax(output, dim=1)

            # Connected-component labeling of the foreground (==1) pixels,
            # per batch element, for the instance-aware loss.
            target_labeled = torch.zeros(target.size()).long()
            for k in range(target.size(0)):
                target_labeled[k] = torch.from_numpy(measure.label(target[k].numpy() == 1))
                # utils.show_figures((target[k].numpy(), target[k].numpy()==1, target_labeled[k].numpy()))
            loss_var = criterion_var(prob_maps, target_labeled.cuda())
            loss = loss_CE + opt.train['alpha'] * loss_var
        else:
            loss = loss_CE

        # measure accuracy and record loss
        pred = np.argmax(log_prob_maps.data.cpu().numpy(), axis=1)
        metrics = utils.accuracy_pixel_level(pred, target.numpy())
        pixel_accu = metrics[0]
        iou = metrics[1]

        results.update([loss.item(), pixel_accu, iou])

        # Free GPU memory before the next batch.
        del output, target_var, log_prob_maps, loss

    logger.info('\t=> Val Avg:   Loss {r[0]:.4f}\tPixel_Acc {r[1]:.4f}'
                '\tIoU {r[2]:.4f}'.format(r=results.avg))

    return results.avg
# Example #2 (score: 0)
def get_probmaps(input, model, opt):
    """Return per-class probability maps for one image as a numpy array.

    Performs tiled inference via utils.split_forward with the configured
    test patch size and overlap, then applies a softmax over the class
    dimension (dim 0 after the batch dim is dropped).
    """
    patch_size = opt.test['patch_size']
    patch_overlap = opt.test['overlap']

    logits = utils.split_forward(model, input, patch_size, patch_overlap,
                                 with_uncertainty=False)
    logits = logits.squeeze(0)  # drop the batch dimension
    return F.softmax(logits, dim=0).cpu().numpy()
# Example #3 (score: 0)
def get_probmaps(input, model, opt):
    """Return per-class probability maps (numpy) for a single test image.

    When the configured patch size is 0 the whole image goes through the
    model in one gradient-free pass; otherwise inference is tiled with
    utils.split_forward using the configured overlap.
    """
    patch_size = opt.test['patch_size']
    patch_overlap = opt.test['overlap']

    if patch_size == 0:
        # whole-image inference
        with torch.no_grad():
            logits = model(input.cuda())
    else:
        logits = utils.split_forward(model, input, patch_size, patch_overlap)

    logits = logits.squeeze(0)  # drop the batch dimension
    return F.softmax(logits, dim=0).cpu().numpy()
# Example #4 (score: 0)
def get_probmaps(input, model, opt):
    """Return a 2-D foreground probability map (numpy) for one image.

    Takes channel 0 of the first batch item from the model output and
    squashes it with a sigmoid — a single-logit (binary) head is assumed.
    Whole-image inference when patch size is 0, tiled otherwise.
    """
    patch_size = opt.test['patch_size']
    patch_overlap = opt.test['overlap']

    if patch_size == 0:
        with torch.no_grad():
            raw = model(input.cuda())
    else:
        raw = utils.split_forward(model, input, patch_size, patch_overlap,
                                  opt.model['out_c'])

    return torch.sigmoid(raw[0, 0, :, :]).cpu().numpy()
# Example #5 (score: 0)
def get_probmaps(input, model, opt):
    """Return the sigmoid probability map for channel 0 as a torch tensor.

    NOTE(review): unlike the sibling variants, this one returns a tensor
    still on the model's device (no .cpu().numpy()) — presumably
    intentional; verify against callers.
    """
    patch_size = opt.test['patch_size']
    patch_overlap = opt.test['overlap']

    if patch_size == 0:
        with torch.no_grad():
            raw = model(input.cuda())
    else:
        raw = utils.split_forward(model, input, patch_size, patch_overlap)

    raw = raw.squeeze(0)  # drop the batch dimension
    return torch.sigmoid(raw[0, :, :])
def get_probmaps(input, model, opt):
    """Tiled inference with uncertainty: return (mean, log-variance) maps.

    utils.split_forward is called with with_uncertainty=True and yields two
    tensors; both are batch-squeezed and returned as CPU numpy arrays.
    """
    patch_size = opt.test['patch_size']
    patch_overlap = opt.test['overlap']

    mean_map, log_var_map = utils.split_forward(model,
                                                input,
                                                patch_size,
                                                patch_overlap,
                                                with_uncertainty=True)

    return (mean_map.squeeze(0).cpu().numpy(),
            log_var_map.squeeze(0).cpu().numpy())
# Example #7 (score: 0)
def validate(val_loader, model, criterion):
    """Run one validation epoch and return averaged metrics.

    Args:
        val_loader: iterable yielding either (input, weight_map, target) or
            (input, target) batches, depending on params.train['weight_map'].
        model: network to evaluate; switched to eval mode here.
        criterion: per-pixel loss on log-probabilities. When weight maps are
            enabled it appears to use reduction='none' so the map can be
            weighted per pixel — TODO confirm against the training setup.

    Returns:
        results.avg: averaged [loss, loss_CE, loss_perceptual, iou_nuclei, iou].

    NOTE(review): relies on module-level names not visible in this block:
    utils, params, torch, F, np, vgg_model, criterion_perceptual, logger.
    """
    # list to store the losses and accuracies: [loss, pixel_acc, iou ]
    results = utils.AverageMeter(5)

    # switch to evaluate mode
    model.eval()

    for i, sample in enumerate(val_loader):
        if params.train['weight_map']:
            input, weight_map, target = sample
            # NOTE(review): weight maps appear to be stored pre-scaled by
            # 20 — confirm against the data-preparation code.
            weight_map = weight_map.float().div(20)
            if weight_map.dim() == 4:
                weight_map = weight_map.squeeze(1)  # (B,1,H,W) -> (B,H,W)
            weight_map_var = weight_map.cuda()
        else:
            input, target = sample

        # no classification
        # Collapse class labels when the model has only 3 output channels:
        # merge labels {1,2,3} into 1 and remap 4 -> 2 (in-place on target).
        if params.model['out_c'] == 3:
            target[target == 2] = 1
            target[target == 3] = 1
            target[target == 4] = 2

        # no edge or classification
        # Binary case: anything non-background becomes foreground.
        if params.model['out_c'] == 2:
            target[target > 0] = 1

        if target.dim() == 4:
            target = target.squeeze(1)  # (B,1,H,W) -> (B,H,W)

        target_var = target.cuda()

        # Tiled (patch-wise) forward pass to fit large images in memory.
        size = params.train['input_size']
        overlap = params.train['val_overlap']
        output = utils.split_forward(model, input, size, overlap,
                                     params.model['out_c'])

        log_prob_maps = F.log_softmax(output, dim=1)
        if params.train['weight_map']:
            # Weighted per-pixel CE, normalized by total pixel count
            # (B*H*W) rather than the weight sum.
            loss_map = criterion(log_prob_maps, target_var)
            loss_map *= weight_map_var
            loss_CE = loss_map.sum() / (loss_map.size(0) * loss_map.size(1) *
                                        loss_map.size(2))
        else:
            loss_CE = criterion(log_prob_maps, target_var)
        loss = loss_CE

        # Optional perceptual loss on class-4 predictions, weighted by beta.
        if params.train['beta'] != 0:
            prob_maps = F.softmax(output, dim=1)
            # Replicate the single-channel maps to 3 channels so they can be
            # fed through the (presumably ImageNet-style) VGG feature
            # extractor — TODO confirm vgg_model's expected input.
            pred_map = prob_maps[:, 4:5, :, :].repeat(1, 3, 1, 1).float()
            target_map = (target_var == 4).unsqueeze(1).repeat(1, 3, 1,
                                                               1).float()
            pred_feat = vgg_model(pred_map)
            target_feat = vgg_model(target_map)
            loss_perceptual = criterion_perceptual(pred_feat, target_feat)
            loss = loss_CE + params.train['beta'] * loss_perceptual

        # measure accuracy and record loss
        pred = np.argmax(log_prob_maps.data.cpu().numpy(), axis=1)
        iou = utils.accuracy(pred,
                             target.numpy(),
                             num_class=params.model['out_c'] - 1)
        # Binary nuclei IoU: for the 5-class head, classes 1-3 are treated
        # as nuclei; otherwise class 1 alone is the nuclei class.
        if params.model['out_c'] == 5:
            iou_nuclei = utils.accuracy(np.uint8((pred > 0) * (pred < 4)),
                                        np.uint8((target.numpy() > 0) *
                                                 (target.numpy() < 4)),
                                        num_class=2)
        else:
            iou_nuclei = utils.accuracy(np.uint8((pred == 1)),
                                        np.uint8((target.numpy() == 1)),
                                        num_class=2)

        if params.train['beta'] != 0:
            result = [
                loss.item(),
                loss_CE.item(),
                loss_perceptual.item(), iou_nuclei, iou
            ]
        else:
            # No perceptual term: record 0 in its slot.
            result = [loss.item(), loss_CE.item(), 0, iou_nuclei, iou]

        results.update(result, input.size(0))

        # Free GPU memory before the next batch.
        del output, target_var, log_prob_maps, loss

    logger.info('\t=> Val Avg:   Loss {r[0]:.4f}'
                '\tLoss_CE {r[1]:.4f}'
                '\tLoss_Per {r[2]:.4f}'
                '\tIoU-nuclei {r[3]:.4f}'
                '\tIoU {r[4]:.4f}'.format(r=results.avg))

    return results.avg