Example #1
def validate(tb, val_loader, model, loss_func, global_step):
    val_loss_epoch = []
    val_iou_extract = []

    for x_input, y_extract, y_restor in tqdm(val_loader):
        x_input = torch.FloatTensor(x_input).cuda()
        y_extract = y_extract.type(torch.FloatTensor).cuda()

        logits_restor, logits_extract = None, model(x_input)  # (restoration, extraction); restoration is unused here

        if args.added_part == "Yes":
            # train the U-Net without hole filling, on h_gt (synthetic data)
            loss = loss_func(logits_extract, logits_restor, y_extract, y_restor)
            iou_scr = iou_score(torch.round(torch.clamp(logits_extract, 0, 1).cpu()).long().numpy(),
                                y_extract.cpu().long().numpy())
        else:
            # train the U-Net on the whole image nh_gt (the default)
            loss = loss_func(logits_extract, logits_restor, y_restor, y_extract)
            iou_scr = iou_score(torch.round(torch.clamp(logits_extract, 0, 1).cpu()).long().numpy(),
                                y_restor.cpu().long().numpy())

        val_iou_extract.append(iou_scr)
        val_loss_epoch.append(loss.cpu().data.numpy())
        del loss

    tb.add_scalar('val_loss', np.mean(val_loss_epoch), global_step=global_step)

    tb.add_scalar('val_iou_extract', np.mean(val_iou_extract), global_step=global_step)
    out_grid = torchvision.utils.make_grid(logits_extract.unsqueeze(1).cpu())
    input_grid = torchvision.utils.make_grid(x_input.cpu())
    tb.add_image(tag='val_out_extract', img_tensor=out_grid, global_step=global_step)
    tb.add_image(tag='val_input', img_tensor=input_grid, global_step=global_step)
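
The validation loop above passes already-binarized integer masks to an `iou_score` helper and averages the result per epoch. As a point of reference, a minimal sketch of such a helper for batched binary masks could look like the following (an illustration only, not the project's actual `vectran.metrics` implementation; the name `iou_score_sketch` is made up here):

import numpy as np

def iou_score_sketch(pred, target, eps=1e-8):
    """Mean IoU between two binary masks of shape (batch, H, W).
    Both inputs are integer arrays with foreground pixels equal to 1."""
    pred = pred.astype(bool)
    target = target.astype(bool)
    intersection = np.logical_and(pred, target).sum(axis=(-2, -1))
    union = np.logical_or(pred, target).sum(axis=(-2, -1))
    return float(((intersection + eps) / (union + eps)).mean())
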
Example #2
def iou_raster_reference(output_vector_image: VectorImage,
                         reference_raster_image_np: np.ndarray,
                         width='mean',
                         binarization='median'):
    output_vector_image = output_vector_image.with_filled_removed()

    if (width == 'mean') or isinstance(width, (int, float)):
        if width == 'mean':
            shaded_surface_area = (
                1 - reference_raster_image_np.astype(np.float32) / 255).sum()
            total_vector_length = sum(prim.segment.length()
                                      for path in output_vector_image.paths
                                      if path.visible
                                      for prim in path)
            width_to_set = shaded_surface_area / total_vector_length
        else:
            width_to_set = width
        for path in output_vector_image.paths:
            if path.width is not None:
                path.width = Pixels(width_to_set)
    # leave as is if width is None

    output_rasterization = output_vector_image.render(render)

    if reference_raster_image_np.ndim > 2:
        reference_raster_image_np = reference_raster_image_np[..., 0]

    # undo paddings
    h, w = reference_raster_image_np.shape
    output_rasterization = output_rasterization[:h, :w]
    return iou_score(reference_raster_image_np,
                     output_rasterization,
                     binarization=binarization).item()
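
With `width='mean'`, the stroke width applied to the output paths is estimated as the total shaded area of the reference raster divided by the total length of the output primitives. A small self-contained check of that heuristic on a synthetic raster (all names and values below are illustrative only):

import numpy as np

# Synthetic reference raster: white background (255) with one dark bar of
# 3 px height and 100 px width, i.e. ~300 shaded pixels over 100 px of length.
reference = np.full((64, 128), 255, dtype=np.uint8)
reference[30:33, 10:110] = 0

shaded_surface_area = (1 - reference.astype(np.float32) / 255).sum()
total_vector_length = 100.0  # length of the single primitive tracing the bar

estimated_width = shaded_surface_area / total_vector_length
print(estimated_width)  # -> 3.0, matching the bar's true width
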
Example #3
def iou_vector_reference(output_vector_image: VectorImage,
                         reference_vector_image: VectorImage,
                         width='mean',
                         binarization='median'):
    output_vector_image = output_vector_image.with_filled_removed()
    reference_vector_image = reference_vector_image.with_filled_removed()

    if (width == 'mean') or isinstance(width, (int, float)):
        if width == 'mean':
            width_to_set = np.array([
                float(path.width.as_pixels())
                for path in reference_vector_image.paths
                if path.width is not None
            ]).mean()
        else:
            width_to_set = width
        for path in output_vector_image.paths:
            if path.width is not None:
                path.width = Pixels(width_to_set)
        for path in reference_vector_image.paths:
            if path.width is not None:
                path.width = Pixels(width_to_set)
    # leave as is if width is None

    output_rasterization = output_vector_image.render(render)
    reference_rasterization = reference_vector_image.render(render)

    # undo paddings
    h, w = reference_rasterization.shape
    output_rasterization = output_rasterization[:h, :w]
    return iou_score(reference_rasterization,
                     output_rasterization,
                     binarization=binarization).item()
def iou_score(image_true, image_pred, raster_res, **kwargs):
    """Computes the IoU metric between ground-truth and predicted images.
    Vector inputs are rasterized first (using `raster_res`).
    See `vectran.metrics.raster_metrics.iou_score` for reference."""
    raster_true = _maybe_vector_to_raster(image_true, raster_res)
    raster_pred = _maybe_vector_to_raster(image_pred, raster_res)
    return r.iou_score(raster_true, raster_pred, **kwargs)
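
Both raster comparisons above forward `binarization='median'` to the underlying `iou_score`. The grayscale rasters are presumably thresholded before the overlap is measured; the sketch below captures that idea under the assumption that 'median' means thresholding at the median pixel value with darker pixels counted as ink (a guess at the semantics, not the actual `vectran.metrics.raster_metrics` code):

import numpy as np

def iou_median_binarization_sketch(raster_true, raster_pred, eps=1e-8):
    """Threshold both grayscale rasters at the median pixel value of the
    ground truth, treat darker-than-threshold pixels as ink, and return
    the IoU of the resulting ink masks."""
    threshold = np.median(raster_true)
    true_ink = raster_true < threshold
    pred_ink = raster_pred < threshold
    intersection = np.logical_and(true_ink, pred_ink).sum()
    union = np.logical_or(true_ink, pred_ink).sum()
    return float((intersection + eps) / (union + eps))
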
def validate(tb, val_loader, unet, gen, loss_func, global_step):
    val_iou_extract = []
    val_loss_epoch = []
    val_iou_without_gan = []
    unet.eval()

    for x_input, y_extract, y_restor in tqdm(val_loader):

        with torch.no_grad():
            x_input = torch.FloatTensor(x_input).cuda()

            # Cleaning prediction
            logits_restor, logits_extract = None, unet(x_input)
            # 1 - Cleaning prediction
            logits_extract = 1 - logits_extract.unsqueeze(1)

            y_restor = 1. - y_restor.type(torch.FloatTensor).cuda().unsqueeze(1)

            # generator prediction based on cleaning prediction
            logits_restore = gen.forward(logits_extract).unsqueeze(1)  # restoration

            loss = loss_func(1 - (logits_extract + logits_restore), None,
                             1 - y_restor, None)

            val_loss_epoch.append(loss.cpu().data.numpy())
            iou_scr_without_gan = iou_score(
                1 - torch.round(logits_extract.squeeze(1)).cpu().long().numpy(),
                1 - torch.round(y_restor.squeeze(1)).cpu().long().numpy())

            val_iou_without_gan.append(iou_scr_without_gan)

            iou_scr = iou_score(
                1 - torch.round(torch.clamp(logits_extract + logits_restore,
                                            0, 1).squeeze(1).cpu()).long().numpy(),
                1 - torch.round(y_restor.squeeze(1)).cpu().long().numpy())

            val_iou_extract.append(iou_scr)

    tb.add_scalar('val_iou_extract',
                  np.mean(val_iou_extract),
                  global_step=global_step)
    tb.add_scalar('val_loss', np.mean(val_loss_epoch), global_step=global_step)
    tb.add_scalar('val_iou_without_gan',
                  np.mean(val_iou_without_gan),
                  global_step=global_step)

    out_grid = torchvision.utils.make_grid(
        1. - torch.clamp(logits_extract + logits_restore, 0, 1).cpu())
    input_grid = torchvision.utils.make_grid(1. - logits_extract.cpu())
    true_grid = torchvision.utils.make_grid(1. - y_restor.cpu())
    input_clean_grid = torchvision.utils.make_grid(x_input.cpu())

    tb.add_image(tag='val_first_input',
                 img_tensor=input_clean_grid,
                 global_step=global_step)
    tb.add_image(tag='val_out_extract',
                 img_tensor=out_grid,
                 global_step=global_step)
    tb.add_image(tag='val_input',
                 img_tensor=input_grid,
                 global_step=global_step)
    tb.add_image(tag='val_true', img_tensor=true_grid, global_step=global_step)
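
In this GAN variant, the cleaning prediction and the generator's restoration are merged with `torch.clamp(logits_extract + logits_restore, 0, 1)` before rounding, which for (near-)binary maps behaves like a pixelwise OR of the two masks. A toy demonstration of that composition (toy tensors only):

import torch

cleaning = torch.tensor([[0., 1., 0., 1.]])     # toy cleaning prediction
restoration = torch.tensor([[0., 0., 1., 1.]])  # toy restoration prediction

combined = torch.round(torch.clamp(cleaning + restoration, 0, 1))
print(combined)  # tensor([[0., 1., 1., 1.]]) -- the union of the two masks
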