Code example #1
0
 def test_dewarp_fail_color(self):
     """
     Dewarping a colour (RGB) line is expected to fail with a ValueError.
     """
     with raises(ValueError):
         with Image.open(resources / '000236.png') as im:
             rgb_line = im.convert('RGB')
             lineest.dewarp(self.lnorm, rgb_line)
Code example #2
0
 def test_dewarp_gray(self):
     """
     A grayscale line dewarps to the normalizer's target height.
     """
     with Image.open(resources / '000236.png') as im:
         gray_line = im.convert('L')
         dewarped = lineest.dewarp(self.lnorm, gray_line)
         self.assertEqual(self.lnorm.target_height, dewarped.size[1])
Code example #3
0
def generate_input_transforms(batch: int, height: int, width: int, channels: int, pad: int) -> transforms.Compose:
    """
    Builds a torchvision transform pipeline converting a PIL.Image into a
    tensor usable in a network forward pass.

    Args:
        batch (int): mini-batch size
        height (int): height of input image in pixels
        width (int): width of input image in pixels
        channels (int): color channels of input
        pad (int): Amount of padding on horizontal ends of image

    Returns:
        A torchvision transformation composition converting the input image to
        the appropriate tensor.

    Raises:
        KrakenInputException: on an unsupported height/width/channels
        combination or an invalid mode for line dewarping.
    """
    scale = 0  # type: Union[Tuple[int, int], int]
    # height folded into the channel dimension => dewarp to `channels` px
    if height == 1 and width == 0 and channels > 3:
        perm = (1, 0, 2)
        scale = channels
        mode = 'L'
    elif height > 1 and width == 0 and channels in (1, 3):
        # arbitrary (or fixed) height and width and channels 1 or 3 => needs a
        # summarizing network (or a not yet implemented scale operation) to move
        # height to the channel dimension.
        perm = (0, 1, 2)
        scale = height
        mode = 'RGB' if channels == 3 else 'L'
    elif height > 0 and width > 0 and channels in (1, 3):
        # fixed height and width image => bicubic scaling of the input image,
        # disable padding
        perm = (0, 1, 2)
        pad = 0
        scale = (height, width)
        mode = 'RGB' if channels == 3 else 'L'
    elif height == 0 and width == 0 and channels in (1, 3):
        # completely variable input => no scaling at all
        perm = (0, 1, 2)
        pad = 0
        scale = 0
        mode = 'RGB' if channels == 3 else 'L'
    else:
        raise KrakenInputException('Invalid input spec (variable height and fixed width not supported)')

    pipeline = [transforms.Lambda(lambda x: x.convert(mode))]
    if scale:
        if isinstance(scale, int):
            # integer scale => centerline dewarping, only valid on bilevel or
            # grayscale material
            if mode not in ['1', 'L']:
                raise KrakenInputException('Invalid mode {} for line dewarping'.format(mode))
            lnorm = CenterNormalizer(scale)
            pipeline.append(transforms.Lambda(lambda x: dewarp(lnorm, x)))
            pipeline.append(transforms.Lambda(lambda x: x.convert(mode)))
        elif isinstance(scale, tuple):
            pipeline.append(transforms.Resize(scale, Image.LANCZOS))
    if pad:
        pipeline.append(transforms.Pad((pad, 0), fill=255))
    pipeline.append(transforms.ToTensor())
    # invert
    pipeline.append(transforms.Lambda(lambda x: x.max() - x))
    pipeline.append(transforms.Lambda(lambda x: x.permute(*perm)))
    return transforms.Compose(pipeline)
Code example #4
0
 def test_dewarp_bw_undewarpable(self):
     """
     A line that cannot be dewarped still comes back at the target height.
     """
     line_path = (resources /
                  'ONB_ibn_19110701_010.tif_line_1548924556947_449.png')
     with Image.open(line_path) as im:
         dewarped = lineest.dewarp(self.lnorm, im)
         self.assertEqual(self.lnorm.target_height, dewarped.size[1])
Code example #5
0
def generate_input_transforms(batch: int,
                              height: int,
                              width: int,
                              channels: int,
                              pad: int,
                              valid_norm: bool = True,
                              force_binarization=False) -> transforms.Compose:
    """
    Generates a torchvision transformation converting a PIL.Image into a
    tensor usable in a network forward pass.

    Args:
        batch (int): mini-batch size
        height (int): height of input image in pixels
        width (int): width of input image in pixels
        channels (int): color channels of input
        pad (int): Amount of padding on horizontal ends of image
        valid_norm (bool): Enables/disables baseline normalization as a valid
                           preprocessing step. If disabled we will fall back to
                           standard scaling.
        force_binarization (bool): Forces binarization of input images using
                                   the nlbin algorithm.

    Returns:
        A torchvision transformation composition converting the input image to
        the appropriate tensor.

    Raises:
        KrakenInputException: on an unsupported height/width/channels
        combination, or when force_binarization is requested for non-grayscale
        input.
    """
    scale = (height, width)  # type: Tuple[int, int]
    center_norm = False
    mode = 'RGB' if channels == 3 else 'L'
    if height == 1 and width == 0 and channels > 3:
        # line height packed into the channel dimension: permute it back and
        # (when allowed) dewarp to `channels` pixels.
        perm = (1, 0, 2)
        scale = (channels, 0)
        if valid_norm:
            center_norm = True
        mode = 'L'
    elif height > 1 and width == 0 and channels in (1, 3):
        perm = (0, 1, 2)
        # baseline normalization only makes sense on single-channel input
        if valid_norm and channels == 1:
            center_norm = True
    elif height == 0 and width > 1 and channels in (1, 3):
        perm = (0, 1, 2)
    # fixed height and width image => bicubic scaling of the input image, disable padding
    elif height > 0 and width > 0 and channels in (1, 3):
        perm = (0, 1, 2)
        pad = 0
    elif height == 0 and width == 0 and channels in (1, 3):
        # completely variable input => no scaling, no padding
        perm = (0, 1, 2)
        pad = 0
    else:
        raise KrakenInputException(
            'Invalid input spec {}, {}, {}, {}, {}'.format(
                batch, height, width, channels, pad))
    if mode != 'L' and force_binarization:
        # FIX: the format string previously had only four placeholders for
        # five arguments, silently dropping `pad` from the message.
        raise KrakenInputException(
            'Invalid input spec {}, {}, {}, {}, {} in'
            ' combination with forced binarization.'.format(
                batch, height, width, channels, pad))

    out_transforms = []
    out_transforms.append(transforms.Lambda(lambda x: x.convert(mode)))

    if force_binarization:
        # FIX: the lambda previously called nlbin(im) where `im` is undefined
        # in this scope, raising NameError as soon as the transform ran.
        out_transforms.append(transforms.Lambda(lambda x: nlbin(x)))
    # dummy transforms to ensure we can determine color mode of input material
    # from first two transforms. It's stupid but it works.
    out_transforms.append(transforms.Lambda(lambda x: x))
    if scale != (0, 0):
        if center_norm:
            lnorm = CenterNormalizer(scale[0])
            out_transforms.append(
                transforms.Lambda(lambda x: dewarp(lnorm, x)))
            out_transforms.append(transforms.Lambda(lambda x: x.convert(mode)))
        else:
            out_transforms.append(
                transforms.Lambda(
                    lambda x: _fixed_resize(x, scale, Image.LANCZOS)))
    if pad:
        out_transforms.append(transforms.Pad((pad, 0), fill=255))
    out_transforms.append(transforms.ToTensor())
    # invert
    out_transforms.append(transforms.Lambda(lambda x: x.max() - x))
    out_transforms.append(transforms.Lambda(lambda x: x.permute(*perm)))
    return transforms.Compose(out_transforms)
Code example #6
0
def pil_dewarp(im: Image.Image, lnorm: CenterNormalizer) -> Image.Image:
    """Dewarp the line image *im* with the normalizer *lnorm*."""
    dewarped = dewarp(lnorm, im)
    return dewarped