def search_with_value(page: page_dat, chars: [char_dat], mult: float, max_x: int, max_y: int) -> wobble:
    """Grid-search x/y scale multipliers near *mult* that minimize the
    histogram difference between each reference char image and the
    corresponding crop of the page.

    Args:
        page: page_dat with .x/.y pixel bounds and .img (a PIL image).
        chars: char_dat items with coords (.x1/.y1/.x2/.y2) and .img.
        mult: base multiplier; the search sweeps (mult, mult * 1.1] in 10 steps.
        max_x, max_y: maximum source coordinates, used to keep the scaled
            crop inside the page bounds.

    Returns:
        wobble(bestx, besty, best_diff_sum); bestx/besty stay None when no
        candidate multiplier fit inside the page.
    """
    # float('inf') replaces the previous huge-integer sentinel: any real
    # histogram sum compares below it.
    best_diff_sum = float('inf')
    max_wobble = mult * 1.1
    wobinc = (max_wobble - mult) / 10
    curx = mult
    bestx = None
    besty = None

    while curx < max_wobble and int(max_x * (curx + wobinc)) < page.x:
        curx += wobinc
        # Bug fix: cury must restart from mult for every curx; previously it
        # kept its final value, so the inner sweep only ran on the first
        # outer iteration and all other (curx, cury) pairs were skipped.
        cury = mult
        while cury < max_wobble and int(max_y * (cury + wobinc)) < page.y:
            cury += wobinc
            histodiff_sum = 0
            for char in chars:
                x1 = int(curx * char.x1)
                y1 = int(cury * char.y1)
                x2 = int(curx * char.x2)
                y2 = int(cury * char.y2)
                # NOTE(review): PIL crop expects (left, upper, right, lower);
                # the (y1, x1, y2, x2) order implies char .x* coordinates are
                # rows — confirm against char_dat's coordinate convention.
                page_char = page.img.crop([y1, x1, y2, x2])
                page_charr = autocontrast(page_char.resize([char.img.width, char.img.height], Image.BOX))
                page_diff = difference(page_charr, char.img).histogram()
                # Weight each histogram bucket by its 1-based index so larger
                # pixel differences count more heavily.
                histo_sum = sum(count * (idx + 1) for idx, count in enumerate(page_diff))
                histodiff_sum += histo_sum
            if histodiff_sum < best_diff_sum:
                best_diff_sum = histodiff_sum
                bestx = curx
                besty = cury
    return wobble(bestx, besty, best_diff_sum)
    def apply_annotations(self, images):
        """
        Apply annotations to images returning a numpy.ndarray

        Args:
            images (3D NumPy array): Images that will be annotated

        Returns:
            RGB numpy array or None: Annotated images, if successfully applied;
            None when annotation fails (the error is logged, not raised)
        """
        # Create output array with shape of input, plus 3 channels for RGB
        output_images = np.zeros((*images.shape, 3), dtype="uint8")
        try:
            if not self._saved_annotations:
                # Bug fix: the original message was missing the word "applied"
                # ("Annotations cannot be as no annotations ...").
                raise AttributeError(
                    "Annotations cannot be applied as no annotations were "
                    "successfully extracted"
                )
            # Flip vertically so image orientation matches the annotations.
            images = np.flip(images, 1)
            for idx, image in enumerate(images):
                # Make 'L' type PIL image from 2D array, autocontrast, then
                # convert to RGBA so alpha compositing is possible
                image = autocontrast(
                    Image.fromarray(image).convert("L")).convert("RGBA")
                # Combine annotations and image by alpha
                image.alpha_composite(self._annotations)
                # Throw away alpha and fill into output array
                output_images[idx] = image.convert("RGB")
            return output_images
        except Exception:
            # Best-effort: log and fall through, implicitly returning None.
            logging.error("Failed to apply annotations", exc_info=True)
# Example 3
 def _auto_contrast(img, cutoff=0):
     """Apply PIL autocontrast to a BGR uint8 array, returning BGR."""
     from PIL import Image
     from PIL.ImageOps import autocontrast

     # Image.fromarray expects RGB, so reverse the channel axis on the way
     # in and again on the way out.
     rgb = Image.fromarray(img[..., ::-1], mode='RGB')
     stretched = autocontrast(rgb, cutoff)
     return np.asarray(stretched)[..., ::-1]
def correct_coords(apps, page: Page, chars: 'Iterable[Character]') -> None:
    """Build page/char wrappers from the DB models and start the multiplier search.

    Args:
        apps: Django app registry; used to look up the PageMultiplier model.
        page: Page model with .image (path-like), .image_width, .image_length.
        chars: iterable of Character models whose .image path embeds the
            bounding box as '...(x1,y1,x2,y2)...'. (Annotation fixed: the
            original declared a single Character, but the value is iterated.)
    """
    PageMultiplier = apps.get_model('calligraphy', 'PageMultiplier')
    print(str(page.image))  # debug trace of the page image path
    p_img = grayscale(Image.open(str(page.image)))
    mypage = page_dat(page.image_width, page.image_length, p_img, page)
    mychars = []
    for char in chars:
        # Coordinates are encoded in the filename between parentheses.
        coords = str(char.image).split('(')[1].split(')')[0].split(',')
        charimg = autocontrast(grayscale(Image.open(str(char.image))))
        mychars.append(char_dat(coords, charimg, char))
    find_search_space(PageMultiplier, mypage, mychars)
# Example 5
def cleaned_contour(image, contrast_cutoff_percent=5, contrast_threshold=200):
    """Return a thresholded, inverted, contrast-stretched contour map of *image*.

    Pixels below *contrast_threshold* and the one-pixel border are zeroed.
    """
    inverted = ImageOps.invert(image.filter(ImageFilter.CONTOUR).convert("L"))
    stretched = autocontrast(inverted, contrast_cutoff_percent)
    contours = array(stretched)

    # Suppress weak edges below the threshold.
    contours[contours < contrast_threshold] = 0

    # Zero the outermost border so the frame itself never reads as a contour.
    contours[0, :] = 0
    contours[-1, :] = 0
    contours[:, 0] = 0
    contours[:, -1] = 0
    return contours
# Example 6
def transform(image):
    """Grayscale + autocontrast *image*, resize to height 127 keeping aspect
    ratio, and return a float32 array of shape (1, 1, H, W) scaled to [0, 1]."""
    grey = autocontrast(image.convert('L'))

    target_h = 127
    w, h = grey.size
    target_w = int(target_h * w / h)
    resized = grey.resize((target_w, target_h), Image.BILINEAR)

    # (H, W) -> (1, 1, H, W): insert channel then batch axes up front.
    arr = np.array(resized)[None, None, :, :]
    return arr.astype(np.float32, copy=False) / 255.0
 def diff(a, b):
     """Return the raw frame difference and its autocontrast-normalized twin."""
     raw = difference(a.image, b.image)
     normalized = autocontrast(raw)
     return (VideoFrame(raw), VideoFrame(normalized))
# Example 8
 def _prepare(self):
     """Binarize the source image: grayscale, autocontrast with a 10% cutoff,
     contrast-enhance, then map every pixel through self._digitize.

     NOTE(review): `contrast` is a free variable here (neither a parameter
     nor an attribute) — presumably a module-level setting; confirm it is
     defined where this class lives.
     """
     bwimg = self.orig.convert("L")
     return Contrast(autocontrast(bwimg, 10)).enhance(contrast).point(
         self._digitize)
import os
import subprocess
import sys
from os.path import dirname
from os.path import join as pjoin

from ConfigParser import ConfigParser

import numpy as np

# PIL
import Image
from PIL.ImageOps import autocontrast

# Location of files
cfg_file = pjoin(dirname(__file__), 'vinyl.ini')
cfg = ConfigParser()
cfg.read(cfg_file)
img_dir = cfg.get('storage', 'art')

# Crop window (rows taken from X, columns from Y) and autocontrast cutoff %.
Y = (315, 1515)
X = (58, 1260)
PCT_DROP = 5

# Autocontrast + crop every image in img_dir, saving as 'c_<name>'.
for froot in os.listdir(img_dir):
    src = pjoin(img_dir, froot)
    dst = pjoin(img_dir, 'c_' + froot)
    stretched = autocontrast(Image.open(src), PCT_DROP)
    cropped = np.array(stretched)[X[0]:X[1], Y[0]:Y[1]]
    Image.fromarray(cropped).save(dst)

# Example 10
 def diff(a, b):
     """Return the raw frame difference and its autocontrast-normalized twin."""
     delta = difference(a.image, b.image)
     return (VideoFrame(delta), VideoFrame(autocontrast(delta)))