Example No. 1
def create_stroke_mask(alpha, setting):
    """
    Create a mask image for the given alpha image.

    TODO: MaxFilter is square, but the desired region is a circle.
    """
    from PIL import ImageFilter, ImageChops, ImageMath

    mask = ImageMath.eval('255 * (x > 0)', x=alpha).convert('L')
    edge = alpha.filter(ImageFilter.FIND_EDGES)
    size = int(setting.get(Key.SizeKey))
    style = setting.get(Key.Style).enum
    odd_size = 2 * int(size / 2.) + 1
    if style == Enum.OutsetFrame:
        result = ImageChops.subtract(
            edge.filter(ImageFilter.MaxFilter(2 * size + 1)), mask)
    elif style == Enum.InsetFrame:
        result = ImageChops.subtract(
            edge.filter(ImageFilter.MaxFilter(2 * size - 1)),
            ImageChops.invert(mask))
    else:
        result = edge.filter(ImageFilter.MaxFilter(odd_size))

    inverse_alpha = ImageChops.darker(ImageChops.invert(alpha), mask)
    return ImageChops.lighter(result, inverse_alpha)
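For context, the OutsetFrame branch above amounts to dilating the alpha edge with MaxFilter and subtracting the filled interior. A minimal standalone sketch of that idea, without the psd-tools setting/Key/Enum objects ('sprite.png' is a placeholder file name):

from PIL import Image, ImageChops, ImageFilter

alpha = Image.open('sprite.png').convert('RGBA').getchannel('A')
mask = alpha.point(lambda x: 255 if x > 0 else 0)   # filled interior
edge = alpha.filter(ImageFilter.FIND_EDGES)
size = 4                                            # stroke width in pixels
outset = ImageChops.subtract(
    edge.filter(ImageFilter.MaxFilter(2 * size + 1)), mask)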
Example No. 2
def simpleTrim(im):
    # Sample the background colour from the top-left pixel
    r, g, b = im.getpixel((0, 0))
    bgcolor = (r, g, b)
    bg = Image.new(im.mode, im.size, bgcolor)
    offsetAdd = Image.new(im.mode, im.size, (3, 3, 3))
    diff = ImageChops.difference(im.filter(ImageFilter.MaxFilter(5)), bg)
    # Drop small differences, binarise, and dilate the remaining content
    diff = ImageChops.subtract(diff, offsetAdd).convert("1").filter(
        ImageFilter.MaxFilter(5))
    bbox = diff.getbbox()
    if bbox:
        im = im.crop(bbox)
    return im
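A quick way to exercise simpleTrim; the imports and the 'scan.png' path are assumptions, since the snippet above expects them to be in scope:

from PIL import Image, ImageChops, ImageFilter

im = Image.open('scan.png').convert('RGB')
trimmed = simpleTrim(im)
print(im.size, '->', trimmed.size)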
Example No. 3
def number(url):

    response = requests.get(url)
    img_original = Image.open(BytesIO(response.content))

    # left, up, right, bottom
    borders = [
        (0, 550, 2260, 1350),
        (2300, 400, 2900, 680),
    ]

    img = img_original.crop(borders[0])
    img = img.resize((150, 52), PIL.Image.LANCZOS)  # ratio ≈ 0.35
    img = img.filter(ImageFilter.MaxFilter(3))
    img = img.filter(ImageFilter.MinFilter(3))
    img = img.getchannel('R')
    img_badania = img.point(lambda x: 0 if x < 140 else 255)
    # img_badania.show()

    # Negative results
    img = img_original.crop(borders[1])
    img = img.resize((150, 70), PIL.Image.LANCZOS)
    img = img.filter(ImageFilter.MaxFilter(3))
    img = img.filter(ImageFilter.MinFilter(3))
    img = img.point(lambda x: 0 if x < 200 else 255)
    img_negatywne = img.getchannel('R')
    # img_negatywne.show()

    res_badania = pytesseract.image_to_string(img_badania,
                                              lang='eng',
                                              config='--psm 6 --oem 3')

    try:
        res_badania = int(res_badania)
    except ValueError:
        # raise BaniaOCRException(f'OCR returned a non-digit value while processing BADANIA: {res_badania}')
        return None, None

    res_negatywne = pytesseract.image_to_string(img_negatywne,
                                                lang='eng',
                                                config='--psm 6 --oem 3')

    try:
        res_negatywne = int(res_negatywne)
    except ValueError:
        # raise NegatywneOCRException(f'OCR returned a non-digit value while processing NEGATYWNE: {res_negatywne}')
        return None, None

    return (res_badania, res_negatywne)
Example No. 4
 def max_filter(self, kernel):  # apply the MAX filter
     im = img.open(self.filename)
     image = im.filter(ImageFilter.MaxFilter(kernel))
     new_file_name = "./images/temporarias/" + os.path.basename(
         self.filename)
     image.save(new_file_name)
     return new_file_name
Example No. 5
def url_to_img(dataURL):
    string = str(dataURL)
    comma = string.find(",")
    code = string[comma + 1:]
    decoded = b64decode(code)
    buf = BytesIO(decoded)
    img = Image.open(buf)

    converted = img.convert("LA")
    la = np.array(converted)
    la[la[..., -1] == 0] = [255, 255]
    whiteBG = Image.fromarray(la)

    converted = whiteBG.convert("L")
    inverted = ImageOps.invert(converted)

    bounding_box = inverted.getbbox()
    padded_box = tuple(map(lambda i,j: i+j, bounding_box, (-5, -5, 5, 5)))
    cropped = inverted.crop(padded_box)

    thick = cropped.filter(ImageFilter.MaxFilter(5))

    ratio = 48.0 / max(thick.size)
    new_size = tuple([int(round(x*ratio)) for x in thick.size])
    res = thick.resize(new_size, Image.LANCZOS)

    arr = np.asarray(res)
    com = ndimage.center_of_mass(arr)  # the measurements submodule is deprecated in SciPy
    result = Image.new("L", (64, 64))
    box = (int(round(32.0 - com[1])), int(round(32.0 - com[0])))
    result.paste(res, box)
    return result
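A round-trip smoke test for url_to_img, building a tiny data URL with Pillow itself; it assumes the numpy/scipy/PIL imports the function relies on are already in scope, and the 10x10 test image is arbitrary:

from base64 import b64encode
from io import BytesIO
from PIL import Image

buf = BytesIO()
Image.new('RGBA', (10, 10), (0, 0, 0, 255)).save(buf, format='PNG')
data_url = 'data:image/png;base64,' + b64encode(buf.getvalue()).decode()
print(url_to_img(data_url).size)  # expected: (64, 64)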
Example No. 6
    def add_random_alpha_variation(self, image: Image.Image) -> Image.Image:
        """
        Randomly changes the alpha value of the image by applying a blur and max filter to a randomized array
        and setting the result array as new alpha channel
        
        # Arguments
            image: Image. the image to augment
            
        # Returns
            Image. the augmented image
        """

        if image.mode == "L":  # skip mask images
            return image

        img_array = np.array(image.convert('RGBA'))
        # use uint8 so Image.fromarray(..., mode='L') reads the buffer correctly
        rand_alpha_array = np.random.randint(0, 255, img_array.shape[:2], dtype=np.uint8)

        rand_alpha_img = Image.fromarray(rand_alpha_array, mode='L')
        rand_alpha_img = rand_alpha_img.filter(ImageFilter.MaxFilter(self.max_filter_size))\
            .filter(ImageFilter.BoxBlur(self.blur_size))

        new_alpha_array = np.rint(img_array[:, :, 3] *
                                  (np.asarray(rand_alpha_img) / 255))
        img_array[:, :, 3] = new_alpha_array

        return Image.fromarray(img_array, mode='RGBA')
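A minimal harness for the method above, calling it as a plain function with a SimpleNamespace standing in for the real augmenter instance; the max_filter_size and blur_size values are assumptions, and the snippet's own numpy/PIL imports are assumed to be in scope:

from types import SimpleNamespace
from PIL import Image

aug = SimpleNamespace(max_filter_size=9, blur_size=3)
out = add_random_alpha_variation(aug, Image.new('RGB', (64, 64), 'red'))
print(out.mode)  # 'RGBA', with a smoothly varying alpha channel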
Example No. 7
def create_mask_file(file_path):
    file_name = get_filename(file_path)

    if file_name.startswith("mask"):
        return

    dest_file_name = file_name[:-4]
    while not dest_file_name[-1:].isdigit():
        #print(dest_file_name)
        dest_file_name = dest_file_name[:-1]

    destination_file = os.path.join(output_dir,
                                    "mask_" + dest_file_name + ".png")

    print(file_path + " -> " + destination_file)

    if dry_run:
        return

    img = Image.open(file_path).convert('L')

    img = img.point(lambda x: 0 if x < 255 else 255, '1')
    img = img.filter(ImageFilter.MaxFilter(3))  # keep the dilated mask; filter() returns a new image

    img.save(destination_file)
Example No. 8
 def get_text(self, img_url):
     """
     Recognize the captcha content.
     :param img_url:
     :return:
     """
     li = list()
     ImageEnhance.Contrast(Image.open(img_url)).enhance(1.3).save(img_url)
     imgs = Image.open(img_url)
     # filter() and convert() return new images, so keep the results
     imgs = imgs.filter(ImageFilter.BLUR).filter(ImageFilter.MaxFilter(23))
     imgs = imgs.convert('L')
     x_width, y_height = imgs.size
     width = x_width / 4
     height = (y_height - 30) / 2
     for x_ in range(0, 2):
         for y_ in range(0, 4):
             left = y_ * width
             right = (y_ + 1) * width
             index = x_ * 4 + y_
             box = (left, x_ * height + 30, right, (x_ + 1) * height + 30)
             file_num = str(x_) + str(y_)
             imgs.crop(box).save(self.base_img_url + "/" + self.pic_base +
                                 file_num + self.pic_ext)
             # Upload the image and get the query URL
             query_url = self.__upload_pic__(file_num)
             context = self.__get_query_content__(query_url)
             # The official 12306 captcha is validated by coordinate ranges, so we take the (approximate) centre of each tile
             # Save the coordinates
             li.append({CODE_POSITION[index]: context})
     return li
Example No. 9
def random_mask_perturbation(mask, verbose=False):

    degrees = random.uniform(-10, 10)
    translate_h = random.uniform(-0.1, 0.1)
    translate_v = random.uniform(-0.1, 0.1)
    scale = random.uniform(0.8, 1.2)
    shear = random.uniform(-10, 10)
    resample = Image.BICUBIC

    mask = TF.affine(mask, degrees, (translate_h, translate_v), scale, shear, resample)

    if verbose:
        print('Mask perturbation degrees: %.1f, T_h: %.1f, T_v: %.1f, Scale: %.1f, Shear: %.1f' % \
            (degrees, translate_h, translate_v, scale, shear))

    morphologic_times = int(random.random() * 10)
    morphologic_thres = random.random()
    filter_size = 7
    for i in range(morphologic_times):
        if random.random() < morphologic_thres:
            mask = mask.filter(ImageFilter.MinFilter(filter_size))
            if verbose:
                print(i, 'erosion')
        else:
            mask = mask.filter(ImageFilter.MaxFilter(filter_size))
            if verbose:
                print(i, 'dilation')

    mask = mask.convert('1')

    return mask
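A smoke test with a synthetic square mask; it assumes the snippet's own imports (TF as torchvision.transforms.functional, Image, ImageFilter, random) are in scope:

from PIL import Image, ImageDraw

mask = Image.new('L', (128, 128), 0)
ImageDraw.Draw(mask).rectangle([32, 32, 96, 96], fill=255)
perturbed = random_mask_perturbation(mask, verbose=True)
print(perturbed.mode, perturbed.size)  # '1' (128, 128)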
Example No. 10
def stroke_image(img_, stroke_size=1, stroke_color=(255,255,255)):
    import numpy
    from PIL import Image, ImageFilter
    from .utilities import center_rect, fitAspectRatio

    r, g, b, a = img_.split()
    cont_size = tuple(numpy.array(img_.size) + (stroke_size * 2))

    center = center_rect(img_.size, cont_size)
    #print center

    mask = Image.new("L", cont_size, 0)
    mask.paste(a, center)
    #mask = mask.filter(ImageFilter.BoxBlur(stroke_size))
    for i in range(0, stroke_size):
        mask = mask.filter(ImageFilter.MaxFilter(3))
    #mask = mask.filter(ImageFilter.GaussianBlur(stroke_size))
    #mask = mask.filter(ImageFilter.EDGE_ENHANCE_MORE)
    #mask = mask.filter(ImageFilter.MaxFilter(stroke_size+1))
    def intensity(int):
        if int > 0:
            return 255
        else:
            return 0
    #mask = mask.point(intensity)

    #mask.show()

    backdrop = Image.new('RGB', cont_size, stroke_color)
    backdrop.paste(img_, center, img_)
    #backdrop.format = 'PNG'
    backdrop.putalpha(mask)
    return backdrop
Example No. 11
def simple_highlight(img1, img2, opts):
    """Try to align the two images to minimize pixel differences.

    Produces two masks for img1 and img2.

    The algorithm works by comparing every possible alignment of the images,
    finding the alignment that minimizes the differences, and then smoothing
    it a bit to reduce spurious matches in areas that are perceptibly
    different (e.g. text).
    """

    try:
        diff, ((x1, y1), (x2, y2)) = best_diff(img1, img2, opts)
    except KeyboardInterrupt:
        return None, None
    diff = diff.filter(ImageFilter.MaxFilter(9))
    diff = tweak_diff(diff, opts.opacity)
    # If the images have different sizes, the areas outside the alignment
    # zone are considered to be dissimilar -- filling them with 0xff.
    # Perhaps it would be better to compare those bits with bars of solid
    # color, filled with opts.bgcolor?
    mask1 = Image.new('L', img1.size, 0xff)
    mask2 = Image.new('L', img2.size, 0xff)
    mask1.paste(diff, (x1, y1))
    mask2.paste(diff, (x2, y2))
    return mask1, mask2
Example No. 12
def dilate(image, radius, iterations=1):
    """Apply dilation to an image.

    Parameters
    ----------
    image : :class:`numpy.ndarray` or :class:`Image.Image`
        The image object.
    radius : :class:`int`
        The number of pixels to include in each direction. For example, if
        `radius`=1 then use 1 pixel in each direction from the central pixel,
        i.e., 9 pixels in total.
    iterations : :class:`int`, optional
        The number of times to apply dilation.

    Returns
    -------
    The `image` with dilation applied.
    """
    if radius is None or radius < 1 or iterations < 1:
        return image
    size = 2 * radius + 1
    if isinstance(image, np.ndarray):
        kernel = np.ones((size, size), dtype=np.uint8)
        return cv2.dilate(image, kernel, iterations=iterations)
    elif isinstance(image, Image.Image):
        for i in range(iterations):
            image = image.filter(ImageFilter.MaxFilter(size=size))
        return image
    else:
        raise TypeError('Expect a PIL or OpenCV image')
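A quick check of both branches, assuming cv2, numpy (as np) and PIL are importable as the function expects; the 20x20 test array is arbitrary:

import numpy as np
from PIL import Image

arr = np.zeros((20, 20), dtype=np.uint8)
arr[10, 10] = 255
print(int(dilate(arr, radius=1).sum()))                               # ndarray branch: 9 * 255
print(int(np.asarray(dilate(Image.fromarray(arr), radius=1)).sum()))  # PIL branch: same result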
Example No. 13
def get_text(im):

    lab_im = rgb_to_lab(im)

    lab_im = rescale(lab_im)
    l, a, b = lab_im.split()

    # Convert to numpy array and apply the threshold to remove lines
    np_a = np.array(a)
    np_a = threshold(np_a, 180, 0, 255)

    # Invert the image: we want black text on a white background
    np_a = 255 - np_a

    a = Image.fromarray(np_a)

    # Expand image to close up "gaps" in letters, shrink to
    # stop letters running together
    a_filtered = a.filter(ImageFilter.MinFilter(11))
    a_filtered = a_filtered.filter(ImageFilter.MaxFilter(5))

    # It's useful to save this pre-OCR step to identify issues
    a_filtered.save('filtered.png')

    # Run OCR and get the result
    result = pytesseract.image_to_string(a_filtered)
    result = apply_corrections(result)

    return result
Example No. 14
def process(pimg):
    emptythres = Pref.emptythres
    depth = Pref.dilation_max_depth
    radius = Pref.emptyrange

    emptythres8 = round(emptythres * 255)
    img = pimg

    # Build a stack of progressively dilated copies of the image
    bufs = []
    buf = img.copy()
    for i in range(depth):
        bufs.append(buf.copy())
        buf = buf.filter(ImageFilter.MaxFilter())

    # Lookup table: values below the empty threshold map to 0, the rest are
    # rescaled linearly to 0-255 (repeated once per band for point())
    _gen = lambda x: ((x - emptythres8) /
                      (255 - emptythres8)) * 255 if x >= emptythres8 else 0
    convert_table = [_gen(i) for i in range(256)] * len(img.getbands())

    # Composite from the most dilated copy back toward the original, using the
    # blurred, thresholded intermediate as the paste mask
    bufs.reverse()
    out_buf = bufs[0].copy()
    for buf in bufs[1:]:
        blurred_img = out_buf.filter(ImageFilter.GaussianBlur(radius=radius))
        mask = blurred_img.point(convert_table)
        out_buf.paste(buf, mask=mask)

    return out_buf
Example No. 15
    def load_item(self, index):
        # load image
        img, _ = self.data[index]
        img_name = "{:05d}.png".format(5)

        # load mask
        m = np.zeros((self.h, self.w)).astype(np.uint8)
        x1 = random.randint(5, 7)
        # w1 = random.randint(20, 34)
        w1 = random.randint(20, 50)
        y1 = random.randint(5, 7)
        h1 = random.randint(20, 50)

        m[x1: w1, y1: h1] = 255
        # m[16: 48, 16: 48] = 255

        mask = Image.fromarray(m).convert('L')
        # augment
        if self.split == 'train':
            img = transforms.RandomHorizontalFlip()(img)
            img = transforms.ColorJitter(0.05, 0.05, 0.05, 0.05)(img)
            mask = transforms.RandomHorizontalFlip()(mask)
            mask = mask.rotate(random.randint(0, 45), expand=True)
            mask = mask.filter(ImageFilter.MaxFilter(3))
        img = img.resize((self.w, self.h))
        mask = mask.resize((self.w, self.h), Image.NEAREST)
        return F.to_tensor(img) * 2 - 1., F.to_tensor(mask), img_name
Example No. 16
def write_contour(draw, pos, text, font, color=0, contour_color=255):
    mask = Image.new("L", draw.im.size, 0)
    mask_draw = ImageDraw.Draw(mask)
    write_text(mask_draw, pos, text, font, 255)
    mask = mask.filter(ImageFilter.MaxFilter(5))
    draw.bitmap((0, 0), mask, fill=contour_color)
    write_text(draw, pos, text, font, color)
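write_text is not shown above; a hypothetical one-line wrapper plus a usage sketch (the default font and output file name are assumptions):

from PIL import Image, ImageDraw, ImageFilter, ImageFont

def write_text(draw, pos, text, font, color):
    draw.text(pos, text, fill=color, font=font)

canvas = Image.new('L', (220, 60), 128)
write_contour(ImageDraw.Draw(canvas), (10, 10), 'outlined',
              ImageFont.load_default(), color=0, contour_color=255)
canvas.save('contour.png')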
Example No. 17
def maximum(image, radius, amount=100):
    """Apply a filter
    - amount: 0-1"""
    image = imtools.convert_safe_mode(image)
    maximumed = image.filter(ImageFilter.MaxFilter(radius))
    if amount < 100:
        return imtools.blend(image, maximumed, amount / 100.0)
    return maximumed
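Typical use, assuming the imtools helpers referenced above are importable and 'photo.jpg' stands in for a real file:

from PIL import Image

dilated = maximum(Image.open('photo.jpg'), radius=5, amount=60)
dilated.save('photo_max.jpg')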
Example No. 18
 def button4(self):  # line-art extraction
     gray = img.convert("L")
     gray2 = gray.filter(ImageFilter.MaxFilter(5))
     senga_inv = ImageChops.difference(gray2, gray)
     senga = ImageOps.invert(senga_inv)
     img1 = ImageQt.ImageQt(senga)
     Qting = img1.copy()
     pm4 = QPixmap.fromImage(Qting)
     label.setPixmap(pm4)
Example No. 19
def Maximum():
    img = cv2.cvtColor(cv2.imread(file_path), cv2.COLOR_BGR2RGB)
    img = Image.fromarray(img)
    img = img.filter(ImageFilter.MaxFilter())
    img = np.array(img, dtype=np.uint8)
    img = cv2.resize(img, (390, 370), interpolation=cv2.INTER_AREA)
    image_tk = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(img))
    canvas.create_image(0, 0, anchor=NW, image=image_tk)
    root.mainloop()
Example No. 20
def recognize():
    im = Image.open('f**k.png')
    im = im.crop((360, 290, 440, 310))
    width, height = im.size
    im = im.resize((width * 2, height * 2))
    im = im.filter(ImageFilter.MinFilter(3))
    enhancer = ImageEnhance.Contrast(im)
    im = enhancer.enhance(20)
    im = im.convert('1')
    im = im.filter(ImageFilter.MaxFilter(3))
    #im.show()
    ocr_result = pytesseract.image_to_string(
        im, config="-c tessedit_char_whitelist=0123456789+-=?")
    ocr_result = ocr_result.rstrip('-7').rstrip('=?').strip('-').strip(' ')

    print('raw: ', ocr_result)

    if ocr_result[0] == '0':
        # strings are immutable; rebuild instead of item assignment
        ocr_result = '9' + ocr_result[1:]

    if ocr_result.find('-') != -1:
        a = clean_operator(ocr_result.split('-')[0])
        b = clean_operator(ocr_result.split('-')[-1])
        if a == 'wtf' or b == 'wtf':
            return 'wtf'
        return (a - b)
    if ocr_result.find('+') != -1:
        a = clean_operator(ocr_result.split('+')[0])
        b = clean_operator(ocr_result.split('+')[-1])
        if a == 'wtf' or b == 'wtf':
            return 'wtf'
        return (a + b)

    if ocr_result.find(' ') != -1:
        a = ocr_result.split(' ')[0]
        b = ocr_result.split(' ')[-1]
        if len(b) == 2:
            if b[0] == '7':
                a = clean_operator(a)
                b = clean_operator(b)
                if a == 'wtf' or b == 'wtf':
                    return 'wtf'
                return (a - b)
            else:
                a = clean_operator(a)
                b = clean_operator(b)
                if a == 'wtf' or b == 'wtf':
                    return 'wtf'
                return (a + b)
        else:
            a = clean_operator(a)
            b = clean_operator(b)
            if a == 'wtf' or b == 'wtf':
                return 'wtf'
            return (a - b)

    return 'wtf'
Example No. 21
def dilateImage(image, factor):
    # kernel = [0,0,0,
    #           0,0,0,
    #           0,0,0]
    # NOTE: `factor` is currently unused; the kernel size is fixed at 5
    dilation = image.filter(ImageFilter.MaxFilter(5))
    #dilation = image.filter(ImageFilter.Kernel((3,3), kernel, scale=0.5))
    #dilatedImage = image.resize((int(image.width * factor), int(image.height * factor)))

    return dilation