def get_map(image: Image.Image):
    """Use feature matching to determine match and map type"""
    path = os.path.join(get_temp_directory(), "temp.png")
    map_directory = os.path.join(get_assets_directory(), "maps")
    image.save(path)
    image = opencv.imread(path, 0)
    orb = opencv.ORB_create()
    image_kp, image_ds = orb.detectAndCompute(image, None)
    map_results = dict()
    bf = opencv.BFMatcher(opencv.NORM_L1, crossCheck=False)
    for match_type, maps in map_dictionary.items():
        for map, file_name in maps.items():
            file_name = "{}_{}".format(match_type, file_name + ".jpg")
            path = os.path.join(map_directory, file_name)
            Image.open(path).resize((200, 200), Image.ANTIALIAS).save(os.path.join(get_temp_directory(), file_name))
            template = opencv.imread(os.path.join(get_temp_directory(), file_name), 0)
            template_kp, template_ds = orb.detectAndCompute(template, None)
            matches = bf.knnMatch(image_ds, template_ds, k=2)
            good = list()
            for m, n in matches:
                if m.distance < 0.8 * n.distance:
                    good.append(m)
            map_results[(match_type, map)] = len(good)
    map = max(map_results, key=lambda key: map_results[key])
    if map_results[map] < MIN_MATCHES:
        return None
    return map
Example #2
def binary_img(imgObj: Image, a=100):
    imgObj = imgObj.copy()

    img1 = imgObj.convert("L")
    img2 = img1.point(set_table(a), '1')

    return img2
def match_digit(image: Image.Image)->str:
    """Match the digit in the image to a template in the assets folder"""
    folder = os.path.join(get_assets_directory(), "digits")
    digits = os.listdir(folder)
    results = dict()
    image = image.crop((0, 0, image.height, image.height))
    image = image.resize((150, 150), Image.BICUBIC)
    image = high_pass_invert(image, 600)
    image = imageops.replace_color(image, (0, 0, 0), (255, 0, 0))
    pixels = image.load()
    min_x, min_y, max_x, max_y = image.width - 1, image.height - 1, 0, 0
    for x in range(image.width):
        for y in range(image.height):
            if sum(pixels[x, y]) < 3 * 255 // 2:
                min_x = min(min_x, x)
                min_y = min(min_y, y)
                max_x = max(max_x, x)
                max_y = max(max_y, y)
    box = (min_x - 3, min_y - 3, max_x + 3, max_y + 3)
    image = image.crop(box)
    image = imageops.replace_color(image, (0, 0, 0), (255, 255, 255))
    image = imageops.replace_color(image, (255, 0, 0), (0, 0, 0))
    image = image.resize((150, 150), Image.NEAREST)

    for digit in sorted(digits):
        if not digit.endswith(".png"):
            continue
        template: Image.Image = Image.open(os.path.join(folder, digit)).convert("RGB")
        digit = digit[:-4]
        results[digit] = imageops.get_similarity_monochrome(template, image)
    return max(results, key=lambda key: results[key])
def get_score(screenshot: Image.Image, scale: float, location: Tuple[int, int]) -> Tuple[int, int, int]:
    """Parse the score on the scoreboard to (left, right, self)"""
    x, y = location
    y1, y2 = int(y - 40 * scale), int(y - 20 * scale)
    x1_l, x2_l, x1_r, x2_r = x + 50 * scale, x + 85 * scale, x + 180 * scale, x + 205 * scale
    left: Image.Image = screenshot.crop(tuple(map(int, (x1_l, y1, x2_l, y2))))
    right: Image.Image = screenshot.crop(tuple(map(int, (x1_r, y1, x2_r, y2))))
    score_l, score_r = map(lambda i: tesseract.perform_ocr_scoreboard(i, "score", 420, 360), (left, right))
    return score_l, score_r, list(map(ScoreboardParser.is_allied, (left, right))).index(True)
Example #5
def removeFrame_img(imgObj: Image, frame_width=2):
    imgObj = imgObj.copy()

    (width, height) = imgObj.size
    if frame_width >= width or frame_width >= height:
        raise ImageSizeError('image size too small, (%s, %s)' % imgObj.size)

    imgObj = imgObj.crop((frame_width, frame_width,
                          width - frame_width, height - frame_width))

    return imgObj
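
A minimal usage sketch (not from the original source; the grayscale test image is synthetic):

from PIL import Image

img = Image.new("L", (10, 10))
trimmed = removeFrame_img(img, frame_width=2)
print(trimmed.size)  # (6, 6): two pixels removed from every edge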
Example #6
def is_bitonal(im: Image.Image) -> bool:
    """
    Tests a PIL.Image for bitonality.

    Args:
        im (PIL.Image.Image): Image to test

    Returns:
        True if the image contains only two different color values. False
        otherwise.
    """
    return im.getcolors(2) is not None and len(im.getcolors(2)) == 2
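
A quick, hedged sanity check of the predicate using synthetic 1-bit images (not part of the original source):

from PIL import Image

img = Image.new("1", (10, 10))  # all-black image: a single color value
print(is_bitonal(img))          # False
img.putpixel((0, 0), 1)         # introduce a second color value
print(is_bitonal(img))          # True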
Example #7
    def make_transparent(cls, slot_item: Image.Image):
        px = 0
        py = 0
        while py < slot_item.size[1] and py < 34:
            slot_item_pixel = slot_item.getpixel((px, py))
            if slot_item_pixel == (255, 0, 255, 255):
                slot_item.putpixel((px, py), (255, 0, 255, 0))
            px += 1
            if px == slot_item.size[0] or px == 34:
                py += 1
                px = 0
        return slot_item
Example #8
def paste_watermark(image: Image.Image, watermark: Image.Image) -> Image.Image:
    min_image_dimension = min(image.size)
    watermark_length = int(min_image_dimension / 4.)

    watermark_size = (watermark_length, watermark_length)
    watermark = watermark.resize(watermark_size, Image.ANTIALIAS)

    foreground_shape = (image.size[1], image.size[0], 4)

    top_margin = int(5 * image.size[1] / 6.)

    foreground_array = np.zeros(foreground_shape, dtype=np.uint8)
    foreground_array[top_margin - watermark_length:top_margin, :watermark_length, :] = np.array(watermark)

    image_array = np.array(image)
    background_rgb = img_as_float(image_array[..., :-1])
    mean = np.mean(background_rgb[top_margin - watermark_length:top_margin, :watermark_length, :])
    if mean < 0.4:
        foreground_alpha = foreground_array[..., -1:]
        foreground_rgb = foreground_array[..., :-1]
        foreground_rgb = 255 - foreground_rgb
        foreground_array = np.dstack((foreground_rgb, foreground_alpha))

    marked_image_array = alpha_composite(foreground_array, image_array)
    return Image.fromarray(marked_image_array, mode='RGBA')
Example #9
    def get_item_color(cls, item: Image.Image) -> Tuple[int, int, int]:
        """Gets the average color of an item.

        :param item: The item's image
        :return: The item's colors
        """
        count = 0
        px = 0
        py = 0
        color = [0, 0, 0]
        while py < item.size[1]:
            item_pixel = item.getpixel((px, py))
            if not (cls.is_empty(item_pixel) or cls.is_background_color(item_pixel)):
                color[0] += item_pixel[0]
                color[1] += item_pixel[1]
                color[2] += item_pixel[2]
                count += 1
            px += 1
            if px == item.size[0]:
                px = 0
                py += 1
        if count == 0:
            return 0, 0, 0
        color[0] /= count
        color[1] /= count
        color[2] /= count
        return int(color[0]), int(color[1]), int(color[2])
Example #10
    def prepare_image(self, image: Image.Image, colors: list) -> Image.Image:
        """Prepare an image by filtering its text"""
        self.conn.send("Processing colors...")
        size = tuple(map(lambda v: int(round(self._f * v)), image.size))
        image = image.resize(size, Image.LANCZOS)
        colors: List[Color] = list(set(map(ChatParser.rgb_to_hsv, colors)))
        pixels = np.array(image)
        img_colors = image.getcolors(image.width * image.height)
        cmp_colors = list()
        for count, color in img_colors:
            color = ChatParser.rgb_to_hsv(color)
            if count > 1 and ChatParser.compare_colors(color, colors):
                cmp_colors.append(color)
        self.conn.send("Pre-processing image...")
        result = Pool().map(partial(self.process_row, colors), pixels)
        return Image.fromarray(np.vstack(result), "L")
Example #11
def get_similarity(template: Image.Image, to_match: Image.Image)->float:
    """Compares two images and returns the similarity ratio"""
    if template.size != to_match.size:
        raise ValueError("These images are not the same size. One: {}, Two: {}.".format(template.size, to_match.size))
    diff = sum(abs(color_one - color_two) for pair_one, pair_two in zip(template.getdata(), to_match.getdata())
               for color_one, color_two in zip(pair_one, pair_two) if not sum(pair_two) > 3 * 255 - 1)
    n_comps = template.size[0] * template.size[1] * 3
    return 100 - (diff / 255.0 * 100) / n_comps
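
For illustration only, a small sketch of how the ratio behaves on two synthetic images (assumes the function above is in scope):

from PIL import Image

a = Image.new("RGB", (4, 4), (0, 0, 0))
b = Image.new("RGB", (4, 4), (0, 0, 0))
print(get_similarity(a, b))  # 100.0 for identical images

b.putpixel((0, 0), (255, 0, 0))
print(round(get_similarity(a, b), 2))  # ~97.92: one channel of one pixel differs by 255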
Example #12
def template_match(image: Image.Image, template: Image.Image, margin: float = 95, similarity: bool=False):
    """Use template matching and similarity to return template score"""
    image_cv, template_cv = map(image_to_opencv, (image, template))
    result = cv.matchTemplate(image_cv, template_cv, cv.TM_CCOEFF)
    _, _, _, loc = cv.minMaxLoc(result)
    image = image.crop((loc[0], loc[1], loc[0] + template.width, loc[1] + template.height))
    ratio = get_similarity(image, template)
    ratio = (ratio > margin) if similarity is False else ratio
    return ratio, loc
Example #13
def _crop(image: Image.Image, pad_w: int, pad_h: int) -> Image.Image:
    orig_w, orig_h = image.size
    w_shift = max(orig_w - pad_w, 0) // 2
    h_shift = max(orig_h - pad_h, 0) // 2

    even_w = max(orig_w - pad_w, 0) % 2
    even_h = max(orig_h - pad_h, 0) % 2

    return image.crop(
        (w_shift, h_shift, orig_w - w_shift - even_w,
         orig_h - h_shift - even_h))
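
A worked example of the centre-crop arithmetic with hypothetical sizes (not from the original source):

from PIL import Image

img = Image.new("RGB", (10, 8))
print(_crop(img, 6, 6).size)  # (6, 6): w_shift=2, h_shift=1, no parity correction needed
print(_crop(img, 7, 8).size)  # (7, 8): the odd 3-pixel surplus is absorbed by even_w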
Example #14
    def post(self):
        if self.request.files == {} or 'mypicture' not in self.request.files:
            """ Check that a file was uploaded and that its field name matches the HTML form """
            self.write('')
            return
        # A file is present; check whether it is one of the formats we accept
        # Common image MIME types: image/jpeg, image/bmp, image/pjpeg, image/gif, image/x-png, image/png
        image_type_list = ['image/gif', 'image/jpeg',
                           'image/pjpeg', 'image/bmp', 'image/png', 'image/x-png']
        send_file = self.request.files['mypicture'][0]
        if send_file['content_type'] not in image_type_list:
            self.write('')
            return
        # The checks above still leave a wide-open upload vulnerability; PIL can be used to close it.
        # Limit the size of the uploaded file; len() gives the number of bytes
        if len(send_file['body']) > 4 * 1024 * 1024:
            self.write('')
            return
        # Once the requirements are met, store the image.
        # Storing means persisting the contents of send_file['body']; type(send_file['body']) is str
        # Write the data to a temporary file first, then process that file with PIL.
        tmp_file = tempfile.NamedTemporaryFile(delete=True)  # create a temporary file that is deleted automatically when closed
        tmp_file.write(send_file['body'])  # write to the temporary file
        tmp_file.seek(0)  # seek back to the start of the file; the write above left the pointer at the end

        # Now run the file through PIL before storing it. PIL raises an IOError when opening a file that is not an
        # image, which catches files whose extension looks like an image format but whose content is not an image.
        try:
            image_one = Image.open(tmp_file.name)
        except IOError as error:
            logging.info(error)  # log it; requests like this are usually from attackers
            logging.info('+' * 30 + '\n')
            logging.info(self.request.headers)
            tmp_file.close()
            self.write('')
            return

        # Check the image dimensions; reject anything outside the allowed range
        if image_one.size[0] < 250 or image_one.size[1] < 250 or image_one.size[0] > 2000 or image_one.size[1] > 2000:
            tmp_file.close()
            self.write('')
            return

        # Store the image.
        # Pick the target directory and generate a new file name.
        # Take the file format from the original filename; the format reported by PIL is not always correct
        image_path = "./static/picture/"
        image_format = send_file['filename'].split('.').pop().lower()
        tmp_name = image_path + str(int(time.time())) + "." + image_format
        image_one.save(tmp_name)

        # Close the temporary file; it is deleted automatically on close
        tmp_file.close()
        self.write('')
        return
Example #15
def get_channel_data(image: Image.Image, *channel_names: str, as_dict: bool = False):
    from numpy import array
    width, height = image.size
    channels = get_image_channels(image)
    channel_data = {}
    for channel_name in channel_names:
        channel_index = channels[channel_name]
        channel_data[channel_name] = array(image.getdata(channel_index)).reshape(height, width)
    if as_dict or len(channel_names) != 1:
        return dict(zip(channel_names, (channel_data[channel_name] for channel_name in channel_names)))
    else:
        return channel_data[channel_names[0]] 
def high_pass_invert(image: Image.Image, threshold: int) -> Image.Image:
    """Perform a high-pass filter on an image and invert"""
    result = image.copy()  # Do not modify original image
    pixels = result.load()
    for x in range(image.width):
        for y in range(image.height):
            pixel = pixels[x, y]
            if sum(pixel) < threshold:
                pixels[x, y] = (255, 255, 255)
                continue
            pixels[x, y] = (0, 0, 0)
    return result
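
A minimal check of the threshold behaviour on a synthetic two-pixel image (illustration only):

from PIL import Image

img = Image.new("RGB", (2, 1))
img.putpixel((0, 0), (10, 10, 10))     # channel sum 30, below the threshold
img.putpixel((1, 0), (250, 250, 250))  # channel sum 750, above the threshold
out = high_pass_invert(img, 600)
print(out.getpixel((0, 0)), out.getpixel((1, 0)))  # (255, 255, 255) (0, 0, 0)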
Example #17
    def get_item_size(cls, item: Image.Image) -> int:
        """Gets the actual size of an item in pixels."""
        size = item.size[0] * item.size[1]
        empty = 0
        px = 0
        py = 0
        while py < item.size[1]:
            item_pixel = item.getpixel((px, py))
            if not cls.is_empty(item_pixel):
                size -= empty
                empty = 0
                px = 0
                py += 1
            else:
                empty += 1
                px += 1
                if px == item.size[0]:
                    size -= empty - 1
                    empty = 0
                    px = 0
                    py += 1

        empty = 0
        px = item.size[0] - 1
        py = 0
        while py < item.size[1]:
            item_pixel = item.getpixel((px, py))
            if not cls.is_empty(item_pixel):
                size -= empty
                empty = 0
                px = item.size[0] - 1
                py += 1
            else:
                empty += 1
                px -= 1
                if px == -1:
                    empty = 0
                    px = item.size[0] - 1
                    py += 1
        return size
Example #18
    def createFiles(self):
        for directory, width in sizes.items():
            image = Image.open(self.path)
            startingWidth, startingHeight = image.size
            resizedHeight = int(round(startingHeight * (float(width-2)/startingWidth))) #height based on resized width before border applied

            if startingWidth < sizes["drawable-xxhdpi"]:
                print("WARNING! This image is too small and might look bad at higher resolutions. It's "+str(startingWidth)+" wide.")

            print("Final image dimensions for "+directory+": "+str(width)+"x"+str(resizedHeight+2))

            os.system("convert "+self.path+" +antialias -blur 0 -resize "+str(width-2)+"x"+str(resizedHeight)+
                      " -bordercolor 'transparent' -border 1x1 -fill black -draw 'point 1,0' -draw 'point 0,1' -draw 'point "
                      +str(width-2)+",0' -draw 'point 0,"+str(resizedHeight)+"' "+self.out+directory+"/"+self.filename+".9.png")
Example #19
def clearNoise_img(imgObj: Image):
    imgObj = imgObj.copy()
    if imgObj.mode not in {'1', 'P'}:
        imgObj = imgObj.convert('P')

    w, h = imgObj.size
    pixdata = imgObj.load()

    for y in range(1, h - 1):
        for x in range(1, w - 1):
            count = 0
            if pixdata[x, y - 1] > 245:
                count = count + 1
            if pixdata[x, y + 1] > 245:
                count = count + 1
            if pixdata[x - 1, y] > 245:
                count = count + 1
            if pixdata[x + 1, y] > 245:
                count = count + 1
            if count > 2:
                pixdata[x, y] = 255

    return imgObj
    def _create_white_bg_image(cls, image: Image.Image) -> Image.Image:
        width, height = image.size
        new_width = max(width, height)
        new_height = new_width
        white_img = Image.new("RGBA", (new_width, new_height), (255, 255, 255, 255))
        if not image.mode == "RGBA":
            image = image.convert("RGBA")
        # Put original (potentially transparent) image over white background
        white_img.paste(
            image,
            cls._find_middle_coordinates_pip(white_img.size, image.size),
            mask=image
        )
        return white_img
Example #21
def feature_match(image: Image.Image, template: Image.Image)->int:
    """Return the amount of features matched with ORB"""
    orb = cv.ORB_create()
    image.show(), template.show()
    matcher = cv.BFMatcher(cv.NORM_L1, crossCheck=False)
    template = image_to_opencv(template.convert("RGB"))
    tp_kp, tp_ds = orb.detectAndCompute(template, None)
    image = image_to_opencv(image.convert("RGB"))
    im_kp, im_ds = orb.detectAndCompute(image, None)
    assert im_ds is not None and tp_ds is not None
    try:
        matches = matcher.knnMatch(tp_ds, im_ds, k=2)
    except cv.error as e:
        print("[OpenCV.feature_match] {}".format(e))
        return 0
    result = 0
    for value in matches:
        if len(value) != 2:
            continue
        m, n = value
        if m.distance < 0.8 * n.distance:
            result += 1
    return result
Example #22
def _rescale_or_crop(image: Image.Image, pad_w: int, pad_h: int,
                     rescale_w: bool, rescale_h: bool,
                     keep_aspect_ratio: bool) -> Image.Image:
    """Rescale and/or crop the image based on the rescale configuration."""
    orig_w, orig_h = image.size
    if orig_w == pad_w and orig_h == pad_h:
        return image

    if rescale_w and rescale_h and not keep_aspect_ratio:
        image = image.resize((pad_w, pad_h), Image.BILINEAR)
    elif rescale_w and rescale_h and keep_aspect_ratio:
        ratio = min(pad_h / orig_h, pad_w / orig_w)
        image = image.resize((int(orig_w * ratio), int(orig_h * ratio)))
    elif rescale_w and not rescale_h:
        orig_w, orig_h = image.size
        if orig_w != pad_w:
            ratio = pad_w / orig_w
            image = image.resize((pad_w, int(orig_h * ratio)))
    elif rescale_h and not rescale_w:
        orig_w, orig_h = image.size
        if orig_h != pad_h:
            ratio = pad_h / orig_h
            image = image.resize((int(orig_w * ratio), pad_h))
    return _crop(image, pad_w, pad_h)
Example #23
def get_score(image: Image.Image):
    """
    Implementation:

    1. Crop the image
    2. (Optional) Use template matching to detect score card
       Whether this is required highly depends on the accuracy of the
       coordinates. If the coordinates are not accurate, it is
       impossible to determine the actual score based on the fill
       ratio of the coloured score bars.
    3. Use brightest pixel detection to get score location
    4. Compute the ratio filled of the score bars
    5. Compute the score
    6. Detect the presence of satellite markers
    7. Calculate the score from ratio and match type

    If the length of the coloured bar cannot be determined, then it
    must be assumed that one of the bars is full, and thus has gained
    full points. The ratio can then be determined with the other bar,
    moving in the left direction only horizontally, continuing for as
    long as the pixels are coloured and not black.
    """
    red, green = get_brightest_pixel_loc(image, 0), get_brightest_pixel_loc(image, 1)
    # Assume one of the bars is full
    if red is None or green is None:
        return 0.0
    (xr, yr), (xg, yg) = red, green
    pixels = image.load()
    # Move over pixels in left direction from brightest point on
    xpr = xr
    while xpr > 0:
        color = pixels[xpr, yr]
        if color[0] < 150:
            break
        xpr -= 1
    xpg = xg
    while xpg > 0:
        color = pixels[xpg, yg]
        if color[1] < 175:
            break
        xpg -= 1
    # Now calculate ratio between two scores (ally/enemy)
    if xr == xpr:
        return 0.0
    return (xg - xpg) / (xr - xpr)
def split_scoreboard(image: Image.Image, scale: float, header_loc: tuple) -> list:
    """Split a scoreboard into different elements"""
    columns, widths = ScoreboardParser.COLUMNS, ScoreboardParser.WIDTHS
    image = ScoreboardParser.crop_scoreboard(image, scale, header_loc)
    result = list()
    for i in range(16):
        row = (0, i * (image.height / ROWS), image.width, (i + 1) * (image.height / ROWS))
        row_img = image.crop(row)
        row_elems = list()
        for name in columns:
            start = sum([widths[col] * image.width for col in columns[:columns.index(name)]])
            end = start + widths[name] * image.width
            crop = (start, 0, end, row_img.height)
            column = row_img.crop(crop)
            column = column.resize((column.width * 3, column.height * 3), Image.LANCZOS)
            row_elems.append(column)
        result.append(row_elems)
    return result
Example #25
def bisect_img(imgobj: Image, n):
    """
    split imgobj to single-character img
    using average width
    :param imgobj:
    :param n:
    :return:
    """
    (width, height) = imgobj.size
    img_slice = width // n
    imgs = []
    incx = img_slice
    for i in range(n):
        x = i * incx  # these numeric values may need manual tuning
        y = height  # based on the pixels of the captcha image
        imgs.append(imgobj.crop((x, 0, x + incx, y)))

    return imgs
Example #26
    def number_scan(cls, slot_image: Image.Image) -> Tuple[int, Any]:
        """Scans a slot's image looking for amount digits

        :param slot_image: The image of an inventory slot.
        :return: A tuple containing the number parsed, the slot's image and the number's image.
        """
        digit_thousands = slot_image.crop((0, 20, 0 + 8, 20 + 7))
        digit_hundreds = slot_image.crop((8, 20, 8 + 8, 20 + 7))
        digit_tens = slot_image.crop((16, 20, 16 + 8, 20 + 7))
        digit_units = slot_image.crop((24, 20, 24 + 8, 20 + 7))
        item_numbers = [digit_thousands, digit_hundreds, digit_tens, digit_units]
        number_string = ""
        numbers_image = Image.new("RGBA", (32, 11), (255, 255, 255, 0))
        a = 0
        for item_number in item_numbers:
            i = 0
            for number in numbers:
                px = 0
                py = 0
                while py < item_number.size[1] and py < number.size[1]:
                    item_number_pixel = item_number.getpixel((px, py))
                    number_pixel = number.getpixel((px, py))
                    if not cls.is_transparent(number_pixel) and not item_number_pixel == number_pixel:
                        break
                    px += 1
                    if px == item_number.size[0] or px == number.size[0]:
                        py += 1
                        px = 0
                    if py == item_number.size[1]:
                        if i > 9:
                            i = "k"
                        number_string += str(i)
                        numbers_image.paste(number, (8 * a, 0))
                        i = -1
                        break
                if i == -1:
                    break
                i += 1
            a += 1
        px = 0
        py = 0
        while py < numbers_image.size[1]:
            numbers_image_pixel = numbers_image.getpixel((px, py))
            if not cls.is_transparent(numbers_image_pixel):
                slot_image.putpixel((px, py + 20), (255, 255, 0, 0))
            px += 1
            if px == numbers_image.size[0]:
                py += 1
                px = 0
        return 1 if number_string == "" else int(number_string.replace("k", "000")), numbers_image
    def create_thumbnail(self):
        from django.core.files.storage import default_storage as storage
        if not self.picture:
            return ""
        file_path = self.picture.name
        filename_base, filename_ext = os.path.splitext(file_path)
        thumbnail_file_path = "%s_thumbnail.jpg" % filename_base
        if storage.exists(thumbnail_file_path):
            # If thumbnail version exists, return its URL path
            return "exists"
        try:
            # resize the original image and
            # return URL path of the thumbnail version
            f = storage.open(file_path, 'r')
            image = Image.open(f)
            width, height = image.size
            thumbnail_size = 50, 50

            if width > height:
                delta = width - height
                left = int(delta/2)
                upper = 0
                right = height + left
                lower = height
            else:
                delta = height - width
                left = 0
                upper = int(delta/2)
                right = width
                lower = width + upper

            image = image.crop((left, upper, right, lower))
            image = image.resize(thumbnail_size, Image.ANTIALIAS)

            f_mob = storage.open(thumbnail_file_path, "w")
            image.save(f_mob, "JPEG")
            f_mob.close()
            return "Success"
        except:
            raise IOError
            return "error"
Example #28
    def clear_background(cls, slot_item: Image.Image, *, copy=False) -> Image.Image:
        """Clears the slot's background of an image.

        :param slot_item: The slot's image.
        :param copy: Whether to create a copy or alter the original.

        :returns: The item's image without the slot's background.
        """
        px = 0
        py = 0
        if copy:
            slot_item = slot_item.copy()

        background = {'Normal': 0, 'Gray': 0, 'Green': 0, 'Blue': 0, 'Violet': 0, 'Golden': 0, 'Other': -255}
        for i in range(0, 32):
            pixel = slot_item.getpixel((i, 0))
            lum = (0.299 * pixel[0] + 0.587 * pixel[1] + 0.114 * pixel[2])
            background[cls.get_background_type(lum)] += 1
        for i in range(0, 32):
            pixel = slot_item.getpixel((0, i))
            lum = (0.299 * pixel[0] + 0.587 * pixel[1] + 0.114 * pixel[2])
            background[cls.get_background_type(lum)] += 1
        for i in range(0, 32):
            pixel = slot_item.getpixel((31, i))
            lum = (0.299 * pixel[0] + 0.587 * pixel[1] + 0.114 * pixel[2])
            background[cls.get_background_type(lum)] += 1
        # no point checking the last row since its always blanked out
        # for i in range(0, 32):
        #     pixel = slot_item.getpixel((i, 31))
        #     lum = (0.299 * pixel[0] + 0.587 * pixel[1] + 0.114 * pixel[2])
        #     background[get_background_type(lum)]+=1
        background_type = max(background.items(), key=operator.itemgetter(1))[0]
        while py < slot_item.size[1] and py < slot[background_type].size[1]:
            slot_item_pixel = slot_item.getpixel((px, py))
            slot_pixel = slot[background_type].getpixel((px + 1, py + 1))
            if slot_item_pixel[:3] == slot_pixel[:3]:
                slot_item.putpixel((px, py), (255, 0, 255, 0))
            px += 1
            if px == slot_item.size[0] or px == slot[background_type].size[0]:
                py += 1
                px = 0
        return slot_item
Example #29
    def scan_item(cls, slot_item: Image.Image, item_list: List[Dict[str, Any]]) -> \
            Union[Dict[str, Union[str, int]], str]:
        """Scans an item's image, and looks for it among similar items in the database.

        :param slot_item: The item's cropped image.
        :param item_list: The list of similar items.
        :return: The matched item, represented in a dictionary.
        """
        results = {}
        if slot_item is None:
            return "Empty"
        for item in item_list:
            if item['id'] in results:
                continue
            item_image = pickle.loads(item['frame'])
            item_image = Image.open(io.BytesIO(bytearray(item_image)))
            px = 0
            py = 0
            while py < slot_item.size[1] and py < item_image.size[1]:
                slot_item_pixel = slot_item.getpixel((px, py))
                item_pixel = item_image.getpixel((px, py))
                if cls.is_empty(item_pixel) == cls.is_empty(slot_item_pixel):
                    pass
                elif cls.is_empty(slot_item_pixel):
                    if not cls.is_number(slot_item_pixel):
                        break
                elif cls.is_empty(item_pixel) or item_pixel != slot_item_pixel:
                    break

                px += 1
                if px == slot_item.size[0] or px == item_image.size[0]:
                    py += 1
                    px = 0
                if py == slot_item.size[1] or py == item_image.size[1]:
                    return item

        return "Unknown"
Example #30
def extract_boxes(im: Image.Image, bounds: Dict[str, Any]) -> Image:
    """
    Yields the subimages of image im defined in the list of bounding boxes in
    bounds preserving order.

    Args:
        im (PIL.Image.Image): Input image
        bounds (list): A list of tuples (x1, y1, x2, y2)

    Yields:
        (PIL.Image) the extracted subimage
    """
    if bounds['text_direction'].startswith('vertical'):
        angle = 90
    else:
        angle = 0
    for box in bounds['boxes']:
        if isinstance(box, tuple):
            box = list(box)
        if (box < [0, 0, 0, 0] or box[::2] > [im.size[0], im.size[0]] or
                box[1::2] > [im.size[1], im.size[1]]):
            logger.error('bbox {} is outside of image bounds {}'.format(box, im.size))
            raise KrakenInputException('Line outside of image bounds')
        yield im.crop(box).rotate(angle, expand=True), box
Example #31
import datetime as dt
import logging
import pathlib
from typing import Dict, Iterator, Literal, Optional, Set, Text, Tuple, Union

import cv2
import numpy as np
from PIL.Image import Image
from PIL.Image import open as open_image

from . import clients

LOGGER = logging.getLogger(__name__)

TARGET_WIDTH = 540

_CACHED_SCREENSHOT: Dict[Literal["value"], Tuple[dt.datetime, Image]] = {
    "value": (dt.datetime.fromtimestamp(0), Image())
}


def invalidate_screeshot():
    _CACHED_SCREENSHOT["value"] = (dt.datetime.fromtimestamp(0), Image())


class g:
    last_screenshot_save_path: str = ""
    screenshot_width = TARGET_WIDTH


def screenshot(*, max_age: float = 1) -> Image:
    cached_time, _ = _CACHED_SCREENSHOT["value"]
    if cached_time < dt.datetime.now() - dt.timedelta(seconds=max_age):
Example #32
    def __call__(self,
                 img: Image.Image,
                 boxes: Optional[torch.Tensor] = None,
                 labels: Optional[torch.Tensor] = None):
        return np.array(img.convert("RGB")), boxes, labels
Exemple #33
0
def adjust_image_size(image: Image.Image, master: Image.Image) -> Image.Image:
    'Adjusts the image size.'
    return image.resize(master.size)
def resize(im: Image.Image, size):
    if size != (512, 512):
        im = im.resize(size, Image.ANTIALIAS)
    return im
Example #35
def crop(img: Image.Image, down=False):
    p = 80
    w, h = img.size
    box = (0, p, w, h) if down else (0, 0, w, p)
    return img.crop(box)
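
Usage sketch (the 80-pixel split point is hard-coded in the function above; the image here is synthetic):

from PIL import Image

img = Image.new("RGB", (100, 200))
print(crop(img).size)             # (100, 80): the strip above y=80
print(crop(img, down=True).size)  # (100, 120): everything from y=80 down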
Example #36
    def __call__(self, img: Image.Image):
        return img.resize(self.size, self.mode)
Example #37
def smooth(source: Image.Image) -> Image.Image:
    return source.copy().filter(ImageFilter.SMOOTH_MORE)
Example #38
def save_png(img: Image.Image, filename: str, transparency=None):
    with open(filename, "wb") as fd:
        img.save(fd, "png", transparency=transparency)
Example #39
def to_bytes(image: ImageType, image_format: str = "png") -> bytes:
    buffer = io.BytesIO()
    image.save(buffer, format=image_format)
    return buffer.getvalue()
Example #40
def embossing(image: Image.Image) -> Image.Image:
    """Create a metallic version of the given image.

    The algorithm used is "embossing", applying the convolution kernel presented in the video associated with this task.

    Parameters
    ----------
    image : Image.Image
        The image from which the new image will be generated.

        The image must be a PIL (Pillow) image in RGB or RGBA mode [err #1].

    Returns
    -------
    Image.Image
        The "metallic" image generated from the given image. It is a PIL (Pillow) image in RGBA
          mode.

    Errors
    ------
    ValueError
    [1] If the given image is in a mode other than RGB or RGBA.
    """
    # Check that the image mode is correct.
    if image.mode != "RGB" and image.mode != "RGBA":
        raise ValueError("[1] The image mode must be RGB or RGBA.")

    # Build the convolution kernel.
    kernel = ConvolutionKernel(matrix=[[0, 1, 1], [-1, 0, 1], [-1, -1, 0]],
                               anchor=(1, 1))

    # Apply the convolution kernel to each layer of the image, except the alpha channel, which is carried over from
    #   the original image.
    out_r = kernel.apply(function=lambda coord: image.getpixel(coord)[0],
                         limits=image.size,
                         weight=1,
                         default=0)
    out_g = kernel.apply(function=lambda coord: image.getpixel(coord)[1],
                         limits=image.size,
                         weight=1,
                         default=0)
    out_b = kernel.apply(function=lambda coord: image.getpixel(coord)[2],
                         limits=image.size,
                         weight=1,
                         default=0)

    # Create the new image and set each layer's new values for every pixel, after normalization. Applying the
    #   kernel produces results between -765 and +765, which are mapped linearly to the interval
    #   0 to +255.
    out_image = Image.new(mode="RGBA", size=image.size)

    for pixel_iy in range(out_image.size[1]):
        for pixel_ix in range(out_image.size[0]):
            out_image.putpixel(
                xy=(pixel_ix, pixel_iy),
                value=(int((out_r[pixel_ix][pixel_iy] + 765) / 6),
                       int((out_g[pixel_ix][pixel_iy] + 765) / 6),
                       int((out_b[pixel_ix][pixel_iy] + 765) / 6),
                       255 if image.mode == "RGB" else image.getpixel(
                           (pixel_ix, pixel_iy))[3]))

    return out_image
Example #41
def edges(source: Image.Image) -> Image.Image:
    return source.copy().filter(ImageFilter.EDGE_ENHANCE_MORE)
Example #42
    def crop_self(self, bg_img: Image.Image):
        return bg_img.crop(self.inner_box)
    def update_image(self, image: Image.Image):
        resized_image = image.resize((self.canvas_width, self.canvas_height))
        self.photo_image = ImageTk.PhotoImage(resized_image, master=self)

        self.itemconfigure(self.image_id, image=self.photo_image)
Example #44
    def __call__(self, img: Image.Image):
        img = np.float32(img.convert('L')) / np.float32(255.0)
        return img[np.newaxis, :]
Example #45
    def save_final_image(self, image: Image) -> None:
        file_path = self.get_image_file_path()
        image.save(file_path)
        self.print_file_ready_message(file_path)
Example #46
def compositeImgs(img_src:PILImage, img_dst:PILImage) -> PILImage:
    img_dst.alpha_composite(img_src, dest=(0,0), source=(0,0))
    return img_dst
Example #47
def edge_detection(image: Image.Image) -> Image.Image:
    """Create a new image containing the edges detected in the given image.

    The detection algorithm applies the convolution kernel presented in the video associated with this task.

    Parameters
    ----------
    image : Image.Image
        The image from which the new edge-detected image will be generated.

        The image must be a PIL (Pillow) image in RGB or RGBA mode [err #1].

    Returns
    -------
    Image.Image
        The edge-detected image generated from the given image. It is a PIL (Pillow) image
          in RGBA mode.

    Errors
    ------
    ValueError
    [1] If the given image is in a mode other than RGB or RGBA.
    """
    # Check that the image mode is correct.
    if image.mode != "RGB" and image.mode != "RGBA":
        raise ValueError("[1] The image mode must be RGB or RGBA.")

    # Build the convolution kernel.
    kernel = ConvolutionKernel(matrix=[[0, -1, 0], [-1, 0, 1], [0, 1, 0]],
                               anchor=(1, 1))

    # Precompute the brightness of every pixel, since each pixel is accessed multiple times and computing the
    #   average repeatedly would make the code heavier. Brightness is taken to be the mean of the RGB values.
    brightness = [[
        sum(image.getpixel((x_index, y_index))[:3]) / 3
        for y_index in range(image.size[1])
    ] for x_index in range(image.size[0])]

    # Matrix of brightness values (non-normalized, in the range -510 to 510) for each pixel after applying the
    #   kernel.
    out_brightness = kernel.apply(
        function=lambda coord: brightness[coord[0]][coord[1]],
        limits=image.size,
        weight=1,
        default=0)

    # Create the new image and set the brightness value of every pixel after normalization. The normalization is
    #   simply the absolute value of the brightness divided by 2, and every pixel is grayscale.
    out_image = Image.new(mode="RGBA", size=image.size)

    for pixel_iy in range(out_image.size[1]):
        for pixel_ix in range(out_image.size[0]):
            out_image.putpixel(
                xy=(pixel_ix, pixel_iy),
                value=(int(abs(out_brightness[pixel_ix][pixel_iy]) / 2),
                       int(abs(out_brightness[pixel_ix][pixel_iy]) / 2),
                       int(abs(out_brightness[pixel_ix][pixel_iy]) / 2),
                       255 if image.mode == "RGB" else image.getpixel(
                           (pixel_ix, pixel_iy))[3]))

    return out_image
Example #48
    def to_bytes(cls, content: img.Image) -> bytes:
        dst = io.BytesIO()
        content.save(dst, format="png")
        return dst.getvalue()
Example #49
    def extend_random_crop(image: ImPIL.Image,
                           labels: np.ndarray,
                           ltrb: np.ndarray,
                           points: np.ndarray = None,
                           osize: tuple = (320, 320),
                           min_box_side: int = 30,
                           ignore_intersection: tuple = (0.5, 0.9),
                           aspect_ratio_bounds: tuple = (0.5, 2)):
        r"""Does random crop and adjusts the boxes while maintaining minimum
        box size. When not None, the points (xy locations within a bounding
        box) are adjusted.

        Args:
            image (pil-image): pillow input image
            labels (np.ndarray): labels of each box
            ltrb (np.ndarray): object locations in ltrb format.
                Requires 2D-array, with rows of (left, top, right, bottom) in
                pixels.
            points (np.ndarray, optional): (x, y) locations of landmarks within
                a box. Expects a 3D-array with points.shape[0] = ltrb.shape[0]
                and points.shape[2] = 2
            osize (tuple/list): Output image size (width, height)
            min_box_side (int): Minimum size of the box predicted by the model.
                Default = 30 -- SSD minimum size
            ignore_intersection (tuple/list of floats): avoids objects within
                the intersection range in the final crop.
            aspect_ratio_bounds (tuple/list of floats): allowed crop ratios
                given an image

        Return:
            image, labels, ltrb, points

        ** requires some speed-up
        """
        valid_points = None
        # filter boxes with negative width -- not usual but a safe check
        _valid = np.stack((ltrb[:, 2] - ltrb[:, 0], ltrb[:, 3] - ltrb[:, 1]))
        _valid = _valid.min(0) > 2
        labels, ltrb, points = labels[_valid], ltrb[_valid], points[_valid]
        w, h = image.size
        # minimum ltrb side on actual image
        mbox = min((ltrb[:, 3] - ltrb[:, 1]).min(),
                   (ltrb[:, 2] - ltrb[:, 0]).min())
        # min & max possible crop size to maintain min_box_side
        mincw = int(mbox * 1.1)
        maxcw = int(min(mincw * min(osize) / min_box_side, min(w, h)))
        if mincw > maxcw:
            mincw = maxcw - 1
        # random width and height given all the above conditions
        nw = random.randint(mincw, maxcw)
        nh = random.randint(int(nw * aspect_ratio_bounds[0]),
                            int(nw * aspect_ratio_bounds[1]))
        nh = min(max(nh, int(mbox * 1.1)), h)
        # find all possible boxes, given nw and nh
        all_ls, all_ts = np.arange(0, w - nw, 10), np.arange(0, h - nh, 10)
        all_ls = all_ls.repeat(len(all_ts))
        all_ts = np.tile(all_ts[None, :],
                         (len(np.arange(0, w - nw, 10)), 1)).reshape(-1)
        possible = np.concatenate((all_ls[None, ], all_ts[None, ])).T
        possible = np.concatenate([
            possible[:, [0]], possible[:, [1]], possible[:, [0]] + nw,
            possible[:, [1]] + nh
        ], 1)

        # intersection in percentage to validate all possible boxes
        lt = np.maximum(ltrb[:, :2][:, np.newaxis],
                        possible[:, :2][np.newaxis, :])
        rb = np.minimum(ltrb[:, 2:][:, np.newaxis],
                        possible[:, 2:][np.newaxis, :])
        intersection = np.multiply(*np.split(np.clip(rb - lt, 0, None), 2, 2))
        intersection = intersection.squeeze(2)
        area = ((ltrb[:, 2] - ltrb[:, 0]) * (ltrb[:, 3] - ltrb[:, 1]))
        intersection = intersection / area[:, None]
        idx = np.where((intersection > ignore_intersection[1]).sum(0))[0]
        idx = [
            x for x in idx
            if not ((intersection[:, x] > ignore_intersection[0]) *
                    (intersection[:, x] < ignore_intersection[1])).any()
        ]

        if len(idx) > 0:
            # randomly pick one valid possible box
            pick = random.randint(0, len(idx) - 1)
            crop = possible[idx[pick]]
            valid = intersection[:, idx[pick]] > ignore_intersection[1]
            valid_ltrb = ltrb[valid].copy()
            if points is not None:
                valid_points = points[valid].copy()
            valid_labels = labels[valid].copy()

        else:
            # if the above fails -- fall back to a single object
            pick = random.randint(0, len(ltrb) - 1)
            crop = ltrb[pick].copy()
            # adjust crop - add some width and some height
            rw_ = (crop[2] - crop[0]) * (random.random() * 0.2) + 0.05
            _rw = (crop[2] - crop[0]) * (random.random() * 0.2) + 0.05
            rh_ = (crop[3] - crop[1]) * (random.random() * 0.2) + 0.05
            _rh = (crop[3] - crop[1]) * (random.random() * 0.2) + 0.05
            crop[0] -= rw_
            crop[1] -= rh_
            crop[2] += _rw
            crop[3] += _rh
            valid_ltrb = ltrb[[pick]].copy()
            if points is not None:
                valid_points = points[[pick]].copy()
            valid_labels = labels[[pick]].copy()

        # adjust xy's
        valid_ltrb[:, 0::2] -= crop[0]
        valid_ltrb[:, 1::2] -= crop[1]
        if points is not None:
            valid_points[:, :, 0] -= crop[0]
            valid_points[:, :, 1] -= crop[1]

        image = image.crop(list(map(int, crop)))
        w, h = image.size
        image = image.resize(osize)
        valid_ltrb[:, 0::2] *= osize[0] / w
        valid_ltrb[:, 1::2] *= osize[1] / h
        if points is not None:
            valid_points[:, :, 0] *= osize[0] / w
            valid_points[:, :, 1] *= osize[1] / h
        valid_ltrb[:, 0::2] = np.clip(valid_ltrb[:, 0::2], 0, osize[0] - 1)
        valid_ltrb[:, 1::2] = np.clip(valid_ltrb[:, 1::2], 0, osize[1] - 1)
        if points is not None:
            valid_points[:, :, 0] = np.clip(valid_points[:, :, 0], 0, osize[0])
            valid_points[:, :, 1] = np.clip(valid_points[:, :, 1], 0, osize[1])
        return image, valid_labels, valid_ltrb, valid_points
Example #50
def invalidate_screeshot():
    _CACHED_SCREENSHOT["value"] = (dt.datetime.fromtimestamp(0), Image())
Example #51
    def get_image_and_offset(
        name: str, id: int, image: ImageType, off_x: int, off_y: int, copy_level: int
    ) -> Tuple[ImageType, int, int]:  # image, off_x, off_y
        # blame rob; honestly, I am done with doing all those exceptions at this point ~ nekit
        if "robot" in name:

            if f"{id}_02_" in name:
                image = image.rotate(-45, resample=Image.BICUBIC, expand=True)
                off_x -= 50 if copy_level else 40
                off_y -= 20

                if TWO in name:

                    if id in {2, 5, 6, 8, 9, 11, 12, 15, 17, 24}:
                        off_x += 15
                        off_y -= 5
                    elif id in {7, 10, 19, 20}:
                        off_x += 7
                    elif id == 13:
                        off_x += 10
                        off_y -= 4
                    elif id == 18:
                        off_x -= 1
                        off_y -= 1
                    elif id in {21, 25}:
                        off_x += 12
                    elif id == 22:
                        off_y -= 5
                    elif id in {3, 26}:
                        off_x += 1
                    elif id == 23:
                        off_x -= 3
                        off_y -= 2

            elif f"{id}_03_" in name:
                image = image.rotate(45, resample=Image.BICUBIC, expand=True)
                off_x -= 40 if copy_level else 30

                if TWO in name and id in {3, 5, 6, 8, 16, 17}:
                    off_x += 10

                off_y -= 52 if id == 21 and TWO not in name else 60

            elif f"{id}_04_" in name:

                if copy_level:
                    off_x -= 10
                off_y -= 70

        elif "spider" in name:

            if f"{id}_02_" in name:

                if copy_level > 1:
                    off_x += 55
                    off_y -= 38

                    image = image.transpose(Image.FLIP_LEFT_RIGHT)

                elif copy_level > 0:
                    off_x += 18
                    off_y -= 38
                else:
                    off_x -= 16
                    off_y -= 38

            elif f"{id}_03_" in name:
                off_x -= 86
                off_y -= 38

                if id == 7:
                    off_x += 15
                    off_y += 13

                elif id == 15:
                    off_x += 5
                    off_y += 3

                if TWO in name:

                    if id == 16:
                        off_y += 5
                    elif id in {1, 8, 9, 11, 13, 14}:
                        off_x += 2
                    elif id in {2, 3}:
                        off_x += 25
                    elif id == 10:
                        off_x += 18
                        off_y -= 5

                if GLOW in name:
                    off_y += 3

                image = image.rotate(-45, resample=Image.BICUBIC, expand=True)

            elif f"{id}_04_" in name:
                off_x -= 30
                off_y -= 20

        return image, off_x, off_y
Example #52
def pad(
    img: Image.Image,
    padding: Union[int, List[int], Tuple[int, ...]],
    fill: Optional[Union[float, List[float], Tuple[float, ...]]] = 0,
    padding_mode: Literal["constant", "edge", "reflect", "symmetric"] = "constant",
) -> Image.Image:

    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")

    if not isinstance(padding, (numbers.Number, tuple, list)):
        raise TypeError("Got inappropriate padding arg")
    if not isinstance(fill, (numbers.Number, str, tuple)):
        raise TypeError("Got inappropriate fill arg")
    if not isinstance(padding_mode, str):
        raise TypeError("Got inappropriate padding_mode arg")

    if isinstance(padding, list):
        padding = tuple(padding)

    if isinstance(padding, tuple) and len(padding) not in [1, 2, 4]:
        raise ValueError(f"Padding must be an int or a 1, 2, or 4 element tuple, not a {len(padding)} element tuple")

    if isinstance(padding, tuple) and len(padding) == 1:
        # Compatibility with `functional_tensor.pad`
        padding = padding[0]

    if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
        raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")

    if padding_mode == "constant":
        opts = _parse_fill(fill, img, name="fill")
        if img.mode == "P":
            palette = img.getpalette()
            image = ImageOps.expand(img, border=padding, **opts)
            image.putpalette(palette)
            return image

        return ImageOps.expand(img, border=padding, **opts)
    else:
        if isinstance(padding, int):
            pad_left = pad_right = pad_top = pad_bottom = padding
        if isinstance(padding, tuple) and len(padding) == 2:
            pad_left = pad_right = padding[0]
            pad_top = pad_bottom = padding[1]
        if isinstance(padding, tuple) and len(padding) == 4:
            pad_left = padding[0]
            pad_top = padding[1]
            pad_right = padding[2]
            pad_bottom = padding[3]

        p = [pad_left, pad_top, pad_right, pad_bottom]
        cropping = -np.minimum(p, 0)

        if cropping.any():
            crop_left, crop_top, crop_right, crop_bottom = cropping
            img = img.crop((crop_left, crop_top, img.width - crop_right, img.height - crop_bottom))

        pad_left, pad_top, pad_right, pad_bottom = np.maximum(p, 0)

        if img.mode == "P":
            palette = img.getpalette()
            img = np.asarray(img)
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), mode=padding_mode)
            img = Image.fromarray(img)
            img.putpalette(palette)
            return img

        img = np.asarray(img)
        # RGB image
        if len(img.shape) == 3:
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), padding_mode)
        # Grayscale image
        if len(img.shape) == 2:
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)

        return Image.fromarray(img)
    def __call__(self, image: Image.Image) -> Image.Image:
        return image.resize(self.size, self.interpolation)
Example #54
def hflip(img: Image.Image) -> Image.Image:
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")

    return img.transpose(Image.FLIP_LEFT_RIGHT)
Example #55
    def resize_img(self, img: Image.Image):
        width, height = img.size
        scale = width / self.target_width
        img = img.resize((int(width / scale), int(height / scale)))
        return img
Example #56
def vflip(img: Image.Image) -> Image.Image:
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")

    return img.transpose(Image.FLIP_TOP_BOTTOM)
Example #57
def reshape(x: Image.Image, h, w, resample=0):
    "`resize` `x` to `(w,h)`"
    return x.resize((w, h), resample=resample)
Example #58
def ensure_rgb_format(image: Image.Image) -> Image.Image:
    return image.convert("RGB")
Example #59
    def __call__(self, img: Image.Image):
        if random.random() < self.p:
            return img.transpose(Image.FLIP_LEFT_RIGHT)
        return img
Example #60
def to_bytes_format(im: Image.Image, format='png'):
    "Convert to bytes, default to PNG format"
    arr = io.BytesIO()
    im.save(arr, format=format)
    return arr.getvalue()
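
A hedged round-trip sketch (assuming Pillow; the in-memory buffer stands in for a real file):

import io
from PIL import Image

img = Image.new("RGB", (8, 8), (255, 0, 0))
data = to_bytes_format(img)
restored = Image.open(io.BytesIO(data))
print(restored.format, restored.size)  # PNG (8, 8)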