Code example #1
def convert_image_to_array(image: PIL.Image.Image) -> np.ndarray:
    if image.mode != "RGB":
        image = image.convert("RGB")

    (im_width, im_height) = image.size

    return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)
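For context, a minimal usage sketch (the path is a placeholder); note that np.asarray performs the same conversion in one step:

img = PIL.Image.open("photo.jpg")  # hypothetical input path
arr = convert_image_to_array(img)
assert arr.shape == (img.height, img.width, 3)

# Equivalent one-liner: np.asarray reads the pixel buffer directly.
arr_alt = np.asarray(img.convert("RGB"), dtype=np.uint8)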
Code example #2
def save_image(img: PIL.Image.Image, img_path: str) -> None:
    """
    Saves PIL.Image.

    Parameters:
        img (PIL.Image) : image to save.
        img_path (str) : path of the image.
    Returns: 
        None.
    """
    img.save(img_path)
Code example #3
def strip_image(image: PIL.Image.Image) -> PIL.Image.Image:
    """Remove white and black edges."""
    if image.mode != "RGBA":
        image = image.convert("RGBA")  # getpixel below expects four channels

    for x in range(image.width):
        for y in range(image.height):
            r, g, b, _ = image.getpixel((x, y))

            # Make near-white pixels fully transparent black so that getbbox()
            # treats them as background.
            if r > 247 and g > 247 and b > 247:
                image.putpixel((x, y), (0, 0, 0, 0))

    # Crop away the blank border around the remaining content.
    return image.crop(image.getbbox())
Code example #4
    def preprocess_pil_image(self, pil_image: PIL.Image.Image):
        """Preprocess an image in PIL format."""
        # Convert the image to RGB if it is not already in that format.
        if pil_image.mode != "RGB":
            pil_image = pil_image.convert("RGB")

        # Resize the image to the dimensions the network expects.
        pil_image = pil_image.resize((self.image_size_x, self.image_size_y))

        # Convert the PIL image into a three-dimensional NumPy array
        # (`image` here is the keras.preprocessing.image module).
        np_array = image.img_to_array(pil_image)

        # Run the network's own preprocessing function on a batch of one.
        return self.preprocess_input(np.expand_dims(np_array, axis=0))
Code example #5
def invert(img: PIL.Image.Image) -> PIL.Image.Image:
    """Invert an image, i.e. take the complement of the correspondent array.

    Parameters
    ----------
    img : PIL.Image.Image
        Input image

    Returns
    -------
    PIL.Image.Image
        Inverted image
    """
    if img.mode == "RGBA":
        r, g, b, a = img.split()
        rgb_img = PIL.Image.merge("RGB", (r, g, b))

        inverted_img_rgb = PIL.ImageOps.invert(rgb_img)

        r2, g2, b2 = inverted_img_rgb.split()
        inverted_img = PIL.Image.merge("RGBA", (r2, g2, b2, a))

    else:
        inverted_img = PIL.ImageOps.invert(img)

    return inverted_img
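A brief usage sketch (the path is a placeholder); the RGBA branch exists so that the colour channels are complemented while the alpha channel is carried over unchanged:

icon = PIL.Image.open("icon.png").convert("RGBA")  # hypothetical input
negative = invert(icon)
# Alpha is preserved; only R, G, B are complemented.
assert negative.getchannel("A").tobytes() == icon.getchannel("A").tobytes()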
Code example #6
    def predict(self, image: PIL.Image.Image, top_k: int = 3) -> List[InferencerPrediction]:
        """
        Predict labels for image
        :param image:
        :param top_k:
        :return:
        """
        # resize the input image and preprocess it
        image = image.resize(self.target_size)
        image = tf.keras.preprocessing.image.img_to_array(image)
        image = tf.keras.applications.mobilenet_v2.preprocess_input(image)
        image = np.expand_dims(image, axis=0)

        # pass to model
        result = self.classifier.predict(image)

        result = sorted(
            zip(self.labels, np.squeeze(result).tolist()),
            key=lambda x: x[1],
            reverse=True,
        )

        result = result[:top_k]

        res = [InferencerPrediction(label=r[0], confidence=r[1]) for r in result]

        return res
Code example #7
def set_image_mpl_cmap(image: PIL.Image.Image, cmap: str):
    """
    Set a PIL.Image to use a matplotlib colour map

    See https://matplotlib.org/3.1.1/gallery/color/colormap_reference.html

    Args:
        image: Image to modify
        cmap: Matplotlib colour map name
    """
    image = image.copy()
    cmap = plt.get_cmap(cmap)
    value_levels = numpy.linspace(0, 1, 2**8)
    # `to_bytes` is assumed to scale the colormap's 0-1 floats to 0-255 ints;
    # keep only the R, G, B columns (drop alpha).
    palette = to_bytes(cmap(value_levels))[:, 0:3]
    image.putpalette(palette, "RGB")
    return image.convert("RGB")
Code example #8
    def predict(self, image: PIL.Image.Image):
        bytesio = io.BytesIO()
        image.save(bytesio, format='PNG')
        image_raw = tf.image.decode_image(bytesio.getvalue(), channels=3)
        image = tf.expand_dims(image_raw, 0)
        image = transform_images(image, self._flags["size"])

        boxes, scores, classes, nums = self._model(image)
        boxes, scores, classes, nums = boxes[0], scores[0], classes[0], nums[0]

        # Rank detections by confidence, highest first.
        scored_boxes = [(score, box) for box, score in zip(boxes, scores)]
        scored_boxes = sorted(scored_boxes, key=lambda x: -x[0])

        score, box = scored_boxes[0]

        return box.numpy()
Code example #9
File: model.py  Project: GroupLe/grouple-face-tagger
    def get_similars(self, target_pic: PIL.Image.Image,
                     pics_pool: List[PIL.Image.Image]) -> List[int]:

        composed = torchvision.transforms.Compose(
            [T.ToTensor(), T.Resize((128, 128))])
        cos = nn.CosineSimilarity()
        target_pic = composed(target_pic)

        target_emb = self.get_embedding(target_pic.unsqueeze(0))

        distance_counter = DistanceCounter(target_emb, cos)

        pics_pool = list(map(composed, pics_pool))

        processes = 5
        with Pool(processes) as pool:
            pics_emb_pool = list(
                tqdm(pool.imap(self.get_embedding_predict, pics_pool)))
            pics_similarity = list(
                tqdm(pool.imap(distance_counter.count_distance,
                               pics_emb_pool)))

        # Rank pool indices by similarity to the target, most similar first
        # (avoid reusing the name `pool`, which refers to the process pool above).
        ranked = sorted(enumerate(pics_similarity), key=itemgetter(1), reverse=True)
        return [idx for idx, _ in ranked]
Code example #10
def add_shapes(
    background: PIL.Image.Image,
    shape_img: PIL.Image.Image,
    shape_params,
) -> Tuple[List[Tuple[int, int, int, int, int]], PIL.Image.Image]:
    """Paste shapes onto background and return bboxes"""
    shape_bboxes: List[Tuple[int, int, int, int, int]] = []

    for i, shape_param in enumerate(shape_params):

        x = shape_param[-2]
        y = shape_param[-1]
        x1, y1, x2, y2 = shape_img.getbbox()
        bg_at_shape = background.crop((x1 + x, y1 + y, x2 + x, y2 + y))
        bg_at_shape.paste(shape_img, (0, 0), shape_img)
        background.paste(bg_at_shape, (x, y))
        # Slightly expand the bounding box in order to simulate variability with
        # the detection boxes. Always make the crop larger than needed because training
        # augmentations will only be able to crop down.
        dx = random.randint(0, int(0.1 * (x2 - x1)))
        dy = random.randint(0, int(0.1 * (y2 - y1)))
        x1 -= dx
        x2 += dx
        y1 -= dy
        y2 += dy

        background = background.crop((x1 + x, y1 + y, x2 + x, y2 + y))
        background = background.filter(ImageFilter.SMOOTH_MORE)
    # Unlike the variant in code example #15, this version never fills
    # shape_bboxes; return it alongside the image so the signature holds.
    return shape_bboxes, background.convert("RGB")
Code example #11
    def editImage(image: PIL.Image.Image,
                  command: List[str]) -> Optional[PIL.Image.Image]:
        # delete=False keeps the file on disk for the subprocess;
        # it is removed manually below.
        temp = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
        tempPath = temp.name

        image.save(temp, format='PNG')
        temp.close()

        # `{}` is where the file path should be inserted
        result = subprocess.run([(tempPath if segment == '{}' else segment)
                                 for segment in command])

        retValue = None
        if result.returncode == 0:
            retValue = PIL.Image.open(tempPath)

        os.unlink(tempPath)
        return retValue
Code example #12
def _binarize_image(img: PIL.Image.Image, threshold: float) -> PIL.Image.Image:
    """Convert to grayscale, then force pixels below `threshold` to 0 and the rest to 255."""
    output = img.convert("L")
    for x in range(output.width):
        for y in range(output.height):
            output.putpixel(
                xy=(x, y),
                value=0 if output.getpixel((x, y)) < threshold else 255,
            )
    return output
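Per-pixel putpixel loops are slow on large images. A sketch of an equivalent version using Image.point, which applies the same thresholding through a lookup table at C speed:

def _binarize_image_fast(img: PIL.Image.Image, threshold: float) -> PIL.Image.Image:
    # Same semantics as above: grayscale pixels below the threshold become 0,
    # everything else becomes 255.
    return img.convert("L").point(lambda p: 0 if p < threshold else 255)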
Code example #13
def _resize_image_pair(
    img1: PIL.Image.Image,
    img2: PIL.Image.Image,
    trim1: bool = True,
    trim2: bool = True,
) -> typing.Tuple[PIL.Image.Image, PIL.Image.Image]:
    def _trim_helper(img):
        return _trim_image(
            _trim_image(
                _quantize_color(img, distance=15),  # hardcoded threshold
                border_color=PIL.ImageColor.getcolor("white", "RGBA")))

    img1 = _trim_helper(img1) if trim1 else img1
    img2 = _trim_helper(img2) if trim2 else img2

    w = max(img1.width, img2.width)
    h = max(img1.height, img2.height)

    return img1.resize((w, h)), img2.resize((w, h))
Code example #14
File: image_util.py  Project: stfnwong/lernomatic
def make_power_2(img: PIL.Image.Image,
                 base: int,
                 interp_method=Image.BICUBIC) -> PIL.Image.Image:
    ow, oh = img.size
    h = int(np.round(oh / base) * base)
    w = int(np.round(ow / base) * base)
    if (h == oh) and (w == ow):
        return img

    return img.resize((w, h), interp_method)
Code example #15
def add_shapes(
    background: PIL.Image.Image,
    shape_imgs: List[PIL.Image.Image],
    shape_params,
    blur_radius: int,
) -> Tuple[List[Tuple[int, int, int, int, int]], PIL.Image.Image]:
    """Paste shapes onto background and return bboxes"""
    shape_bboxes: List[Tuple[int, int, int, int, int]] = []

    for i, shape_param in enumerate(shape_params):

        x = shape_param[-2]
        y = shape_param[-1]
        shape_img = shape_imgs[i]
        shape_img = shape_img.filter(ImageFilter.GaussianBlur(blur_radius))
        x1, y1, x2, y2 = shape_img.getbbox()
        bg_at_shape = background.crop((x1 + x, y1 + y, x2 + x, y2 + y))
        bg_at_shape.paste(shape_img, (0, 0), shape_img)
        bg_at_shape = bg_at_shape.filter(ImageFilter.SMOOTH_MORE)
        background.paste(bg_at_shape, (x, y))

        im_w, im_h = background.size
        x /= im_w
        y /= im_h

        w = (x2 - x1) / im_w
        h = (y2 - y1) / im_h

        shape_bboxes.append((CLASSES.index(shape_param[0]), x, y, w, h))
        """
        shape_bboxes.append(
            (
                CLASSES.index(shape_param[2]),
                x + (0.1 * w),
                y + (0.1 * h),
                0.8 * w,
                0.8 * h,
            )
        )
        """
    return shape_bboxes, background.convert("RGB")
Code example #16
File: code.py  Project: LZY2006/Text-to-image
def get_text_from_img(img: PIL.Image.Image):

    width, height = img.size
    text = []
    for y in range(height):
        for x in range(width):
            red, green, blue = img.getpixel((x, y))
            # A zero red channel marks the end of the encoded text on this row.
            if red == 0:
                break
            # The character's code point is packed into green (high byte)
            # and blue (low byte).
            index = (green << 8) + blue
            text.append(chr(index))
    return "".join(text)
Code example #17
    def _serialize_example(self, image: PIL.Image.Image, label: str) -> bytes:
        """
        Creates a tf.Example message ready to be written to a file.
        """
        # Create a dictionary mapping the feature name to the tf.Example-compatible
        # data type.
        height = image.height
        width = image.width
        depth = len(image.getbands())
        image_bytes = image.tobytes()
        feature = {
            'height': self._int64_feature(height),
            'width': self._int64_feature(width),
            'depth': self._int64_feature(depth),
            'label': self._bytes_feature(tf.compat.as_bytes(label)),
            'image_raw': self._bytes_feature(image_bytes)
        }

        # Create a Features message using tf.train.Example.
        example_proto = tf.train.Example(features=tf.train.Features(
            feature=feature))
        return example_proto.SerializeToString()
Code example #18
def grid(image: PIL.Image.Image, grid_size,
         overlap) -> List[List[PIL.Image.Image]]:
    the_grid = []
    w, h = image.size
    for x in range(0, w, grid_size):
        row = []
        for y in range(0, h, grid_size):
            # Crops may extend past the image border; PIL pads those regions with zeros.
            row.append(
                image.crop((x - overlap // 2, y - overlap // 2,
                            x + grid_size + overlap // 2,
                            y + grid_size + overlap // 2)))
        the_grid.append(row)
    return the_grid
Code example #19
def _trim_image(
    img: PIL.Image.Image,
    border_color: typing.Optional[typing.Tuple[int, ...]] = None,
) -> PIL.Image.Image:

    if border_color is None:
        # Try each pixel of the top-left 2x2 block as a candidate border colour.
        trimmed = img
        for xy in itertools.product(range(2), range(2)):
            trimmed = _trim_image(
                img=trimmed,
                border_color=img.getpixel(xy),
            )
        return trimmed

    bg = PIL.Image.new(img.mode, img.size, border_color)
    diff = PIL.ImageChops.difference(img, bg)
    bbox = diff.getbbox()

    if bbox:
        return img.crop(bbox)
    else:
        # found no content
        raise ValueError("cannot trim; image was empty")
Code example #20
    def _serialize_example(self, id: str, image: PIL.Image.Image,
                           **kwargs) -> bytes:
        """
        Creates a tf.Example message ready to be written to a file.
        """
        # Create a dictionary mapping the feature name to the tf.Example-compatible
        # data type.
        height = image.height
        width = image.width
        depth = len(image.getbands())
        image_bytes = image.tobytes()

        feature = {
            'id': utils.bytes_feature(tf.compat.as_bytes(id)),
            'image_raw': utils.bytes_feature(image_bytes),
            'height': utils.int64_feature(height),
            'width': utils.int64_feature(width),
            'depth': utils.int64_feature(depth),
        }

        if "label" in kwargs:
            feature["label"] = utils.bytes_feature(
                tf.compat.as_bytes(kwargs["label"]))

        for key in set(kwargs.keys()).difference({'label'}):
            value = kwargs[key]
            if isinstance(value, (int, bool)):
                feature[key] = utils.int64_feature(value)
            elif isinstance(value, str):
                feature[key] = utils.bytes_feature(tf.compat.as_bytes(value))
            elif isinstance(value, float):
                feature[key] = utils.float_feature(value)

        # Create a Features message using tf.train.Example.
        example_proto = tf.train.Example(features=tf.train.Features(
            feature=feature))
        return example_proto.SerializeToString()
Code example #21
def preprocess_image(
    image: PIL.Image.Image,
    new_size: int = 256,
    mean: np.ndarray = np.array([0.40760392, 0.45795686, 0.48501961])
) -> torch.Tensor:
    assert isinstance(image, PIL.Image.Image)

    # use PIL here because it resamples properly
    # (https://twitter.com/jaakkolehtinen/status/1258102168176951299)
    image = image.resize((new_size, new_size), resample=PIL.Image.LANCZOS)

    # RGB to BGR
    r, g, b = image.split()
    image_bgr = PIL.Image.merge('RGB', (b, g, r))

    # normalization
    image_numpy = np.array(image_bgr, dtype=np.float32) / 255.0
    image_numpy -= mean
    image_numpy *= 255.0

    # [H, W, C] -> [N, C, H, W]
    image_numpy = np.transpose(image_numpy, (2, 0, 1))[None, :, :, :]

    return torch.from_numpy(image_numpy).to(torch.float32)
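A usage sketch (the path is a placeholder); since the array is rescaled by 255 after the subtraction, the two scalings cancel and the normalisation amounts to subtracting the per-channel mean, expressed in 0-255 units, from the BGR pixel values:

img = PIL.Image.open("content.jpg")  # hypothetical input
tensor = preprocess_image(img, new_size=256)
assert tensor.shape == (1, 3, 256, 256)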
Code example #22
File: image_util.py  Project: stfnwong/lernomatic
def img_to_tensor(img: PIL.Image.Image,
                  tensor_dtype: str = 'float') -> torch.Tensor:
    img = np.asarray(img)
    img = img.transpose(2, 1, 0)  # NOTE: yields C x W x H; most pipelines use (2, 0, 1) for C x H x W
    img = np.expand_dims(img, 0)  # NOTE: should we check shape?
    if tensor_dtype == 'float':
        t = torch.FloatTensor(img)
    elif tensor_dtype == 'double':
        t = torch.DoubleTensor(img)
    elif tensor_dtype == 'long':
        t = torch.LongTensor(img)
    else:
        raise ValueError('Unsupported dtype [%s]' % str(tensor_dtype))

    return t
Code example #23
    def setFrame(self, index: QtC.QModelIndex, image: PIL.Image.Image) -> None:
        direction = index.row()
        frame = index.column()

        leftMostChange = frame

        if len(self.state.icons[direction]) <= frame:
            leftMostChange = len(self.state.icons[direction])
            self.state.icons[direction].extend(
                [None] * (frame - len(self.state.icons[direction]) + 1))

        self.state.icons[direction][frame] = image.copy()

        self.dataChanged.emit(self.index(direction, leftMostChange),
                              self.index(direction, frame),
                              [QtC.Qt.DecorationRole])
Code example #24
def prepare_img_for_numpy(img: PIL.Image.Image) -> PIL.Image.Image:
    """
    Preparing image for transforming into numpy ndarray.

    Parameters:
        img (PIL.Image.Image) : image to be prepared.
    Returns:
        prepared_img (PIL.Image.Image) : prepared image.
    """
    if not isinstance(img, PIL.Image.Image):
        raise TypeError(
            "prepare_img_for_numpy: expected image of type PIL.Image.Image, got {0}"
            .format(type(img)))

    prepared_img = img.convert("RGB")
    return prepared_img
Code example #25
def zoom_region(image: PIL.Image.Image, lat: float, lon: float, scale: float,
                size: Tuple[int, int]):
    """
    Zooms to a specific region of an Aus400 rendered image

    The output image is centred at (lat, lon), is 'scale' degrees wide, with
    the height determined by the aspect ratio of 'size'

    Args:
        image: Source image, must cover the full d0036t grid
        lat: Central latitude
        lon: Central longitude
        scale: Output longitude width in degrees
        size: Output image size in pixels

    Returns:
        :obj:`PIL.Image.Image` with size 'size'
    """

    if image.size != (13194, 10554):
        raise ValueError(
            "Input image has unexpected size, make sure it is on the d0036t grid"
        )

    # Maintain output aspect ratio
    lat_scale = scale / size[0] * size[1]

    lon0 = lon - scale / 2
    lon1 = lon + scale / 2

    lat0 = lat - lat_scale / 2
    lat1 = lat + lat_scale / 2

    x0 = int((lon0 - 109.5106) / 0.0036)
    x1 = int((lon1 - 109.5106) / 0.0036)

    y0 = int((-8.8064 - lat0) / 0.0036)
    y1 = int((-8.8064 - lat1) / 0.0036)

    return image.transform(
        size=size,
        method=PIL.Image.QUAD,
        data=(x0, y1, x0, y0, x1, y0, x1, y1),
        resample=PIL.Image.BICUBIC,
        fillcolor=(0, 0, 0, 255),
    )
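A usage sketch with illustrative coordinates: a 5-degree-wide window centred on Sydney, rendered at 1920x1080:

rendered = PIL.Image.open("aus400_render.png")  # hypothetical; must cover the d0036t grid
zoomed = zoom_region(rendered, lat=-33.87, lon=151.21, scale=5.0, size=(1920, 1080))
zoomed.save("sydney_zoom.png")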
Code example #26
def postprocess_image(img: torch.Tensor,
                      target_img: PIL.Image.Image) -> PIL.Image.Image:
    assert img.shape[0] == 1 and img.shape[1] == 3
    assert isinstance(target_img, PIL.Image.Image)

    # resize target image if needed (= if it was resized in preprocessing)
    source_size = (img.shape[3], img.shape[2])
    target_size = target_img.size
    target_img = target_img.resize(source_size, resample=PIL.Image.LANCZOS)

    # convert both source and target to numpy
    target_img_numpy = np.array(target_img)
    img_numpy = img.numpy().squeeze().transpose(1, 2, 0)[:, :, ::-1]

    result = histogram_matching(img_numpy, target_img_numpy)
    result_pil = PIL.Image.fromarray(result.astype(np.uint8))
    return result_pil.resize(target_size, resample=PIL.Image.LANCZOS)
Code example #27
def apply_blur(img: PIL.Image.Image,
               blur_radius: int = BLUR_RADIUS) -> PIL.Image.Image:
    """
    Applies blur to a PIL.Image image.

    Parameters:
        img (PIL.Image) : image to be blurred.
        blur_radius (int) : blur radius, or blur strength.
    Returns:
        blurred_image (PIL.Image) : blurred image.
    """
    if not isinstance(img, PIL.Image.Image):
        raise TypeError(
            "apply_blur: expected img of type PIL.Image, got {0}".format(
                type(img)))

    blurred_image = img.convert("RGBA")
    blurred_image = blurred_image.filter(ImageFilter.GaussianBlur(blur_radius))
    return blurred_image
Code example #28
def apply_palette_reduction(
    img: PIL.Image.Image,
    reduced_palette_colors_count: int = REDUCED_PALETTE_COLORS_COUNT
) -> PIL.Image.Image:
    """
    Applies palette reduction for PIL.Image image.

    Parameters:
        img (PIL.Image) : image to be processed.
        reduced_palette_colors_count (int) : count of colors for processed image.
    Returns: 
        processed_image (PIL.Image) : processed image with pallete count reduced.
    """
    if not isinstance(img, PIL.Image.Image):
        raise TypeError(
            "apply_palette_reduction: expected img of type PIL.Image, got {0}".
            format(type(img)))

    processed_image = img.convert('P',
                                  palette=Image.ADAPTIVE,
                                  colors=reduced_palette_colors_count)
    return processed_image
Code example #29
File: resize.py  Project: Hultner/get-caged
def resize_keep_aspect_ratio(
    image: PIL.Image.Image, target_width: int, target_height: int
) -> Tuple[PIL.Image.Image, str]:
    """
    Resize image to target width, but keep the aspect ratio to avoid distorting the image.

    :param image: original image
    :param target_width: the width we want
    :param target_height: the height we want
    :return: a tuple of resized image, and string saying if it was resized by height or width
    """
    ratio_width = target_width / image.width
    ratio_height = target_height / image.height
    if ratio_width > ratio_height:
        resized_by = "width"
        resize_width = target_width
        resize_height = round(ratio_width * image.height)
    else:
        resized_by = "height"
        resize_width = round(ratio_height * image.width)
        resize_height = target_height

    resized_image = image.resize((resize_width, resize_height), Image.LANCZOS)
    return resized_image, resized_by
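A usage sketch: fit an image over a 640x480 target; the result covers the target in both dimensions and can then be centre-cropped:

img = PIL.Image.open("cat.jpg")  # hypothetical input
resized, resized_by = resize_keep_aspect_ratio(img, target_width=640, target_height=480)
print(resized.size, "matched exactly by", resized_by)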
Code example #30
def resize_img(img: PIL.Image.Image, size: tuple) -> PIL.Image.Image:
    # Image.ANTIALIAS is a deprecated alias for Image.LANCZOS (removed in Pillow 10).
    return img.resize(size=size, resample=Image.LANCZOS)