Example #1
def bgr2rgb(data, columns=None):
    '''
    Convert BGR images to RGB

    Parameters
    ----------
    data : PIL.Image or DataFrame
        The image data
    columns : string or list-of-strings, optional
        If `data` is a DataFrame, this is the list of columns that 
        contain image data.

    Returns
    -------
    :class:`PIL.Image`
        If `data` is a :class:`PIL.Image` 
    :class:`pandas.DataFrame`
        If `data` is a :class:`pandas.DataFrame`

    '''
    if hasattr(data, 'columns'):
        if len(data):
            if not columns:
                columns = list(data.columns)
            elif isinstance(columns, six.string_types):
                columns = [columns]
            for col in columns:
                if Image.isImageType(data[col].iloc[0]):
                    data[col] = data[col].apply(_bgr2rgb)
        return data

    elif Image.isImageType(data):
        return _bgr2rgb(data)

    return data
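
A minimal usage sketch for the helper above, assuming the module also defines the private `_bgr2rgb` converter and imports `six` and pandas; the 'photo' column name is hypothetical:

import pandas as pd
from PIL import Image

frame = pd.DataFrame({'name': ['cat'], 'photo': [Image.new('RGB', (4, 4))]})
frame = bgr2rgb(frame, columns='photo')     # swap channels only in the named column
single = bgr2rgb(Image.new('RGB', (4, 4)))  # or operate on a single PIL image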
Example #3
 def set_logon_screen(self, imageData):
     '''
     ***Must be run with administrator privileges***
     '''
     success = False
     screen = Monitor()
     savePath = self.pictures.logon_screen
     if not path.exists(savePath):
         os.makedirs(savePath)
     if isinstance(imageData, str) and path.exists(imageData):
         img = Image.open(imageData)
     try:
         if Image.isImageType(imageData):
             img = imageData
     except Exception:
         pass
     try:
         if Image.isImageType(img_loads(imageData)):
             img = img_loads(imageData)
     except Exception:
         pass
     try:
         logon_img = img.resize(screen.logon_dimensions(), Image.ANTIALIAS)
     except Exception:
         warn('Failed to resize image.')
     try:
         if self.enable_custom_logon_screen():
             logon_img.save(savePath + '\\backgroundDefault.jpg')
             success = True
         else:
             warn('Failed to enable custom logon screen')
     except Exception:
         warn('Failed to customize logon screen')
     return success
Example #4
def autoConver(path, forceImage=False):
    #====Define====#
    currentFile = autoReadFile(path, forceImage=forceImage)
    if Image.isImageType(currentFile):
        prbl(en="Image file read successfully!",
             zh="\u56fe\u50cf\u6587\u4ef6\u8bfb\u53d6\u6210\u529f\uff01")
        try:
            result = converImageToBinary(imageFile=currentFile,
                                         path=path,
                                         compressMode=False,
                                         message=True)
            result[1].close()
        except BaseException as e:
            printExcept(e, "autoConver()->")
        else:
            return
    if (not forceImage) and not Image.isImageType(currentFile):
        printPathBL(
            en="Now try to load %s as binary",
            zh=
            "\u73b0\u5728\u5c1d\u8bd5\u8bfb\u53d6\u4e8c\u8fdb\u5236\u6570\u636e %s",
            path=path)
    converBinaryToImage(path=path,
                        binaryFile=readBinary(path),
                        returnBytes=False,
                        compressMode=False,
                        message=True)
Example #5
def StyleTransfer(Ic, Is, alpha=0.5):
    try:
        model = torch.load("production_models/autoencoder.pt")
    except Exception as exc:
        # Raising a string is invalid in Python 3; raise a real exception instead.
        raise RuntimeError("FATAL ERROR: CAN'T LOAD MODEL") from exc

    if not (Image.isImageType(Ic) and Image.isImageType(Is)):
        try:
            Ic = Image.open(Ic).convert('RGB')
            Is = Image.open(Is).convert('RGB')
        except Exception as exc:
            raise IOError("Cannot load images.") from exc

    loader = custom_transform
    unloader = torchvision.transforms.ToPILImage()

    Ic = Variable(loader(Ic))
    Is = Variable(loader(Is))

    cF = model.encode(Ic.unsqueeze(0)).data.cpu().squeeze(0).view(64, 64 * 64)
    sF = model.encode(Is.unsqueeze(0)).data.cpu().squeeze(0).view(64, 64 * 64)
    #print(cF.shape)
    #print(sF.shape)
    csF = Variable(whiten_and_color(cF, sF))

    cF = cF.view(64, 64, 64).double()
    sF = sF.view(64, 64, 64).double()
    target = transform(cF, sF, csF, alpha)
    target = model.decode(target.float())

    target_img = unloader(target.view(3, img_shape[0], img_shape[1]))
    return target_img
Example #6
def resize_image_pil(orig_img, new_filename, size, fit=False, filter=Image.BICUBIC):
    if Image.isImageType(orig_img):
        orig_filename = orig_img.filename
    else:
        orig_filename = orig_img

    if os.path.realpath(orig_filename) == os.path.realpath(new_filename):
        raise IOException('Original and resized filename can not be the same: %s' % orig_filename)

    if not os.path.exists(new_filename):
        logging.debug("Generating %s image for %s" % (size, orig_filename))
        if not Image.isImageType(orig_img):
            orig_img = Image.open(orig_filename)

        if fit:
            thumb = ImageOps.fit(orig_img, size, filter)
            thumb.save(new_filename)
        else:
            orig_img.thumbnail(size, filter)
            orig_img.save(new_filename)

        logging.debug("Resizing complete")
        return True

    return False
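
A hedged usage sketch for the resizer above; the file names are hypothetical, and the `IOException` it raises is assumed to be defined elsewhere in the module:

from PIL import Image

# Resize from a path; the function opens the file itself.
resize_image_pil('photo.jpg', 'photo_small.jpg', (128, 128))

# Or pass an already opened PIL image and crop-to-fit the exact size.
resize_image_pil(Image.open('photo.jpg'), 'photo_fit.jpg', (64, 64), fit=True)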
Example #7
def scan_codes(code_type, image):
    """Get *code_type* codes from a PIL Image

    *code_type* can be any of zbar supported code type [#zbar_symbologies]_:

    - **EAN/UPC**: EAN-13 (`ean13`), UPC-A (`upca`), EAN-8 (`ean8`) and UPC-E (`upce`)
    - **Linear barcode**: Code 128 (`code128`), Code 93 (`code93`), Code 39 (`code39`), Interleaved 2 of 5 (`i25`),
      DataBar (`databar`) and DataBar Expanded (`databar-exp`)
    - **2D**: QR Code (`qrcode`)
    - **Undocumented**: `ean5`, `ean2`, `composite`, `isbn13`, `isbn10`, `codabar`, `pdf417`

    .. [#zbar_symbologies] http://zbar.sourceforge.net/iphone/userguide/symbologies.html

    Args:
        code_type (str): Code type to search
        image (PIL.Image.Image): Image to scan

    returns:
        A list of *code_type* code values or None
    """
    assert Image.isImageType(image)
    converted_image = image.convert('L')  # Convert image to gray scale (8 bits per pixel).
    raw = converted_image.tobytes()  # Get image data.
    width, height = converted_image.size  # Get image size.
    return zbar_code_scanner('{0}.enable'.format(code_type).encode(), raw, width, height)
Example #8
def totensor(input, t_size: tuple = None):
    r"""Converts image_file or PIL image to torch tensor.

    Args:
        input (str/pil-image): full path of image or pil-image
        t_size (list, optional): tensor_size in BCHW, used to resize the input
    """
    if isinstance(input, torch.Tensor):
        if t_size is not None:
            if len(t_size) == input.dim() == 4:
                if t_size[2] != input.size(2) or t_size[3] != input.size(3):
                    input = F.interpolate(input, size=t_size[2:])
        return input

    if isinstance(input, str):
        if not os.path.isfile(input):
            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
                                    input)
        input = ImPIL.open(input).convert("RGB")

    if ImPIL.isImageType(input):
        if t_size is not None:
            if t_size[1] == 1:
                input = input.convert("L")
            if t_size[2] != input.size[1] or t_size[3] != input.size[0]:
                input = input.resize((t_size[3], t_size[2]), ImPIL.BILINEAR)
    else:
        raise TypeError("totensor: input must be str/pil-imgage: "
                        "{}".format(type(input).__name__))
    tensor = _totensor(input)
    if tensor.dim() == 2:
        tensor.unsqueeze_(0)
    return tensor
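
A brief usage sketch, assuming the module's private `_totensor` helper behaves like torchvision's `to_tensor`:

from PIL import Image as ImPIL

pil_img = ImPIL.new('RGB', (32, 32))
tensor = totensor(pil_img, t_size=(1, 3, 64, 64))  # resized to 64x64 before conversion
print(tensor.shape)  # likely torch.Size([3, 64, 64]), depending on _totensor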
Example #9
    def to_pil(image, t_size: tuple = None, ltrb_boxes: np.ndarray = None):
        r"""Converts file_name or ndarray or 3D torch.Tensor to pillow image.
        Adjusts the ltrb_boxes when ltrb_boxes are provided along with t_size.

        Args:
            image (str/np.ndarray/torch.Tensor):
                input
            t_size (tuple, optional)
                BCHW (Ex: (None, 3, 60, 60)) used to convert to grey scale or
                resize
            ltrb_boxes (np.ndarray, optional)
                Must be pixel locations in (left, top, right, bottom).
        """
        if ImPIL.isImageType(image):
            o = image
        elif isinstance(image, str):
            o = ImPIL.open(image).convert("RGB")
        elif isinstance(image, np.ndarray):
            o = ImPIL.fromarray(image)
        elif isinstance(image, torch.Tensor):
            o = PillowUtils.tensor_to_pil(image)
        else:
            raise TypeError("to_pil: image must be str/np.ndarray/torch.Tensor"
                            ": {}".format(type(image).__name__))
        if t_size is not None and len(t_size) == 4:
            w, h = o.size
            if t_size[1] == 1:
                o = o.convert("L")
            if not (t_size[2] == w and t_size[3] == h):
                o = o.resize((t_size[3], t_size[2]), ImPIL.BILINEAR)
            if ltrb_boxes is not None:
                ltrb_boxes[:, 0::2] *= t_size[3] / w
                ltrb_boxes[:, 1::2] *= t_size[2] / h
                return o, ltrb_boxes
        return o
Example #10
def getBGPic(tiffPath, coords, x, y, picWidth, picHeight, overallWidth,
             overallHeight):
    """
	get single 80x80 image of background that does not contain any seals
	:param tiffPath: path to png parts
	:param coords: list of seal coordinates for given tiff file
	:param x: x coordinate of the background
	:param y: y coordinate of the background
	:param picWidth: width of png part
	:param picHeight: height of png part
	:param overallWidth: width of tiff file
	:param overallHeight: height of tiff file
	:return: the background as Image object
	"""
    reversedY = overallHeight - y
    curX = x - (int(x / picWidth)) * picWidth
    curY = reversedY - (int(reversedY / picHeight)) * picHeight
    reqPics = getRequiredPics(x, reversedY, picWidth, picHeight, overallWidth,
                              overallHeight, tiffPath)

    if (len(reqPics) == 1):
        if (not containsSeal(x, y, coords)):
            cropped = reqPics[0].crop(
                (curX, curY, curX + sealWidth, curY + sealWidth))
            if Image.isImageType(cropped):
                cropped = cropped.convert("RGB")
                return cropped
    else:
        return None
Example #11
def image_loader(img):
    # Convert image to tensor, resize to appropriate size, and load it to device
    if not Image.isImageType(img):
        img = Image.open(img)

    img = loader(img).unsqueeze(0)  # wrap image in tensor
    return img.to(device, torch.float)
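
The function relies on module-level `loader` and `device` globals that are not shown; a plausible setup (the transform sizes and file name are assumptions) might look like:

import torch
import torchvision.transforms as T

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
loader = T.Compose([T.Resize((224, 224)), T.ToTensor()])

batch = image_loader('photo.jpg')  # 1x3x224x224 float tensor on `device`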
Example #12
 def test_name(self):
     type_channel_modes = [('opencv', 'bgr', 'uint8'), ('pil', 'rgb', 'uint8'), ('numpy', 'bgr', 'uint8'),
                           ('opencv', 'bgr', 'float32'), ('numpy', 'bgr', 'float32')]
     to_np8 = lambda x: imfeat.convert_image(x, {'type': 'numpy', 'dtype': 'uint8', 'mode': 'bgr'})
     to_np32 = lambda x: imfeat.convert_image(x, {'type': 'numpy', 'dtype': 'float32', 'mode': 'bgr'})
     for fn in ['lena.jpg', 'lena.pgm', 'lena.ppm']:
         image_np8 = None
         image_np32 = None
         for i in load_images(fn):
             if image_np8 is None:
                 image_np8 = to_np8(i)
                 image_np32 = to_np32(i)
                 np.testing.assert_equal(image_np8, np.array(image_np32 * 255, dtype=np.uint8))
             for t, c, m in type_channel_modes:
                 cur_img = imfeat.convert_image(i, {'type': t, 'dtype': m, 'mode': c})
                 if t == 'opencv':
                     self.assertTrue(isinstance(cur_img, cv.iplimage))
                 elif t == 'pil':
                     self.assertTrue(Image.isImageType(cur_img))
                 else:
                     self.assertTrue(isinstance(cur_img, np.ndarray))
                 if m == 'uint8':
                     np.testing.assert_equal(image_np8, to_np8(cur_img))
                 else:
                     np.testing.assert_equal(image_np32, to_np32(cur_img))
Example #13
def fromimage(im, flatten=False, mode=None):
    if not Image.isImageType(im):
        raise TypeError("Input is not a PIL image.")

    if mode is not None:
        if mode != im.mode:
            im = im.convert(mode)
    elif im.mode == 'P':
        # Mode 'P' means there is an indexed "palette".  If we leave the mode
        # as 'P', then when we do `a = array(im)` below, `a` will be a 2-D
        # containing the indices into the palette, and not a 3-D array
        # containing the RGB or RGBA values.
        if 'transparency' in im.info:
            im = im.convert('RGBA')
        else:
            im = im.convert('RGB')

    if flatten:
        im = im.convert('F')
    elif im.mode == '1':
        # Workaround for crash in PIL. When im is 1-bit, the call array(im)
        # can cause a seg. fault, or generate garbage. See
        # https://github.com/scipy/scipy/issues/2138 and
        # https://github.com/python-pillow/Pillow/issues/350.
        #
        # This converts im from a 1-bit image to an 8-bit image.
        im = im.convert('L')

    a = array(im)
    return a
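
A short usage sketch for the converter above (it expects numpy's `array` in its module namespace):

from PIL import Image

img = Image.new('P', (8, 8))  # palette image, exercises the mode-'P' branch
arr = fromimage(img)
print(arr.shape, arr.dtype)   # (8, 8, 3) uint8 after the RGB conversion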
Example #14
def _prepare_frame(image):
    """Image must be a PIL image.

    Returns:
        a numpy uint8 rgba array suitable for quikklycore input.
        format
        width
        height
        bytes_per_row
    """
    from PIL import Image
    import numpy

    if not Image.isImageType(image):
        raise QuikklyError('prepare_frame() requires a PIL image.')

    w, h = image.size
    # TODO: this could use some optimisation
    image = image.convert(mode='RGBA')
    frame = numpy.array(image, dtype=numpy.uint8)

    assert frame.shape == (h, w, 4)
    assert frame.dtype == numpy.uint8

    return frame, QC_IMAGE_FORMAT_RGBA_UINT32, w, h, w * 4
Example #15
    def display_image(self, image, resize_factor=None):
        """
    Display a cv2 image by converting it to PIL so it can be shown with display().
    This is a workaround for Jupyter Notebook, as it doesn't support cv2.imshow().

    :param image: image data as a cv2/numpy array, a PIL image, or a file path
    :param resize_factor: optional scale factor applied to the returned image
    """
        # pil images
        if Image.isImageType(image):
            out = image

        # cv2 images
        elif isinstance(image, np.ndarray):
            if isinstance(image[0][0], np.uint8):
                _io = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
            elif isinstance(image, np.ndarray) and len(image[0][0]) == 3:
                _io = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            elif isinstance(image, np.ndarray) and len(image[0][0]) == 4:
                _io = cv2.cvtColor(image, cv2.COLOR_BGRA2RGB)
            else:
                _io = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
            out = Image.fromarray(_io)

        # path to image
        elif isinstance(image, str):
            out = Image.open(image)

        # anything else would leave `out` unbound
        else:
            raise TypeError("Unsupported image type: %s" % type(image).__name__)

        if resize_factor:
            width, height = out.size
            self._logger.info("Resizing by factor %s" % resize_factor)
            out = out.resize(
                (int(width * resize_factor), int(height * resize_factor)),
                Image.ANTIALIAS)

        return out
Example #16
def fromimage(im, flatten=0):
    """
    Return a copy of a PIL image as a numpy array.

    Parameters
    ----------
    im : PIL image
        Input image.
    flatten : bool
        If true, convert the output to grey-scale.

    Returns
    -------
    fromimage : ndarray
        The different colour bands/channels are stored in the
        third dimension, such that a grey-image is MxN, an
        RGB-image MxNx3 and an RGBA-image MxNx4.

    """
    if not Image.isImageType(im):
        raise TypeError("Input is not a PIL image.")
    if flatten:
        im = im.convert('F')
    elif im.mode == '1':
        # workaround for crash in PIL, see #1613.
        im = im.convert('L')

    return array(im)
Example #18
    def _open_image_source(self):
        if system_config.PIL_available:
            if isinstance(self._image_source, str):
                # the source is a string, so try and open as a path
                self._pil_image = Image.open(self._image_source)
                self._tk_image = ImageTk.PhotoImage(self._pil_image)

            elif Image.isImageType(self._image_source):
                # the source is a PIL Image
                self._pil_image = self._image_source
                self._tk_image = ImageTk.PhotoImage(self._pil_image)

            elif isinstance(self._image_source, (PhotoImage, ImageTk.PhotoImage)):
                self._tk_image = self._image_source

            else:
                raise Exception("Image must be a file path, PIL.Image or tkinter.PhotoImage")

        else:
            if isinstance(self._image_source, str):
                self._tk_image = PhotoImage(file=self._image_source)

            elif isinstance(self._image_source, PhotoImage):
                self._tk_image = self._image_source

            else:
                raise Exception("Image must be a file path or tkinter.PhotoImage")
Example #19
    def pad(self, input: np.array = None, padding: int = None):
        """
    Add zero-value padding around the input matrix based on the padding size
    """
        pad = padding if padding is not None else self.padding
        left_input = top_input = pad
        right_input = bottom_input = padded_input_rows = padded_input_cols = depth = None
        if Image.isImageType(input):
            right_input, bottom_input = input.size[0] + pad, input.size[1] + pad
            padded_input_rows, padded_input_cols = input.size[
                1] + 2 * pad, input.size[0] + 2 * pad
            depth = len(input.getbands())
            # Convert the PIL image to a channel-first ndarray so the CHW
            # padding logic below works (Image.fromarray on an Image was a bug).
            input = np.atleast_3d(np.array(input)).transpose(2, 0, 1)
        else:
            if input is not None:
                right_input, bottom_input = input.shape[2] + pad, input.shape[
                    1] + pad
                padded_input_rows, padded_input_cols = input.shape[
                    1] + 2 * pad, input.shape[2] + 2 * pad
                depth = input.shape[0]
            elif self.input_shape:
                right_input, bottom_input = self.input_shape[
                    1] + pad, self.input_shape[0] + pad
                padded_input_rows, padded_input_cols = self.input_shape[
                    0] + 2 * pad, self.input_shape[1] + 2 * pad
                depth = self.input_depth

        result = np.zeros_like(input,
                               shape=(depth, padded_input_rows,
                                      padded_input_cols))
        result[:, top_input:bottom_input,
               left_input:right_input] = input[:, :, :]
        input = result
        return input
Example #20
def pil_to_byte(pil_image):
    if not Image.isImageType(pil_image):
        raise Exception("Image must be of type '" + str(Image) + "'!")
    img_byte_arr = io.BytesIO()
    pil_image.save(img_byte_arr, format=pil_image.format)
    img_byte_arr.seek(0)
    return img_byte_arr.read()
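
A round-trip usage sketch; note that `pil_image.format` is only set for images opened from a file, so 'photo.jpg' here stands in for an existing file:

import io
from PIL import Image

img = Image.open('photo.jpg')
payload = pil_to_byte(img)                  # raw bytes in the image's original format
restored = Image.open(io.BytesIO(payload))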
Example #21
def read_codes(image,
               barcode_type=BarcodeType.NONE,
               try_harder=False,
               hybrid=False,
               search_multi=False):
    """
    Reads codes from a PIL Image.

    Args:
        image (PIL.Image.Image): Image to read barcodes from.
        barcode_type (zxinglight.BarcodeType): Barcode type to look for.
        try_harder (bool): Spend more time trying to find a barcode.
        hybrid (bool): Use Hybrid Binarizer instead of Global Binarizer. For more information,
            see `ZXing's documentation`_.

    Returns:
        A list of barcode values.

    .. _ZXing's documentation:
        https://zxing.github.io/zxing/apidocs/com/google/zxing/Binarizer.html
    """

    if not Image.isImageType(image):
        raise ValueError('Provided image is not a PIL image')

    if not isinstance(barcode_type, BarcodeType):
        raise ValueError('barcode_type is not an enum member of BarcodeType')

    grayscale_image = image.convert('L')

    raw_image = grayscale_image.tobytes()
    width, height = grayscale_image.size

    return zxing_read_codes(raw_image, width, height, barcode_type, try_harder,
                            hybrid, search_multi)
Example #22
def show_landmarks(image, landmarks):
    """Show image with landmarks"""
    if Image.isImageType(image):
        image = np.asarray(image)
    plt.imshow(image)
    plt.scatter(landmarks[:, 0], landmarks[:, 1], s=10, marker='.', c='r')
    plt.pause(0.001)  # pause a bit so that plots are updated
Example #23
    def _update_meta(self, handle, warn=True):
        self._debug("Update meta", handle, warn)
        iitems = ['shape', 'dtype', 'size', 'ndim', 'nbytes', 'pages']
        oitems = ['inferred_shape', 'inferred_dtype', 'inferred_size', 'inferred_ndim', 'inferred_nbytes', 'pages']
        for i, o in zip(iitems, oitems):
            v = getattr(handle, i, None)

            # Special treatment
            if i == 'size' and isinstance(v, (tuple, list)):  # handle pytiff bug + PIL.Image
                v = np.multiply.reduce(v)
            elif i == 'shape' and Image.isImageType(handle):
                v = getattr(handle, 'size', None)
            elif i == 'pages' and v is not None and isinstance(v, (list, tuple)):
                v = len(v)

            if warn and hasattr(self, o):
                c = getattr(self, o)
                if None not in (c, v) and c != v:
                    warnings.warn(f'Encountered different {i}! Expected {c} but got {v}.')
            setattr(self, o, v)
            self._debug("    update", i, o, v, "result:", getattr(self, o))

        # Special treatment
        if self.inferred_ndim is None:
            self.inferred_ndim = len(self.shape)
Example #24
def scan_codes(code_type, image):
    """
    Get *code_type* codes from a PIL Image.

    *code_type* can be any of zbar supported code type [#zbar_symbologies]_:

    - **EAN/UPC**: EAN-13 (`ean13`), UPC-A (`upca`), EAN-8 (`ean8`) and UPC-E (`upce`)
    - **Linear barcode**: Code 128 (`code128`), Code 93 (`code93`), Code 39 (`code39`), Interleaved 2 of 5 (`i25`),
      DataBar (`databar`) and DataBar Expanded (`databar-exp`)
    - **2D**: QR Code (`qrcode`)
    - **Undocumented**: `ean5`, `ean2`, `composite`, `isbn13`, `isbn10`, `codabar`, `pdf417`

    .. [#zbar_symbologies] http://zbar.sourceforge.net/iphone/userguide/symbologies.html

    Args:
        code_type (str): Code type to search (see ``zbarlight.Symbologies`` for supported values)
        image (PIL.Image.Image): Image to scan

    returns:
        A list of *code_type* code values or None

    """
    if not Image.isImageType(image):
        raise RuntimeError('Bad or unknown image format')
    converted_image = image.convert(
        'L')  # Convert image to gray scale (8 bits per pixel).
    raw = converted_image.tobytes()  # Get image data.
    width, height = converted_image.size  # Get image size.
    symbologie = Symbologies.get(code_type.upper())
    if not symbologie:
        raise UnknownSymbologieError('Unknown Symbologie: %s' % code_type)
    return zbar_code_scanner(symbologie, raw, width, height)
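
A hedged call sketch for the scanner above, assuming a zbarlight-style module where `Symbologies` and `zbar_code_scanner` come from the C extension; 'qr.png' is a hypothetical file:

from PIL import Image

with open('qr.png', 'rb') as fp:
    img = Image.open(fp)
    img.load()                     # force-read pixel data before the file closes
codes = scan_codes('qrcode', img)  # list of byte strings, or None if nothing was found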
Example #25
def fromimage(image, flatten=False, mode=None, dtype=None):
    """
    Return the data from an input PIL image as a `numpy.ndarray`.
    
    Parameters
    ----------
    im : PIL image
        Input image.
    flatten : bool, optional
        If true, convert the output to greyscale. Default is False.
    mode : str / Mode, optional
        Mode to convert image to, e.g. ``'RGB'``. See the Notes of the
        `imread` docstring for more details.
    dtype : str / ``numpy.dtype``, optional
        Numpy dtype to which to cast the output image array data,
        e.g. ``'float64'`` or ``'uint16'``. 
    
    Returns
    -------
    fromimage : ndarray (rank 2..3)
        The individual color channels of the input image are stored in the
        third dimension, such that greyscale (`L`) images are MxN (rank-2),
        `RGB` images are MxNx3 (rank-3), and `RGBA` images are MxNx4 (rank-3).
    """
    from PIL import Image

    if not Image.isImageType(image):
        raise TypeError(f"Input is not a PIL image (got {image!r})")

    if mode is not None:
        if not Mode.is_mode(mode):
            mode = Mode.for_string(mode)
        image = mode.process(image)
    elif Mode.of(image) is Mode.P:
        # Mode 'P' means there is an indexed "palette".  If we leave the mode
        # as 'P', then when we do `a = numpy.array(im)` below, `a` will be a 2D
        # containing the indices into the palette, and not a 3D array
        # containing the RGB or RGBA values.
        if 'transparency' in image.info:
            image = Mode.RGBA.process(image)
        else:
            image = Mode.RGB.process(image)

    if flatten:
        image = Mode.F.process(image)
    elif Mode.of(image) is Mode.MONO:
        # Workaround for crash in PIL. When im is 1-bit, the call numpy.array(im)
        # can cause a seg. fault, or generate garbage. See
        # https://github.com/scipy/scipy/issues/2138 and
        # https://github.com/python-pillow/Pillow/issues/350.
        # This converts im from a 1-bit image to an 8-bit image.
        image = Mode.L.process(image)

    out = numpy.array(image)

    if dtype is not None:
        return out.astype(numpy.dtype(dtype))

    return out
Example #26
 def detect_objects(self, image):
     if isinstance(image, str):
         return self.detect_objects_in_image(image)
     elif Image.isImageType(image):
         return self.detect_objects_in_pil_image(image)
     elif isinstance(image, np.ndarray):
         return self.detect_objects_in_np_image(image)
     return None
Example #27
def openimg(img):
	if (Image.isImageType(img)): return img
	try: return Image.open(open(img.split('file://', maxsplit=1)[-1], 'rb') if (isinstance(img, str)) else img)
	except Exception:
		import requests
		try: return Image.open(requests.get(img, stream=True).raw)
		except Exception: ex = True
		if (ex): raise
Example #28
def jpg_compress(x, quality=(70, 90)):
    if not Image.isImageType(x):
        x = Image.fromarray(x)
    out = BytesIO()
    x.save(out,
           format='jpeg',
           quality=np.random.randint(quality[0], quality[1]))
    x = Image.open(out)
    return x
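
A small usage sketch; the input array is synthetic, and the function also accepts a PIL image directly:

import numpy as np

rgb = np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8)
degraded = jpg_compress(rgb, quality=(70, 90))  # PIL image with JPEG artifacts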
Example #29
def fromimage(im, flatten=False, mode=None):
    """
    Return a copy of a PIL image as a numpy array.

    This function is only available if Python Imaging Library (PIL) is installed.

    Parameters
    ----------
    im : PIL image
        Input image.
    flatten : bool
        If true, convert the output to grey-scale.
    mode : str, optional
        Mode to convert image to, e.g. ``'RGB'``.  See the Notes of the
        `imread` docstring for more details.

    Returns
    -------
    fromimage : ndarray
        The different colour bands/channels are stored in the
        third dimension, such that a grey-image is MxN, an
        RGB-image MxNx3 and an RGBA-image MxNx4.

    """
    if not pillow_installed:
        raise ImportError("The Python Imaging Library (PIL) "
                          "is required to load data from jpeg files")

    if not Image.isImageType(im):
        raise TypeError("Input is not a PIL image.")

    if mode is not None:
        if mode != im.mode:
            im = im.convert(mode)
    elif im.mode == 'P':
        # Mode 'P' means there is an indexed "palette".  If we leave the mode
        # as 'P', then when we do `a = array(im)` below, `a` will be a 2-D
        # containing the indices into the palette, and not a 3-D array
        # containing the RGB or RGBA values.
        if 'transparency' in im.info:
            im = im.convert('RGBA')
        else:
            im = im.convert('RGB')

    if flatten:
        im = im.convert('F')
    elif im.mode == '1':
        # Workaround for crash in PIL. When im is 1-bit, the call array(im)
        # can cause a seg. fault, or generate garbage. See
        # https://github.com/scipy/scipy/issues/2138 and
        # https://github.com/python-pillow/Pillow/issues/350.
        #
        # This converts im from a 1-bit image to an 8-bit image.
        im = im.convert('L')

    a = array(im)
    return a
Example #30
def imread_topil(img_or_path):
    if is_str(img_or_path):
        return Image.open(img_or_path)
    elif isinstance(img_or_path, np.ndarray):
        return np_img_to_pil(img_or_path)
    elif Image.isImageType(img_or_path):
        return img_or_path
    else:
        raise TypeError('"img" must be a numpy array or a filename or a PIL Image')
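
A usage sketch, assuming the module's `is_str` and `np_img_to_pil` helpers behave as their names suggest and that 'photo.jpg' exists:

import numpy as np

pil_a = imread_topil('photo.jpg')                    # from a path
pil_b = imread_topil(np.zeros((8, 8, 3), np.uint8))  # from a numpy array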
Example #32
def equalize_v_hist(img):
    is_pil = False
    if Image.isImageType(img):
        img = np.array(img)
        is_pil = True
    img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    img_hsv[:, :, 2] = cv2.equalizeHist(img_hsv[:, :, 2])
    if is_pil:
        return Image.fromarray(cv2.cvtColor(img_hsv, cv2.COLOR_HSV2RGB))
    return cv2.cvtColor(img_hsv, cv2.COLOR_HSV2RGB)
Example #33
def image_from_array(image, normalize=False):
    if isinstance(image, np.ndarray):
        maxval = image.max()
        if maxval <= 1:
            image *= 256
        if normalize:
            image /= maxval
        image = Image.fromarray(image.astype('uint8'))
    assert Image.isImageType(image)
    return image
Example #35
    def test_bytes2image(self):
        # Image
        jpg = open(os.path.join(DATA_DIR, 'scooter.jpg'), 'rb').read()
        img = bytes2image(jpg)
        self.assertTrue(Image.isImageType(img))

        # DataFrame
        df_jpg = pd.DataFrame([['a', jpg]], columns=['Name', 'Image'])
        df_jpg = bytes2image(df_jpg)
        self.assertEqual(list(df_jpg.columns), ['Name', 'Image'])
        self.assertTrue(Image.isImageType(df_jpg['Image'][0]))

        # DataFrame with columns
        df_jpg = pd.DataFrame([['a', jpg]], columns=['Name', 'Image'])
        df_jpg = bytes2image(df_jpg, columns='Image')
        self.assertEqual(list(df_jpg.columns), ['Name', 'Image'])
        self.assertTrue(Image.isImageType(df_jpg['Image'][0]))

        # Unknown data
        self.assertEqual(bytes2image(10), 10)
Example #36
    def scale(self, original, size):
        if not Image.isImageType(original):
            raise TypeError('Scaling can only occur using a PIL Image')

        if not isinstance(size, tuple) or len(size) != 2:
            raise ValueError('Size must be a (width, height) tuple')

        image = original.copy()
        image.thumbnail(size, Image.ANTIALIAS)
        thumbnailIO = StringIO()
        image.save(thumbnailIO, original.format, quality=90)
        thumbnailIO.seek(0)
        return thumbnailIO
Example #37
 def __init__(self, image, mode, **kwargs):
     assert Image.isImageType(image)
     assert Mode.is_mode(mode)
     self.verbose = bool(kwargs.get('verbose', False))
     if self.verbose:
         label = junkdrawer.or_none(image, 'filename') \
             and os.path.basename(getattr(image, 'filename')) \
             or str(image)
         print("ModeContext.__init__: configured with image: %s" % label)
     self.initial_image = image
     self.image = None
     self.final_image = None
     self.original_mode = Mode.of(image)
     self.mode = mode
Example #38
def scan_codes(code_types, image):
    """
    Get *code_type* codes from a PIL Image.

    *code_type* can be any of zbar supported code type [#zbar_symbologies]_:

    - **EAN/UPC**: EAN-13 (`ean13`), UPC-A (`upca`), EAN-8 (`ean8`) and UPC-E (`upce`)
    - **Linear barcode**: Code 128 (`code128`), Code 93 (`code93`), Code 39 (`code39`), Interleaved 2 of 5 (`i25`),
      DataBar (`databar`) and DataBar Expanded (`databar-exp`)
    - **2D**: QR Code (`qrcode`)
    - **Undocumented**: `ean5`, `ean2`, `composite`, `isbn13`, `isbn10`, `codabar`, `pdf417`

    .. [#zbar_symbologies] http://zbar.sourceforge.net/iphone/userguide/symbologies.html

    Args:
        code_types (list(str)): Code type(s) to search (see ``zbarlight.Symbologies`` for supported values).
        image (PIL.Image.Image): Image to scan

    returns:
        A list of *code_type* code values or None

    """
    if isinstance(code_types, str):
        code_types = [code_types]
        warnings.warn(
            'Using a str for code_types is deprecated, please use a list of str instead',
            DeprecationWarning,
        )

    # Translate symbologies
    symbologies = [
        Symbologies.get(code_type.upper())
        for code_type in set(code_types)
    ]

    # Check that all symbologies are known
    if None in symbologies:
        bad_code_types = [code_type for code_type in code_types if code_type.upper() not in Symbologies]
        raise UnknownSymbologieError('Unknown Symbologies: %s' % bad_code_types)

    # Convert the image to be used by c-extension
    if not Image.isImageType(image):
        raise RuntimeError('Bad or unknown image format')
    converted_image = image.convert('L')  # Convert image to gray scale (8 bits per pixel).
    raw = converted_image.tobytes()  # Get image data.
    width, height = converted_image.size  # Get image size.

    return zbar_code_scanner(symbologies, raw, width, height)
Example #39
def hist(filename_or_img):
    if Image.isImageType(filename_or_img):
        im = filename_or_img
    else:
        im = Image.open(filename_or_img)

    hist = np.array(im.histogram())

    print(hist.shape)
    if hist.shape[0] > 257:
        R,G,B = np.split(hist,3)
        result = [R,G,B]
    else:
        result = hist

    return result
Example #40
def _load_list(srclist):
    """
    Internal function for loading the content from a list.

    Image files are converted to `numpy.ndarray`;
    empty classes are normalized to a string of length 0.

    Parameters
    ----------
    srclist: list of tuples
        A list of tuples, the first entry being a string, a PIL Image, or a
        `numpy.ndarray` instance and the second being a class (None for no
        class).

    Returns
    -------
    result: list of tuples
        The method creates a list of tuples, with first entry in tuple being
        `numpy.ndarray` instances and second being targets (None for no
        target) - integer classes (classification) or real values
        (regression).
    """

    # we're going to accumulate Images and categories here
    result = []

    for img, cls in srclist:
        if isinstance(img, basestring):
            imgin = Image.open(img)
        elif isinstance(img, numpy.ndarray):
            imgin = Image.fromarray(img)
        elif isinstance(img, Image.Image):
            imgin = img
        elif Image.isImageType(img):
            imgin = img
        else:
            raise ValueError("Valid input for images are strings (a "
                             "path towards a file), pil images "
                             "and numpy arrays; %s is not supported" %
                             str(img.__class__))
        if cls is None:
            cls = ''
        imgin = imgin.convert('RGB')
        result.append((numpy.array(imgin), cls))
    return result
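
A hedged usage sketch; the path is hypothetical, and `numpy`, `Image`, and `basestring` (this is Python 2 code) are assumed to be available at module level:

srclist = [('photos/cat.jpg', 1),                        # path plus integer class
           (numpy.zeros((8, 8, 3), numpy.uint8), None)]  # array with no class
pairs = _load_list(srclist)  # -> [(ndarray, 1), (ndarray, '')]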
Example #41
def fromimage(im, flatten=False, mode=None):
    """
    Return a copy of a PIL image as a numpy array.

    Parameters
    ----------
    im : PIL image
        Input image.
    flatten : bool
        If true, convert the output to grey-scale.
    mode : str, optional
        Mode to convert image to, e.g. ``'RGB'``.  See the Notes of the
        `imread` docstring for more details.

    Returns
    -------
    fromimage : ndarray
        The different colour bands/channels are stored in the
        third dimension, such that a grey-image is MxN, an
        RGB-image MxNx3 and an RGBA-image MxNx4.

    """
    if not Image.isImageType(im):
        raise TypeError("Input is not a PIL image.")

    if mode is not None:
        im = im.convert(mode)

    if flatten:
        im = im.convert('F')
    elif im.mode == '1':
        # Workaround for crash in PIL. When im is 1-bit, the call array(im)
        # can cause a seg. fault, or generate garbage. See
        # https://github.com/scipy/scipy/issues/2138 and
        # https://github.com/python-pillow/Pillow/issues/350.
        #
        # This converts im from a 1-bit image to an 8-bit image.
        im = im.convert('L')

    a = array(im)
    return a
Example #42
	def __init__(self, data):
		"""init from file path, bytes data, Numpy array, NpImage, PIL Image or GDAL dataset"""

		if self.IFACE is None:
			raise ImportError("No image lib available")
		
		self.data = None

		#init from numpy array
		if isinstance(data, np.ndarray):
			self.data = data
		
		#init from bytes data
		if isinstance(data, bytes):
			self.data = self._npFromBLOB(data)
		
		#init from file path
		if isinstance(data, str):
			if os.path.exists(data):
				self.data = self._npFromPath(data)
			else:
				raise ValueError('Unable to load image data')
	
		#init from another NpImage instance
		if isinstance(data, NpImage):
			self.data = data.data

		#init from GDAL dataset instance
		if HAS_GDAL:
			if isinstance(data, gdal.Dataset):
				self.data = self._npFromGDAL(data)

		#init from PIL Image instance
		if HAS_PIL:
			if Image.isImageType(data):
				self.data = self._npFromPIL(data)
	
		if self.data is None:
			raise ValueError('Unable to load image data')
Example #43
def hist_hsv(filename_or_img):
    #img = scipy.misc.imread(filename_or_img)
    if Image.isImageType(filename_or_img):
        img = filename_or_img
    else:
        img = Image.open(filename_or_img)
    array = np.asarray(img)
    arr = (array.astype(float))/255.0
    img_hsv = colors.rgb_to_hsv(arr[...,:3])

    h = np.histogram(img_hsv[..., 0].flatten() * 255, bins=256, range=(0.0, 255.0))[0]
    s = np.histogram(img_hsv[..., 1].flatten() * 255, bins=256, range=(0.0, 255.0))[0]
    v = np.histogram(img_hsv[..., 2].flatten() * 255, bins=256, range=(0.0, 255.0))[0]

    print('H')
    print(h.shape)


    hsv = np.array((h,s,v))

    print('hsv')
    print(hsv.shape)

    return hsv
Example #44
def extract_colors(
        filename_or_img, min_saturation=config.MIN_SATURATION,
        min_distance=config.MIN_DISTANCE, max_colors=config.MAX_COLORS,
        min_prominence=config.MIN_PROMINENCE, n_quantized=config.N_QUANTIZED):
    """
    Determine what the major colors are in the given image.
    """
    if Image.isImageType(filename_or_img):
        im = filename_or_img
    else:
        temp_image = Image.open(filename_or_img)
        # Workaround for "Too many open files" issue in Pillow
        # see https://github.com/python-pillow/Pillow/issues/1237
        im = temp_image.copy()
        temp_image.close()

    # get point color count
    if im.mode != 'RGB':
        im = im.convert('RGB')
    im = autocrop(im, config.WHITE)  # assume white box
    im = im.convert(
        'P', palette=Image.ADAPTIVE, colors=n_quantized).convert('RGB')
    dist = Counter({color: count for count, color
                    in im.getcolors(n_quantized)})
    n_pixels = mul(*im.size)

    # aggregate colors
    to_canonical = {config.BLACK: config.BLACK}
    aggregated = Counter({config.BLACK: 0})
    sorted_cols = sorted(dist.items(), key=itemgetter(1), reverse=True)
    for c, n in sorted_cols:
        if c in aggregated:
            # exact match!
            aggregated[c] += n
        else:
            d, nearest = min((distance(c, alt), alt) for alt in aggregated)
            if d < min_distance:
                # nearby match
                aggregated[nearest] += n
                to_canonical[c] = nearest
            else:
                # no nearby match
                aggregated[c] = n
                to_canonical[c] = c

    # order by prominence
    colors = sorted(
        [Color(c, n / float(n_pixels)) for c, n in aggregated.items()],
        key=attrgetter('prominence'), reverse=True)

    colors, bg_color = detect_background(im, colors, to_canonical)

    # keep any color which meets the minimum saturation
    sat_colors = [c for c in colors if meets_min_saturation(c, min_saturation)]
    if bg_color and not meets_min_saturation(bg_color, min_saturation):
        bg_color = None
    if sat_colors:
        colors = sat_colors
    else:
        # keep at least one color
        colors = colors[:1]

    # keep any color that hits the min prominence value
    # only colours sufficiently different from the background color will be kept
    color_list = []
    color_count = 0
    min_distance_to_bg = 2

    for color in colors:

        similar_to_bg = bg_color and (distance(color.value, bg_color.value) < min_distance_to_bg)
        if color.prominence > min_prominence and not similar_to_bg:
            color_list.append(color)
            color_count += 1

        if color_count >= max_colors:
            break

    return Palette(color_list, bg_color)
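
A call sketch for the extractor above; the file name is hypothetical, and the attribute names on the returned `Palette` are assumptions based on how it is constructed here:

palette = extract_colors('poster.jpg', max_colors=5)
for color in palette.colors:  # Color(value, prominence) entries
    print(color.value, round(color.prominence, 3))
print('background:', palette.bgcolor)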
Example #45
import sys
from PIL import Image, ImageFilter

size = (128,128)
myimage = Image.open(r"C:\Users\Public\Pictures\Sample Pictures\Jellyfish.jpg")

if Image.isImageType(myimage):
    print("Yes Image")
else:
    print("Not an Image")

list1 = ['BHE','BOW','FHB']


myimage.load()
myimage.show()
blurred = myimage.filter(ImageFilter.BLUR)
blurred.thumbnail(size)
blurred.show()

sys.exit()
Example #46
def extract_colors(filename_or_img, min_saturation=MIN_SATURATION,
        min_distance=MIN_DISTANCE, max_colors=MAX_COLORS,
        min_prominence=MIN_PROMINENCE, n_quantized=N_QUANTIZED,
        is_auto_crop=True, is_auto_detect=True, is_manual_crop=False,
        manual_crop_percent=None):
    """
    Determine what the major colors are in the given image.
    """
    if Im.isImageType(filename_or_img):
        im = filename_or_img
    else:
        im = Im.open(filename_or_img)

    # get point color count
    if im.mode != 'RGB':
        im = im.convert('RGB')
    if is_auto_crop:
        im = autocrop(im, WHITE) # assume white box
    elif is_manual_crop and manual_crop_percent:
        im = im.crop((int(im.size[0]*manual_crop_percent[0]),
            int(im.size[1]*manual_crop_percent[1]),
            int(im.size[0]*manual_crop_percent[2]),
            int(im.size[1]*manual_crop_percent[3])))
    im = im.convert('P', palette=Im.ADAPTIVE, colors=n_quantized,
            ).convert('RGB')
    data = im.getdata()
    dist = Counter(data)
    n_pixels = mul(*im.size)

    # aggregate colors
    to_canonical = {WHITE: WHITE, BLACK: BLACK}
    aggregated = Counter({WHITE: 0, BLACK: 0})
    sorted_cols = sorted(dist.iteritems(), key=itemgetter(1), reverse=True)
    for c, n in sorted_cols:
        if c in aggregated:
            # exact match!
            aggregated[c] += n
        else:
            d, nearest = min((distance(c, alt), alt) for alt in aggregated)
            if d < min_distance:
                # nearby match
                aggregated[nearest] += n
                to_canonical[c] = nearest
            else:
                # no nearby match
                aggregated[c] = n
                to_canonical[c] = c

    # order by prominence
    colors = sorted((Color(c, n / float(n_pixels)) \
                for (c, n) in aggregated.iteritems()),
            key=attrgetter('prominence'),
            reverse=True)

    if is_auto_detect:
        colors, bg_color = detect_background(im, colors, to_canonical)

        # keep any color which meets the minimum saturation
        sat_colors = [c for c in colors if meets_min_saturation(c, min_saturation)]
        if bg_color and not meets_min_saturation(bg_color, min_saturation):
            bg_color = None
        if sat_colors:
            colors = sat_colors
        else:
            # keep at least one color
            colors = colors[:1]
    else:
        bg_color = None

    # keep any color within 10% of the majority color
    colors = [c for c in colors if c.prominence >= colors[0].prominence
            * min_prominence][:max_colors]
    return Palette(colors, bg_color)
Example #47
def extract_colors(
        filename_or_img, min_saturation=config.MIN_SATURATION,
        min_distance=config.MIN_DISTANCE, max_colors=config.MAX_COLORS,
        min_prominence=config.MIN_PROMINENCE, n_quantized=config.N_QUANTIZED):
    """
    Determine what the major colors are in the given image.
    """
    if Image.isImageType(filename_or_img):
        im = filename_or_img
    else:
        im = Image.open(filename_or_img)

    # get point color count
    if im.mode != 'RGB':
        im = im.convert('RGB')
    im = autocrop(im, config.WHITE)  # assume white box
    im = im.convert(
        'P', palette=Image.ADAPTIVE, colors=n_quantized).convert('RGB')
    data = im.getdata()
    dist = Counter(data)
    n_pixels = mul(*im.size)

    # aggregate colors
    to_canonical = {config.WHITE: config.WHITE, config.BLACK: config.BLACK}
    aggregated = Counter({config.WHITE: 0, config.BLACK: 0})
    sorted_cols = sorted(dist.iteritems(), key=itemgetter(1), reverse=True)
    for c, n in sorted_cols:
        if c in aggregated:
            # exact match!
            aggregated[c] += n
        else:
            d, nearest = min((distance(c, alt), alt) for alt in aggregated)
            if d < min_distance:
                # nearby match
                aggregated[nearest] += n
                to_canonical[c] = nearest
            else:
                # no nearby match
                aggregated[c] = n
                to_canonical[c] = c

    # order by prominence
    colors = sorted(
        [Color(c, n / float(n_pixels)) for c, n in aggregated.iteritems()],
        key=attrgetter('prominence'), reverse=True)

    colors, bg_color = detect_background(im, colors, to_canonical)

    # keep any color which meets the minimum saturation
    sat_colors = [c for c in colors if meets_min_saturation(c, min_saturation)]
    if bg_color and not meets_min_saturation(bg_color, min_saturation):
        bg_color = None
    if sat_colors:
        colors = sat_colors
    else:
        # keep at least one color
        colors = colors[:1]

    # keep any color within 10% of the majority color
    color_list = []
    color_count = 0

    for color in colors:
        if color.prominence >= colors[0].prominence * min_prominence:
            color_list.append(color)
            color_count += 1

        if color_count >= max_colors:
            break

    return Palette(color_list, bg_color)
Example #48
	def __init__(self, data, subBoxPx=None, noData=None, georef=None, adjustGeoref=False):
		'''
		init from file path, bytes data, Numpy array, NpImage, PIL Image or GDAL dataset
		subBoxPx : a BBOX object in pixel coordinate space used as a data filter (will be applied) (y counting from top)
		noData : the value used to represent nodata, will be used to define a numpy mask
		georef : a Georef object used to set georeferencing information, optional
		adjustGeoref : determines whether the submitted georef must be adjusted against the subbox or is already correct

		Notes :
		* With GDAL the subbox filter can be applied at read time, whereas with the other imaging
		libraries all the data must be extracted before the subset can be taken (using a numpy slice).
		In this case the dataset must fit entirely in memory, otherwise an overflow error will be raised.
		* If no georef was submitted and the class is initialized with GDAL support or from another NpImage instance,
		the existing georef of the input data will be automatically extracted and adjusted against the subbox
		'''
		self.IFACE = self._getIFACE()

		self.data = None
		self.subBoxPx = subBoxPx
		self.noData = noData

		self.georef = georef
		if self.subBoxPx is not None and self.georef is not None:
			if adjustGeoref:
				self.georef.setSubBoxPx(subBoxPx)
				self.georef.applySubBox()

		#init from another NpImage instance
		if isinstance(data, NpImage):
			self.data = self._applySubBox(data.data)
			if data.isGeoref and not self.isGeoref:
				self.georef = data.georef
				#adjust georef against subbox
				if self.subBoxPx is not None:
					self.georef.setSubBoxPx(subBoxPx)
					self.georef.applySubBox()

		#init from numpy array
		if isinstance(data, np.ndarray):
			self.data = self._applySubBox(data)

		#init from bytes data (BLOB)
		if isinstance(data, bytes):
			self.data = self._npFromBLOB(data)

		#init from file path
		if isinstance(data, str):
			if os.path.exists(data):
				self.data = self._npFromPath(data)
			else:
				raise ValueError('Unable to load image data')

		#init from GDAL dataset instance
		if HAS_GDAL:
			if isinstance(data, gdal.Dataset):
				self.data = self._npFromGDAL(data)

		#init from PIL Image instance
		if HAS_PIL:
			if Image.isImageType(data):
				self.data = self._npFromPIL(data)

		if self.data is None:
			raise ValueError('Unable to load image data')

		#Mask nodata value to avoid bias when computing min or max statistics
		if self.noData is not None:
			self.data = np.ma.masked_array(self.data, self.data == self.noData)
Example #49
def fromimage(image, flatten=False, mode=None, dtype=None):
    """
    Return the data from an input PIL image as a `numpy.ndarray`.
    
    Parameters
    ----------
    im : PIL image
        Input image.
    flatten : bool, optional
        If true, convert the output to greyscale. Default is False.
    mode : str / Mode, optional
        Mode to convert image to, e.g. ``'RGB'``. See the Notes of the
        `imread` docstring for more details.
    dtype : str / ``numpy.dtype``, optional
        Numpy dtype to which to cast the output image array data,
        e.g. ``'float64'`` or ``'uint16'``. 
    
    Returns
    -------
    fromimage : ndarray (rank 2..3)
        The individual color channels of the input image are stored in the
        third dimension, such that greyscale (`L`) images are MxN (rank-2),
        `RGB` images are MxNx3 (rank-3), and `RGBA` images are MxNx4 (rank-3).
    """
    from PIL import Image
    
    if not Image.isImageType(image):
        raise TypeError("Input is not a PIL image (got %s)" % repr(image))
    
    if mode is not None:
        if not Mode.is_mode(mode):
            mode = Mode.for_string(mode)
        image = mode.process(image)
    elif Mode.of(image) is Mode.P:
        # Mode 'P' means there is an indexed "palette".  If we leave the mode
        # as 'P', then when we do `a = numpy.array(im)` below, `a` will be a 2D
        # containing the indices into the palette, and not a 3D array
        # containing the RGB or RGBA values.
        if 'transparency' in image.info:
            image = Mode.RGBA.process(image)
        else:
            image = Mode.RGB.process(image)
    
    if flatten:
        image = Mode.F.process(image)
    elif Mode.of(image) is Mode.MONO:
        # Workaround for crash in PIL. When im is 1-bit, the call numpy.array(im)
        # can cause a seg. fault, or generate garbage. See
        # https://github.com/scipy/scipy/issues/2138 and
        # https://github.com/python-pillow/Pillow/issues/350.
        # This converts im from a 1-bit image to an 8-bit image.
        image = Mode.L.process(image)
    
    out = numpy.array(image)
    
    if dtype is not None:
        return out.astype(numpy.dtype(dtype))
    
    return out