Example #1
def decode_rle(data, width, height, depth, version):
    row_size = max(width * depth // 8, 1)
    with io.BytesIO(data) as fp:
        bytes_counts = read_be_array(('H', 'I')[version - 1], height, fp)
        return b''.join(
            rle_impl.decode(fp.read(count), row_size)
            for count in bytes_counts)
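
A quick usage sketch (hypothetical caller: data, width, height, depth and version are placeholders, and rle_impl.decode is assumed to return one row_size-byte row per call). The flat result can be sliced back into height scanlines:

# Hypothetical usage: slice the decoded blob back into per-row scanlines.
decoded = decode_rle(data, width, height, depth, version)
row_size = max(width * depth // 8, 1)
rows = [decoded[i * row_size:(i + 1) * row_size] for i in range(height)]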
Example #2
def read_image_data(fp, header):
    """
    Reads merged image pixel data which is stored at the end of PSD file.
    """
    w, h = header.width, header.height
    compress_type = read_fmt("H", fp)[0]

    bytes_per_pixel = header.depth // 8

    channel_byte_counts = []
    if compress_type == Compression.PACK_BITS:
        for ch in range(header.number_of_channels):
            if header.version == 1:
                channel_byte_counts.append(read_be_array("H", h, fp))
            elif header.version == 2:
                channel_byte_counts.append(read_be_array(
                    "I", h, fp))  # Undocumented...

    channel_data = []
    for channel_id in range(header.number_of_channels):

        data = None

        if compress_type == Compression.RAW:
            data_size = w * h * bytes_per_pixel
            data = fp.read(data_size)

        elif compress_type == Compression.PACK_BITS:
            byte_counts = channel_byte_counts[channel_id]
            data_size = sum(byte_counts)
            data = fp.read(data_size)

        # are there any ZIP-encoded composite images in the wild?
        elif compress_type == Compression.ZIP:
            warnings.warn(
                "ZIP compression of composite image is not supported.")

        elif compress_type == Compression.ZIP_WITH_PREDICTION:
            warnings.warn(
                "ZIP_WITH_PREDICTION compression of composite image is not supported."
            )

        if data is None:
            return []
        channel_data.append(ChannelData(compress_type, data))

    return channel_data
Example #3
def decode_packbits(data, height, version):
    with io.BytesIO(data) as fp:
        bytes_counts = read_be_array(
            ('H', 'I')[version - 1], height, fp
        )
        return b''.join(
            packbits.decode(fp.read(count)) for count in bytes_counts
        )
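
For context, packbits.decode above implements Apple's PackBits run-length scheme: a header byte n is followed by n + 1 literal bytes when 0 <= n <= 127, or by a single byte repeated 257 - n times when 129 <= n <= 255 (128 is a no-op). A minimal, self-contained decoder sketch is shown below (illustrative only; the examples rely on the external packbits package):

def packbits_decode_sketch(encoded):
    # Decode Apple PackBits run-length data (illustrative sketch).
    out = bytearray()
    i = 0
    while i < len(encoded):
        n = encoded[i]
        i += 1
        if n < 128:
            # n + 1 literal bytes follow.
            out += encoded[i:i + n + 1]
            i += n + 1
        elif n > 128:
            # The next byte is repeated 257 - n times.
            out += bytes([encoded[i]]) * (257 - n)
            i += 1
        # n == 128 is a no-op.
    return bytes(out)

For example, packbits_decode_sketch(b"\xfe\xaa\x02\x80\x00\x2a") yields b"\xaa\xaa\xaa\x80\x00\x2a".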
Example #4
def decode_virtual_memory_array_list(fp, w, h):
    """
    Reads a virtual memory array list: a repeating sequence of channel
    arrays plus the user mask and sheet mask. Only RAW and PACK_BITS
    data is handled for now.
    """
    start = fp.tell()
    version, length = read_fmt("II", fp)
    top, left, bottom, right, channels = read_fmt("IIIIi", fp)
    logger.debug("rectangle=(%s, %s, %s, %s), channels=%s",
                 top, left, bottom, right, channels)

    channel_data = []
    array_written = read_fmt("I", fp)[0]  # 0 means no more arrays follow

    while array_written != 0:
        length2 = read_fmt("I", fp)[0]  # 0 means the array is empty
        data_start = fp.tell()
        expected_end = data_start + length2

        bytes_per_pixel = read_fmt("I", fp)[0]  # pixel depth: 1, 8, 16 or 32
        rectangle = fp.read(16)
        pixel_depth = fp.read(2)
        # Observed: 1 = zip here, which does not match the spec.
        compress_type = fp.read(1)[0]

        data = None
        if compress_type == Compression.RAW:
            # The 23 header bytes read above are included in length2.
            data = fp.read(length2 - 23)

        elif compress_type == Compression.PACK_BITS:
            # Per-row byte counts precede the PackBits data; the row count
            # is hard-coded here.
            byte_counts = read_be_array("H", 200, fp)
            data_size = sum(byte_counts)
            data = fp.read(data_size)

        else:
            # TODO: add support for the remaining compression types.
            warnings.warn("Unsupported compression type %s" % compress_type)

        if data is not None:
            channel_data.append(ChannelData(compress_type, data))
        logger.debug("expected end=%s, actual pos=%s", expected_end, fp.tell())

        array_written = read_fmt("I", fp)[0]

    return channel_data
Example #5
def _read_channel_image_data(fp, layer, depth):
    """
    Reads image data for all channels in a layer.
    """
    channel_data = []

    bytes_per_pixel = depth // 8

    for channel in layer.channels:
        if channel.id == ChannelID.USER_LAYER_MASK:
            w, h = layer.mask_data.width(), layer.mask_data.height()
        else:
            w, h = layer.width(), layer.height()

        start_pos = fp.tell()
        compress_type = read_fmt("H", fp)[0]

        data = None

        if compress_type == Compression.RAW:
            data_size = w * h * bytes_per_pixel
            data = fp.read(data_size)

        elif compress_type == Compression.PACK_BITS:
            byte_counts = read_be_array("H", h, fp)
            data_size = sum(byte_counts) * bytes_per_pixel
            data = fp.read(data_size)

        elif compress_type == Compression.ZIP:
            data = zlib.decompress(fp.read(channel.length - 2))

        elif compress_type == Compression.ZIP_WITH_PREDICTION:
            decompressed = zlib.decompress(fp.read(channel.length - 2))
            data = compression.decode_prediction(decompressed, w, h, bytes_per_pixel)

        if data is None:
            return []

        channel_data.append(ChannelData(compress_type, data))

        remaining_bytes = channel.length - (fp.tell() - start_pos) - 2
        if remaining_bytes > 0:
            fp.seek(remaining_bytes, 1)

    return channel_data
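
compression.decode_prediction, used for ZIP_WITH_PREDICTION above, is not shown in these examples. Conceptually it reverses per-row horizontal delta encoding: each stored sample is the difference from its left neighbour, so decoding is a running sum along every scanline. A rough sketch for 8-bit samples (an assumption; 16-bit data needs per-item sums and 32-bit data additionally needs byte reordering):

def decode_prediction_sketch(data, w, h, bytes_per_pixel):
    # Illustrative sketch: undo horizontal delta prediction for 8-bit samples.
    if bytes_per_pixel != 1:
        raise NotImplementedError("sketch handles 8-bit samples only")
    row_size = w * bytes_per_pixel
    out = bytearray(data)
    for row in range(h):
        offset = row * row_size
        for x in range(1, row_size):
            # Each sample stores the difference from the previous one.
            out[offset + x] = (out[offset + x] + out[offset + x - 1]) & 0xFF
    return bytes(out)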
Example #6
def read_image_data(fp, header):
    """
    Reads merged image pixel data which is stored at the end of PSD file.
    """
    w, h = header.width, header.height
    bytes_per_pixel = header.depth // 8
    compress_type = read_fmt("H", fp)[0]

    logger.debug('reading composite image data...')
    logger.debug('  start_pos=%s, compress_type=%s',
                 fp.tell() - 2, Compression.name_of(compress_type))

    if compress_type == Compression.RAW:
        data_size = w * h * bytes_per_pixel
        logger.debug('  data size = %sx%sx%s=%s bytes', w, h, bytes_per_pixel, data_size)

    elif compress_type == Compression.PACK_BITS:
        channel_byte_counts = []
        for _ in range(header.number_of_channels):
            channel_byte_counts.append(read_be_array("H", h, fp))

    # are there any ZIP-encoded composite images in the wild?
    elif compress_type in (Compression.ZIP, Compression.ZIP_WITH_PREDICTION):
        warnings.warn(
            "%s compression of composite image is not supported." %
            Compression.name_of(compress_type)
        )
        return []

    else:
        warnings.warn("Bad compression type %s" % compress_type)
        return []

    channel_data = []
    for channel_id in range(header.number_of_channels):
        if compress_type == Compression.PACK_BITS:
            byte_counts = channel_byte_counts[channel_id]
            data_size = sum(byte_counts)
            logger.debug('  data size = %s bytes', data_size)

        data = fp.read(data_size)
        channel_data.append(ChannelData(compress_type, data))

    return channel_data
Example #7
def read_image_data(fp, header):
    """
    Reads merged image pixel data which is stored at the end of PSD file.
    """
    w, h = header.width, header.height
    compress_type = read_fmt("H", fp)[0]

    bytes_per_pixel = header.depth // 8

    channel_byte_counts = []
    if compress_type == Compression.PACK_BITS:
        for ch in range(header.number_of_channels):
            channel_byte_counts.append(read_be_array("H", h, fp))

    channel_data = []
    for channel_id in range(header.number_of_channels):

        data = None

        if compress_type == Compression.RAW:
            data_size = w * h * bytes_per_pixel
            data = fp.read(data_size)

        elif compress_type == Compression.PACK_BITS:
            byte_counts = channel_byte_counts[channel_id]
            data_size = sum(byte_counts) * bytes_per_pixel
            data = fp.read(data_size)

        # are there any ZIP-encoded composite images in the wild?
        elif compress_type == Compression.ZIP:
            warnings.warn("ZIP compression of composite image is not supported.")

        elif compress_type == Compression.ZIP_WITH_PREDICTION:
            warnings.warn("ZIP_WITH_PREDICTION compression of composite image is not supported.")

        if data is None:
            return []
        channel_data.append(ChannelData(compress_type, data))

    return channel_data
Example #8
def test_read_be_array_from_file_like_objects():
    fp = BytesIO(b"\x00\x01\x00\x05")
    res = read_be_array("H", 2, fp)
    assert list(res) == [1, 5]
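
This test pins down what read_be_array is expected to do: read count big-endian items of the given array type code from a file-like object. A minimal sketch that satisfies it, using only the standard library (the real helper may differ):

import array
import sys

def read_be_array_sketch(fmt, count, fp):
    # Read `count` big-endian items of array type code `fmt` (e.g. "H", "I").
    arr = array.array(fmt)
    arr.frombytes(fp.read(count * arr.itemsize))
    if sys.byteorder == "little":
        arr.byteswap()  # stored data is big-endian
    return arr

With the test's input, read_be_array_sketch("H", 2, BytesIO(b"\x00\x01\x00\x05")) returns array('H', [1, 5]).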
Example #9
def _read_channel_image_data(fp, layer, depth):
    """
    Reads image data for all channels in a layer.
    """
    bytes_per_pixel = depth // 8
    channel_data = []

    for channel in layer.channels:
        logger.debug("  reading %s", channel)

        if channel.id == ChannelID.USER_LAYER_MASK:
            w, h = layer.mask_data.width(), layer.mask_data.height()
        elif channel.id == ChannelID.REAL_USER_LAYER_MASK:
            w, h = layer.mask_data.real_width(), layer.mask_data.real_height()
        else:
            w, h = layer.width(), layer.height()

        start_pos = fp.tell()
        compress_type = read_fmt("H", fp)[0]

        logger.debug("    start_pos=%s, compress_type=%s",
                     start_pos, Compression.name_of(compress_type))

        # read or calculate data size
        if compress_type == Compression.RAW:
            data_size = w * h * bytes_per_pixel
            logger.debug('    data size = %sx%sx%s=%s bytes', w, h, bytes_per_pixel, data_size)

        elif compress_type == Compression.PACK_BITS:
            byte_counts = read_be_array("H", h, fp)
            data_size = sum(byte_counts)
            logger.debug('    data size = %s bytes', data_size)

        elif compress_type in (Compression.ZIP, Compression.ZIP_WITH_PREDICTION):
            data_size = channel.length - 2
            logger.debug('    data size = %s-2=%s bytes', channel.length, data_size)

        else:
            warnings.warn("Bad compression type %s" % compress_type)
            return []

        # read the data itself
        if data_size > channel.length:
            warnings.warn("Incorrect data size: %s > %s" % (data_size, channel.length))
        else:
            data = fp.read(data_size)
            if compress_type == Compression.ZIP:
                data = zlib.decompress(data)
            elif compress_type == Compression.ZIP_WITH_PREDICTION:
                data = zlib.decompress(data)
                data = compression.decode_prediction(data, w, h, bytes_per_pixel)
                if data is None:
                    warnings.warn("Prediction decode failed!")
                    return []

            channel_data.append(ChannelData(compress_type, data))

        remaining_length = channel.length - (fp.tell() - start_pos)
        if remaining_length > 0:
            fp.seek(remaining_length, 1)
            logger.debug('    skipping %s bytes', remaining_length)

    return channel_data
Example #10
def _read_channel_image_data(fp, layer, depth):
    """
    Reads image data for all channels in a layer.
    """
    channel_data = []

    bytes_per_pixel = depth // 8

    for idx, channel in enumerate(layer.channels):
        logger.debug("  reading %s", channel)
        if channel.id == ChannelID.USER_LAYER_MASK:
            w, h = layer.mask_data.width(), layer.mask_data.height()
        elif channel.id == ChannelID.REAL_USER_LAYER_MASK:
            w, h = layer.mask_data.real_width(), layer.mask_data.real_height()
        else:
            w, h = layer.width(), layer.height()

        start_pos = fp.tell()
        compress_type = read_fmt("H", fp)[0]

        logger.debug("    start_pos=%s, compress_type=%s", start_pos,
                     Compression.name_of(compress_type))

        data = None

        # read data size
        if compress_type == Compression.RAW:
            data_size = w * h * bytes_per_pixel
            logger.debug('    data size = %sx%sx%s=%s bytes', w, h,
                         bytes_per_pixel, data_size)

        elif compress_type == Compression.PACK_BITS:
            byte_counts = read_be_array("H", h, fp)
            data_size = sum(byte_counts)
            logger.debug('    data size = %s bytes', data_size)

        elif compress_type in (Compression.ZIP,
                               Compression.ZIP_WITH_PREDICTION):
            data_size = channel.length - 2
            logger.debug('    data size = %s-2=%s bytes', channel.length,
                         data_size)

        else:
            warnings.warn("Bad compression type %s" % compress_type)
            return []

        # read the data itself
        if data_size > channel.length:
            warnings.warn("Incorrect data size: %s > %s" %
                          (data_size, channel.length))
        else:
            raw_data = fp.read(data_size)
            if compress_type in (Compression.RAW, Compression.PACK_BITS):
                data = raw_data
            elif compress_type == Compression.ZIP:
                data = zlib.decompress(raw_data)
            elif compress_type == Compression.ZIP_WITH_PREDICTION:
                decompressed = zlib.decompress(raw_data)
                data = compression.decode_prediction(decompressed, w, h,
                                                     bytes_per_pixel)

            if data is None:
                return []

            channel_data.append(ChannelData(compress_type, data))

        remaining_length = channel.length - (fp.tell() - start_pos)
        if remaining_length > 0:
            fp.seek(remaining_length, 1)
            logger.debug('    skipping %s bytes', remaining_length)

    return channel_data
Example #11
def _read_channel_image_data(fp, layer, depth):
    """
    Reads image data for all channels in a layer.
    """
    channel_data = []

    bytes_per_pixel = depth // 8

    for idx, channel in enumerate(layer.channels):
        logger.debug("  reading %s", channel)
        if channel.id == ChannelID.USER_LAYER_MASK:
            w, h = layer.mask_data.width(), layer.mask_data.height()
        else:
            w, h = layer.width(), layer.height()

        start_pos = fp.tell()
        compress_type = read_fmt("H", fp)[0]

        logger.debug("    start_pos=%s, compress_type=%s",
                     start_pos, Compression.name_of(compress_type))

        data = None

        # read data size
        if compress_type == Compression.RAW:
            data_size = w * h * bytes_per_pixel
            logger.debug('    data size = %sx%sx%s=%s bytes', w, h, bytes_per_pixel, data_size)

        elif compress_type == Compression.PACK_BITS:
            byte_counts = read_be_array("H", h, fp)
            sum_counts = sum(byte_counts)
            data_size = sum_counts * bytes_per_pixel
            logger.debug('    data size = %sx%s=%s bytes', sum_counts, bytes_per_pixel, data_size)

        elif compress_type == Compression.ZIP:
            data_size = channel.length - 2
            logger.debug('    data size = %s-2=%s bytes', channel.length, data_size)

        elif compress_type == Compression.ZIP_WITH_PREDICTION:
            data_size = channel.length - 2
            logger.debug('    data size = %s-2=%s bytes', channel.length, data_size)

        else:
            warnings.warn("Bad compression type %s" % compress_type)
            return []

        # read the data itself
        if data_size > channel.length:
            warnings.warn("Incorrect data size: %s > %s" % (data_size, channel.length))
        else:
            raw_data = fp.read(data_size)
            if compress_type in (Compression.RAW, Compression.PACK_BITS):
                data = raw_data
            elif compress_type == Compression.ZIP:
                data = zlib.decompress(raw_data)
            elif compress_type == Compression.ZIP_WITH_PREDICTION:
                decompressed = zlib.decompress(raw_data)
                data = compression.decode_prediction(decompressed, w, h, bytes_per_pixel)

            if data is None:
                return []

            channel_data.append(ChannelData(compress_type, data))

        remaining_bytes = channel.length - (fp.tell() - start_pos) - 2
        if remaining_bytes > 0:
            fp.seek(remaining_bytes, 1)
            logger.debug('    skipping %s bytes', remaining_bytes)

    return channel_data