def _save(im, fp, filename):
    """Write *im* to *fp* as a baseline JPEG via the C "jpeg" encoder.

    Raises IOError for modes the encoder cannot handle.  CMYK data is
    inverted first so that Photoshop and friends read it correctly.
    """
    try:
        rawmode = RAWMODE[im.mode]
    except KeyError:
        raise IOError("cannot write mode %s as JPEG" % im.mode)
    info = im.encoderinfo
    dpi = info.get("dpi", (0, 0))
    # get keyword arguments
    # (dict.has_key() was removed in Python 3; use the "in" operator)
    im.encoderconfig = (
        info.get("quality", 0),
        # "progressive" is the official name, but older documentation
        # says "progression"
        # FIXME: issue a warning if the wrong form is used (post-1.1.5)
        "progressive" in info or "progression" in info,
        info.get("smooth", 0),
        "optimize" in info,
        info.get("streamtype", 0),
        dpi[0], dpi[1]
    )
    if im.mode == "CMYK":
        # invert it so it's handled correctly in Photoshop/etc. - Kevin Cazabon.
        im = ImageChops.invert(im)
    ImageFile._save(im, fp, [("jpeg", (0, 0) + im.size, 0, rawmode)])
def _save(im, fp, filename, check=0):
    # Write *im* to *fp* in IFUNC IM format: a NUL-padded 512-byte text
    # header terminated by Ctrl-Z, an optional 768-byte palette, then raw
    # pixel data stored bottom-up (negative ystep).
    try:
        type, rawmode = SAVE[im.mode]
    except KeyError:
        raise ValueError("Cannot save %s images as IM" % im.mode)
    try:
        frames = im.encoderinfo["frames"]
    except KeyError:
        frames = 1
    if check:
        # caller only wants to know whether this mode can be saved
        return check
    fp.write(("Image type: %s image\r\n" % type).encode('ascii'))
    if filename:
        fp.write(("Name: %s\r\n" % filename).encode('ascii'))
    fp.write(("Image size (x*y): %d*%d\r\n" % im.size).encode('ascii'))
    fp.write(("File size (no of images): %d\r\n" % frames).encode('ascii'))
    if im.mode == "P":
        fp.write(b"Lut: 1\r\n")
    # pad the header to 511 bytes and terminate with Ctrl-Z (0x1A)
    fp.write(b"\000" * (511 - fp.tell()) + b"\032")
    if im.mode == "P":
        fp.write(im.im.getpalette("RGB", "RGB;L"))  # 768 bytes
    ImageFile._save(im, fp,
                    [("raw", (0, 0) + im.size, 0, (rawmode, 0, -1))])
def saveGif(self, filename):
    # Serialize this object's frames to *filename* as a GIF stream:
    # header, screen descriptor, optional global palette, a loop-control
    # extension for animations, then one (GCE, image descriptor, LZW
    # data) group per frame, terminated by ';' (0x3B).
    with open(filename, 'wb') as f:
        # TODO: check if file is writable?
        # Write the header
        f.write(self.headerBytes())
        # Write the screen descriptor
        f.write(self.screenDescriptorBytes())
        # write global colour palette, if any (must be 256 RGB triples)
        if self.globalPalette:
            assert(len(self.globalPalette) == 768)
            f.write(self.globalPalette)
        # animations need a loop-control extension block
        if len(self.Frames) > 1:
            f.write(self.loopControlBytes())
        for GCE, ID, img in self.Frames:
            # graphic control extensions are only emitted for animations
            if len(self.Frames) > 1:
                f.write(GCE.toBytes())
            f.write(ID.toBytes())
            # convert to new image.
            imOut = self.convertRGBtoIndexed(img, self.globalPalette)
            imOut.encoderconfig = (8, False)  # 8-bit codes, no interlace.
            # LZW minimum code size byte
            f.write(b'\x08')
            ImageFile._save(imOut, f,
                            [("gif", (0, 0) + imOut.size, 0,
                              RAWMODE[imOut.mode])])
            f.write(b"\0")  # end of image data.
        # Write end of file (0x3B)
        f.write(b';')
def _save(im, fp, filename):
    # Write *im* to *fp* as binary netpbm: PBM ("P4"), PGM ("P5"),
    # or PPM ("P6"), choosing the raw encoder mode per image mode.
    if im.mode == "1":
        rawmode, head = "1;I", b"P4"
    elif im.mode == "L":
        rawmode, head = "L", b"P5"
    elif im.mode == "I":
        # use 16-bit samples when the data fits, otherwise 32-bit
        if im.getextrema()[1] < 2 ** 16:
            rawmode, head = "I;16B", b"P5"
        else:
            rawmode, head = "I;32B", b"P5"
    elif im.mode == "RGB":
        rawmode, head = "RGB", b"P6"
    elif im.mode == "RGBA":
        # the alpha channel is dropped
        rawmode, head = "RGB", b"P6"
    else:
        raise IOError("cannot write mode %s as PPM" % im.mode)
    fp.write(head + ("\n%d %d\n" % im.size).encode("ascii"))
    # maxval line (P4 bilevel files have none)
    if head == b"P6":
        fp.write(b"255\n")
    if head == b"P5":
        if rawmode == "L":
            fp.write(b"255\n")
        elif rawmode == "I;16B":
            fp.write(b"65535\n")
        elif rawmode == "I;32B":
            # NOTE(review): the netpbm spec caps maxval at 65535 and this
            # value is one past the signed 32-bit range -- confirm that
            # intended readers accept such files.
            fp.write(b"2147483648\n")
    ImageFile._save(im, fp,
                    [("raw", (0, 0) + im.size, 0, (rawmode, 0, 1))])
def getdata(im, offset=(0, 0), **params):
    """Return a list of strings representing this image.
    The first string is a local image header, the rest contains
    encoded image data."""

    class collector:
        # The original declared ``data = []`` at class level; that makes
        # the list a class attribute shared by all instances.  Creating
        # it per instance in __init__ is the safe, idiomatic form.
        def __init__(self):
            self.data = []

        def write(self, data):
            self.data.append(data)

    im.load()  # make sure raster data is available
    fp = collector()
    try:
        im.encoderinfo = params
        # local image header
        fp.write(b"," +
                 o16(offset[0]) +   # offset
                 o16(offset[1]) +
                 o16(im.size[0]) +  # size
                 o16(im.size[1]) +
                 o8(0) +            # flags
                 o8(8))             # bits
        ImageFile._save(im, fp,
                        [("gif", (0, 0) + im.size, 0, RAWMODE[im.mode])])
        fp.write(b"\0")  # end of image data
    finally:
        # never leave encoderinfo attached to the caller's image
        del im.encoderinfo
    return fp.data
def getdata(im, offset=(0, 0), **params):
    """Return a list of strings representing this image.
    The first string is a local image header, the rest contains
    encoded image data."""

    class Collector(object):
        # The original declared ``data = []`` at class level; that makes
        # the list a class attribute shared by all instances.  Creating
        # it per instance in __init__ is the safe, idiomatic form.
        def __init__(self):
            self.data = []

        def write(self, data):
            self.data.append(data)

    im.load()  # make sure raster data is available
    fp = Collector()
    try:
        im.encoderinfo = params
        # local image header
        _get_local_header(fp, im, offset, 0)
        ImageFile._save(im, fp,
                        [("gif", (0, 0)+im.size, 0, RAWMODE[im.mode])])
        fp.write(b"\0")  # end of image data
    finally:
        # never leave encoderinfo attached to the caller's image
        del im.encoderinfo
    return fp.data
def _save(im, fp, filename):
    # Write *im* (mode "1" only) to *fp* as an MSP version 1 file:
    # a 32-byte header of 16 little-endian words, then uncompressed
    # 1-bit raster data starting at offset 32.
    if im.mode != "1":
        raise IOError("cannot write mode %s as MSP" % im.mode)
    # create MSP header
    header = [0] * 16
    header[0], header[1] = i16(b"Da"), i16(b"nM")  # version 1 magic
    header[2], header[3] = im.size
    header[4], header[5] = 1, 1
    header[6], header[7] = 1, 1
    header[8], header[9] = im.size
    # XOR of all header words, stored as the checksum
    checksum = 0
    for h in header:
        checksum = checksum ^ h
    header[12] = checksum  # FIXME: is this the right field?
    # header
    for h in header:
        fp.write(o16(h))
    # image body follows the 32-byte header
    ImageFile._save(im, fp, [("raw", (0, 0)+im.size, 32, ("1", 0, 1))])
def encode_image(img):
    # Encode a Pillow image as one GIF frame: graphic control extension,
    # image descriptor with a local palette, then the LZW data produced
    # by Pillow's "gif" encoder, and a terminating sub-block.
    img = img.convert('P', palette=Image.ADAPTIVE, colors=256)
    palbytes = img.palette.palette  # raw palette bytes
    # assert len(img.palette) in (2, 4, 8, 0x10, 0x20, 0x40, 0x80, 0x100)
    imio = io.BytesIO()
    ImageFile._save(img, imio, [("gif", (0, 0)+img.size, 0, 'P')])
    return (
        # GCE
        b'\x21'        # extension block
        + b'\xf9'      # graphic control extension
        + b'\x04'      # block size
        + b'\x00'      # flags
        + b'\x00\x00'  # frame delay
        + b'\x00'      # transparent color index
        + b'\x00'      # block terminator
        # Image headers
        + b'\x2c'      # image block
        + b'\x00\x00'  # image x
        + b'\x00\x00'  # image y
        + struct.pack('<H', img.width)   # image width
        + struct.pack('<H', img.height)  # image height
        # flags (local palette and palette size)
        + bytes((0x80 | int(math.log(len(palbytes)//3, 2.0))-1,))
        # Palette
        + palbytes
        # LZW code size
        + b'\x08'
        # Image data. We're using pillow here because I was too lazy
        # finding a suitable LZW encoder.
        + imio.getbuffer()
        # End of image data
        + b'\x00'
    )
def _save(im, fp, filename, eps=1): """EPS Writer for the Python Imaging Library.""" # # make sure image data is available im.load() # # determine postscript image mode if im.mode == "L": operator = (8, 1, "image") elif im.mode == "RGB": operator = (8, 3, "false 3 colorimage") elif im.mode == "CMYK": operator = (8, 4, "false 4 colorimage") else: raise ValueError("image mode is not supported") class NoCloseStream: def __init__(self, fp): self.fp = fp def __getattr__(self, name): return getattr(self.fp, name) def close(self): pass base_fp = fp fp = io.TextIOWrapper(NoCloseStream(fp), encoding='latin-1') if eps: # # write EPS header fp.write("%!PS-Adobe-3.0 EPSF-3.0\n") fp.write("%%Creator: PIL 0.1 EpsEncode\n") #fp.write("%%CreationDate: %s"...) fp.write("%%%%BoundingBox: 0 0 %d %d\n" % im.size) fp.write("%%Pages: 1\n") fp.write("%%EndComments\n") fp.write("%%Page: 1 1\n") fp.write("%%ImageData: %d %d " % im.size) fp.write("%d %d 0 1 1 \"%s\"\n" % operator) # # image header fp.write("gsave\n") fp.write("10 dict begin\n") fp.write("/buf %d string def\n" % (im.size[0] * operator[1])) fp.write("%d %d scale\n" % im.size) fp.write("%d %d 8\n" % im.size) # <= bits fp.write("[%d 0 0 -%d 0 %d]\n" % (im.size[0], im.size[1], im.size[1])) fp.write("{ currentfile buf readhexstring pop } bind\n") fp.write(operator[2] + "\n") fp.flush() ImageFile._save(im, base_fp, [("eps", (0,0)+im.size, 0, None)]) fp.write("\n%%%%EndBinary\n") fp.write("grestore end\n") fp.flush()
def delete(self, file_, delete_file=True):
    """
    Deletes file_ references in Key Value store and optionally
    the file_ it self.
    """
    wrapper = ImageFile(file_)
    if not delete_file:
        return
    wrapper.delete()
def _save(im, fp, filename, check=0):
    # Write *im* to *fp* as an uncompressed Windows BMP
    # (BITMAPINFOHEADER).  Rows are padded to 4-byte boundaries and
    # stored bottom-up; 1/L/P modes carry a BGRX palette.
    try:
        rawmode, bits, colors = SAVE[im.mode]
    except KeyError:
        raise IOError("cannot write mode %s as BMP" % im.mode)
    if check:
        # caller only wants to know whether this mode can be saved
        return check
    info = im.encoderinfo
    dpi = info.get("dpi", (96, 96))
    # 1 meter == 39.3701 inches
    ppm = tuple(map(lambda x: int(x * 39.3701), dpi))
    # row stride in bytes, rounded up to a multiple of 4
    stride = ((im.size[0] * bits + 7) // 8 + 3) & (~3)
    header = 40  # or 64 for OS/2 version 2
    offset = 14 + header + colors * 4
    image = stride * im.size[1]
    # bitmap header
    fp.write(
        b"BM" +                # file type (magic)
        o32(offset + image) +  # file size
        o32(0) +               # reserved
        o32(offset)            # image data offset
    )
    # bitmap info header
    fp.write(
        o32(header) +      # info header size
        o32(im.size[0]) +  # width
        o32(im.size[1]) +  # height
        o16(1) +           # planes
        o16(bits) +        # depth
        o32(0) +           # compression (0=uncompressed)
        o32(image) +       # size of bitmap
        o32(ppm[0]) +      # resolution (pixels per meter)
        o32(ppm[1]) +
        o32(colors) +      # colors used
        o32(colors)        # colors important
    )
    fp.write(b"\0" * (header - 40))  # padding (for OS/2 format)
    # palette entries, 4 bytes each
    if im.mode == "1":
        for i in (0, 255):
            fp.write(o8(i) * 4)
    elif im.mode == "L":
        for i in range(256):
            fp.write(o8(i) * 4)
    elif im.mode == "P":
        fp.write(im.im.getpalette("RGB", "BGRX"))
    # negative ystep (-1) writes rows bottom-up
    ImageFile._save(im, fp,
                    [("raw", (0, 0) + im.size, 0, (rawmode, stride, -1))])
def _save(im, fp, filename):
    # Write *im* to *fp* as a GIF.  Prefers the external C driver when
    # available; otherwise converts to a palette/greyscale image and
    # uses the built-in "gif" encoder.
    if _imaging_gif:
        # call external driver
        try:
            _imaging_gif.save(im, fp, filename)
            return
        except IOError:
            pass  # write uncompressed file
    if im.mode in RAWMODE:
        im_out = im
    else:
        # convert on the fly (EXPERIMENTAL -- I'm not sure PIL
        # should automatically convert images on save...)
        if Image.getmodebase(im.mode) == "RGB":
            palette_size = 256
            if im.palette:
                # reuse the source palette size when one exists
                palette_size = len(im.palette.getdata()[1]) // 3
            im_out = im.convert("P", palette=1, colors=palette_size)
        else:
            im_out = im.convert("L")
    # header
    try:
        palette = im.encoderinfo["palette"]
    except KeyError:
        palette = None
        # only default optimize on when no explicit palette was given
        im.encoderinfo["optimize"] = im.encoderinfo.get("optimize", True)
    header, used_palette_colors = getheader(im_out, palette, im.encoderinfo)
    for s in header:
        fp.write(s)
    flags = 0
    if get_interlace(im):
        flags = flags | 64  # interlace bit in the image descriptor
    # local image header
    get_local_header(fp, im, (0, 0), flags)
    im_out.encoderconfig = (8, get_interlace(im))
    ImageFile._save(im_out, fp,
                    [("gif", (0, 0)+im.size, 0, RAWMODE[im_out.mode])])
    fp.write(b"\0")  # end of image data
    fp.write(b";")   # end of file
    try:
        fp.flush()
    except:
        # best-effort flush; some targets may not support it
        pass
def _save(im, fp, filename, check=0):
    # Write *im* to *fp* as a PCX file: a fixed 128-byte header followed
    # by RLE-encoded scanlines, plus a trailing palette for P/L modes.
    try:
        version, bits, planes, rawmode = SAVE[im.mode]
    except KeyError:
        raise ValueError("Cannot save %s images as PCX" % im.mode)
    if check:
        # caller only wants to know whether this mode can be saved
        return check
    # bytes per plane
    stride = (im.size[0] * bits + 7) // 8
    # stride should be even
    stride += stride % 2
    # Stride needs to be kept in sync with the PcxEncode.c version.
    # Ideally it should be passed in in the state, but the bytes value
    # gets overwritten.
    if Image.DEBUG:
        print ("PcxImagePlugin._save: xwidth: %d, bits: %d, stride: %d" % (
            im.size[0], bits, stride))
    # under windows, we could determine the current screen size with
    # "Image.core.display_mode()[1]", but I think that's overkill...
    screen = im.size
    dpi = 100, 100
    # PCX header (must be exactly 128 bytes -- see the assert below)
    fp.write(
        o8(10) + o8(version) + o8(1) + o8(bits) + o16(0) +
        o16(0) + o16(im.size[0]-1) + o16(im.size[1]-1) + o16(dpi[0]) +
        o16(dpi[1]) + b"\0"*24 + b"\xFF"*24 + b"\0" + o8(planes) +
        o16(stride) + o16(1) + o16(screen[0]) + o16(screen[1]) +
        b"\0"*54
    )
    assert fp.tell() == 128
    ImageFile._save(im, fp, [("pcx", (0, 0)+im.size, 0,
                              (rawmode, bits*planes))])
    if im.mode == "P":
        # colour palette
        fp.write(o8(12))
        fp.write(im.im.getpalette("RGB", "RGB"))  # 768 bytes
    elif im.mode == "L":
        # greyscale palette
        fp.write(o8(12))
        for i in range(256):
            fp.write(o8(i)*3)
def _save(im, fp, filename):
    """Write *im* to *fp* as a JPEG, embedding an optional ICC profile.

    A profile larger than one APP2 marker (64 KB) is split across
    several ICC_PROFILE markers carrying sequence numbers, as the ICC
    specification requires.
    """
    try:
        rawmode = RAWMODE[im.mode]
    except KeyError:
        raise IOError("cannot write mode %s as JPEG" % im.mode)
    info = im.encoderinfo
    dpi = info.get("dpi", (0, 0))
    # map chroma subsampling names onto the encoder's integer codes
    subsampling = info.get("subsampling", -1)
    if subsampling == "4:4:4":
        subsampling = 0
    elif subsampling == "4:2:2":
        subsampling = 1
    elif subsampling == "4:1:1":
        subsampling = 2
    # NOTE: ``extra`` must be bytes.  The original initialised it as a
    # str and concatenated it with struct.pack() output, which raises
    # TypeError on Python 3.
    extra = b""
    icc_profile = info.get("icc_profile")
    if icc_profile:
        ICC_OVERHEAD_LEN = 14  # b"ICC_PROFILE\0" + seq no + marker count
        MAX_BYTES_IN_MARKER = 65533
        MAX_DATA_BYTES_IN_MARKER = MAX_BYTES_IN_MARKER - ICC_OVERHEAD_LEN
        # split the profile into marker-sized pieces
        markers = []
        while icc_profile:
            markers.append(icc_profile[:MAX_DATA_BYTES_IN_MARKER])
            icc_profile = icc_profile[MAX_DATA_BYTES_IN_MARKER:]
        for i, marker in enumerate(markers, 1):
            size = struct.pack(">H", 2 + ICC_OVERHEAD_LEN + len(marker))
            # APP2 marker + length + identifier + seq/count + payload
            extra += (b"\xFF\xE2" + size + b"ICC_PROFILE\0" +
                      struct.pack("BB", i, len(markers)) + marker)
    # get keyword arguments
    # (dict.has_key() was removed in Python 3; use the "in" operator)
    im.encoderconfig = (
        info.get("quality", 0),
        # "progressive" is the official name, but older documentation
        # says "progression"
        # FIXME: issue a warning if the wrong form is used (post-1.1.7)
        "progressive" in info or "progression" in info,
        info.get("smooth", 0),
        "optimize" in info,
        info.get("streamtype", 0),
        dpi[0], dpi[1],
        subsampling,
        extra,
    )
    ImageFile._save(im, fp, [("jpeg", (0, 0) + im.size, 0, rawmode)])
def _save(im, fp, filename):
    """Write *im* to *fp* in SPIDER format (32-bit native floats)."""
    # SPIDER stores 32-bit floating point samples; convert anything else.
    if not im.mode.startswith("F"):
        im = im.convert('F')
    header = makeSpiderHeader(im)
    if len(header) < 256:
        raise IOError("Error creating Spider header")
    # emit the SPIDER header, then the raw float raster
    fp.writelines(header)
    ImageFile._save(im, fp,
                    [("raw", (0, 0) + im.size, 0, ("F;32NF", 0, 1))])
def _save(im, fp, filename):
    """Write *im* to *fp* as binary PBM/PGM/PPM ("P4"/"P5"/"P6")."""
    # Map the image mode to a raw decoder mode and a netpbm magic number.
    mode_map = {
        "1": ("1;I", b"P4"),
        "L": ("L", b"P5"),
        "RGB": ("RGB", b"P6"),
        "RGBA": ("RGB", b"P6"),  # alpha channel is dropped
    }
    try:
        rawmode, head = mode_map[im.mode]
    except KeyError:
        raise IOError("cannot write mode %s as PPM" % im.mode)
    fp.write(head + ("\n%d %d\n" % im.size).encode('ascii'))
    # P4 (bilevel) carries no maxval line; P5/P6 use 8-bit samples
    if head != b"P4":
        fp.write(b"255\n")
    ImageFile._save(im, fp,
                    [("raw", (0, 0) + im.size, 0, (rawmode, 0, 1))])
def chunk_iCCP(self, pos, length):
    # ICC profile chunk: read, inflate, and stash in im_info.
    s = ImageFile._safe_read(self.fp, length)
    # according to PNG spec, the iCCP chunk contains:
    # Profile name  1-79 bytes (character string)
    # Null separator 1 byte (null character)
    # Compression method 1 byte (0)
    # Compressed profile n bytes (zlib with deflate compression)
    i = s.find(b"\0")
    logger.debug("iCCP profile name %r", s[:i])
    logger.debug("Compression method %s", i8(s[i]))
    # NOTE(review): s[i] is the null separator itself (always 0); the
    # compression-method byte is s[i+1] -- confirm this check is as
    # intended.  The i+2 slice below does skip both bytes correctly.
    comp_method = i8(s[i])
    if comp_method != 0:
        raise SyntaxError("Unknown compression method %s in iCCP chunk" %
                          comp_method)
    try:
        # bounded decompression guards against decompression bombs
        icc_profile = _safe_zlib_decompress(s[i+2:])
    except ValueError:
        # decompressed size exceeded the safety limit
        if ImageFile.LOAD_TRUNCATED_IMAGES:
            icc_profile = None
        else:
            raise
    except zlib.error:
        icc_profile = None  # FIXME
    self.im_info["icc_profile"] = icc_profile
    return s
def chunk_PLTE(self, pos, length):
    """Read the PLTE (palette) chunk; kept only for "P" mode images."""
    data = ImageFile._safe_read(self.fp, length)
    if self.im_mode == "P":
        # raw RGB triples, converted to a palette object later
        self.im_palette = ("RGB", data)
    return data
def SOF(self, marker):
    #
    # Start of frame marker. Defines the size and mode of the
    # image. JPEG is color blind, so we use some simple
    # heuristics to map the number of layers to an appropriate
    # mode. Note that this could be made a bit brighter, by
    # looking for JFIF and Adobe APP markers.
    n = i16(self.fp.read(2))-2
    s = ImageFile._safe_read(self.fp, n)
    # the frame header stores height before width
    self.size = i16(s[3:]), i16(s[1:])
    # NOTE(review): ord() on indexed byte strings is Python 2 style; on
    # Python 3, s[0] is already an int and ord() would raise TypeError.
    self.bits = ord(s[0])
    if self.bits != 8:
        raise SyntaxError("cannot handle %d-bit layers" % self.bits)
    self.layers = ord(s[5])
    if self.layers == 1:
        self.mode = "L"
    elif self.layers == 3:
        self.mode = "RGB"
    elif self.layers == 4:
        self.mode = "CMYK"
    else:
        raise SyntaxError("cannot handle %d-layer images" % self.layers)
    # these SOF markers indicate a progressive-style scan
    if marker in [0xFFC2, 0xFFC6, 0xFFCA, 0xFFCE]:
        self.info["progression"] = 1
    # per-component entries, 3 bytes each
    for i in range(6, len(s), 3):
        t = s[i:i+3]
        # 4-tuples: id, vsamp, hsamp, qtable
        self.layer.append((t[0], ord(t[1])/16, ord(t[1])&15, ord(t[2])))
def chunk_iTXt(self, pos, length):
    # international text chunk: keyword\0, compression flag, compression
    # method, language\0, translated keyword\0, (optionally deflated)
    # UTF-8 text.  Malformed chunks are silently skipped.
    r = s = ImageFile._safe_read(self.fp, length)
    try:
        k, r = r.split(b"\0", 1)
    except ValueError:
        # no keyword separator: malformed chunk
        return s
    if len(r) < 2:
        # too short to hold the two flag bytes
        return s
    cf, cm, r = i8(r[0]), i8(r[1]), r[2:]
    try:
        lang, tk, v = r.split(b"\0", 2)
    except ValueError:
        return s
    if cf != 0:
        # only zlib/deflate (method 0) is defined by the spec
        if cm == 0:
            try:
                v = zlib.decompress(v)
            except zlib.error:
                return s
        else:
            return s
    if bytes is not str:
        # Python 3: decode strictly; drop the chunk on bad encoding
        try:
            k = k.decode("latin-1", "strict")
            lang = lang.decode("utf-8", "strict")
            tk = tk.decode("utf-8", "strict")
            v = v.decode("utf-8", "strict")
        except UnicodeError:
            return s
    self.im_info[k] = self.im_text[k] = iTXt(v, lang, tk)
    return s
def chunk_zTXt(self, pos, length):
    """Read a zTXt (compressed text) chunk into im_info/im_text.

    Layout: latin-1 keyword, null separator, compression-method byte,
    zlib-deflated latin-1 text.  Malformed or corrupt chunks degrade
    to empty values rather than failing.
    """
    # NOTE: the second parameter was renamed from ``len``, which
    # shadowed the builtin, to ``length`` (the chunk dispatcher passes
    # it positionally, so callers are unaffected).
    s = ImageFile._safe_read(self.fp, length)
    try:
        k, v = s.split(b"\0", 1)
    except ValueError:
        # no null separator: treat the whole payload as the keyword
        k = s
        v = b""
    if v:
        comp_method = i8(v[0])
    else:
        comp_method = 0
    if comp_method != 0:
        raise SyntaxError("Unknown compression method %s in zTXt chunk" %
                          comp_method)
    import zlib
    try:
        v = zlib.decompress(v[1:])
    except zlib.error:
        v = b""  # tolerate a corrupt compressed stream
    if k:
        if bytes is not str:
            # Python 3: decode; replace undecodable value bytes
            k = k.decode('latin-1', 'strict')
            v = v.decode('latin-1', 'replace')
        self.im_info[k] = self.im_text[k] = v
    return s
def chunk_zTXt(self, pos, length):
    # compressed text chunk: latin-1 keyword, null separator,
    # compression-method byte, zlib-deflated latin-1 text
    s = ImageFile._safe_read(self.fp, length)
    try:
        k, v = s.split(b"\0", 1)
    except ValueError:
        # no null separator: treat the whole payload as the keyword
        k = s
        v = b""
    if v:
        comp_method = i8(v[0])
    else:
        comp_method = 0
    if comp_method != 0:
        raise SyntaxError("Unknown compression method %s in zTXt chunk" %
                          comp_method)
    try:
        # bounded decompression guards against decompression bombs
        v = _safe_zlib_decompress(v[1:])
    except zlib.error:
        v = b""  # tolerate a corrupt compressed stream
    if k:
        if bytes is not str:
            # Python 3: decode; replace undecodable value bytes
            k = k.decode("latin-1", "strict")
            v = v.decode("latin-1", "replace")
        self.im_info[k] = self.im_text[k] = v
        # enforce the cumulative text-size safety limit
        self.check_text_memory(len(v))
    return s
def load(self, fp):
    # load tag dictionary: read one TIFF IFD from *fp* (positioned at
    # the IFD start) into self.tagdata / self.tagtype.  Values larger
    # than 4 bytes are fetched via their stored file offsets.
    self.reset()
    self.offset = fp.tell()
    i16 = self.i16
    i32 = self.i32
    for i in range(i16(fp.read(2))):  # number of directory entries
        ifd = fp.read(12)  # one 12-byte directory entry
        tag, typ = i16(ifd), i16(ifd, 2)
        if Image.DEBUG:
            from PIL import TiffTags
            tagname = TiffTags.TAGS.get(tag, "unknown")
            typname = TiffTags.TYPES.get(typ, "unknown")
            print("tag: %s (%d)" % (tagname, tag), end=' ')
            print("- type: %s (%d)" % (typname, typ), end=' ')
        try:
            dispatch = self.load_dispatch[typ]
        except KeyError:
            if Image.DEBUG:
                print("- unsupported type", typ)
            continue  # ignore unsupported type
        size, handler = dispatch
        size = size * i32(ifd, 4)  # element size times count
        # Get and expand tag value
        if size > 4:
            # value is out of line; bytes 8-11 hold its file offset
            here = fp.tell()
            fp.seek(i32(ifd, 8))
            data = ImageFile._safe_read(fp, size)
            fp.seek(here)
        else:
            # value fits inline in the directory entry
            data = ifd[8:8+size]
        if len(data) != size:
            warnings.warn("Possibly corrupt EXIF data. "
                          "Expecting to read %d bytes but only got %d. "
                          "Skipping tag %s" % (size, len(data), tag))
            continue
        self.tagdata[tag] = data
        self.tagtype[tag] = typ
        if Image.DEBUG:
            if tag in (COLORMAP, IPTC_NAA_CHUNK, PHOTOSHOP_CHUNK,
                       ICCPROFILE, XMP):
                print("- value: <table: %d bytes>" % size)
            else:
                print("- value:", self[tag])
    # offset of the next IFD in the chain (0 = none)
    self.next = i32(fp.read(4))
def COM(self, marker):
    """Handle a COM (comment) marker: record its payload."""
    # Comment marker. Store these in the APP dictionary.
    size = i16(self.fp.read(2)) - 2
    payload = ImageFile._safe_read(self.fp, size)
    self.app["COM"] = payload  # compatibility
    self.applist.append(("COM", payload))
def _save(im, fp, filename):
    """Write *im* (mode "1" only) to *fp* as an XBM C source file."""
    if im.mode != "1":
        raise IOError("cannot write mode %s as XBM" % im.mode)
    width, height = im.size
    fp.write(("#define im_width %d\n" % width).encode('ascii'))
    fp.write(("#define im_height %d\n" % height).encode('ascii'))
    # optional cursor hotspot coordinates
    hotspot = im.encoderinfo.get("hotspot")
    if hotspot:
        fp.write(("#define im_x_hot %d\n" % hotspot[0]).encode('ascii'))
        fp.write(("#define im_y_hot %d\n" % hotspot[1]).encode('ascii'))
    fp.write(b"static char im_bits[] = {\n")
    ImageFile._save(im, fp, [("xbm", (0, 0) + im.size, 0, None)])
    fp.write(b"};\n")
def load(self, fp):
    # Load one TIFF IFD from *fp* (positioned at the IFD start) into
    # self._tagdata / self.tagtype.  Out-of-line values are fetched via
    # their 4-byte offsets; truncated entries are skipped with a warning
    # and a short read anywhere aborts the load with a warning.
    self.reset()
    self._offset = fp.tell()
    try:
        for i in range(self._unpack("H", self._ensure_read(fp, 2))[0]):
            # each 12-byte entry: tag, type, count, inline value/offset
            tag, typ, count, data = self._unpack(
                "HHL4s", self._ensure_read(fp, 12))
            if DEBUG:
                tagname = TAGS_V2.get(tag, TagInfo()).name
                typname = TYPES.get(typ, "unknown")
                print("tag: %s (%d) - type: %s (%d)" % (
                    tagname, tag, typname, typ), end=" ")
            try:
                unit_size, handler = self._load_dispatch[typ]
            except KeyError:
                if DEBUG:
                    print("- unsupported type", typ)
                continue  # ignore unsupported type
            size = count * unit_size
            if size > 4:
                # value is out of line; the 4 data bytes hold an offset
                here = fp.tell()
                offset, = self._unpack("L", data)
                if DEBUG:
                    print("Tag Location: %s - Data Location: %s" % (
                        here, offset), end=" ")
                fp.seek(offset)
                data = ImageFile._safe_read(fp, size)
                fp.seek(here)
            else:
                # value fits inline in the directory entry
                data = data[:size]
            if len(data) != size:
                warnings.warn(
                    "Possibly corrupt EXIF data. "
                    "Expecting to read %d bytes but only got %d. "
                    "Skipping tag %s" % (size, len(data), tag))
                continue
            self._tagdata[tag] = data
            self.tagtype[tag] = typ
            if DEBUG:
                if size > 32:
                    print("- value: <table: %d bytes>" % size)
                else:
                    print("- value:", self[tag])
        # offset of the next IFD in the chain (0 = none)
        self.next, = self._unpack("L", self._ensure_read(fp, 4))
    except IOError as msg:
        warnings.warn(str(msg))
        return
def _save(im, fp, filename, check=0):
    # Write *im* to *fp* as an uncompressed Windows BMP.  Rows are
    # padded to 4-byte boundaries and stored bottom-up; 1/L/P modes
    # carry a BGRX palette.  Resolution is written as a fixed 1x1.
    try:
        rawmode, bits, colors = SAVE[im.mode]
    except KeyError:
        raise IOError("cannot write mode %s as BMP" % im.mode)
    if check:
        # caller only wants to know whether this mode can be saved
        return check
    # row stride in bytes, rounded up to a multiple of 4
    stride = ((im.size[0]*bits+7)//8+3)&(~3)
    header = 40  # or 64 for OS/2 version 2
    offset = 14 + header + colors * 4
    image = stride * im.size[1]
    # bitmap header
    fp.write(b"BM" +               # file type (magic)
             o32(offset+image) +   # file size
             o32(0) +              # reserved
             o32(offset))          # image data offset
    # bitmap info header
    fp.write(o32(header) +         # info header size
             o32(im.size[0]) +     # width
             o32(im.size[1]) +     # height
             o16(1) +              # planes
             o16(bits) +           # depth
             o32(0) +              # compression (0=uncompressed)
             o32(image) +          # size of bitmap
             o32(1) + o32(1) +     # resolution
             o32(colors) +         # colors used
             o32(colors))          # colors important
    fp.write(b"\0" * (header - 40))  # padding (for OS/2 format)
    # palette entries, 4 bytes each
    if im.mode == "1":
        for i in (0, 255):
            fp.write(o8(i) * 4)
    elif im.mode == "L":
        for i in range(256):
            fp.write(o8(i) * 4)
    elif im.mode == "P":
        fp.write(im.im.getpalette("RGB", "BGRX"))
    # negative ystep (-1) writes rows bottom-up
    ImageFile._save(im, fp,
                    [("raw", (0,0)+im.size, 0, (rawmode, stride, -1))])
def _save(im, fp, filename, check=0):
    """Write *im* to *fp* in MRC format: header followed by raw data."""
    # check if im.mode is compatible with MRC (see Bmp...)
    if check:
        return check
    header = MrcHeader()
    header['width'] = im.size[0]
    header['height'] = im.size[1]
    header['depth'] = 1
    header['mode'] = pilmode_mrcmode[im.mode]
    header.tofile(fp)
    rawmode = mrcmode_rawmode[header['mode']]
    # raster data starts immediately after the fixed-size header
    tile = [("raw", (0, 0) + im.size, header.headerlen, (rawmode, 0, 1))]
    # was ``print 'savetile:', tile`` -- a Python 2 print *statement*,
    # which is a syntax error on Python 3; use the function form.
    print('savetile:', tile)
    ImageFile._save(im, fp, tile)
def _save(im, fp, filename):
    # Write *im* to *fp* as JPEG 2000, choosing a raw codestream
    # ("j2k") or the JP2 container based on the filename extension.
    if filename.endswith('.j2k'):
        kind = 'j2k'
    else:
        kind = 'jp2'
    # Get the keyword arguments
    info = im.encoderinfo
    offset = info.get('offset', None)
    tile_offset = info.get('tile_offset', None)
    tile_size = info.get('tile_size', None)
    quality_mode = info.get('quality_mode', 'rates')
    quality_layers = info.get('quality_layers', None)
    num_resolutions = info.get('num_resolutions', 0)
    cblk_size = info.get('codeblock_size', None)
    precinct_size = info.get('precinct_size', None)
    irreversible = info.get('irreversible', False)
    progression = info.get('progression', 'LRCP')
    cinema_mode = info.get('cinema_mode', 'no')
    # pass a real file descriptor to the encoder when available;
    # -1 means "fall back to the Python file object"
    fd = -1
    if hasattr(fp, "fileno"):
        try:
            fd = fp.fileno()
        except:
            # NOTE(review): bare except -- fileno() may raise e.g.
            # io.UnsupportedOperation on non-file streams; narrowing
            # this would be safer.
            fd = -1
    im.encoderconfig = (
        offset,
        tile_offset,
        tile_size,
        quality_mode,
        quality_layers,
        num_resolutions,
        cblk_size,
        precinct_size,
        irreversible,
        progression,
        cinema_mode,
        fd
    )
    ImageFile._save(im, fp, [('jpeg2k', (0, 0)+im.size, 0, kind)])
def APP(self, marker):
    #
    # Application marker. Store these in the APP dictionary.
    # Also look for well-known application markers.
    # (Python 3 variant: compares against bytes literals, uses i8().)
    n = i16(self.fp.read(2)) - 2
    s = ImageFile._safe_read(self.fp, n)
    app = "APP%d" % (marker & 15)
    self.app[app] = s  # compatibility
    self.applist.append((app, s))
    if marker == 0xFFE0 and s[:4] == b"JFIF":
        # extract JFIF information
        self.info["jfif"] = version = i16(s, 5)  # version
        self.info["jfif_version"] = divmod(version, 256)
        # extract JFIF properties
        try:
            jfif_unit = i8(s[7])
            jfif_density = i16(s, 8), i16(s, 10)
        except:
            # truncated JFIF segment: skip the optional fields
            pass
        else:
            if jfif_unit == 1:
                # unit 1 means dots per inch
                self.info["dpi"] = jfif_density
            self.info["jfif_unit"] = jfif_unit
            self.info["jfif_density"] = jfif_density
    elif marker == 0xFFE1 and s[:5] == b"Exif\0":
        # extract Exif information (incomplete)
        self.info["exif"] = s  # FIXME: value will change
    elif marker == 0xFFE2 and s[:5] == b"FPXR\0":
        # extract FlashPix information (incomplete)
        self.info["flashpix"] = s  # FIXME: value will change
    elif marker == 0xFFE2 and s[:12] == b"ICC_PROFILE\0":
        # Since an ICC profile can be larger than the maximum size of
        # a JPEG marker (64K), we need provisions to split it into
        # multiple markers. The format defined by the ICC specifies
        # one or more APP2 markers containing the following data:
        #   Identifying string      ASCII "ICC_PROFILE\0" (12 bytes)
        #   Marker sequence number  1, 2, etc (1 byte)
        #   Number of markers       Total of APP2's used (1 byte)
        #   Profile data            (remainder of APP2 data)
        # Decoders should use the marker sequence numbers to
        # reassemble the profile, rather than assuming that the APP2
        # markers appear in the correct sequence.
        self.icclist.append(s)
    elif marker == 0xFFEE and s[:5] == b"Adobe":
        self.info["adobe"] = i16(s, 5)
        # extract Adobe custom properties
        try:
            adobe_transform = i8(s[1])
        except:
            pass
        else:
            self.info["adobe_transform"] = adobe_transform
def APP(self, marker):
    #
    # Application marker. Store these in the APP dictionary.
    # Also look for well-known application markers.
    # (Python 2 variant: compares against str literals and uses ord(),
    # so it will not match/work on Python 3 byte strings.)
    n = i16(self.fp.read(2)) - 2
    s = ImageFile._safe_read(self.fp, n)
    app = "APP%d" % (marker & 15)
    self.app[app] = s  # compatibility
    self.applist.append((app, s))
    if marker == 0xFFE0 and s[:4] == "JFIF":
        # extract JFIF information
        self.info["jfif"] = version = i16(s, 5)  # version
        self.info["jfif_version"] = divmod(version, 256)
        # extract JFIF properties
        try:
            jfif_unit = ord(s[7])
            jfif_density = i16(s, 8), i16(s, 10)
        except:
            # truncated JFIF segment: skip the optional fields
            pass
        else:
            if jfif_unit == 1:
                # unit 1 means dots per inch
                self.info["dpi"] = jfif_density
            self.info["jfif_unit"] = jfif_unit
            self.info["jfif_density"] = jfif_density
    elif marker == 0xFFE1 and s[:5] == "Exif\0":
        # extract Exif information (incomplete)
        self.info["exif"] = s  # FIXME: value will change
    elif marker == 0xFFE2 and s[:5] == "FPXR\0":
        # extract FlashPix information (incomplete)
        self.info["flashpix"] = s  # FIXME: value will change
    elif marker == 0xFFEE and s[:5] == "Adobe":
        self.info["adobe"] = i16(s, 5)
        # extract Adobe custom properties
        try:
            adobe_transform = ord(s[1])
        except:
            pass
        else:
            self.info["adobe_transform"] = adobe_transform
def chunk_tEXt(self, pos, length):
    """Read a tEXt chunk (latin-1 keyword/value) into im_info/im_text."""
    raw = ImageFile._safe_read(self.fp, length)
    parts = raw.split(b"\0", 1)
    if len(parts) == 2:
        key, value = parts
    else:
        # fallback for broken tEXt tags with no null separator
        key, value = raw, b""
    if key:
        if bytes is not str:
            # Python 3: decode; replace undecodable value bytes
            key = key.decode('latin-1', 'strict')
            value = value.decode('latin-1', 'replace')
        self.im_info[key] = self.im_text[key] = value
        # enforce the cumulative text-size safety limit
        self.check_text_memory(len(value))
    return raw
def CreateMonoBag(imgs, bagname):
    '''Creates a bag file with camera images'''
    bag = rosbag.Bag(bagname, 'w')
    try:
        for i in range(len(imgs)):
            print("Adding %s" % imgs[i])
            # The original opened the file in text mode ("r") and never
            # closed it; image bytes are binary, so use "rb" inside a
            # context manager.
            with open(imgs[i], "rb") as fp:
                p = ImageFile.Parser()
                # read image size from the first image
                imgpil = ImagePIL.open(imgs[0])
                width, height = imgpil.size
                # e.g. width 1241, height 376
                while 1:
                    s = fp.read(1024)
                    if not s:
                        break
                    p.feed(s)
                im = p.close()
            Stamp = rospy.rostime.Time.from_sec(time.time())
            # set image information
            Img = Image()
            Img.header.stamp = Stamp
            Img.height = height
            Img.width = width
            Img.header.frame_id = "camera"
            # for rgb8:
            # Img.encoding = "rgb8"
            # Img_data = [pix for pixdata in im.getdata() for pix in pixdata]
            # Img.step = Img.width * 3
            # for mono8
            Img.encoding = "mono8"
            Img_data = [pix for pixdata in [im.getdata()] for pix in pixdata]
            Img.step = Img.width
            Img.data = Img_data
            bag.write('camera/image_raw', Img)
    finally:
        bag.close()
def check_image_resource(url):
    '''Determine the pixel size of a remote image without a full download.

    Feeds the stream to PIL's incremental parser in 1 KB chunks (100 KB
    at most) and returns (width, height), or (0, 0) when the size cannot
    be determined.

    reference: https://stackoverflow.com/questions/7460218/get-image-size-without-downloading-it-in-python
    '''
    req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    connection = urlopen(req)
    # The original closed the connection on each return path only; an
    # exception in read()/feed() leaked it.  Close unconditionally.
    try:
        assert connection.code == 200
        p = ImageFile.Parser()
        for i in range(100):  # at most read 100KB data
            # In practice most JPEGs expose their size at i=0 (worst
            # observed: i=27); PNGs usually within i=0..5 (worst: i=28).
            data = connection.read(1024)
            if not data:
                break
            p.feed(data)
            if p.image:
                return p.image.size
        return (0, 0)
    finally:
        connection.close()
def get_image_dimensions(file_or_path, close=False):
    """
    Returns the (width, height) of an image, given an open file or a path.
    Set 'close' to True to close the file at the end if it is initially
    in an open state.
    """
    from PIL import ImageFile as PillowImageFile
    p = PillowImageFile.Parser()
    if hasattr(file_or_path, 'read'):
        # already-open file: remember the position so it can be restored
        file = file_or_path
        file_pos = file.tell()
        file.seek(0)
    else:
        file = open(file_or_path, 'rb')
        close = True
    try:
        # Most of the time Pillow only needs a small chunk to parse the image
        # and get the dimensions, but with some TIFF files Pillow needs to
        # parse the whole file.
        chunk_size = 1024
        while 1:
            data = file.read(chunk_size)
            if not data:
                break
            try:
                p.feed(data)
            except Exception as e:
                # ignore zlib complaining on truncated stream, just feed more
                # data to parser (ticket #19457).
                if e.args[0].startswith("Error -5"):
                    pass
                else:
                    raise
            if p.image:
                return p.image.size
            # double the chunk each round so pathological files converge
            chunk_size *= 2
        return None
    finally:
        if close:
            file.close()
        else:
            # restore the caller's original file position
            file.seek(file_pos)
def CreateMonoBag(imgs, bagname, yamlName):
    '''Creates a bag file with camera images'''
    bag = rosbag.Bag(bagname, 'w')
    try:
        for i in range(len(imgs)):
            print("Adding %s" % imgs[i])
            # The original never closed the image file, leaking one
            # handle per image; use a context manager.
            with open(imgs[i], "rb") as fp:
                p = ImageFile.Parser()
                while 1:
                    s = fp.read(1024)
                    if not s:
                        break
                    p.feed(s)
                im = p.close()
            Stamp = rospy.rostime.Time.from_sec(time.time())
            Img = Image()
            Img.header.stamp = Stamp
            Img.width = im.size[0]
            Img.height = im.size[1]
            if im.mode == 'RGB':  # (3x8-bit pixels, true color)
                Img.encoding = "rgb8"
                Img.header.frame_id = "camera_rgb_optical_frame"
                Img.step = Img.width * 3
                Img_data = [pix for pixdata in im.getdata()
                            for pix in pixdata]
            elif im.mode == 'L':  # (8-bit pixels, black and white)
                Img.encoding = "mono8"
                Img.header.frame_id = "camera_gray_optical_frame"
                Img.step = Img.width
                Img_data = [pix for pixdata in [im.getdata()]
                            for pix in pixdata]
            Img.data = Img_data
            # calibration message shares the image's stamp and frame
            [calib, cameraName] = yaml_to_CameraInfo(yamlName)
            calib.header.stamp = Stamp
            if im.mode == 'RGB':
                calib.header.frame_id = "camera_rgb_optical_frame"
            elif im.mode == 'L':
                calib.header.frame_id = "camera_gray_optical_frame"
            bag.write(cameraName + '/camera_info', calib, Stamp)
            bag.write(cameraName + '/image_raw', Img, Stamp)
    finally:
        bag.close()
def getsizes(uri):
    """Return the (width, height) of the image at *uri*, or None.

    Reads the resource in 1 KB chunks and stops as soon as PIL's
    incremental parser can determine the size.
    """
    # get file size *and* image size (None if not known)
    file = urllib.urlopen(uri)
    # The original left a dangling 'pass' string literal as its "else"
    # branch, never reached its file.close() (placed after a return),
    # and kept two unused locals (MIN_SIZE and the content-length size).
    try:
        p = ImageFile.Parser()
        while 1:
            data = file.read(1024)
            if not data:
                break
            p.feed(data)
            if p.image:
                return p.image.size
        return None
    finally:
        # always release the connection
        file.close()
def clean_image(self):
    # Django-style field cleaner: parse the uploaded image and validate
    # its format, resolution, and optional transparency against
    # self.params.  Returns the cleaned upload, or None when absent;
    # raises ValidationError on any mismatch.
    if not self.cleaned_data.get('image', None):
        # This check is done in the global clean as well, so we accept it here since
        # we might have decliend it.
        return None
    imagedata = self.cleaned_data['image']
    try:
        p = ImageFile.Parser()
        p.feed(imagedata.read())
        p.close()
        image = p.image
    except Exception as e:
        raise ValidationError("Could not parse image: %s" % e)
    if image.format != self.params['format'].upper():
        raise ValidationError(
            "Only %s format images are accepted, not '%s'" % (self.params['format'].upper(), image.format))
    xres = int(self.params['xres'])
    yres = int(self.params['yres'])
    resstr = "%sx%s" % (xres, yres)
    upresstr = "%sx%s" % image.size
    # Check maximum resolution
    if image.size[0] > xres or image.size[1] > yres:
        raise ValidationError(
            "Maximum size of image is %s. Uploaded image is %s." % (resstr, upresstr))
    # One of the sizes has to be exactly what the spec says, otherwise we might have an image that's
    # too small.
    if image.size[0] != xres and image.size[1] != yres:
        raise ValidationError(
            "Image must be %s pixels wide or %s pixels high. Uploaded image is %s." % (xres, yres, upresstr))
    if int(self.params.get('transparent', 0)) == 1:
        # Require transparency, only supported for PNG
        if self.params['format'].upper() != 'PNG':
            raise ValidationError(
                "Transparency validation requires PNG images")
        if image.mode != 'RGBA':
            raise ValidationError("Image must have transparent background")
    return self.cleaned_data['image']
def getImgSize(uri):
    """Return the (width, height) of the image at *uri*, or None."""
    # get file size *and* image size (None if not known)
    try:
        file = urllib.request.urlopen(uri)
        try:
            p = ImageFile.Parser()
            while 1:
                data = file.read(1024)
                if not data:
                    break
                p.feed(data)
                if p.image:
                    # (the original had an unreachable ``break`` here,
                    # after the return)
                    return p.image.size
        finally:
            # was only closed on one path; close on every path
            file.close()
    except Exception:
        # best-effort: network/parse failures mean "size unknown"
        # (narrowed from a bare except, which also caught
        # KeyboardInterrupt/SystemExit)
        pass
    return None
def get_image_dimensions(filepath):
    """
    Returns the (width, height) of an image, given an open file or a path.
    Adapted from django.core.files.image.
    """
    parser = PILImageFile.Parser()
    with open(filepath, 'rb') as fh:
        while True:
            chunk = fh.read(1024)
            if not chunk:
                # ran out of data before the parser found a size
                return None
            parser.feed(chunk)
            if parser.image:
                return parser.image.size
def verify(self, endchunk=b"IEND"):
    """CRC-check every remaining chunk and return the chunk ids seen.

    Simple approach; just calculate checksum for all remaining
    blocks.  Must be called directly after open.
    """
    seen = []
    while True:
        try:
            cid, pos, length = self.read()
        except struct.error:
            # chunk header could not be unpacked: file ends mid-chunk
            raise IOError("truncated PNG file")
        if cid == endchunk:
            break
        # the CRC check consumes the chunk payload
        self.crc(cid, ImageFile._safe_read(self.fp, length))
        seen.append(cid)
    return seen
def get_image_dimensions(file_or_path, close=False):
    """
    A modified version of ``django.core.files.images.get_image_dimensions``
    which accounts for Exif orientation.
    """
    parser = ImageFile.Parser()
    if hasattr(file_or_path, 'read'):
        fh = file_or_path
        saved_pos = fh.tell()
        fh.seek(0)
    else:
        fh = open(file_or_path, 'rb')
        close = True
    try:
        # PIL usually needs only a small chunk to determine the size, but
        # some TIFF files require parsing the whole file, so the chunk
        # size doubles on every pass.
        amount = 1024
        while True:
            chunk = fh.read(amount)
            if not chunk:
                return None
            try:
                parser.feed(chunk)
            except zlib.error as exc:
                # zlib complains about truncated streams; ignore that and
                # just feed more data to the parser (ticket #19457).
                if not exc.args[0].startswith("Error -5"):
                    raise
            if parser.image:
                return exif_aware_size(parser.image)
            amount *= 2
    finally:
        if close:
            fh.close()
        else:
            fh.seek(saved_pos)
def scores_for_image(imagecontent, n, N, k1=0.1, k2=0.4, k3=10, k4=0.5):
    """Compute a heuristic quality score and metadata for an image.

    Parameters:
        imagecontent: raw image bytes, parseable by PIL.
        n: this image's position/index; N: total number of candidates.
        k1..k4: scoring weights (generalized from the former hard-coded
            constants; defaults preserve the original behavior).

    Returns a dict of measured properties plus the calculated score:
    score = k1*(N-n) + k2*pixels - k3*blank_histogram_columns - k4*ratio.
    """
    imagedata = {}
    p = ImageFile.Parser()
    p.feed(imagecontent)
    p.close()
    width, height = p.image.size
    # Number of histogram bins with zero count (a blankness proxy).
    h = p.image.histogram().count(0)
    imagedata['width'] = width
    imagedata['height'] = height
    imagedata['blank columns in histogram'] = h
    s = width * height
    imagedata['size in pixels'] = s
    r = width / height
    imagedata['ratio width/height'] = r
    # NOTE(review): sys.getsizeof reports the container's shallow size,
    # not len(imagecontent) — kept as-is to preserve behavior.
    imagedata['byte size'] = \
        sys.getsizeof(imagecontent)
    score = (k1 * (N - n)) + (k2 * s) - (k3 * h) - (k4 * r)
    imagedata['N'] = N
    imagedata['n'] = n
    imagedata['k1'] = k1
    imagedata['k2'] = k2
    imagedata['k3'] = k3
    imagedata['k4'] = k4
    imagedata['calculated score'] = score
    return imagedata
def CreateBag(args):
    """Build a rosbag of rgba8 Image messages from a directory of images.

    args: (image_directory, rosbag_path).  An existing bag at
    rosbag_path is deleted first.
    """
    imgs = GetImages(args[0])
    if not imgs:
        # Bug fix: the original printed the *builtin* `dir` function
        # here instead of the directory that was actually searched.
        print("No images found in %s" % args[0])
        exit()
    rosbagfile = args[1]
    if os.path.exists(rosbagfile):
        os.remove(rosbagfile)  # start from a fresh bag file
    bag = rosbag.Bag(rosbagfile, 'w')
    try:
        for filename in imgs:
            print("Adding %s" % filename)
            with open(filename, "rb") as image_data:
                parser = ImageFile.Parser()
                while True:
                    raw_bytes = image_data.read(1024)
                    if not raw_bytes:
                        break
                    parser.feed(raw_bytes)
                parsed_image = parser.close()
            timestamp = rospy.Time.from_sec(time.time())
            image = Image()
            image.header.stamp = timestamp
            image.width = parsed_image.size[0]
            image.height = parsed_image.size[1]
            image.step = image.width * 4  # 4 bytes per rgba8 pixel
            image.encoding = "rgba8"
            image.header.frame_id = "image_data/image"
            # Flatten per-pixel tuples into one byte sequence.
            image.data = [
                pix for pixdata in parsed_image.getdata() for pix in pixdata
            ]
            bag.write('image_node/image_raw', image, timestamp)
    finally:
        bag.close()
def chunk_tRNS(self, pos, length):
    # Transparency (tRNS) chunk: interpretation depends on image mode.
    data = ImageFile._safe_read(self.fp, length)
    mode = self.im_mode
    if mode == "P":
        if _simple_palette.match(data):
            # tRNS contains a single fully-transparent entry; all other
            # palette entries are fully opaque.
            idx = data.find(b"\0")
            if idx >= 0:
                self.im_info["transparency"] = idx
        else:
            # Otherwise it is a byte string carrying one alpha value per
            # palette entry.
            self.im_info["transparency"] = data
    elif mode == "L":
        self.im_info["transparency"] = i16(data)
    elif mode == "RGB":
        self.im_info["transparency"] = i16(data), i16(data[2:]), i16(data[4:])
    return data
def getImageSizes(uri):
    """Return (file_size, (width, height)) for the image at *uri*.

    image size is None if it could not be parsed; on any failure the
    legacy fallback value [0, [0, 0]] is returned unchanged.
    """
    # http://effbot.org/zone/pil-image-size.htm
    try:
        file = urllib2.urlopen(uri, timeout=0.5)
        try:
            size = file.headers.get("content-length")
            if size:
                size = int(size)
            p = ImageFile.Parser()
            while True:
                data = file.read(1024)
                if not data:
                    break
                p.feed(data)
                if p.image:
                    # Bug fix: the original returned here without closing
                    # the connection, leaking it on every success.
                    return size, p.image.size
            return size, None
        finally:
            file.close()
    except Exception:
        # Narrowed from a bare `except:`; still deliberately best-effort.
        return [0, [0, 0]]
def thumbnail(fp, size=128, bs=2048):
    """Generate a PNG thumbnail of the image readable from *fp*.

    Returns the thumbnail as a base64-encoded (newline-wrapped) string.
    IOError from PIL propagates to the caller.
    """
    import base64
    p = ImageFile.Parser()
    while True:
        s = fp.read(bs)
        if not s:
            break
        p.feed(s)
    img = p.close()
    # Resize in place, preserving aspect ratio, to at most size x size.
    img.thumbnail((size, size))
    op = io.BytesIO()
    img.save(op, 'PNG')
    # Bug fix: str.encode('base64') was removed in Python 3.
    # encodebytes() produces the same newline-wrapped output as the old
    # 'base64' codec did.  (The former `except IOError: raise` was a
    # no-op and has been dropped.)
    return base64.encodebytes(op.getvalue()).decode('ascii')
def DQT(self, marker):
    #
    # Define quantization table. Support baseline 8-bit tables
    # only. Note that there might be more than one table in
    # each marker.

    # FIXME: The quantization tables can be used to estimate the
    # compression quality.

    n = i16(self.fp.read(2)) - 2
    s = ImageFile._safe_read(self.fp, n)
    while len(s):
        if len(s) < 65:
            raise SyntaxError("bad quantization table marker")
        v = i8(s[0])
        if v // 16 == 0:
            # Bug fix: use unsigned "B" — table entries are 0..255, and a
            # signed "b" array would misread values above 127 as negative.
            self.quantization[v & 15] = array.array("B", s[1:65])
            s = s[65:]
        else:
            return  # FIXME: add code to read 16-bit tables!
            # raise SyntaxError, "bad quantization table element size"
def parse(cls, file):
    """
    Parse an uploaded image file and return a PIL Image object.

    params:
        file: an uploaded-file object taken from request.FILES
    returns:
        img: PIL Image object, converted to RGBA if necessary
    """
    parser = ImageFile.Parser()
    # Feed all chunks first and only close once feeding succeeded.
    # Bug fix: the original closed the parser in a `finally`, so a
    # failed feed() was followed by a close() whose own error masked
    # the real one.
    for chunk in file.chunks():
        parser.feed(chunk)
    image = parser.close()
    if image.mode != 'RGBA':
        image = image.convert('RGBA')
    return image
def __parse_image(self, data):
    """Load the image from raw bytes with PIL.

    Sets self.__image and self.__mimetype; raises AlbumArtError on
    parse failure or unsupported format.
    """
    try:
        parser = ImageFile.Parser()
        parser.feed(data)
        self.__image = parser.close()
    except IOError:
        raise AlbumArtError('Error parsing albumart image data')
    try:
        self.__mimetype = PIL_MIME_MAP[self.__image.format]
    except KeyError:
        # Bug fix: capture the format name *before* discarding the image.
        # The original cleared self.__image first and then crashed with an
        # AttributeError while formatting this error message.
        fmt = self.__image.format
        self.__image = None
        raise AlbumArtError('Unsupported PIL image format: %s' % fmt)
    if self.__image.mode != 'RGB':
        self.__image = self.__image.convert('RGB')
def isLandscape(URL):
    """Return True when the image at *URL* is at least as wide as it is
    tall; False otherwise or when no image header can be parsed."""
    file = urllib.request.urlopen(URL)
    try:
        p = ImageFile.Parser()
        while True:
            data = file.read(1024)
            if not data:
                break
            p.feed(data)
            if p.image:
                # Bug fix: keep feeding until the header is actually
                # parsed.  The original returned False as soon as the
                # first 1 KB chunk failed to yield an image.
                return p.image.size[0] >= p.image.size[1]
    finally:
        # Bug fix: close on every path; the original leaked the handle
        # whenever it returned from inside the loop.
        file.close()
    return False
def getsizes(url):
    """
    Takes an image URL and returns its file size and pixel dimensions.

    :param url: A URL
    :returns: (file_size, (width, height)); the image size is None when
        it cannot be determined (doc fix: the original claimed ``Int``)
    """
    file = urlopen(url)
    try:
        size = file.headers.get("content-length")
        if size:
            size = int(size)
        p = ImageFile.Parser()
        while True:
            data = file.read(1024)
            if not data:
                break
            p.feed(data)
            if p.image:
                # Bug fix: the original returned here without closing the
                # connection, leaking it whenever parsing succeeded.
                return size, p.image.size
        return size, None
    finally:
        file.close()
def chunk_iTXt(self, pos, length):
    # International text chunk: keyword NUL compression-flag
    # compression-method language NUL translated-keyword NUL text.
    # `s` keeps the raw payload so every early exit can return it.
    r = s = ImageFile._safe_read(self.fp, length)
    try:
        k, r = r.split(b"\0", 1)
    except ValueError:
        # No NUL after the keyword: malformed chunk, keep raw bytes.
        return s
    if len(r) < 2:
        # Need at least the compression flag and method bytes.
        return s
    cf, cm, r = i8(r[0]), i8(r[1]), r[2:]
    try:
        lang, tk, v = r.split(b"\0", 2)
    except ValueError:
        return s
    if cf != 0:
        # Compressed text; only method 0 (zlib) is defined.
        if cm == 0:
            try:
                v = _safe_zlib_decompress(v)
            except ValueError:
                # Decompressed payload exceeded the safety limit.
                if ImageFile.LOAD_TRUNCATED_IMAGES:
                    return s
                else:
                    raise
            except zlib.error:
                return s
        else:
            # Unknown compression method.
            return s
    if bytes is not str:
        # Python 3: decode to text — keyword is latin-1, the language
        # tag, translated keyword and text are UTF-8.
        try:
            k = k.decode("latin-1", "strict")
            lang = lang.decode("utf-8", "strict")
            tk = tk.decode("utf-8", "strict")
            v = v.decode("utf-8", "strict")
        except UnicodeError:
            return s
    self.im_info[k] = self.im_text[k] = iTXt(v, lang, tk)
    # Guard against memory exhaustion from oversized text chunks.
    self.check_text_memory(len(v))
    return s
def get_size_and_format(url):
    """Return file size and image dimensions/format for the image at *url*.

    Always returns a dict with keys "size", "width", "height", "format";
    values are None whenever they could not be determined.  Network
    errors are re-raised as Exception.
    """
    unknown = {"size": None, "width": None, "height": None, "format": None}
    try:
        checked = UrlStr.is_url(url)
    except ValueError:
        return dict(unknown)
    if not checked:
        # Bug fix: the original fell off the end (implicit None) here.
        return dict(unknown)
    try:
        requesting_file = requests.get(url, stream=True)
        requesting_file.raise_for_status()
    except requests.exceptions.RequestException as err:
        # Bug fix: the original tried requesting_file.close() here, but
        # the name is unbound when requests.get() itself raised.
        raise Exception(err)
    try:
        content_type = requesting_file.headers.get("Content-Type")
        # Bug fix: guard against a missing Content-Type header
        # (None.startswith raised AttributeError before).
        if not content_type or not content_type.startswith("image/"):
            return dict(unknown)
        size = requesting_file.headers.get("Content-Length")
        # Bug fix: int(None) raised TypeError when the header was absent.
        size = int(size) if size else None
        image_parser = ImageFile.Parser()
        while True:
            data = requesting_file.raw.read(1024)
            if not data:
                # Stream exhausted without a parseable image header.
                # (Consistency fix: previously this path returned a
                # tuple while the success path returned a dict.)
                return {"size": size, "width": None,
                        "height": None, "format": None}
            image_parser.feed(data)
            if image_parser.image:
                img = image_parser.close()
                return {
                    "size": size,
                    "width": img.size[0],
                    "height": img.size[1],
                    "format": img.format
                }
    finally:
        requesting_file.close()
def getsizes(self, uri):
    """Return (file_size, image_size) for *uri*; image_size is None if
    it cannot be determined."""
    # get file size *and* image size (None if not known)
    file = urllib.urlopen(uri)
    try:
        size = file.headers.get("content-length")
        if size:
            size = int(size)
        p = ImageFile.Parser()
        while True:
            data = file.read(1024)
            if not data:
                break
            p.feed(data)
            if p.image:
                # Bug fix: the original returned here without closing
                # the connection, leaking it on every success.
                return size, p.image.size
        return size, None
    finally:
        file.close()
def getsizes(uri, dc):
    """Return the pixel size of the image at *uri*, memoised in *dc*.

    Only the first 1 KB of the resource is fetched; falls back to
    (None, None) whenever the size cannot be detected.
    """
    try:
        if uri in dc:
            # Cache hit: skip the network entirely.
            return dc[uri]
        handle = request.urlopen(uri)
        head = handle.read(1024)
        handle.close()
        if head:
            parser = ImageFile.Parser()
            parser.feed(head)
            if parser.image:
                dc[uri] = parser.image.size
                return dc[uri]
    except Exception as exc:
        print(exc)
        print('Cannot detect size')
    return None, None
def getFileSizeAndType(uri):
    """Return (file_size, image_size, image_format) for the image at *uri*.

    Raises ValueError when the stream cannot be parsed as an image.
    """
    # get file size, image size and file type (None if not known)
    file = urllib.request.urlopen(uri)
    try:
        size = file.headers.get("content-length")
        if size:
            size = int(size)
        p = ImageFile.Parser()
        while True:
            data = file.read(1024)
            if not data:
                break
            p.feed(data)
            if p.image:
                break
    finally:
        # Robustness fix: close even when read()/feed() raises.
        file.close()
    if p.image:
        return size, p.image.size, p.image.format
    # Dead-code fix: the original had an unreachable
    # `return size, (0, 0), None` after this raise.
    raise ValueError("Could not read file size and type from: %s" % uri)
def __call__(self, value):
    """Validate an uploaded file's image format and dimensions."""
    if value.size is None:
        # No new file uploaded, so assume things are fine.
        return
    try:
        parser = ImageFile.Parser()
        parser.feed(value.read())
        parser.close()
        img = parser.image
    except Exception as e:
        raise ValidationError("Could not parse image: %s" % e)
    if img.format.upper() not in self.formats:
        raise ValidationError(
            "Files of format {0} are not accepted, only {1}".format(
                img.format, ", ".join(self.formats)))
    if not self.maxsize:
        return
    max_w, max_h = self.maxsize
    if img.size[0] > max_w or img.size[1] > max_h:
        raise ValidationError(
            "Maximum image size is {}x{}".format(*self.maxsize))
def getsizes(uri):
    """Return True when the image at *uri* is landscape-oriented and at
    least 1920x1080 pixels; False otherwise (including when no image
    header could be parsed)."""
    file = urllib.request.urlopen(uri)
    try:
        size = file.headers.get("content-length")
        if size:
            size = int(size)
        p = ImageFile.Parser()
        width = height = None
        while True:
            data = file.read(1024)
            if not data:
                break
            p.feed(data)
            if p.image:
                print(p.image.size)
                width, height = p.image.size
                break  # dimensions known; no need to read further
    finally:
        file.close()
    if width is None:
        # Bug fix: the original left width/height unbound when the
        # stream never yielded an image, crashing with a NameError.
        return False
    if (height > width or height < 1080 or width < 1920):
        return False
    return True
def getsizes(uri):
    """Describe the image at *uri*.

    On success returns ("Image file size: ...", "Image dimensions: ...",
    "Filename: ..."); otherwise (content_length_or_None, None).
    """
    # get file size *and* image size (None if not known)
    file = urllib.request.urlopen(uri)
    try:
        size = file.headers.get("content-length")
        if size:
            size = int(size)
        p = ImageFile.Parser()
        while True:
            data = file.read(1024)
            if not data:
                break
            p.feed(data)
            if p.image:
                # Bug fix: the original returned here without closing the
                # connection (its close() call was only reachable at EOF).
                return (
                    "Image file size: " + str(size),
                    "Image dimensions: " + str(p.image.size),
                    "Filename: " + uri,
                )
        return size, None
    finally:
        file.close()