def load_animation(im):
    if im.format == 'GIF' and im.mode == 'P':
        # TODO: Pillow has bug with gif animation
        # https://github.com/python-pillow/Pillow/labels/GIF
        raise NotImplementedError('Pillow has bug with gif animation, '
                                  'fallback to GdkPixbuf')
    anime = anime_tools.AnimeFrameBuffer(im.n_frames, loop=im.info['loop'])
    background = im.info.get('background', None)
    if isinstance(background, tuple):
        color = 0
        for n, c in enumerate(background):
            color |= c << n * 8
        background = color
    frameiter = ImageSequence.Iterator(im)
    for n, frame in enumerate(frameiter):
        anime.add_frame(n, pil_to_pixbuf(frame),
                        frame.info.get('duration', 0),
                        background=background)
    return anime.create_animation()
def load_qrcode_to_base64(self):
    buf = BytesIO()
    if self.image_type == 'png':
        qr_img = self.remake_qrcode(self.qr_img)
        qr_img.save(buf, format='PNG')
        self.base64_str = f'base64://{base64.b64encode(buf.getvalue()).decode()}'
    elif self.image_type == 'gif':
        self.info = self.qr_img.info
        sequence = [
            self.remake_qrcode(f.copy())
            for f in ImageSequence.Iterator(self.qr_img)
        ]
        sequence[0].save(buf, format='GIF', save_all=True,
                         append_images=sequence[1:], disposal=2,
                         quality=100, **self.info)
        self.base64_str = f'base64://{base64.b64encode(buf.getvalue()).decode()}'
def webp2mp4(_buffer):
    _id = uuid.uuid1()
    filename = "himawarisay/{}.mp4".format(_id)
    thumbname = "himawarisay/{}.png".format(_id)
    im = Image.open(_buffer)
    width, height = im.size
    fourcc = VideoWriter_fourcc(*'avc1')
    video = VideoWriter(filename, fourcc, float(8), (width, height))
    for frame in ImageSequence.Iterator(im):
        if frame.im is not None:
            for i in range(int(frame.info['duration'] / 125)):
                video.write(numpy.array(frame.convert('RGB'))[..., ::-1])
        else:
            video.write(numpy.array(frame.convert('RGB'))[..., ::-1])
        frame.save(thumbname)
    """for ratio in [1.5+numpy.sin(i)/2 for i in numpy.arange(0, numpy.pi*4, numpy.pi*4/180)]:
        video.write(numpy.array(scale(im, ratio).convert('RGB'))[...,::-1])"""
    video.release()
    # im.save(thumbname)
    return filename, thumbname, width, height
def __init__(self, master, root):
    self.master = master
    self.root = root
    self.canvas = Canvas(master, width=1000, height=680)
    self.canvas.config(bg="black")
    self.canvas.pack()
    # Create a sequence of frames in a list that corresponds to the gif
    self.sequence = [
        ImageTk.PhotoImage(img)
        for img in ImageSequence.Iterator(
            Image.open(
                os.path.join(os.path.dirname(__file__),
                             "DEPENDENCES/IMAGES/initialize.gif")))
    ]
    self.image = self.canvas.create_image(500, 340, anchor=CENTER,
                                          image=self.sequence[0])
    self.animate(1)
def modify(self, function, *args, **kwargs):
    """
    Modify the image object using the given Image function.
    This function supplies sequence support.
    """
    if not gif_support or not self.gif:
        self.object = function(self.object, *args, **kwargs)
    else:
        frames = []
        duration = self.object.info.get("duration") / 1000
        for frame in ImageSequence.Iterator(self.object):
            frame_bytes = utils.convert_image_object(
                function(frame, *args, **kwargs))
            frames.append(imageio.imread(frame_bytes, format="PNG"))

        # Save the image as bytes and recreate the image object
        image_bytes = imageio.mimwrite(imageio.RETURN_BYTES, frames,
                                       format=self.format, duration=duration)
        self.object = Image.open(BytesIO(image_bytes))
        self.gif_bytes = image_bytes
def create_image(self, buffer):
    img = Image.open(BytesIO(buffer))
    self.icc_profile = img.info.get('icc_profile')
    self.transparency = img.info.get('transparency')
    self.exif = img.info.get('exif')

    self.subsampling = JpegImagePlugin.get_sampling(img)
    if (self.subsampling == -1):  # n/a for this file
        self.subsampling = None

    self.qtables = getattr(img, 'quantization', None)

    if self.context.config.ALLOW_ANIMATED_GIFS and self.extension == '.gif':
        frames = []
        for frame in ImageSequence.Iterator(img):
            frames.append(frame.convert('P'))
        img.seek(0)
        self.frame_count = len(frames)
        return frames

    return img
def iter(self, *, mode=None, rotate=False, apply_gamma=False) -> np.ndarray:
    """
    Iterate over all ndimages/frames in the URI

    Parameters
    ----------
    mode : {str, None}
        Convert the image to the given mode before returning it. If None,
        the mode will be left unchanged. Possible modes can be found at:
        https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes
    rotate : {bool}
        If set to ``True`` and the image contains an EXIF orientation tag,
        apply the orientation before returning the ndimage.
    apply_gamma : {bool}
        If ``True`` and the image contains metadata about gamma, apply gamma
        correction to the image.
    """
    for im in ImageSequence.Iterator(self._image):
        yield self._apply_transforms(im, mode, rotate, apply_gamma)
def handle_image_conversion():
    global countv, mode
    from PIL import Image
    image_array = []
    if img_selected == None:
        print 'Warning: no image provided. Creating blank image instead'
        blank_screen = Image.new("1", (720, 348), 255 * background)
        if mode == 0:
            print_arduino(blank_screen)
            time.sleep(.04)
        elif mode == 1:
            file_name = 'temp.gif'
            blank_screen.save(file_name)
            image_array.append(imageio.imread(file_name))
            print 'Saving Image...'
            os.remove(file_name)
            imageio.mimsave('test.gif', image_array)
            print 'Done'
        return

    image = Image.open(img_selected)
    frames = [frame.copy() for frame in ImageSequence.Iterator(image)]
    for im in frames:
        print 'Frame: ' + str(frames.index(im) + 1) + '/' + str(len(frames)) + ', ' + str(countv) + '\r',
        image_graphic = convert_image_to_ascii(im)
        if mode == 0:
            print_arduino(image_graphic)
            time.sleep(.04)
        elif mode == 1:
            file_name = 'temp.' + img_selected.split('.')[1]
            image_graphic.save(file_name)
            image_array.append(imageio.imread(file_name))
    if mode == 1:
        print '\nSaving Image...'
        os.remove(file_name)
        imageio.mimsave('test.gif', image_array)
        print 'Done'
def place_bling():
    if request.method == 'GET':
        return "You must POST a json Object in the following form: {\"spots\": number, \"image\": \"base64encodedimage\"}"
    else:
        try:
            spot_count = request.json['spots']
            image = request.json['image']
            image_data = re.sub('^data:image/.+;base64', '', image)
        except:
            return "Malformed JSON request! Given: " + str(request.json)

        im = Image.open(BytesIO(base64.b64decode(image_data)))
        paster = Image.open('images/bling.gif')
        im.thumbnail((500, 500))
        width, height = im.size
        bling_width, bling_height = 40, 40

        spots = []
        for i in range(spot_count):
            spots.append((random.randint(0, width - bling_width),
                          random.randint(0, height - bling_height)))

        frames = []
        for frame in ImageSequence.Iterator(paster):
            frame = frame.convert('RGBA')
            frame.thumbnail((bling_width, bling_height))
            curr = im.copy().convert('RGBA')
            for spot in spots:
                curr.paste(frame, spot, mask=frame)
            frames.append(curr)

        buffered = BytesIO()
        # save_all necessary for animation
        frames[0].save(buffered, format="GIF", save_all=True,
                       append_images=frames[1:], loop=0)
        img_str = base64.b64encode(buffered.getvalue())
        print("Image created!")
        return img_str
def _process(self, **payload):
    banner = Image.open(
        self.get_cached_image_from_url(payload["banner_url"]))
    x, y = banner.size
    _x, _y = (self.DISCORD_BANNER_SIZE[0],
              int((x / y)) * self.DISCORD_BANNER_SIZE[1])
    _ = int(y / self.BANNER_AVATAR_RATIO)
    avatar = Image.open(self.get_image_from_url(
        payload["avatar_url"])).convert("RGBA")
    # avatar = self.add_avatar_border(avatar)
    avatar = self.get_round_avatar(avatar)
    border_width = y // self.BORDER_HEIGHT_RATIO
    avatar = avatar.resize((_, _))
    avatar = self.add_avatar_border(avatar, border_width,
                                    payload.get("border_color"))
    avatar_xy = ((x - avatar.size[0]) // 2, y // self.AVATAR_RATIO_Y)
    frames = [f.copy() for f in ImageSequence.Iterator(banner)]
    if len(frames) == 1:
        banner.paste(avatar, avatar_xy, avatar)
        banner = add_banner_border(banner, border_width,
                                   outline=payload.get("border_color"))
        banner = self.write_text(banner, payload)
        banner.thumbnail(self.DISCORD_BANNER_SIZE, Image.ANTIALIAS)
        frames = banner
    else:
        for i, frame in enumerate(frames):
            frame = frame.convert("RGBA")
            frame.paste(avatar, avatar_xy, avatar)
            frame = add_banner_border(frame, border_width,
                                      outline=payload.get("border_color"))
            frame = self.write_text(frame, payload)
            frame.thumbnail(self.DISCORD_BANNER_SIZE, Image.ANTIALIAS)
            frames[i] = frame
    return self.to_bytes(frames)
async def collect_image(url, static=False):
    data = io.BytesIO()
    length = 0
    async with aiohttp.ClientSession() as cs:
        async with cs.get(url) as resp:
            while True:
                dat = await resp.content.read(16384)
                if not dat:
                    break
                length += len(dat)
                if length > MAX_FILE_SIZE:
                    # Match the (frames, url) shape of the success return
                    return None, None
                data.write(dat)
    data.seek(0)
    im = Image.open(data)
    if im.size[0] * im.size[1] > PIXEL_COUNT_LIMIT:
        return None, None
    frames = []
    for frame in ImageSequence.Iterator(im):
        frames.append(frame.copy())
        if static:
            break
    if im.size[0] * im.size[1] > MAX_PIXEL_COUNT:
        aspect = im.size[0] / im.size[1]
        height = math.sqrt(MAX_PIXEL_COUNT / aspect)
        width = height * aspect
        if height < im.size[1] and width < im.size[0]:
            for n, frame in enumerate(frames):
                frames[n] = frame.resize((int(width), int(height)),
                                         Image.ANTIALIAS)
    for n, frame in enumerate(frames):
        frames[n] = frame.convert('RGBA')
    return frames, url
def make_new_avatar(self, author_avatar: BytesIO, choice_avatar: BytesIO,
                    is_gif: bool) -> Optional[BytesIO]:
    avatar = Image.open(author_avatar)
    new_avatar = Image.open(choice_avatar)
    new_avatar = new_avatar.convert("RGBA")
    if is_gif:
        gif_list = [frame.copy() for frame in ImageSequence.Iterator(avatar)]
        img_list = []
        for frame in gif_list:
            temp2 = Image.new("RGBA", frame.size)
            temp2.paste(frame, (0, 0))
            w, h = frame.size
            new_avatar = new_avatar.resize((w, h))
            temp2.paste(new_avatar, (0, 0), new_avatar)
            temp2 = temp2.resize((200, 200), Image.ANTIALIAS)
            img_list.append(temp2)
            temp = BytesIO()
            temp2.save(temp, format="GIF", save_all=True,
                       append_images=img_list, duration=0, loop=0)
            temp.name = "trustyavatar.gif"
            if sys.getsizeof(temp) > 7000000 and sys.getsizeof(temp) < 8000000:
                break
    else:
        temp2 = avatar.copy()
        w, h = temp2.size
        new_avatar = new_avatar.resize((w, h))
        temp2.paste(new_avatar, (0, 0), new_avatar)
        temp2 = temp2.resize((200, 200), Image.ANTIALIAS)
        temp = BytesIO()
        temp2.save(temp, format="PNG")
        temp.name = "trustyavatar.png"
    if temp:
        temp.seek(0)
        return temp
def __aemote():
    # Rules to start a new image processing task:
    #   The animated emote hasn't been resized yet
    #   The animated emote was requested under a size that hasn't been scaled yet
    # Note: image, resize_value and outfile_path are free variables supplied
    # by the enclosing scope.
    metadata = image.info
    # Extract the frames for resizing
    frames_resize = []
    for frame in ImageSequence.Iterator(image):
        frames_resize.append(
            frame.resize((int(resize_value * image.width),
                          int(resize_value * image.height)),
                         resample=Image.BOX))
    first = next(iter(frames_resize))
    first.info = metadata
    first.save(outfile_path, format='GIF', quality=100, save_all=True,
               append_images=frames_resize)
def dealOneFile(filePath):
    img_orign = Image.open(filePath)
    _, file_type = os.path.splitext(filePath)
    basename = os.path.basename(filePath)
    if file_type == '.gif':
        sequence = []
        offX = random.random()
        offY = random.random()
        for f in ImageSequence.Iterator(img_orign):
            if len(sequence) % 2 == 0:
                offX = random.random()
                offY = random.random()
            sequence.append(dealOneImage(f.convert(), offX, offY))
        sequence[0].save(f'./output/{basename}', save_all=True,
                         append_images=sequence[1:])
    else:
        image_out = dealOneImage(img_orign)
        for x in range(2):
            image_out = dealOneImage(image_out)
        image_out.save(f'./output/{basename}')
def resize_and_save(gif_file_name, size):
    '''
    gif_file_name: string, specifying the filename of the gif you want to resize
    size: tuple of 2 nonnegative integers specifying the desired output size
    '''
    im = Image.open(gif_file_name)
    frames = ImageSequence.Iterator(im)

    def thumbnails(frames):
        for frame in frames:
            thumbnail = frame.copy()
            thumbnail.thumbnail(size, Image.ANTIALIAS)
            yield thumbnail

    frames = thumbnails(frames)
    output_image = next(frames)
    output_image.info = im.info
    prfx = str(size[0]) + 'x' + str(size[1])
    output_image.save(prfx + gif_file_name, save_all=True,
                      append_images=list(frames))
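# A minimal sketch of how resize_and_save above could be exercised, assuming
# nothing beyond Pillow. The file name "demo.gif", the frame colours, and the
# target size are illustrative assumptions, not part of the original snippet.
# Note that the function relies on Image.ANTIALIAS, which Pillow 10+ removed
# in favour of Image.LANCZOS.
from PIL import Image

# Build a tiny two-frame GIF on disk so the call below is self-contained.
demo_frames = [Image.new("L", (64, 64), color=c) for c in (0, 128)]
demo_frames[0].save("demo.gif", save_all=True, append_images=demo_frames[1:],
                    duration=100, loop=0)

# Writes the shrunken copy as "32x32demo.gif", because the function prefixes
# the output size onto the input file name.
resize_and_save("demo.gif", (32, 32))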
def test_gif_legacy_pillow(image_files: Path, im_in: str, mode: str):
    """
    This test tests backwards compatibility of using the new API with a
    legacy plugin. In particular reading ndimages.

    I'm not sure where this test should live, so it is here for now.
    """
    im_path = image_files / im_in

    with iio.imopen(im_path, "r", search_legacy_only=True,
                    format="GIF-PIL") as file:
        iio_im = file.read(pilmode=mode)

    pil_im = np.asarray([
        np.array(frame.convert(mode))
        for frame in ImageSequence.Iterator(Image.open(im_path))
    ])
    if pil_im.shape[0] == 1:
        pil_im = pil_im.squeeze(axis=0)

    assert np.allclose(iio_im, pil_im)
def clean_in_path(path):
    print(f'Cleaning path {path}...')
    entries = os.listdir(path)
    for index, dirent in enumerate(entries):
        gif_path = os.path.join(path, dirent)
        gif = Image.open(gif_path)
        try:
            frames = [frame.copy() for frame in ImageSequence.Iterator(gif)]
        except ValueError:
            print(f' * Removing {gif_path} (bad GIF metadata)')
            os.remove(gif_path)
        except IOError:
            print(f' * Removing {gif_path} (truncated GIF)')
            os.remove(gif_path)
        if index % PRINT_EVERY == 0:
            print(f' * Cleaned {index}/{len(entries)}')
    print('Done.')
def __init__(self, assetpath, fontpath):
    self.buffer_from_url = buffer_from_url
    self.fontpath = fontpath
    self.triggered_text = Image.open(f"{assetpath}/triggered.jpg")
    self.triggered_red = Image.new(mode="RGBA", size=(216, 216),
                                   color=(255, 0, 0, 100))
    self.triggered_bg = Image.new(mode="RGBA", size=(216, 216),
                                  color=(0, 0, 0, 0))
    self.ussr_frames = []
    for frame in ImageSequence.Iterator(Image.open(f"{assetpath}/ussr.gif")):
        self.ussr_frames.append(frame.convert("RGB"))
    self.ussr_frames_size = len(self.ussr_frames)
    self.templates = {}
    for image in listdir(assetpath):
        if not image.endswith(".gif"):
            continue
        self.templates[image] = Image.open(f"{assetpath}/{image}")
def pick(event):
    global a, flag
    while 1:
        # bundle_dir = getattr(sys, '_MEIPASS', os.path.abspath(os.path.dirname(__file__)))
        # path = os.path.join(bundle_dir, 'tony.gif')
        filename = resource_path(os.path.join("images", "tmp.gif"))
        # filename = resource_path("tmp.gif")
        print("*" * 10)
        print(filename)
        im = Image.open(filename)
        # im = Image.open(path)
        # im = Image.open('tmp.gif')
        # Iterator over the GIF image stream
        iter = ImageSequence.Iterator(im)
        # frame is a single frame of the gif; convert it and it can be displayed
        for frame in iter:
            pic = ImageTk.PhotoImage(frame)
            canvas.create_image((200, 150), image=pic)
            time.sleep(0.1)
            root.update_idletasks()  # refresh
            root.update()
def generate():
    with Image.open('resources/lick.gif') as base:
        for index, frame in enumerate(ImageSequence.Iterator(base)):
            draw = ImageDraw.Draw(frame)
            draw.text(((500 - bw) / 2, 450), b.display_name, font=font,
                      fill=self.hug_colour)
            if index >= 60 or index <= 8:
                pass
            else:
                draw.text(((450 - aw) / 2, 150), a.display_name, font=font,
                          align='center', fill=99)
            # draw.text(((500 - w) / 2, (285 - h) / 2), msg, font=font, align='center', fill=99)
            frames.append(frame.copy())
            del draw
def get_gray_static_image(self):
    # 1. read the image, 2. decide whether it is a png or a gif, 3. process and return
    # Save the image first
    image_base64 = base64.b64decode(self.image_info)
    image_io = BytesIO(image_base64)
    image = Image.open(image_io)
    # A png is returned as a grayscale image directly; a gif is read frame by
    # frame and the frames are merged
    if self.get_suffix() == "png":
        np_list = np.asarray(image)
        np_merge = 255 - cv2.cvtColor(np_list, cv2.COLOR_BGR2GRAY)
    else:
        np_list = list()
        for index, image_frame in enumerate(ImageSequence.Iterator(image)):
            np_list.append(np.asarray(image_frame))
        np_merge = reduce(np.minimum, np_list)
    return np_merge
def pillow_process(args, is_rave, lines_in_text, timestamp):
    # Open crab.gif and add our font
    im = Image.open('bot/cogs/memes_cog/assets/crab.gif')
    fnt = ImageFont.truetype('bot/cogs/memes_cog/assets/LemonMilk.otf', 11)

    # Draw text on each frame of the gif
    # Gonna be honest I don't quite understand how it works but I got it from the Pillow docs/issues
    frames = []
    for frame in ImageSequence.Iterator(im):
        d = ImageDraw.Draw(frame)
        w, h = d.textsize(args, fnt)
        # draws the text on to the frame. Tries to center horizontally and
        # tries to go as close to the bottom as possible
        d.text((im.size[0] / 2 - w / 2, im.size[1] - h - (5 * lines_in_text)),
               args, font=fnt, align='center', stroke_width=bool(is_rave),
               stroke_fill=Colors.ClemsonOrange, spacing=6)
        del d

        b = io.BytesIO()
        frame.save(b, format='GIF')
        frame = Image.open(b)
        frames.append(frame)

    frames[0].save(f'bot/cogs/memes_cog/assets/out_{timestamp}.gif',
                   save_all=True, append_images=frames[1:])
def create_animation_frames(deck, image_filename):
    icon_frames = list()

    # Open the source image asset.
    icon = Image.open(os.path.join(ASSETS_PATH, image_filename))

    # Iterate through each animation frame of the source image
    for frame in ImageSequence.Iterator(icon):
        # Create new key image of the correct dimensions, black background.
        frame_image = PILHelper.create_scaled_image(deck, frame)

        # Pre-convert the generated image to the native format of the StreamDeck
        # so we don't need to keep converting it when showing it on the device.
        native_frame_image = PILHelper.to_native_format(deck, frame_image)

        # Store the rendered animation frame for later use.
        icon_frames.append(native_frame_image)

    # Return an infinite cycle generator that returns the next animation frame
    # each time it is called.
    return itertools.cycle(icon_frames)
def processImage(r_data, path):
    """
    Iterate the GIF, extracting each frame.
    """
    stream = io.BytesIO(r_data)
    im = Image.open(stream)

    max_time = 0
    max_frame = None
    for frame in ImageSequence.Iterator(im):
        try:
            if frame.info['duration'] > max_time:
                max_time = frame.info['duration']
                max_frame = frame.convert('RGBA')
        except KeyError:
            # Ignore if there was no duration, we will not count that frame.
            pass

    # print max_time
    max_frame.save(path, format='PNG')

    output = io.BytesIO()
    max_frame.save(output, format='PNG')
    return output.getvalue()
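# A small, self-contained driver for processImage above; the frame colours,
# durations, and output path are illustrative assumptions. It builds a
# two-frame GIF in memory with per-frame durations of 50 ms and 200 ms, so the
# function should pick the 200 ms frame as the longest-displayed one.
import io

from PIL import Image

# Two visibly different frames so Pillow does not merge them on save.
demo_frames = [Image.new("L", (32, 32), color=c) for c in (0, 255)]
buf = io.BytesIO()
demo_frames[0].save(buf, format="GIF", save_all=True,
                    append_images=demo_frames[1:], duration=[50, 200], loop=0)

# processImage writes the longest-displayed frame to disk as a PNG and also
# returns the same PNG bytes.
png_bytes = processImage(buf.getvalue(), "longest_frame.png")
print(len(png_bytes), "bytes written to longest_frame.png")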
def main():
    im = Image.open('white.gif')
    coords = []
    for i in ImageSequence.Iterator(im):
        idata = list(i.getdata())
        idx = idata.index(8)
        x = idx % 200
        y = idx // 200
        coords.append((x, y))

    coords = np.array(coords)
    interval = np.where(np.all(coords == [100, 100], axis=1) == True)
    interval = interval[0].tolist()
    interval.append(coords.shape[0])

    imOut = Image.new(im.mode, im.size)
    x, y = 30, 50
    for i in range(len(interval) - 1):
        draw(imOut, [x, y], coords[interval[i] + 1:interval[i + 1]], 200)
        x += 30
    imOut.show()
def main():
    image = open_image()
    result = Image.new("RGB", (1000, 1000))
    draw = ImageDraw.Draw(result)
    center = ((image.width // 2), (image.height // 2))
    cursor = [500, 500]

    # Animated Gif
    trace = []
    spacing = 25
    far_right = cursor[0]
    for frame in ImageSequence.Iterator(image):
        min_x, min_y, max_x, max_y = frame.getbbox()
        centroid_x = (max_x + min_x) // 2
        centroid_y = (max_y + min_y) // 2
        move_x = (centroid_x - center[0])
        move_y = (centroid_y - center[1])
        if not (move_x or move_y):
            draw.line(trace)
            trace = []
            cursor[0] = far_right + spacing
        cursor[0] += move_x
        cursor[1] += move_y
        if cursor[0] > far_right:
            far_right = cursor[0]
        trace.append(tuple(cursor))

    draw.line(trace)
    result.show()
    return result
def __init__(self, device, filename, timeout_ms=None):
    self.device = device
    self.filename = filename
    self.timeout_ms = timeout_ms
    self.time_played_ms = 0
    self.eof = False
    self.current_frame = 0
    self.frames = []
    self.durations = []

    im = Image.open(self.filename)
    for frame in ImageSequence.Iterator(im):
        # Initialize a new image
        new_image = Image.new("RGB", (width, height), "black")
        new_pix = new_image.load()

        # Load the frame from the gif
        gif_image = frame.convert("RGB")
        gif_pix = gif_image.load()

        # Find the encoded duration in milliseconds
        try:
            frame_duration_ms = frame.info["duration"]
        except KeyError:
            frame_duration_ms = 25
        self.durations.append(frame_duration_ms)

        # Map each pixel
        for col in range(width):
            x = col * X_MULT + X_OFFSET
            for row in range(height):
                y = row * Y_MULT + Y_OFFSET
                new_pix[col, row] = gif_pix[x, y]

        # Save the image
        self.frames.append(new_image)
def removeWhitePixels(gifpath):
    # using PIL
    img = Image.open(gifpath)
    images = []
    frames = ImageSequence.Iterator(img)
    for frame in frames:
        try:
            img_mod = frame.convert("RGBA")
            datas = img_mod.getdata()
            newData = []
            # Currently only peaks stored but rest of the plot is white, so remove white pixels
            for item in datas:
                if item[0] == 255 and item[1] == 255 and item[2] == 255:
                    newData.append((255, 255, 255, 0))
                else:
                    newData.append(item)
            img_mod.putdata(newData)
            images.append(img_mod)
        except EOFError:
            continue
    path = os.path.dirname(gifpath) + "/gifoverlay.gif"
    images[0].save(path, save_all=True, append_images=images[1:],
                   optimize=True, duration=100, loop=0, disposal=2,
                   transparency=0)
    return path
def check_image(image, modifier, method):
    try:
        modifier_converter = MODIFIERS[modifier]
    except KeyError:
        raise RuntimeError('Invalid image modifier.')

    with Image.open(io.BytesIO(image)) as img:
        if img.format == "GIF":
            total = [0, 0, 0, 0]
            count = 0
            for frame in ImageSequence.Iterator(img):
                f = frame.resize((round(img.width / 3), round(img.height / 3)))
                values = color_ratios(f, modifier_converter['colors'])
                for i in range(4):
                    total[i] += values[i]
                count += 1
            ratios = [0, 0, 0, 0]
            for i in range(4):
                ratios[i] = round(10000 * total[i] / count) / 100
            passed = ratios[3] <= 10
        else:
            img = img.resize((round(img.width / 3), round(img.height / 3)))
            values = color_ratios(img, modifier_converter['colors'])
            ratios = [0, 0, 0, 0]
            for i in range(4):
                ratios[i] = round(10000 * values[i]) / 100
            passed = ratios[3] <= 10

    colors = []
    for i in range(3):
        colors.append({'name': modifier_converter['color_names'][i],
                       'ratio': ratios[i]})
    colors.append({'name': 'Non-Blurple', 'ratio': ratios[3]})
    data = {'passed': passed, 'colors': colors}
    return data
def process_lower_level(img: Image.Image, effect: str, arg: int) -> BytesIO:
    # this will only loop once for still images
    frame_list, durations = [], []

    # if a GIF loops, it will have the attribute loop = 0; if not, then attribute does not exist
    try:
        img.info['loop']
        image_loop = True
    except KeyError:
        image_loop = False
        pass

    for _ in ImageSequence.Iterator(img):
        # if not animated, will throw KeyError
        try:
            duration = img.info['duration']  # type: int
            durations.append(duration)
        except KeyError:
            # an empty tuple for durations tells image_to_buffer that image is still
            pass

        function_dict: Mapping[str, Callable] = {
            'acid': make_acid_img,
            'aenima': make_aenima_img,
            'caption': make_captioned_img,
            'lateralus': make_lateralus_img,
            'needban': make_needban_img,
            'needping': make_needping_img,
            'pingbadge': make_pingbadge_img,
            'xokked': make_xokked_img,
            'resize': resize_img,
        }

        # these are no longer coroutines
        img_out = function_dict[effect](img.convert('RGBA'), arg)
        frame_list.append(img_out)

    fp = image_to_buffer(frame_list, tuple(durations), image_loop)

    return fp