def convert(f):
    img = Image.open(f).convert("RGBA")
    if args.output_dir:
        print(f)
        # Swap the .png suffix for .pony and place the result in the output directory.
        foo, _, _ = f.rpartition('.png')
        output = os.path.join(args.output_dir, os.path.basename(foo) + '.pony')
        # The pony metadata is stored as JSON in the PNG's 'pixelterm-metadata' info field.
        metadata = json.loads(img.info.get('pixelterm-metadata'))
        comment = metadata.get('_comment')
        if comment is not None:
            del metadata['_comment']
            comment = '\n' + comment
        else:
            comment = ''
        # Metadata area: upper-cased 'KEY: value' lines between '$$$' delimiters,
        # followed by the optional free-form comment.
        metadataarea = '$$$\n' + \
            '\n'.join('\n'.join(k.upper() + ': ' + v for v in metadata[k])
                      for k in sorted(metadata.keys())) + \
            comment + '\n$$$\n'
        with open(output, 'w') as of:
            of.write(metadataarea)
            of.write(termify_pixels(img))
    else:
        print(termify_pixels(img))
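# Illustration only (hypothetical helper, not used by the tool): builds the same
# kind of metadata block as convert() above for a made-up metadata dict, to show
# the .pony header format. The keys and values here are invented; only the '$$$'
# delimiters, the upper-cased 'KEY: value' lines and the trailing comment follow
# from the code above.
def _example_metadata_area():
    metadata = {'group': ['demo'], 'name': ['Example Pony']}
    comment = '\nfree-form comment'
    # Produces:
    # $$$
    # GROUP: demo
    # NAME: Example Pony
    # free-form comment
    # $$$
    return ('$$$\n'
            + '\n'.join('\n'.join(k.upper() + ': ' + v for v in metadata[k])
                        for k in sorted(metadata))
            + comment + '\n$$$\n')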
def printframe(framedata):
    print(termify_pixels(MockImage(framedata)))
def gifterm():
    parser = argparse.ArgumentParser(description='Render pixel images on 256-color ANSI terminals')
    parser.add_argument('image', type=str)
    parser.add_argument('-s', '--size', type=str, help='Terminal size, [W]x[H]')
    parser.add_argument('--serve', type=int, help='Serve via TCP on given port')
    args = parser.parse_args()

    tw, th = None, None
    if args.size:
        tw, th = map(int, args.size.split('x'))
    else:
        try:
            tw, th = os.get_terminal_size()
        except OSError:
            # Not a regular terminal; fall back to rendering at full image size.
            pass
    if th is not None:
        th = th * 2  # each character cell holds two pixel rows

    img = Image.open(args.image)
    palette = img.getpalette()
    last_frame = Image.new("RGBA", img.size)
    frames = []
    for frame in ImageSequence.Iterator(img):
        # This works around a known bug in Pillow.
        # See also: http://stackoverflow.com/questions/4904940/python-converting-gif-frames-to-png
        frame.putpalette(palette)
        c = frame.convert("RGBA")
        if img.info['background'] != img.info.get('transparency'):
            last_frame.paste(c, c)
        else:
            last_frame = c
        im = last_frame.copy()
        if (tw, th) != (None, None):
            im.thumbnail((tw, th), Image.NEAREST)
        frames.append(termify_pixels(im, True))

    if args.serve:
        from socketserver import ThreadingMixIn, TCPServer, BaseRequestHandler

        # Quote-Of-The-Day protocol implementation.
        # See RFC 865 ( https://tools.ietf.org/html/rfc865 ) for details.
        class ThreadingTCPServer(ThreadingMixIn, TCPServer):
            pass

        class QOTDHandler(BaseRequestHandler):
            def handle(self):
                try:
                    self.request.sendall(bytes(cursor_invisible, "UTF-8"))
                    while True:
                        for frame in frames:
                            self.request.sendall(bytes(home_cursor + reset_sequence, "UTF-8"))
                            self.request.sendall(bytes(frame, "UTF-8"))
                            time.sleep(min(1/10, img.info['duration']/1000.0))
                except OSError:
                    pass  # client disconnected

        server = ThreadingTCPServer(('', args.serve), QOTDHandler)
        server.serve_forever()
    else:
        print(cursor_invisible)
        atexit.register(lambda: print(cursor_visible))
        signal.signal(signal.SIGTERM, lambda signum, stack_frame: exit(1))
        try:
            while True:
                for frame in frames:
                    print(home_cursor)
                    print(reset_sequence)
                    print(frame)
                    time.sleep(min(1/10, img.info['duration']/1000.0))
        except KeyboardInterrupt:
            pass
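# Illustration only (hypothetical client, not part of the tool): gifterm's
# --serve mode above simply streams ANSI-encoded frames over a raw TCP socket,
# so any client that dumps the byte stream to a terminal will show the
# animation. Host and port below are assumptions; `nc localhost 1234` would do
# the same job.
def _example_stream_client(host='localhost', port=1234):
    import socket
    import sys
    with socket.create_connection((host, port)) as s:
        try:
            while True:
                data = s.recv(4096)
                if not data:
                    break
                sys.stdout.write(data.decode('utf-8', errors='replace'))
                sys.stdout.flush()
        except KeyboardInterrupt:
            pass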
def _precalculate_one_image(self, coords, pf, dsp, termsize):
    # Pre-encode the image at the given coordinates once for each enabled
    # output: pixelflut (pf), display (dsp) and the terminal.
    img = self._get_image(*coords)
    return (pf.encode_image(img) if pf else None,
            dsp.encode_image(img, dsp.size) if dsp else None,
            pixelterm.termify_pixels(resize_image(img, termsize)) if termsize else None)
for agent in agents:
    while time.time() - ts < args.wait:
        if random.random() > 0.2:
            action = random.choice(agent.animations)
            print('Playing:', action)
            for img_pf, img_dsp, img_term in agent(action, not args.nosleep):
                with runlock:
                    if args.terminal:
                        print('\033[H' + img_term)
                    if args.display:
                        dsp.sendframe(img_dsp)
                    if args.pixelflut:
                        pf.sendframe(img_pf)
                if time.time() - ts > args.wait:
                    print('Force-advance', ts)
                    break
        if not args.nosleep:
            time.sleep(1)
    print('Advancing', ts)
    ts = time.time()
else:
    # Single-action mode: play only the action given on the command line,
    # using the first agent.
    for img_pf, img_dsp, img_term in agents[0](args.action, not args.nosleep):
        if args.terminal:
            # img_term is the pre-rendered terminal frame for this output.
            print(img_term)
        if args.display:
            dsp.sendframe(img_dsp)
        if args.pixelflut:
            pf.sendframe(img_pf)