from PIL import Image


def _quantize(t: Image.Image, palette) -> Image.Image:
    with Image.new('P', (1, 1)) as palette_img:
        # Flatten the 2D palette ([(R, G, B), ...]) to 1D, then pad with the
        # first color so putpalette() receives 768 values (256 RGB triples).
        p = [v for color in palette for v in color] + list(palette[0]) * (256 - len(palette))
        palette_img.putpalette(p)
        palette_img.load()
        # Quantize via PIL's internal ImagingCore so the result is not dithered.
        im = t.im.convert('P', 0, palette_img.im)
        return t._new(im).convert('RGB')
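
# Illustrative usage sketch (not part of the original module): the palette
# format -- a list of (R, G, B) tuples -- is inferred from the flattening
# logic in _quantize(); the helper name and sample image are hypothetical.
def _demo_quantize() -> None:
    palette = [(0, 0, 0), (255, 255, 255), (255, 0, 0)]  # black, white, red
    source = Image.new('RGB', (4, 4), (200, 30, 30))     # synthetic reddish image
    reduced = _quantize(source, palette)
    # Each pixel is mapped to the nearest palette entry without dithering,
    # so the reddish source should come back as pure red.
    print(reduced.getpixel((0, 0)))  # expected: (255, 0, 0)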
def recolor(img: Image.Image, alpha: bool, **options):
    # The alpha channel takes up one palette color, so reserve a slot for it.
    colors_count = options["colors"]["colors"] - 1 if alpha else options["colors"]["colors"]
    # quantize() on an image with an alpha channel requires method 2 (FASTOCTREE);
    # method 3 (LIBIMAGEQUANT) also works but depends on an external library.
    # TODO: try quantizing with kmeans != 0.
    result = img.quantize(colors=colors_count, method=2, kmeans=0)
    if options["dither"] and options["colors"]["mode"] != "LA":
        # Use the underlying ImagingCore C library directly to apply
        # Floyd-Steinberg dithering against the quantized palette.
        # TODO: a custom algorithm, or a reorganized chain of operations,
        # might work better once the individual steps are better understood.
        dithered = img._new(
            img.im.convert(options["colors"]["mode"], Image.FLOYDSTEINBERG, result.im))
        dithered = dithered.quantize(colors=colors_count)
        return dithered
    result = result.convert(mode=options["colors"]["mode"],
                            palette=Image.ADAPTIVE,
                            colors=colors_count)
    return result
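
# Illustrative usage sketch (assumption): the nested layout of `options` below
# -- a "dither" flag and a "colors" dict with "colors" and "mode" keys -- is
# inferred from the lookups inside recolor(); the helper name and sample
# image are hypothetical.
def _demo_recolor() -> None:
    source = Image.linear_gradient('L').convert('RGB')  # 256x256 grayscale ramp as RGB
    options = {
        "dither": True,                        # apply Floyd-Steinberg dithering
        "colors": {"colors": 8, "mode": "P"},  # 8-color palette, paletted output
    }
    result = recolor(source, alpha=False, **options)
    print(result.mode, result.getcolors())  # a 'P' image using at most 8 palette entries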