def hide_image(public_img, secret_img, s=4):
    """Hide *secret_img* inside *public_img* using the low bits of each channel.

    Args:
        public_img: path to the cover image (must be RGB).
        secret_img: path to the secret image; it is resized to the cover's
            size and auto-contrasted before embedding.
        s: number of intensity levels reserved for the secret (default 4,
            i.e. the two lowest bits of each channel).

    Returns:
        The cover image with the secret embedded.
    """
    data = Image.open(public_img)
    # Normalize the secret and make it the same size as the cover.
    key = ImageOps.autocontrast(Image.open(secret_img).resize(data.size))
    for x in range(data.size[0]):
        for y in range(data.size[1]):
            p = data.getpixel((x, y))
            q = key.getpixel((x, y))
            # Clear the low bits of the cover channel, then add the secret
            # channel scaled into the [0, s) range.  Integer division keeps
            # the value a valid int pixel on Python 3 as well.
            red = p[0] - (p[0] % s) + (s * q[0] // 255)
            green = p[1] - (p[1] % s) + (s * q[1] // 255)
            blue = p[2] - (p[2] % s) + (s * q[2] // 255)
            data.putpixel((x, y), (red, green, blue))
    return data
def process(self, image):
    """Build a 2x2 Warhol-style color grid from *image*.

    Yields (status message, intermediate image) pairs so the caller can
    display progress; the final yield carries the finished composite.
    """
    yield 'Start...', image
    # Each quadrant of the result is a half-size copy.  Integer division
    # keeps the sizes ints on Python 3 as well.  (The original assigned an
    # autocontrasted copy here that was immediately overwritten — removed.)
    s_width, s_height = image.size[0] // 2, image.size[1] // 2
    simage = image.resize((s_width, s_height))
    simage = colors.convert_to_luminosity(simage)
    simage = simage.filter(ImageFilter.SMOOTH)
    yield 'Mask...', simage
    mask = ImageEnhance.Brightness(simage).enhance(2)
    mask = ImageOps.posterize(mask, 2)
    mask = ImageOps.autocontrast(mask, 20)
    yield 'Curves...', mask
    simage = ImageEnhance.Contrast(simage).enhance(2)
    yield 'posterize...', simage
    simage = ImageOps.posterize(simage, 2)
    yield 'Red...', simage
    # One solid-color variant per quadrant.
    red_img = self._apply_color(simage, (255, 0, 0), mask)
    yield 'green...', red_img
    green_img = self._apply_color(simage, (0, 255, 0), mask)
    yield 'blue...', green_img
    blue_img = self._apply_color(simage, (0, 0, 255), mask)
    yield 'yellow...', blue_img
    yellow_img = self._apply_color(simage, (255, 255, 0), mask)
    yield 'Merge...', yellow_img
    image = image.copy()
    image.paste(red_img, (0, 0, s_width, s_height))
    image.paste(green_img, (s_width, 0, s_width * 2, s_height))
    image.paste(yellow_img, (0, s_height, s_width, s_height * 2))
    image.paste(blue_img, (s_width, s_height, s_width * 2, s_height * 2))
    yield 'Done', image
def process(self, image):
    """Build a 2x2 hue-shifted pop-art grid from *image*.

    Yields (status message, intermediate image) pairs for progress display;
    the final yield carries the finished composite.
    """
    yield 'Start...', image
    # Integer division keeps the sizes ints on Python 3 as well.  (The
    # original computed an autocontrasted copy that was immediately
    # overwritten by the resize — dead store removed.)
    s_width, s_height = image.size[0] // 2, image.size[1] // 2
    simage = image.resize((s_width, s_height))
    simage = simage.filter(ImageFilter.SMOOTH)
    yield 'Mask...', simage
    mask = ImageEnhance.Brightness(simage).enhance(2)
    mask = ImageOps.posterize(mask, 2)
    mask = ImageOps.autocontrast(mask, 20)
    yield 'posterize...', mask
    simage = ImageEnhance.Brightness(simage).enhance(2)
    simage = ImageOps.equalize(simage)
    simage = simage.filter(ImageFilter.SMOOTH_MORE)
    simage = ImageOps.posterize(simage, 1)
    yield 'Red...', simage
    # Each quadrant gets a different shift parameter.
    red_img = self._apply_color(simage, 0.25, mask)
    yield 'green...', red_img
    green_img = self._apply_color(simage, 0.75, mask)
    yield 'blue...', green_img
    blue_img = self._apply_color(simage, 1, mask)
    yield 'yellow...', blue_img
    yellow_img = self._apply_color(simage, 0.5, mask)
    yield 'Merge...', yellow_img
    image = image.copy()
    image.paste(red_img, (0, 0, s_width, s_height))
    image.paste(green_img, (s_width, 0, s_width * 2, s_height))
    image.paste(yellow_img, (0, s_height, s_width, s_height * 2))
    image.paste(blue_img, (s_width, s_height, s_width * 2, s_height * 2))
    yield 'Done', image
def save_ndarray_as_Image(self, ndarray, speichername, **kwargs):
    """Save a numpy array as a grayscale image file.

    Args:
        ndarray: array holding the pixel data.
        speichername: file name for the saved image.
        **kwargs:
            speicherpfad (str): target directory; defaults to
                self.speicherpfad + speichername.
            normiert (bool): if True, apply autocontrast normalisation.
            color (bool): if True, convert to RGB via self.cC.convert_to_rgb.
    """
    resultImage = PILImage.fromarray(ndarray).convert("L")
    # kwargs.get() replaces the original bare try/except-pass blocks, which
    # also swallowed unrelated errors from the import and the conversions.
    if kwargs.get('normiert'):
        import ImageOps  # local import kept from the original code
        resultImage = ImageOps.autocontrast(resultImage, cutoff=0)
    if kwargs.get('color'):
        resultImage = self.cC.convert_to_rgb(resultImage)
    if 'speicherpfad' in kwargs:
        speicherpfad = kwargs['speicherpfad']
    else:
        # NOTE(review): the default appends the file name to the base path
        # and the file name is appended again below — looks suspicious but
        # is preserved from the original; confirm intent.
        speicherpfad = self.speicherpfad + speichername
    import os
    if not os.path.exists(speicherpfad):
        os.makedirs(speicherpfad)
    resultImage.save(speicherpfad + speichername)
def geticonshape(shape, dim_horiz, dim_vert):
    """Load the '<shape>_icon.png' file, normalize it to grayscale with
    full contrast, and scale it to (dim_horiz, dim_vert)."""
    icon = Image.open(shape + '_icon.png')
    icon = ImageOps.autocontrast(ImageOps.grayscale(icon))
    return icon.resize((dim_horiz, dim_vert), Image.BILINEAR)
def process(self, image):
    """Build a 2x2 duotone pop-art grid from *image*.

    Yields (status message, intermediate image) pairs; the final yield
    carries the finished composite.
    """
    yield 'Start...', image
    # Integer division keeps the sizes ints on Python 3 as well.
    s_width, s_height = image.size[0] // 2, image.size[1] // 2
    simage = image.resize((s_width, s_height))
    yield 'posterize...', simage
    simage = simage.convert("L")
    yield 'posterize 2...', simage
    simage = ImageEnhance.Brightness(simage).enhance(2)
    simage = ImageOps.equalize(simage)
    yield 'posterize 3...', simage
    simage = ImageOps.autocontrast(simage, 20)
    yield 'posterize 4...', simage
    simage = ImageOps.posterize(simage, 1)
    # Point-lookup table (256 entries per RGB band) mapping the two
    # remaining gray levels to a red/green duotone.  Renamed from `colors`
    # to avoid shadowing the `colors` helper module used elsewhere here.
    palette = []
    palette.extend([255] * 128)  # R band: dark input -> 255
    palette.extend([0] * 128)    # R band: light input -> 0
    palette.extend([0] * 128)    # G band: dark input -> 0
    palette.extend([255] * 128)  # G band: light input -> 255
    palette.extend([0] * 256)    # B band: always 0
    simage = simage.convert("RGB").point(palette)
    yield 'Red...', simage
    red_img = self._apply_color(simage, 0.82)
    yield 'green...', red_img
    green_img = self._apply_color(simage, 0.75)
    yield 'blue...', green_img
    blue_img = self._apply_color(simage, 0)
    yield 'yellow...', blue_img
    yellow_img = self._apply_color(simage, 0.6)
    yield 'Merge...', yellow_img
    image = image.copy()
    image.paste(red_img, (0, 0, s_width, s_height))
    image.paste(green_img, (s_width, 0, s_width * 2, s_height))
    image.paste(yellow_img, (0, s_height, s_width, s_height * 2))
    image.paste(blue_img, (s_width, s_height, s_width * 2, s_height * 2))
    yield 'Done', image
def sharpenImage(img, enhance_factor = 1.7):
    # Accepts an image and returns a more detailed image with higher
    # contrast given by the `enhance_factor`, such that 1.7 gives an
    # image with 70% more contrast
    #
    # NOTE(review): `enhance_factor` is currently unused — the function
    # applies DETAIL filtering plus full autocontrast regardless of its
    # value.  Confirm whether ImageEnhance.Contrast(img).enhance(factor)
    # was intended here.
    filtered = img.filter(ImageFilter.DETAIL)
    contrast = ImageOps.autocontrast(filtered)
    return contrast
def create_photo_strips():
    '''Using the original images, build a color and a black-and-white photo
    strip, save both under their folders, and write the printable page.

    Returns:
        The file name of the image prepared for printing ('to_print.png').
    '''
    # The strip is built sideways (rotated 270 deg), hence width/height swap.
    strip = Image.new('RGB',
                      (PHOTO_HEIGHT + (BORDER_WIDTH * 2) + FOOTER_HEIGHT,
                       (PHOTO_WIDTH * PHOTO_COUNT) + (BORDER_WIDTH * 2)),
                      BG_COLOR)
    for i in range(PHOTO_COUNT):
        photo = Image.open(PHOTO_FOLDER + str(i + 1) + '.' + PHOTO_FILE_EXTENSION)
        photo = ImageOps.fit(photo, (PHOTO_WIDTH, PHOTO_HEIGHT), centering=(0.5, 0.5))
        photo = photo.rotate(270)
        photo = ImageOps.autocontrast(photo, cutoff=0)
        strip.paste(photo, (FOOTER_HEIGHT, (i * PHOTO_WIDTH) + (i * BORDER_WIDTH)))
    # append footer
    font = ImageFont.truetype('font_1.ttf', 40)
    footer_img = Image.new("RGB",
                           ((PHOTO_COUNT * PHOTO_WIDTH) + (PHOTO_COUNT * BORDER_WIDTH),
                            FOOTER_HEIGHT),
                           BG_COLOR)
    draw = ImageDraw.Draw(footer_img)
    draw.text((220, 40), "ashley & david's wedding, july 28, 2012", font=font, fill=(100, 100, 0))
    strip.paste(footer_img.rotate(270), (0, 0))
    # Compute the timestamp once so the color and greyscale strips are
    # guaranteed to share the same name (two calls could straddle a tick).
    timestamp = current_timestamp()
    strip.save(COLOR_FOLDER + timestamp + '.png', PHOTO_FORMAT)
    ImageOps.grayscale(strip).save(GREYSCALE_FOLDER + timestamp + '.png', PHOTO_FORMAT)
    strip_to_print = Image.new('RGB', (PAGE_WIDTH, PAGE_HEIGHT), BG_COLOR)
    strip_to_print.paste(ImageOps.grayscale(strip), (-BORDER_WIDTH, -BORDER_WIDTH))
    strip_to_print.save('to_print.png', PHOTO_FORMAT)
    return 'to_print.png'
def render(self,im):
    """Render a PIL image to the pygame screen, applying the viewer's
    optional grayscale/posterize transforms and grid overlay."""
    im2 = im.copy()
    if self.grayscale:
        # Collapse to luma, then re-expand to RGB via a black->white ramp.
        im2 = ImageOps.grayscale(im2)
        im2 = ImageOps.colorize(im2,"black","white")
    if self.posterized:
        # Stretch contrast before and after posterizing so the reduced
        # bit depth spans the full intensity range.
        im2 = ImageOps.autocontrast(im2)
        im2 = ImageOps.posterize(im2,self.posterize_bits)
        im2 = ImageOps.autocontrast(im2)
    if self.gridlines > 0:
        im2 = draw_grid(im2,self.gridlines)
    # tostring() is the legacy PIL spelling of tobytes().
    data = im2.tostring()
    surface = pygame.image.fromstring(data, im2.size, im2.mode)
    # `screen` is a module-level pygame surface.
    screen.blit(surface,(0,0))
    pygame.display.flip()
def centerMoving(self,rec_time=0.5):
    """Center on the area with the most motion.

    Captures up to 5 motion frames; for each, raises a binary threshold
    until the detected motion fits inside a small bounding box, then moves
    the stage so that box is centered.  Returns the [x, y] displacement in
    microns, or [0, 0] if no suitable motion was found.
    """
    bbox=None # Bounding box of moving region
    for i in range(5): # Try 5 times to get a frame with motion in it
        motion = self.detectMotion(rec_time=rec_time)
        #increase threshold until motion is confined to small bounding box
        threshold = 0
        area_thresh = 7500  # max acceptable bounding-box area (pixels)
        motion = ImageOps.autocontrast(motion) # Scales intensity to 0-255 (max and min possible)
        for j in range(50): # Increase threshold, apply threshold and filter
            threshold = 5*j
            # NOTE: the lambda parameter deliberately named i here shadows
            # the retry counter only inside point().
            thresholded = motion.point(lambda i: 255*(i > threshold))
            if i > 1:
                filtered = thresholded
            else:
                filtered = thresholded.filter(ImageFilter.MinFilter(3)) #filter the first couple times
            self.window.displayEngine3(motion)
            self.window.displayEngine4(filtered)
            # Check size of bounding box. If small enough, you're done
            bbox = filtered.getbbox()
            if bbox:
                area = (bbox[3]-bbox[1])*(bbox[2]-bbox[0])
                if area < area_thresh:
                    break
        if bbox:
            break
    if not bbox:
        return [0,0] # No luck after 5 tries
    # Offsets translate pixel coordinates so the image center is (0, 0);
    # presumably the camera frame is 640x480 — TODO confirm.
    x_offset = -320
    y_offset = -240
    # Move stage to center of bounding box
    x_disp = self.slide.scales['XPIX2MICR']*((bbox[0]+bbox[2])/2.+x_offset)
    y_disp = self.slide.scales['YPIX2MICR']*((bbox[1]+bbox[3])/2.+y_offset)
    self.slide.displace([x_disp,y_disp,0])
    return [x_disp,y_disp]
def process(self, image):
    """Build a 3x3 grid of hue-rotated duotone copies of *image*.

    Yields (status message, intermediate image) pairs; the final yield
    carries the finished composite.
    """
    yield 'Start...', image
    # Python 2 integer division: each cell is a third of the source size.
    s_width, s_height = image.size[0] / 3, image.size[1] / 3
    simage = image.resize((s_width, s_height))
    yield 'posterize...', simage
    simage = simage.convert("L")
    simage = ImageEnhance.Brightness(simage).enhance(2)
    simage = ImageOps.equalize(simage)
    simage = ImageOps.autocontrast(simage, 20)
    simage = ImageOps.posterize(simage, 1)
    yield 'colors...', simage
    # Point-lookup table (256 entries per RGB band) mapping the two
    # remaining gray levels to a red/green duotone.
    colormap = []
    colormap.extend([255] * 128)  # R band: dark input -> 255
    colormap.extend([0] * 128)    # R band: light input -> 0
    colormap.extend([0] * 128)    # G band: dark input -> 0
    colormap.extend([255] * 128)  # G band: light input -> 255
    colormap.extend([0] * 256)    # B band: always 0
    simage = simage.convert("RGB").point(colormap)
    image = image.copy()
    for x in xrange(3):
        for y in xrange(3):
            yield 'Img %d...' % (x * 3 + y + 1), simage
            # Each cell gets its own hue rotation in [0.0, ~0.89].
            simg = colors.apply_hue_lightness_saturation(simage, ((x * 3 + (2.9 - y)) / 10.), 0, 1, True)
            image.paste(simg, (x * s_width, y * s_height, (x + 1) * s_width, (y + 1) * s_height))
    yield 'Done', image
def tratarImagem(img):
    """Clean up an image array: non-linear 3x3 maximum filter, median
    smoothing, then autocontrast.  Takes and returns a numpy array."""
    img = naolinear(img, (3, 3), 'maximo')  # still a bit slow
    pil_img = Image.fromarray(img)
    pil_img = pil_img.filter(ImageFilter.MedianFilter)
    pil_img = ImageOps.autocontrast(pil_img)
    return array(pil_img)
def tratarImagem(img):
    """Apply a 3x3 non-linear maximum filter, a median filter and
    autocontrast to the array image, returning the cleaned array."""
    filtered = naolinear(img, (3, 3), 'maximo')  # the non-linear pass is slow
    im = Image.fromarray(filtered)
    im = im.filter(ImageFilter.MedianFilter)
    im = ImageOps.autocontrast(im)
    return array(im)
def ConvertToBitifiedImage(file_location,
                           thumbnail_width=settings.THUMBNAIL_WIDTH,
                           final_width=settings.FINAL_WIDTH,
                           bit_depth=settings.BIT_DEPTH):
    """Loads image from filename, generates bitified (pixel-art) Image and
    returns it."""
    # Read the format BEFORE convert(): Image.convert() returns a new image
    # whose .format is None, so the original checks could never match.
    # PIL also reports JPEG files as 'JPEG', never 'JPG'.
    image = Image.open(file_location)
    image_format = image.format
    image = image.convert('RGB')
    if image_format == 'JPEG':
        image = ImageOps.autocontrast(image)
    elif image_format == 'GIF':
        # Need to equalize to change gif to something that can be converted.
        image = ImageOps.equalize(image)
    # Blur image and reduce number of colors to bit_depth
    image = image.convert('P', palette=Image.ADAPTIVE, colors=bit_depth).convert('RGB')
    processed_image = image.copy()
    # Keep only most common color in large area
    processed_image = processed_image.filter(ImageFilter.BLUR)  # Low-pass filter
    # Snap near-black pixels to pure black (the original's else-branch
    # rewrote the pixel with its own value — a no-op, removed).
    for x in xrange(processed_image.size[0]):
        for y in xrange(processed_image.size[1]):
            r, g, b = processed_image.getpixel((x, y))
            if r < 50 and g < 50 and b < 50:
                processed_image.putpixel((x, y), (0, 0, 0))
    # Set final width to at most final_width
    width, height = processed_image.size
    final_width = min(width, final_width)
    # Add border to image before shrinking
    processed_image = AddBorderToImage(processed_image)
    thumbnail_size = thumbnail_width, thumbnail_width * height / width
    final_size = final_width, final_width * height / width
    # Shrink image while keeping aspect ratio
    processed_image.thumbnail(thumbnail_size, Image.ADAPTIVE)
    # Resize back, resulting in pixelized image
    processed_image = processed_image.resize(final_size, Image.NEAREST)
    # Back to RGB format
    processed_image = processed_image.convert('RGB', palette=Image.ADAPTIVE, colors=bit_depth)
    # Add a random quote to the image
    processed_image = AddRandomTextToImage(processed_image)
    return processed_image
def ConvertToBitifiedImage(file_location,
                           thumbnail_width=settings.THUMBNAIL_WIDTH,
                           final_width=settings.FINAL_WIDTH,
                           bit_depth=settings.BIT_DEPTH):
    """Loads image from filename, generates bitified (pixel-art) Image and
    returns it."""
    # Read the format BEFORE convert(): Image.convert() returns a new image
    # whose .format is None, so the original checks could never match.
    # PIL also reports JPEG files as 'JPEG', never 'JPG'.
    image = Image.open(file_location)
    image_format = image.format
    image = image.convert('RGB')
    if image_format == 'JPEG':
        image = ImageOps.autocontrast(image)
    elif image_format == 'GIF':
        # Need to equalize to change gif to something that can be converted.
        image = ImageOps.equalize(image)
    # Blur image and reduce number of colors to bit_depth
    image = image.convert('P', palette=Image.ADAPTIVE, colors=bit_depth).convert('RGB')
    processed_image = image.copy()
    # Keep only most common color in large area
    processed_image = processed_image.filter(ImageFilter.BLUR)  # Low-pass filter
    # Snap near-black pixels to pure black (the original's else-branch
    # rewrote the pixel with its own value — a no-op, removed).
    for x in xrange(processed_image.size[0]):
        for y in xrange(processed_image.size[1]):
            r, g, b = processed_image.getpixel((x, y))
            if r < 50 and g < 50 and b < 50:
                processed_image.putpixel((x, y), (0, 0, 0))
    # Set final width to at most final_width
    width, height = processed_image.size
    final_width = min(width, final_width)
    # Add border to image before shrinking
    processed_image = AddBorderToImage(processed_image)
    thumbnail_size = thumbnail_width, thumbnail_width * height / width
    final_size = final_width, final_width * height / width
    # Shrink image while keeping aspect ratio
    processed_image.thumbnail(thumbnail_size, Image.ADAPTIVE)
    # Resize back, resulting in pixelized image
    processed_image = processed_image.resize(final_size, Image.NEAREST)
    # Back to RGB format
    processed_image = processed_image.convert('RGB', palette=Image.ADAPTIVE, colors=bit_depth)
    # Add a random quote to the image
    processed_image = AddRandomTextToImage(processed_image)
    return processed_image
def ndarray2image(data, filename):
    """Write the content of a float ndarray as an image file.

    The data is rescaled to the uint8 range, autocontrasted, inverted and
    saved under *filename*.
    """
    # Scale image to uint8.
    data = (255.0 / data.max() * (data - data.min())).astype(np.uint8)
    # BUG FIX: the original called raw.tostring() on the undefined name
    # `raw` (NameError at runtime); the intended buffer is `data`.
    im = Image.fromstring("L", (data.shape[1], data.shape[0]), data.tostring())
    im = ImageOps.autocontrast(im)
    im = ImageOps.invert(im)
    im.save(filename)
def hide_image(public_img, secret_img, s=4):
    """Embed *secret_img* in the low-order bits of *public_img*.

    Args:
        public_img: path to the cover image (must be RGB).
        secret_img: path to the secret image.
        s: number of intensity levels reserved for the secret (default 4).

    Returns:
        The cover image with the secret embedded.
    """
    data = Image.open(public_img)
    # Resize the secret to the cover's size so getpixel never goes out of
    # range when the two images differ (the original assumed equal sizes,
    # unlike its sibling implementations), then stretch its contrast.
    key = ImageOps.autocontrast(Image.open(secret_img).resize(data.size))
    for x in range(data.size[0]):
        for y in range(data.size[1]):
            p = data.getpixel((x, y))
            q = key.getpixel((x, y))
            # Integer division keeps channel values ints on Python 3 too.
            red = p[0] - (p[0] % s) + (s * q[0] // 255)
            green = p[1] - (p[1] % s) + (s * q[1] // 255)
            blue = p[2] - (p[2] % s) + (s * q[2] // 255)
            data.putpixel((x, y), (red, green, blue))
    return data
def hide_image(public_image, secret_image, s=4):
    """Hide *secret_image* inside *public_image* and save the result.

    The lowest bits of each channel of the cover image are replaced by a
    scaled-down copy of the secret image.  The combined image is written
    to 'python-secret.png' and also returned.

    Args:
        public_image: path to the cover image (must be RGB).
        secret_image: path to the secret image; resized to the cover size.
        s: number of intensity levels reserved for the secret (default 4).
    """
    data = Image.open(public_image)
    key = ImageOps.autocontrast(Image.open(secret_image).resize(data.size))
    for x in range(data.size[0]):
        for y in range(data.size[1]):
            p = data.getpixel((x, y))
            q = key.getpixel((x, y))
            # Integer division keeps channel values ints on Python 3 too.
            red = p[0] - (p[0] % s) + (s * q[0] // 255)
            green = p[1] - (p[1] % s) + (s * q[1] // 255)
            blue = p[2] - (p[2] % s) + (s * q[2] // 255)
            data.putpixel((x, y), (red, green, blue))
    data.save("python-secret.png")
    return data
def autocontrast(image, amount=100.0, cutoff=0):
    """Apply an autocontrast filter, preserving any alpha channel.

    - amount: blend strength in percent (0-100); values below 100 blend
      the contrasted result back with the original image.
    - cutoff: percent of the histogram to ignore at each end (passed
      through to ImageOps.autocontrast).
    """
    image = imtools.convert_safe_mode(image)
    # Autocontrast cannot operate on an alpha channel; strip it first.
    if imtools.has_transparency(image):
        im = imtools.remove_alpha(image)
    else:
        im = image
    contrasted = ImageOps.autocontrast(im, cutoff)
    # Restore the original alpha on the contrasted result.
    if imtools.has_transparency(image):
        imtools.put_alpha(contrasted, imtools.get_alpha(image))
    if amount < 100:
        return imtools.blend(image, contrasted, amount / 100.0)
    return contrasted
def proceed_image(self, image, params):
    """Apply a sepia-tone palette to *image* and return it as RGB.

    *params* is accepted for interface compatibility but is not used.
    """
    if image.mode != "L":
        image = image.convert("L")
    # Build the sepia palette (linear ramp toward a warm highlight color)
    # and attach it to the grayscale image.
    palette = self.make_linear_ramp((255, 240, 192))
    image.putpalette(palette)
    # Convert back to RGB so the result can be saved as JPEG
    # (alternatively, save it in PNG or similar).
    return image.convert("RGB")
def arrayForImage(uri):
    """Fetch the thumbnail for *uri* and return a small weighted HSV array.

    The result is a (rows, cols, 3) float array whose hue, saturation and
    value channels are each scaled by a per-channel strength weight.
    """
    resolution = 15
    # hue / saturation / value strengths; hue is also folded into [0, 1].
    weights = [1.0 / 360, .5, .4]
    thumb = restkit.Resource(absoluteSite(uri)).get(size='thumb').body_string()
    img = Image.open(StringIO(thumb))
    # Blend 80% of an autocontrasted copy into the original.
    img = Image.blend(img, ImageOps.autocontrast(img, cutoff=5), .8)
    img = img.resize((resolution, int(resolution * 3 / 4)), Image.ANTIALIAS)
    arr = numpy.asarray(img, dtype='f') / 255
    arr.shape = img.size[1], img.size[0], 3
    return hsv_from_rgb(arr) * weights
def arrayForImage(uri):
    """Download the thumbnail of *uri* and convert it to a weighted HSV
    feature array of shape (rows, cols, 3)."""
    res = 15
    url = absoluteSite(uri)
    body = restkit.Resource(url).get(size='thumb').body_string()
    pic = Image.open(StringIO(body))
    # Mix in 80% of an autocontrasted copy, then shrink to feature size.
    pic = Image.blend(pic, ImageOps.autocontrast(pic, cutoff=5), .8)
    pic = pic.resize((res, int(res * 3 / 4)), Image.ANTIALIAS)
    data = numpy.asarray(pic, dtype='f') / 255
    data.shape = pic.size[1], pic.size[0], 3
    hsv = hsv_from_rgb(data)
    # Fold hue into [0, 1] and damp saturation/value.
    return hsv * [1.0 / 360, .5, .4]
def i2a(im, fontsize):
    """turn an image into ascii with colors

    Each downsampled pixel becomes one full-width glyph colored like the
    pixel; the HTML fragments are joined into a single string.
    """
    im = im.convert('RGB')
    im = ImageOps.autocontrast(im)
    # One character cell per font-sized block of pixels; integer division
    # keeps the thumbnail size integral on Python 3 as well.
    im.thumbnail((im.size[0] // fontsize, im.size[1] // fontsize))
    parts = []
    for y in range(im.size[1]):
        for x in range(im.size[0]):
            c = im.getpixel((x, y))
            s = makeHTMLascii('翻', c)
            # Row-break marker: `x % im.size[0] == 0` only holds at x == 0,
            # so each row after the first starts with a <br/> suffix on its
            # first cell (behavior preserved from the original).
            if x % im.size[0] == 0 and y > 0:
                s = s + '<br/>'
            parts.append(s)
    # Single join instead of the original quadratic string concatenation.
    return ''.join(parts)
def preprocess_image(f):
    """Clean up a captcha image (raw bytes) for OCR and return PNG bytes.

    Pipeline: autocontrast -> brighten -> grayscale -> binarize via a
    double inversion -> noise removal.
    """
    img = Image.open(StringIO(f))
    img = ImageOps.autocontrast(img)
    img = brighten(img, 2.1)
    img = ImageOps.grayscale(img)
    img = ImageOps.invert(img)
    # Push every non-black pixel close to white, then flip back.
    img = img.point(lambda v: 250 if v > 1 else v)
    img = ImageOps.invert(img)
    img = clear_noise(img, 254, 1, 255)
    buf = StringIO()
    img.save(buf, "png")
    result = buf.getvalue()
    buf.close()
    return result
def dem2img(self, path, file):
    """Render an ASCII LIDAR DEM as a false-color JPEG for the web UI.

    Returns the URL path of the image that was written.
    """
    # Source LIDAR DEM file.
    source = path + file
    # Load the ASCII grid (6 header rows) into a numpy array and turn it
    # into a grayscale PIL image.
    arr = np.loadtxt(source, skiprows=6)
    im = Image.fromarray(arr).convert('L')
    # Spread the elevation values across the full intensity range.
    im = ImageOps.equalize(im)
    im = ImageOps.autocontrast(im)
    # Build a 256-entry palette sweeping hue from blue (h = .67) toward
    # red in HSV space.
    palette = []
    h, s, v = .67, 1, 1
    step = h / 256.0
    for _ in range(256):
        rf, gf, bf = colorsys.hsv_to_rgb(h, s, v)
        palette.extend([int(rf * 255), int(gf * 255), int(bf * 255)])
        h -= step
    im.putpalette(palette)
    im = im.convert('RGB')
    # Save under the static images folder and return the URL path.
    name = file.split('.')[0] + '.jpg'
    img_path = os.path.dirname(os.getcwd()) + '/webGIS/static/showLidar/images/' + name
    im.save(img_path)
    return '/static/showLidar/images/' + name
def showResult(n, result, dims, k=2):
    """Display a cellular-automaton result (rule *n*) in the pygame window.

    Args:
        n: rule number, shown as the caption 'Rule <n>'.
        result: flat pixel data for an image of size *dims*.
        dims: (width, height) of the result image.
        k: unused; kept for interface compatibility.
    """
    i = Image.new("RGB", dims)
    i.putdata(result)
    i = ImageOps.autocontrast(i)
    screen = pygame.display.get_surface()
    # BUG FIX: the original tested `not screen.get_width == dims[1]`, which
    # compares the bound method object to an int and is always True, so the
    # screen was re-initialized on every call.  Call the method and compare
    # width against the image width.
    if screen.get_width() != dims[0]:
        initScreen(dims[0], dims[1])
    # The original also built the pygame image twice (one copy unused).
    screen.blit(pil_to_pygame_img(i), (0, 0))
    if pygame.font:
        font = pygame.font.Font(None, 36)
        text = font.render('Rule ' + str(n), 1, (255, 255, 10))
        textpos = text.get_rect()
        # NOTE(review): centering x on the screen rect's centery looks
        # accidental but is preserved from the original — confirm.
        textpos.centerx = screen.get_rect().centery
        screen.blit(text, textpos)
    pygame.display.flip()
def solve_captcha(path):
    """
    Convert a captcha image into a text, using PyTesseract Python-wrapper
    for Tesseract

    Arguments:
        path (str): path to the image to be processed
    Return:
        'textualized' image
    """
    image = Image.open(path).convert('RGB')
    image = ImageOps.autocontrast(image)
    # The image is round-tripped through a pid-scoped PNG file (presumably
    # to normalize the format for Tesseract).  Ensure the temp file is
    # removed even when OCR fails — the original leaked one per call.
    filename = "{}.png".format(os.getpid())
    image.save(filename)
    try:
        text = pytesseract.image_to_string(Image.open(filename))
    finally:
        os.remove(filename)
    return text
def contrast(img, amount="auto"):
    """
    Modify image contrast

    Args:
        img (numpy array): Input image array
        amount (float or string): Either a number (e.g. 1.3) or 'auto'
    """
    pilIMG = Image.fromarray(img)
    # BUG FIX: the original used `amount is "auto"`, an identity comparison
    # that relies on string interning and is not guaranteed to hold.
    if amount == "auto":
        pilEnhancedIMG = ImageOps.autocontrast(pilIMG, cutoff=0)
        return numpy.asarray(pilEnhancedIMG)
    pilContrast = ImageEnhance.Contrast(pilIMG)
    pilContrasted = pilContrast.enhance(amount)
    return numpy.asarray(pilContrasted)
def contrast(img, amount='auto'):
    """
    Modify image contrast

    Args:
        img (numpy array): Input image array
        amount (float or string): Either a number (e.g. 1.3) or 'auto'
    """
    pilIMG = Image.fromarray(img)
    # BUG FIX: `amount is 'auto'` compared object identity, which depends on
    # string interning; use equality.  Trailing semicolons removed.
    if amount == 'auto':
        pilEnhancedIMG = ImageOps.autocontrast(pilIMG, cutoff=0)
        return numpy.asarray(pilEnhancedIMG)
    pilContrast = ImageEnhance.Contrast(pilIMG)
    pilContrasted = pilContrast.enhance(amount)
    return numpy.asarray(pilContrasted)
def contrast(img, amount='auto'):
    """
    Modify image contrast

    Args:
        img (np array): Input image array
        amount (float or string): Either a number (e.g. 1.3) or 'auto'
    """
    pilIMG = Image.fromarray(img)
    # BUG FIX: `amount is 'auto'` compared object identity, which depends on
    # string interning; use equality.  Trailing semicolons removed.
    if amount == 'auto':
        pilEnhancedIMG = ImageOps.autocontrast(pilIMG, cutoff=0)
        return np.asarray(pilEnhancedIMG)
    pilContrast = ImageEnhance.Contrast(pilIMG)
    pilContrasted = pilContrast.enhance(amount)
    return np.asarray(pilContrasted)
def operation(self, img):
    """Returns a version of the *img* with Sepia applied for a vintage look."""
    orig_mode = img.mode
    # Work in grayscale; stretch the tonal range before toning.
    toned = img if orig_mode == "L" else img.convert("L")
    toned = ImageOps.autocontrast(toned)
    # apply sepia palette
    toned.putpalette(self.sepia_palette)
    # convert back to the caller's original mode
    if orig_mode != "L":
        toned = toned.convert(orig_mode)
    return toned
def enhance(pixbuf, brightness=1.0, contrast=1.0, saturation=1.0,
            sharpness=1.0, autocontrast=False):
    """Return a new pixbuf with brightness/contrast/saturation/sharpness
    adjustments applied; a factor of 1.0 leaves that aspect unchanged.

    When <autocontrast> is True it takes precedence over <contrast>, but
    only for modes supported by ImageOps.autocontrast (L and RGB).
    """
    img = pixbuf_to_pil(pixbuf)
    if brightness != 1.0:
        img = ImageEnhance.Brightness(img).enhance(brightness)
    if autocontrast and img.mode in ("L", "RGB"):
        img = ImageOps.autocontrast(img, cutoff=0.1)
    elif contrast != 1.0:
        img = ImageEnhance.Contrast(img).enhance(contrast)
    if saturation != 1.0:
        img = ImageEnhance.Color(img).enhance(saturation)
    if sharpness != 1.0:
        img = ImageEnhance.Sharpness(img).enhance(sharpness)
    return pil_to_pixbuf(img)
def scrape_loop(callback=lambda image: None):
    """Poll the screen region (x, y, w, h) forever.

    Whenever the grabbed pixels change, *callback* is invoked with the new
    image.  Once the region is stable between two grabs, it is OCR'd and
    any newly added text lines are printed.
    """
    prevData = None
    prevText = []
    while True:
        im = ImageGrab.grab((x, y, x+w, y+h))
        data = list(im.getdata())
        if data != prevData:
            # Screen still changing: remember it and wait for it to settle.
            prevData = data
            callback(im)
            continue
        # Binarize for OCR: full-range autocontrast, then 1-bit conversion.
        bw = ImageOps.autocontrast(im, 0).convert('1')
        text = list(getText(bw))
        # Print only lines added since the previous stable capture.
        for diff in difflib.ndiff(prevText, text):
            if diff.startswith("+"):
                print diff
        prevText = text
def topics2image2(ar, zoom = 1):
    """Render a list of flat topic vectors as a horizontal strip of
    grayscale tiles (one square tile per topic).

    Args:
        ar: list of topic vectors; each must have a square number of
            entries with values in [0, 1].
        zoom: integer magnification applied to every tile.

    Returns:
        A PIL image containing all tiles side by side.
    """
    square_size = int(math.sqrt(len(ar[0])))
    # Strip canvas: tiles plus 5px gutters, light gray (200) background.
    topics = Image.new("L", ((square_size*zoom+5) * len(ar), square_size*zoom+10), 200)
    imzoom = numpy.ones((zoom, zoom), numpy.uint8)
    # (Unused `maxval` array from the original removed.)
    for topic in range(len(ar)):
        pixels = numpy.zeros((square_size, square_size), numpy.uint8)
        for i in range(square_size):
            # List comprehension instead of map(lambda ...) so the row
            # assignment also works on Python 3, where map is an iterator.
            pixels[i, :] = [255 * x for x in ar[topic][i*square_size:(i+1)*square_size]]
        pixels = numpy.kron(pixels, imzoom)  # nearest-neighbour zoom
        pixels = (255 - pixels)              # invert: high weight = dark
        img = Image.fromarray(pixels, "L")
        img = ImageOps.autocontrast(img)
        img = ImageOps.colorize(img, (0, 0, 0), (255, 255, 255))
        topics.paste(img, (1 + (square_size*zoom+5)*topic, 5))
    return topics
def readNumber( cls, frame, cord, invert=False, minval=200, save=False, zoom=2, zmethod=Image.BICUBIC ):
    """OCR the number inside the crop box *cord* of *frame*.

    The crop is grayscaled, optionally inverted, autocontrasted; near-white
    pixels are snapped to pure white, and the result is zoomed before being
    handed to Tesseract.

    Args:
        frame: source PIL image.
        cord: (left, upper, right, lower) crop box.
        invert: invert the crop first (for light-on-dark text).
        minval: channel threshold above which a pixel is forced to white.
        save: also write a debug snapshot under tmp/.
        zoom: magnification factor applied before OCR.
        zmethod: PIL resampling filter used for the zoom.

    Returns:
        The recognized text, stripped of surrounding whitespace.
    """
    image = ImageOps.grayscale( frame.crop( cord ) )
    image = image.convert( 'RGB' )
    if invert:
        image = ImageOps.invert( image )
    image = ImageOps.autocontrast( image )
    pixdata = image.load()
    # Snap any pixel whose channels all exceed minval to pure white to
    # clean up the background for Tesseract.
    for y in xrange( image.size[1] ):
        for x in xrange( image.size[0] ):
            pix = pixdata[x, y]
            if pix[0] > minval and pix[1] > minval and pix[2] > minval:
                pixdata[x, y] = ( 255, 255, 255 )
    image = image.resize( ( ( cord[2] - cord[0] ) * zoom, ( cord[3] - cord[1] ) * zoom ), zmethod )
    image = image.convert( 'RGB' )
    if save:
        # NOTE(review): the path mixes '\\tmp' with '\s' — the single
        # backslash before 's' is a literal backslash + 's', so the file
        # name may not be what was intended; confirm (os.path.join would
        # be safer).  Left untouched to preserve behavior.
        image.save( '%s\\tmp\snapshot__%d.png' % ( os.getcwd(), int( time.time() ) ), 'PNG' )
    return image_to_string( image ).strip()
def i2m(im, fontsize):
    """turn an image into ascii like matrix"""
    # Map pixel brightness to one of 15 glyphs rendered in shades of green.
    im = im.convert('L')
    im = ImageOps.autocontrast(im)
    im.thumbnail((im.size[0] / fontsize, im.size[1] / fontsize))
    string = ''
    # 16 green levels: (0, 0, 0), (0, 17, 0), ..., (0, 255, 0).
    colors = [(0, i, 0) for i in range(0, 256, 17)]
    # NOTE(review): words[3*i:3*(i+1)] picks 3 units per glyph, which
    # matches UTF-8 byte strings under Python 2; under Python 3 it would
    # take 3 characters instead — confirm the target interpreter.
    words = '据说只有到了十五字才会有经验的'
    for y in range(im.size[1]):
        for x in range(im.size[0]):
            p = im.getpixel((x, y))
            # Find the highest intensity bucket at or below p.
            i = 14
            while i >= 0:
                if p >= i * 17:
                    s = makeHTMLascii(words[3 * i:3 * (i + 1)], colors[i])
                    break
                i -= 1
            # Row-break marker: only triggers at x == 0 of rows after the
            # first, so the <br/> is attached to the row's first cell.
            if x % im.size[0] == 0 and y > 0:
                s = s + '<br/>'
            string = string + s
    return string
def _apply_sepia_filter(image):
    """
    Apply a sepia-tone filter to the given PIL Image

    Based on code at: http://effbot.org/zone/pil-sepia.htm
    """
    # make sepia ramp (tweak color as necessary)
    sepia_palette = _make_linear_ramp((255, 240, 192))
    original_mode = image.mode
    # work in grayscale, boosting contrast so the ramp spans the range
    work = image if original_mode == "L" else image.convert("L")
    work = ImageOps.autocontrast(work)
    work.putpalette(sepia_palette)
    # restore the caller's original mode
    if original_mode != "L":
        work = work.convert(original_mode)
    return work
def enhance(pixbuf, brightness=1.0, contrast=1.0, saturation=1.0,
            sharpness=1.0, autocontrast=False):
    """Apply the requested enhancement operations to <pixbuf> and return
    the modified pixbuf.  A factor of 1.0 leaves that aspect untouched.
    If <autocontrast> is set it overrides <contrast>, but only for image
    modes ImageOps.autocontrast supports (L and RGB).
    """
    pil_im = pixbuf_to_pil(pixbuf)
    if brightness != 1.0:
        pil_im = ImageEnhance.Brightness(pil_im).enhance(brightness)
    # Autocontrast wins over a manual contrast factor when applicable.
    if autocontrast and pil_im.mode in ('L', 'RGB'):
        pil_im = ImageOps.autocontrast(pil_im, cutoff=0.1)
    elif contrast != 1.0:
        pil_im = ImageEnhance.Contrast(pil_im).enhance(contrast)
    for factor, enhancer in ((saturation, ImageEnhance.Color),
                             (sharpness, ImageEnhance.Sharpness)):
        if factor != 1.0:
            pil_im = enhancer(pil_im).enhance(factor)
    return pil_to_pixbuf(pil_im)
def topics2image2(ar, zoom=1):
    """Render a list of flat topic vectors as a horizontal strip of
    grayscale tiles (one square tile per topic).

    Args:
        ar: list of topic vectors; each must have a square number of
            entries with values in [0, 1].
        zoom: integer magnification applied to every tile.

    Returns:
        A PIL image containing all tiles side by side.
    """
    square_size = int(math.sqrt(len(ar[0])))
    # Strip canvas: tiles plus 5px gutters, light gray (200) background.
    topics = Image.new(
        "L", ((square_size * zoom + 5) * len(ar), square_size * zoom + 10), 200)
    imzoom = numpy.ones((zoom, zoom), numpy.uint8)
    # (Unused `maxval` array from the original removed.)
    for topic in range(len(ar)):
        pixels = numpy.zeros((square_size, square_size), numpy.uint8)
        for i in range(square_size):
            # List comprehension instead of map(lambda ...) so the row
            # assignment also works on Python 3, where map is an iterator.
            pixels[i, :] = [
                255 * x for x in ar[topic][i * square_size:(i + 1) * square_size]]
        pixels = numpy.kron(pixels, imzoom)  # nearest-neighbour zoom
        pixels = (255 - pixels)              # invert: high weight = dark
        img = Image.fromarray(pixels, "L")
        img = ImageOps.autocontrast(img)
        img = ImageOps.colorize(img, (0, 0, 0), (255, 255, 255))
        topics.paste(img, (1 + (square_size * zoom + 5) * topic, 5))
    return topics
def project(self,saveit=False):
    """Stack the loaded QRSS spectrogram into one projected image.

    Slices the source into repeat-wide columns, optionally rolls each slice
    vertically to compensate for frequency drift, combines them with the
    selected method (avg/max/med), annotates the result and shows it in the
    GUI.  With saveit=True the slices and stacked image are also written to
    <folder>/stacked/.
    """
    if self.working==True:
        print "NOT READY YET"
        return
    # Keep the slice range sane: from-limit may not exceed to-limit.
    if self.lim_from.get_value()>self.lim_to.get_value():
        self.lim_from.set_value(self.lim_to.get_value())
    if self.fname==None:
        self.stat("no image loaded!")
        return
    self.stat("projecting image...")
    if saveit==True:
        try:os.mkdir(self.folder+"/stacked/")
        except:pass #folder there already
    self.proj,x,imagesUsed,verts=None,0,0,[]
    ### PER SLICE ###########################
    while x*self.repeat+self.offset<self.im.shape[1]-self.repeat:
        if x+1<self.lim_from.get_value() \
        or x+1>self.lim_to.get_value() \
        and self.adj_togglefirst1.get_active()==True:
            self.stat("ignoring slice #%02d"%x)
        else:
            self.stat("processing slice #%02d"%x)
            startx=int(x*self.repeat+self.offset)
            im2=self.im[:,startx:startx+int(self.repeat)]
            if self.stackvert.get_active():
                verts.append(Image.fromarray(numpy.uint8(im2)))
            # Compensate for drift by rolling each slice a bit further.
            if x>0 and self.vertshift<>0:
                im2=numpy.roll(im2,int(-self.vertshift*x),axis=0)
            if self.proj==None:
                self.proj=im2
            else:
                if self.method=="avg": self.proj=self.proj+im2
                if self.method=="max": self.proj=numpy.maximum(self.proj,im2)
                # NOTE(review): "med" uses minimum, not a median — confirm.
                if self.method=="med": self.proj=scipy.minimum(self.proj,im2)
            if saveit==True and self.toggle_saveslices.get_active()==True:
                self.stat("saving slice #%02d"%x)
                scipy.misc.imsave(self.folder+"/stacked/%02d_"%x+self.fnameonly,im2)
            imagesUsed+=1
        x+=1
    ##########################################
    if self.lim_to.get_value()>x:
        self.lim_to.set_value(x)
    if imagesUsed==0:
        print "BLANK"
        return
    self.lbl_totimgs.set_text("total images: %d (%d used)"%(x,imagesUsed))
    if self.method=="avg":
        self.proj=self.proj/imagesUsed
    self.proj=self.proj.astype(int)
    im = Image.fromarray(numpy.uint8(self.proj))
    if self.adj_toggleAutoContrast.get_active()==True:
        im = ImageOps.autocontrast(im, cutoff=0)
    if self.stackvert.get_active():
        # Stack every individual slice above the projection, separated by
        # yellow divider lines.
        imstack=Image.new("RGB",(im.size[0],im.size[1]*(len(verts)+1)))
        draw = ImageDraw.Draw(imstack)
        for i in range(len(verts)):
            imstack.paste(verts[i],(0,im.size[1]*i))
            draw.line((0, im.size[1]*(i+1)-1, im.size[0], im.size[1]*(i+1)-1), fill=(255,255,0))
        imstack.paste(im,(0,im.size[1]*(i+1)))
        im=imstack
    msg1="VD Labs - QRSS Stacker"
    msg="average projection of %d images "%imagesUsed
    msg+="(repeat=%.01fpx, drift=%.01fpx)"%(self.repeat,self.vertshift)
    draw = ImageDraw.Draw(im)
    tw,th=draw.textsize(msg)
    ty=im.size[1]-th
    # Draw a 1px black outline around the captions, then the white text.
    for sx in [-1,0,1]:
        for sy in [-1,0,1]:
            draw.text((3+sx,3+sy), msg1, (0,0,0)); draw.text((3+sx,ty-3+sy), msg, (0,0,0))
    draw.text((3,3), msg1, (255,255,255))
    draw.text((3,ty-3), msg, (255,255,255))
    if saveit==True:
        self.stat("saving projected stack...")
        im.save(self.folder+"/stacked/stacked_"+self.fnameonly,quality=90)
        os.startfile(self.folder+"/stacked/")
    self.im_spec.set_from_pixbuf(Image_to_GdkPixbuf(im))
    #time.sleep(.1)
    self.stat("waiting for commands...")
    # NOTE(review): `self.working==False` is a comparison, not an
    # assignment — the busy flag is never cleared here; confirm intent.
    self.working==False
    print "PROJECTED"
def project(self, saveit=False): if self.working == True: print "NOT READY YET" return if self.lim_from.get_value() > self.lim_to.get_value(): self.lim_from.set_value(self.lim_to.get_value()) if self.fname == None: self.stat("no image loaded!") return self.stat("projecting image...") if saveit == True: try: os.mkdir(self.folder + "/stacked/") except: pass #folder there already self.proj, x, imagesUsed, verts = None, 0, 0, [] ### PER SLICE ########################### while x * self.repeat + self.offset < self.im.shape[1] - self.repeat: if x+1<self.lim_from.get_value() \ or x+1>self.lim_to.get_value() \ and self.adj_togglefirst1.get_active()==True: self.stat("ignoring slice #%02d" % x) else: self.stat("processing slice #%02d" % x) startx = int(x * self.repeat + self.offset) im2 = self.im[:, startx:startx + int(self.repeat)] if self.stackvert.get_active(): verts.append(Image.fromarray(numpy.uint8(im2))) if x > 0 and self.vertshift <> 0: im2 = numpy.roll(im2, int(-self.vertshift * x), axis=0) if self.proj == None: self.proj = im2 else: if self.method == "avg": self.proj = self.proj + im2 if self.method == "max": self.proj = numpy.maximum(self.proj, im2) if self.method == "med": self.proj = scipy.minimum(self.proj, im2) if saveit == True and self.toggle_saveslices.get_active( ) == True: self.stat("saving slice #%02d" % x) scipy.misc.imsave( self.folder + "/stacked/%02d_" % x + self.fnameonly, im2) imagesUsed += 1 x += 1 ########################################## if self.lim_to.get_value() > x: self.lim_to.set_value(x) if imagesUsed == 0: print "BLANK" return self.lbl_totimgs.set_text("total images: %d (%d used)" % (x, imagesUsed)) if self.method == "avg": self.proj = self.proj / imagesUsed self.proj = self.proj.astype(int) im = Image.fromarray(numpy.uint8(self.proj)) if self.adj_toggleAutoContrast.get_active() == True: im = ImageOps.autocontrast(im, cutoff=0) if self.stackvert.get_active(): imstack = Image.new("RGB", (im.size[0], im.size[1] * (len(verts) + 1))) draw = 
ImageDraw.Draw(imstack) for i in range(len(verts)): imstack.paste(verts[i], (0, im.size[1] * i)) draw.line((0, im.size[1] * (i + 1) - 1, im.size[0], im.size[1] * (i + 1) - 1), fill=(255, 255, 0)) imstack.paste(im, (0, im.size[1] * (i + 1))) im = imstack msg1 = "VD Labs - QRSS Stacker" msg = "average projection of %d images " % imagesUsed msg += "(repeat=%.01fpx, drift=%.01fpx)" % (self.repeat, self.vertshift) draw = ImageDraw.Draw(im) tw, th = draw.textsize(msg) ty = im.size[1] - th for sx in [-1, 0, 1]: for sy in [-1, 0, 1]: draw.text((3 + sx, 3 + sy), msg1, (0, 0, 0)) draw.text((3 + sx, ty - 3 + sy), msg, (0, 0, 0)) draw.text((3, 3), msg1, (255, 255, 255)) draw.text((3, ty - 3), msg, (255, 255, 255)) if saveit == True: self.stat("saving projected stack...") im.save(self.folder + "/stacked/stacked_" + self.fnameonly, quality=90) os.startfile(self.folder + "/stacked/") self.im_spec.set_from_pixbuf(Image_to_GdkPixbuf(im)) #time.sleep(.1) self.stat("waiting for commands...") self.working == False print "PROJECTED"
im1.save(prefix + 'OrionBNKL_4096sq_GEMS_mosaic.png') #rgb_pil = ((1-rgb[:,:,:3])*(2**8)) #rgb_pil -= (256*rgb[:,:,3])[:,:,newaxis] #rgb_pil[rgb_pil>255] = 255 #rgb_pil = rgb_pil.astype('uint8') #im1 = PIL.Image.fromarray(rgb_pil) #im1.save(prefix+'OrionBNKL_4096sq_GEMS_mosaic_try2.png') print "Saving GEMS mosaic with white bg ", time.time() - t0 wbackground = PIL.Image.new("RGB", im1.size, (255, 255, 255)) wbackground.paste(im1, mask=im1.split()[3]) wbackground.save(prefix + 'OrionBNKL_4096sq_GEMS_mosaic_whitebg.png') kbackground = PIL.Image.new("RGB", im1.size, (0, 0, 0)) kbackground.paste(im1, mask=im1.split()[3]) print "Saving GEMS mosaic with black bg ", time.time() - t0 kbackground.save(prefix + 'OrionBNKL_4096sq_GEMS_mosaic_blackbg.png') kbackground_contrast = ImageOps.autocontrast(kbackground) kbackground_contrast.save(prefix + 'OrionBNKL_4096sq_GEMS_mosaic_blackbg_contrast.png') kbackground_bright = ImageEnhance.Brightness(kbackground_contrast).enhance(1.5) kbackground_bright.save( prefix + 'OrionBNKL_4096sq_GEMS_mosaic_blackbg_contrast_bright.png') #print "doing Bolocam PIL stuff ",time.time()-t0 #v2pil = (v2img*255).astype('uint8')[pilslice,:,:] #v2pil = v2pil[::-1,:,:] #boloim = PIL.Image.fromarray(v2pil) #boloim.save(prefix+'OrionBNKL_4096sq_bolo.png') #kbackground.paste(boloim, mask=boloim.split()[3]) #print "Saving Bolocam + GEMS mosaic with black bg (PIL) ",time.time()-t0 #kbackground.save(prefix+'OrionBNKL_4096sq_GEMS_bolo_mosaic_blackbg.png')
# Source LIDAR DEM file source = "lidar.asc" # Output image file target = "lidar.bmp" # Load the ASCII DEM into a numpy array arr = np.loadtxt(source, skiprows=6) # Convert the numpy array to a PIL image im = Image.fromarray(arr).convert('L') # Enhance the image im = ImageOps.equalize(im) im = ImageOps.autocontrast(im) # Begin building our color ramp palette = [] # Hue, Saturaction, Value # color space h = .67 s = 1 v = 1 # We'll step through colors from: # blue-green-yellow-orange-red. # Blue=low elevation, Red=high-elevation step = h / 256.0
def imageAutoContrast(img, r=1.0):
    """Blend *img* with its auto-contrasted version.

    r is the blend ratio passed to imageBlend; r == 0.0 short-circuits
    and returns *img* unchanged.
    """
    if r == 0.0:
        return img
    return imageBlend(img, ImageOps.autocontrast(img), r)
def process(self, _edObject=None):
    """
    This is the main method of the plugin, it does the job, not the EDNA name
    conversions which were done in the preprocess.

    Pipeline: read the image (FABIO, falling back to PIL) -> crop borders ->
    clip to min/max levels -> gaussian blur / grey dilation -> normalize /
    equalize -> log / gamma scaling -> convert to 8-bit PIL -> autocontrast,
    resize, invert, colorize -> save as JPEG (or the requested format).
    """
    EDPluginExec.process(self)
    EDVerbose.DEBUG("EDPluginExecThumbnailv10.process")
    #        try:
    #        except Exception:
    #            edfImage = EDF(self.inputFilename)
    #            self.npaImage = edfImage.GetData(0)
    # Read the image using FABIO; fall back to PIL on failure
    isRGB = False
    pilOutputImage = None
    if self.inputFilename is not None:
        try:
            fabioImage = openimage(self.inputFilename)
            self.npaImage = fabioImage.data
        except Exception:
            pilInputImage = Image.open(self.inputFilename)
            x, y = pilInputImage.size
            ImageFile.MAXBLOCK = x * y
            if pilInputImage.mode == "1":
                self.npaImage = numpy.asarray(pilInputImage).astype("uint8")
                isRGB = False
            elif pilInputImage.mode == "F":
                self.npaImage = numpy.asarray(pilInputImage)
                isRGB = False
            elif pilInputImage.mode == "L":
                self.npaImage = numpy.asarray(pilInputImage)
                isRGB = False
            elif pilInputImage.mode == "P":
                self.npaImage = numpy.asarray(pilInputImage.convert("RGB"))
                isRGB = True
            elif pilInputImage.mode == "RGB":
                self.npaImage = numpy.asarray(pilInputImage)
                isRGB = True
            elif pilInputImage.mode == "CMJK":
                self.npaImage = numpy.asarray(pilInputImage.convert("RGB"))
                isRGB = True
    dtype = self.npaImage.dtype
    NPAImageFloat = None
    # crop border
    if len(self.cropBorders) > 0:
        if len(self.cropBorders) == 1:
            crop0 = self.cropBorders[0]
            crop1 = self.cropBorders[0]
        else:
            crop0 = self.cropBorders[0]
            crop1 = self.cropBorders[1]
        # BUGFIX: the second axis used `crop1:crop1` (an empty slice that
        # discarded the whole image); `crop1:-crop1` mirrors `crop0:-crop0`.
        if isRGB:
            self.npaImage = self.npaImage[crop0:-crop0, crop1:-crop1, :]
        else:
            self.npaImage = self.npaImage[crop0:-crop0, crop1:-crop1]
    # Set maxima and minima (percent units need the sorted pixel values)
    if (self.minLevelUnit is not None) or (self.maxLevelUnit is not None):
        sortedArray = self.npaImage.flatten()
        sortedArray.sort()
    if self.minLevel is not None:
        self.normalize = True
        if isRGB:
            EDVerbose.warning("It is not allowed to set Min with RGB data")
        else:
            if self.minLevelUnit in ["%", "percent"]:
                self.minLevel = sortedArray[int(
                    round(float(self.minLevel) * sortedArray.size / 100.0))]
            if isinstance(self.npaImage[0, 0], int):
                self.npaImage = numpy.maximum(
                    self.npaImage,
                    int(self.minLevel) * numpy.ones_like(self.npaImage))
            else:
                self.npaImage = numpy.maximum(
                    self.npaImage,
                    self.minLevel * numpy.ones_like(self.npaImage))
    if self.maxLevel is not None:
        self.normalize = True
        if isRGB:
            EDVerbose.warning("It is not allowed to set Max with RGB data")
        else:
            if self.maxLevelUnit in ["%", "percent"]:
                self.maxLevel = sortedArray[int(
                    round(float(self.maxLevel) * sortedArray.size / 100.0))]
            if isinstance(self.npaImage[0, 0], int):
                self.npaImage = numpy.minimum(
                    self.npaImage,
                    int(self.maxLevel) * numpy.ones_like(self.npaImage))
            else:
                self.npaImage = numpy.minimum(
                    self.npaImage,
                    self.maxLevel * numpy.ones_like(self.npaImage))
    # Scipy filters come here:
    if len(self.gaussianBlur) > 0:
        if len(self.gaussianBlur) == 1:
            kernel = (self.gaussianBlur[0], self.gaussianBlur[0])
        else:
            kernel = (self.gaussianBlur[0], self.gaussianBlur[1])
        if isRGB:
            kernel = (kernel[0], kernel[1], 0)  # never blur across channels
        self.npaImage = scipy.ndimage.gaussian_filter(self.npaImage, kernel)
    if len(self.dilatation) > 0:
        if len(self.dilatation) == 1:
            kernel = (self.dilatation[0], self.dilatation[0])
        else:
            kernel = (self.dilatation[0], self.dilatation[1])
        if isRGB:
            kernel = (kernel[0], kernel[1], 0)
        self.npaImage = scipy.ndimage.morphology.grey_dilation(
            self.npaImage, kernel)
    # Normalization ; equalization
    if (self.normalize is True) or (self.equalize is True):
        if isRGB is True:
            self.npaImage = numpy.asarray(
                ImageOps.equalize(Image.fromarray(self.npaImage)))
        else:
            EDVerbose.DEBUG("EDPluginExecThumbnailv10: Normalization")
            vmin = self.npaImage.min()
            vmax = self.npaImage.max()
            NPAImageFloat = (self.npaImage.astype(numpy.float32) -
                             float(vmin)) / (float(vmax) - float(vmin))
            if (self.equalize == True):
                nbr_bins = 64
                NPAImageFloatFlat = NPAImageFloat.flatten()
                # NOTE(review): `normed=True` was removed from recent numpy
                # (use density=True) -- confirm the target numpy version.
                imhist, bins = numpy.histogram(
                    NPAImageFloatFlat, nbr_bins,
                    normed=True)  # get image histogram
                cdf = imhist.cumsum()  # cumulative distribution function
                ncdf = cdf / cdf[-1]  # normalized cumulative distribution function
                #            print ncdf
                NPAImageFloat2Flat = numpy.interp(NPAImageFloatFlat, bins,
                                                  [0] + ncdf.tolist())
                NPAImageFloat = NPAImageFloat2Flat.reshape(
                    NPAImageFloat.shape)
                EDVerbose.DEBUG("Equalize: min= %f, max= %f" %
                                (NPAImageFloat.min(), NPAImageFloat.max()))
    # Gamma and logarithm scale
    if ((self.log is True) or (self.gamma != 1)) and (NPAImageFloat is None):
        # then we need the array in float
        if dtype == numpy.uint8:
            NPAImageFloat = self.npaImage.astype(numpy.float32) / 255.0
        elif dtype == numpy.uint16:
            NPAImageFloat = self.npaImage.astype(numpy.float32) / 65535.0
        else:
            NPAImageFloat = self.npaImage.astype(numpy.float32)
    if self.log is True:
        # shift so the argument of log stays >= 1, then rescale to [0, 1]
        NPAImageFloat = numpy.log(1 - NPAImageFloat.min() + NPAImageFloat)
        vmin = NPAImageFloat.min()
        vmax = NPAImageFloat.max()
        NPAImageFloat = (NPAImageFloat - vmin) / (vmax - vmin)
    if self.gamma != 1:
        if dtype not in [numpy.uint8, numpy.uint16]:
            vmin = NPAImageFloat.min()
            vmax = NPAImageFloat.max()
            NPAImageFloat = (NPAImageFloat - vmin) / (vmax - vmin)
        NPAImageInt = (255.0 * (NPAImageFloat**self.gamma)).astype("uint8")
    else:  # if (self.gamma == 1):
        if NPAImageFloat is None:
            if dtype == numpy.uint8:
                NPAImageInt = self.npaImage
            elif dtype == numpy.uint16:
                NPAImageInt = (self.npaImage / 256).astype(numpy.uint8)
            else:  # for float or a signed integer
                vmin = self.npaImage.min()
                vmax = self.npaImage.max()
                NPAImageInt = ((self.npaImage.astype(numpy.float32) - vmin) /
                               (vmax - vmin) * 255.0).astype(numpy.uint8)
        else:
            vmin = NPAImageFloat.min()
            vmax = NPAImageFloat.max()
            EDVerbose.DEBUG(
                "EDPluginExecThumbnailv10: NPAImageFloat => NPAImageInt min=%s max =%s"
                % (vmin, vmax))
            NPAImageInt = ((NPAImageFloat - vmin) * 255.0 /
                           (vmax - vmin)).astype(numpy.uint8)
    # Conversion back to PIL mode
    if isRGB is True:
        pilOutputImage = Image.fromarray(NPAImageInt, 'RGB')
    else:
        pilOutputImage = Image.fromarray(NPAImageInt, 'L')
    if (self.autocontrast is not None):
        pilOutputImage = ImageOps.autocontrast(pilOutputImage,
                                               self.autocontrast)
    if (self.width is not None) or (self.height is not None):
        if (self.width > 0) and (self.height > 0):
            if self.keepRatio is True:
                # PIL takes care of the ratio
                pilOutputImage.thumbnail((self.width, self.height),
                                         Image.ANTIALIAS)
            else:
                pilOutputImage = pilOutputImage.resize(
                    (self.width, self.height), Image.ANTIALIAS)
        else:
            if self.width is None:
                pilOutputImage.thumbnail((self.height, self.height),
                                         Image.ANTIALIAS)
            elif self.height is None:
                pilOutputImage.thumbnail((self.width, self.width),
                                         Image.ANTIALIAS)
    if self.invert == True:
        pilOutputImage = ImageOps.invert(pilOutputImage)
    if self.colorize == True:
        pilOutputImage.putpalette(EDPluginExecThumbnailv10.getPalette())
        pilOutputImage = pilOutputImage.convert("RGB")
    self.synchronizeOn()
    if self.format == "jpg":
        self.width, self.height = pilOutputImage.size
        if self.width * self.height > ImageFile.MAXBLOCK:
            ImageFile.MAXBLOCK = self.width * self.height
        try:
            pilOutputImage.save(self.output,
                                "JPEG",
                                quality=85,
                                optimize=True)
        except TypeError:
            pilOutputImage.save(self.output)
    else:
        pilOutputImage.save(self.output)
    self.synchronizeOff()
# NOTE(review): this is a fragment -- `infile`, `orginalColor`, `holdSize`,
# `boxR`, `position`, `boxesSize` and `boxS` are defined earlier, outside
# this view; indentation reconstructed -- confirm against the full file.
name.append(infile)
hold = orginalColor[1].crop(holdSize)  # create image to hold visual results
for i in range(len(orginalColor)):  # start image analysis
    filepath, filename = os.path.split(name[i])  # give file path and file name
    filename, ext = os.path.splitext(filename)  # give file name and extension
    # image processing -------------------------------------------------------
    im = orginalColor[i]  # open an color image
    region = im.rotate(boxR[position - 1], resample=2)  # rotate image
    hold.paste(region.crop(boxesSize),
               (0, (boxS[1] + 1) * i))  # save cropped original on holder
    region = ImageOps.grayscale(region)  # grayscale image
    region = ImageOps.autocontrast(region, cutoff=10)  # color enhance
    region = region.filter(
        ImageFilter.MedianFilter(3))  # median filter for image
    region = region.filter(ImageFilter.SMOOTH)  # smooth the image
    region = ImageOps.autocontrast(region, cutoff=1)  # color enhance
    edge = region.filter(ImageFilter.MedianFilter(3))  # median filter for image
    edge = edge.filter(ImageFilter.FIND_EDGES)  # edge detection in PIL
    edge = ImageOps.autocontrast(edge, cutoff=2)  # color enhance
    edge = edge.crop(boxesSize)  # crop the edge detected image for analyzing
    hold.paste(
        region.crop(boxesSize),
        (boxS[0] + 1,
         (boxS[1] + 1) * i))  # save grayscale and color enhanced cropped image
def operation(self, img):
    """Return *img* after PIL's automatic contrast stretching."""
    stretched = ImageOps.autocontrast(img)
    return stretched