def generate_cloud_nm_channel(srcImg):
    """Derive a single 'nm' channel image from an RGBA cloud texture.

    Builds a gray-centered composite of the red/green channels, then forces
    strongly yellow pixels back to neutral gray via a yellowness mask.
    Assumes srcImg is RGBA (split into 4 bands) — TODO confirm at call sites.
    """
    # channels
    r, g, b, a = srcImg.split()
    # helper images: flat mid-gray and a pure-yellow reference
    gray = Image.new('L', srcImg.size, (127))
    yellowRGB = Image.new('RGB', srcImg.size, (255, 255, 0))
    # discard 'too yellow' values: distance from pure yellow per channel
    oneMinusYellowness = ImageChops.difference(Image.merge('RGB', (r, g, b)), yellowRGB)
    yR, yG, yB = oneMinusYellowness.split()
    # max of the R/G distances — blue is deliberately ignored here
    oneMinusYellowness = ImageChops.lighter(yR, yG)
    yellowness = ImageChops.invert(oneMinusYellowness)
    # clamp to >= 127, shift down by 127, then double: keeps only the
    # strongly-yellow half of the range and stretches it to full scale
    yellowness = ImageChops.lighter(yellowness, gray)
    yellowness = ImageChops.subtract(yellowness, gray)
    yellowness = ImageChops.add(yellowness, yellowness)
    #yellowness.save("Y:/art/source/particles/textures/clouds/yellowness.png")
    halfRed = ImageChops.multiply(r, gray)  # 50% red
    halfGreen = ImageChops.multiply(g, gray)  # 50% green
    # compose: gray + r/2 - g/2, then fall back to flat gray where yellow
    dstImg = ImageChops.subtract(ImageChops.add(gray, halfRed), halfGreen)
    dstImg = ImageChops.composite(gray, dstImg, yellowness)
    return dstImg
def add_images(self, im1, im2):
    """Sum two images, shifting im2 by the detected inter-image offset first."""
    dx, dy = self.findOffset(im1, im2)
    aligned = im2 if (dx, dy) == (0, 0) else ImageChops.offset(im2, dx, dy)
    return ImageChops.add(im1, aligned)
def main():
    """Render the sphere-walled box scene across n_quadrants worker processes."""
    # create the output image via PIL's Image module
    image = Image.new("RGB", (W, H), "black")
    # build the scene
    lumiere = Lumiere(Vector(-10, -20, 50), 1000000000)
    origin = Vector(0, 0, 55)
    fov = 90 * 3.14 / 180  # field of view in radians (pi approximated as 3.14)
    # NOTE(review): getNormalized is accessed without calling — presumably a
    # property on Vector; confirm in the Vector class.
    direction = Vector(0, 0, 1).getNormalized
    up = Vector(0, 1, 0).getNormalized
    right = direction.cross(up) * (-1)
    camera = Camera(origin, fov, direction, up, right)
    materiau_opaque1 = Materiau([1, 0, 0], False, 0)  # red
    materiau_opaque2 = Materiau([0, 1, 0], False, 0)  # green
    materiau_opaque3 = Materiau([0, 0, 1], False, 0)  # blue
    materiau_opaque4 = Materiau([0, 1, 1], False, 0)  # cyan
    materiau_opaque5 = Materiau([1, 1, 1], False, 0)  # white
    materiau_opaque6 = Materiau([1, 1, 0], False, 0)  # yellow
    materiau_reflechissant = Materiau([1, 1, 1], True, 0)
    materiau_transparent = Materiau([1, 1, 1], False, 1.5)
    # Giant spheres act as the box walls around the small transparent sphere.
    s1 = Sphere(Vector(0, -2, 25), 10, materiau_transparent)
    s2 = Sphere(Vector(0, 0, 1000), 940, materiau_opaque5)   # back
    s3 = Sphere(Vector(0, 0, -1000), 940, materiau_opaque5)  # front
    s4 = Sphere(Vector(1000, 0, 0), 940, materiau_opaque3)   # right
    s5 = Sphere(Vector(-1000, 0, 0), 940, materiau_opaque1)  # left
    s6 = Sphere(Vector(0, 1000, 0), 990, materiau_opaque6)   # below
    s7 = Sphere(Vector(0, -1000, 0), 940, materiau_opaque2)  # above
    scene = Scene([s1, s2, s3, s4, s5, s6, s7], lumiere, camera)
    # multiprocessing plumbing
    out_q = Queue()
    imageProcess = []
    imageQuadrant = []
    # launch one process per quadrant
    for i in xrange(n_quadrants):
        imageProcess.append(Process(target=scene.getImage, args=(image, n_rebonds, i, out_q, n_echantillons, diffus)))
        imageProcess[i].start()
    for i in xrange(n_quadrants):
        imageQuadrant.append(out_q.get())
    for i in xrange(n_quadrants):
        imageProcess[i].join()
    # rebuild the image from the quadrants (each quadrant is black outside
    # its own region, so plain addition stitches them — TODO confirm getImage)
    image_haute = ImageChops.add(imageQuadrant[0], imageQuadrant[1])
    image_basse = ImageChops.add(imageQuadrant[2], imageQuadrant[3])
    image = ImageChops.add(image_haute, image_basse)
    image.show()
# Persist the rendered image to disk as JPEG.
# NOTE(review): relies on a module-level `image` binding — presumably this
# originally sat inside main() after image.show(); confirm against the repo.
image.save("image.jpg")
def test_uri(self, name, uri):
    """Render *uri* with both browser runners and diff the two screenshots.

    Returns a result dict (uri, image paths, rms difference, and per-operator
    diff images when the captures differ), or None on framebust/timeout.
    """
    filename = os.path.join(self.directory, name)
    file1 = filename+'.runner1.png'
    file2 = filename+'.runner2.png'
    self.c1.doURI(uri, file1)
    self.c2.doURI(uri, file2)
    # Poll until both screenshots are reported saved, unless the page
    # frame-busted or timed out first.
    while ( file1 not in self.saved_release_images and file2 not in self.saved_nightly_images ) and ( uri not in self.framebusters) and (uri not in self.timeouts):
        sleep(1)
    if (uri in self.framebusters):
        print "FrameBusted "+uri
        return
    if (uri in self.timeouts):
        print "Timeout "+uri
        return
    rms = None
    counter = 0
    # The files may still be flushing to disk; retry the diff up to 6 times.
    while rms is None and counter < 6:
        try:
            rms, image1, image2, hist1, hist2 = diff_images(file1, file2)
        except:
            print 'Image is not ready, waiting 10 seconds.'
            sleep(10)
            counter += 1
    if counter >= 6:
        print "Timeout exceeded waiting for image to be saved"
        return
    result = {"uri":uri, "release_image":file1, "nightly_image":file2, "difference":rms}
    if rms != 0:
        result["images_differ"] = True
        # Some ImageChops operators require matching modes, hence the RGB copies.
        image1RGB = image1.convert('RGB')
        image2RGB = image2.convert('RGB')
        ImageChops.difference(image1RGB, image2RGB).save(filename+'.diff.difference.png')
        result["diff_difference_image"] = filename+'.diff.difference.png'
        ImageChops.multiply(image1, image2).save(filename+'.diff.multiply.png')
        result["diff_multiply_image"] = filename+'.diff.multiply.png'
        ImageChops.screen(image1, image2).save(filename+'.diff.screen.png')
        result["diff_screen_image"] = filename+'.diff.screen.png'
        ImageChops.add(image1, image2).save(filename+'.diff.add.png')
        result["diff_add_image"] = filename+'.diff.add.png'
        ImageChops.subtract(image1RGB, image2RGB).save(filename+'.diff.subtract.png')
        result["diff_subtract_image"] = filename+'.diff.subtract.png'
        ImageChops.lighter(image1, image2).save(filename+'.diff.lighter.png')
        result["diff_lighter_image"] = filename+'.diff.lighter.png'
        ImageChops.darker(image1, image2).save(filename+'.diff.darker.png')
        result["diff_darker_image"] = filename+'.diff.darker.png'
    else:
        result["images_differ"] = False
    return result
def chooseStrategy(self, figures):
    """Pick a solving strategy name for a 3x3 RPM-style figure matrix.

    Compares the first two rows (A B C / D E F, plus G, H from the third)
    under several image relations and returns the first matching strategy
    label, or None when nothing matches.
    """
    # everyone is the same
    figures_a_ = figures['A']
    figures_b_ = figures['B']
    figures_c_ = figures['C']
    figures_d_ = figures['D']
    figures_e_ = figures['E']
    figures_f_ = figures['F']
    figures_g_ = figures['G']
    figures_h_ = figures['H']
    # overlays (pairwise sums along rows)
    rowAB = ImageChops.add(figures_a_, figures_b_)
    rowBC = ImageChops.add(figures_b_, figures_c_)
    rowDE = ImageChops.add(figures_d_, figures_e_)
    rowEF = ImageChops.add(figures_e_, figures_f_)
    # column products
    colAD = ImageChops.multiply(figures_a_, figures_d_)
    colADG = ImageChops.multiply(colAD, figures_g_)
    colBE = ImageChops.multiply(figures_b_, figures_e_)
    colBEH = ImageChops.multiply(colBE, figures_h_)
    # common permutations (products within rows)
    ab = ImageChops.multiply(figures_a_, figures_b_)
    ac = ImageChops.multiply(figures_a_, figures_c_)
    df = ImageChops.multiply(figures_d_, figures_f_)
    abc = ImageChops.multiply(ab, figures_c_)
    de = ImageChops.multiply(figures_d_, figures_e_)
    de_F = ImageChops.multiply(de, figures_f_)
    # diffs (inverted grayscale differences within rows)
    difAB = self.imageUtils.invertGrayScaleImage(ImageChops.difference(figures_a_, figures_b_))
    difDE = self.imageUtils.invertGrayScaleImage(ImageChops.difference(figures_d_, figures_e_))
    # NOTE(review): nesting below reconstructed from a collapsed source line;
    # the elif chain is attached to the outermost if — confirm against repo.
    if self.areEqual(figures_a_, figures_b_)[0] and self.areEqual(figures_b_, figures_c_)[0]:
        if self.areEqual(figures_d_, figures_e_)[0] and self.areEqual(figures_e_, figures_f_)[0]:
            return 'row_equals'
    elif ((self.areEqual(figures_a_, figures_d_)[0] or self.areEqual(figures_a_, figures_e_)[0] or self.areEqual(figures_a_, figures_f_)[0]) \
          and (self.areEqual(figures_b_, figures_d_)[0] or self.areEqual(figures_b_, figures_e_)[0] or self.areEqual(figures_b_, figures_f_)[0]) \
          and (self.areEqual(figures_c_, figures_d_)[0] or self.areEqual(figures_c_, figures_e_)[0] or self.areEqual(figures_c_, figures_f_)[0])):
        return 'one_of_each'
    elif self.areEqual(rowAB, rowBC)[0] and self.areEqual(rowDE, rowEF)[0]:
        return "one_cancels"
    elif self.areEqual(colADG, colBEH)[0]:
        return "cancel_out"
    elif self.areEqual(ab, figures_c_)[0] and self.areEqual(de, figures_f_)[0]:
        return "productAB"
    elif self.areEqual(ac, figures_b_)[0] and self.areEqual(df, figures_e_)[0]:
        return "productAC"
    elif self.areEqual(difAB, figures_c_)[0] and self.areEqual(difDE, figures_f_)[0]:
        return "diffAB"
    elif self.isShared(figures):
        return "shared"
    elif self.areEqual(abc, de_F)[0]:
        return "common_perms"
def execute(self, image, query):
    """Paste a watermark/overlay image onto *image* at the configured position.

    Returns a new image; the input is not mutated (a copy is pasted onto).
    Alpha is handled explicitly because PIL's paste ignores the source alpha
    band unless the same image is also passed as the mask.
    """
    athor = get_image_object(self.image, self.storage)
    x2, y2 = athor.size
    x1 = get_coords(image.size[0], athor.size[0], self.x)
    y1 = get_coords(image.size[1], athor.size[1], self.y)
    box = (
        x1,
        y1,
        x1 + x2,
        y1 + y2,
    )
    # Note that if you paste an "RGBA" image, the alpha band is ignored.
    # You can work around this by using the same image as both source image and mask.
    image = image.copy()
    if athor.mode == 'RGBA':
        if image.mode == 'RGBA':
            # Split both images so the overlay can be pasted onto the RGB
            # planes with its own alpha as the mask.
            channels = image.split()
            alpha = channels[3]
            image = Image.merge('RGB', channels[0:3])
            athor_channels = athor.split()
            athor_alpha = athor_channels[3]
            athor = Image.merge('RGB', athor_channels[0:3])
            image.paste(athor, box, mask=athor_alpha)
            # merge alpha: place the overlay's alpha on a blank canvas at the
            # paste position and add it to the base image's alpha
            athor_image_alpha = Image.new('L', image.size, color=0)
            athor_image_alpha.paste(athor_alpha, box)
            new_alpha = ImageChops.add(alpha, athor_image_alpha)
            image = Image.merge('RGBA', image.split() + (new_alpha,))
        else:
            image.paste(athor, box, mask=athor)
    else:
        image.paste(athor, box)
    return image
def autocrop_image(inputfilename, outputfilename = None, color = 'white', newWidth = None, doShow = False ):
    """Crop an image file to its content box against a solid background colour.

    Args:
      inputfilename: path of the image to crop.
      outputfilename: where to save; overwrites the input when None.
      color: background colour as a hex string or a CSS colour name.
      newWidth: optional target width; height scales to keep the aspect ratio.
      doShow: display the cropped result when True.

    Returns:
      True when a content bounding box was found and the crop saved,
      False when the image is entirely background.

    Raises:
      ValueError: when *color* is neither valid hex nor a known colour name.
    """
    im = Image.open(inputfilename)
    try:
        # get hex colors
        rgbcolor = hex_to_rgb( color )
    except Exception:
        if color not in _all_possible_colornames:
            # BUG FIX: the original message contained a %s placeholder but
            # never supplied the value, so the offending name was not shown.
            raise ValueError("Error, color name = %s not in valid set of color names." % color)
        rgbcolor = webcolors.name_to_rgb(color)
    bg = Image.new(im.mode, im.size, rgbcolor)
    diff = ImageChops.difference(im, bg)
    # Double the difference and subtract 100 to ignore near-background noise.
    diff = ImageChops.add(diff, diff, 2.0, -100)
    bbox = diff.getbbox()
    if bbox:
        cropped = im.crop(bbox)
        if newWidth is not None:
            # Preserve the aspect ratio when rescaling to the requested width.
            height = int( newWidth * 1.0 / cropped.size[0] * cropped.size[1] )
            cropped = cropped.resize(( newWidth, height ))
        if outputfilename is None:
            cropped.save(inputfilename)
        else:
            cropped.save(os.path.expanduser(outputfilename))
        if doShow:
            cropped.show( )
        return True
    else:
        return False
def trim(im, color):
    """Find the content box of binary image *im* and crop *color* to it."""
    blank = Image.new(im.mode, im.size, 0)
    mask = ImageChops.difference(im, blank)
    mask = ImageChops.add(mask, mask, 2.0, -100)
    box = mask.getbbox()
    if box:
        return color.crop(box)
def trim(im):
    """Crop *im* to the region differing from its top-left pixel (None if blank)."""
    backdrop = Image.new(im.mode, im.size, im.getpixel((0, 0)))
    mask = ImageChops.difference(im, backdrop)
    mask = ImageChops.add(mask, mask, 50.0, -1)
    box = mask.getbbox()
    if not box:
        return None
    return im.crop(box)
def DetectBorder(image):
    """Determine whether or not an image contains a border.

    Args:
      image: A PIL Image object.

    Returns:
      A boolean representing whether or not the image has a border.
    """
    image_width, image_height = image.size
    bg = Image.new(image.mode, image.size, image.getpixel((0, 0)))
    diff = ImageChops.difference(image, bg)
    # Adjusting the offset to 115 correctly identified one of the clear cases.
    # Could potentially make this a flag in order to test thresholds.
    diff = ImageChops.add(diff, diff, 2.0, -115)
    bbox = diff.getbbox()
    # BUG FIX: getbbox() returns None for a fully uniform image, which used
    # to crash the indexing below. A uniform image has no detectable border.
    if bbox is None:
        return False
    return all((
        # Ensure that the upper-left bounding box coordinate has no value on
        # the X- or Y-axis (non-zero).
        bbox[0],
        bbox[1],
        # Ensure that the sum of the upper-left X and lower_right X is less
        # than the original width.
        (bbox[0] + bbox[2]) <= image_width,
        # Ensure that the sum of the upper-left Y and lower_right Y is less
        # than the original height.
        (bbox[1] + bbox[3]) <= image_height
    ))
def trim(_directory, legend = True):
    """Trim every PNG under *_directory* and save it alongside as *_cut.png.

    After the standard border trim, optionally cuts a legend strip off the
    right-hand edge when the right column's red-channel statistics suggest
    one is present (or when the filename contains "TAT").
    Python 2 only: uses the '<>' inequality operator.
    """
    os.chdir(_directory)
    for subdir, dirs, files in os.walk(_directory):
        for fn in files:
            fn = os.path.join(subdir, fn)
            if os.path.splitext(fn)[1] == '.png':
                im = Image.open(fn)
                # Standard content-box trim against the top-left pixel colour.
                bg = Image.new(im.mode, im.size, im.getpixel((0,0)))
                diff = ImageChops.difference(im, bg)
                diff = ImageChops.add(diff, diff, 2.0, -100)
                bbox = diff.getbbox()
                im = im.crop(bbox)
                data = np.asarray(im)
                # Heuristic legend detection on the rightmost pixel column:
                # between 10% and 75% of full red, or completely black, or a
                # "TAT" filename. NOTE(review): operator precedence makes the
                # `legend == True and (...)` clause bind only the first test —
                # confirm this is intended.
                if legend == True and (np.sum(data[::, data.shape[1] - 1,0]) > 255*1*data.shape[0]*.1 and \
                   np.sum(data[::, data.shape[1] - 1,0]) < 255*1*data.shape[0]*.75) or \
                   np.sum(data[::, data.shape[1] - 1,0]) == 0 or \
                   re.search("TAT", os.path.splitext(fn)[0]):
                    # Walk left past the legend, then past the white gap.
                    i = data.shape[1] - 1
                    while np.sum(data[::,i,::]) <> 255*data.shape[2]*data.shape[0]:
                        i = i - 1
                    while np.sum(data[::,i,::]) == 255*data.shape[2]*data.shape[0]:
                        i = i - 1
                    w, h = im.size
                    im = im.crop((0,0,i,h))
                im.save(os.path.splitext(fn)[0] + "_cut" + os.path.splitext(fn)[1])
def create_heatmap(xs, ys, imageSize, blobSize, cmap):
    """Render (xs, ys) points as a square heatmap image using colormap *cmap*.

    Each point contributes a blurred circular blob; blobs are summed with
    saturating image addition and the accumulated intensity is mapped through
    *cmap*. Python 2 integer division is relied on in several places.
    """
    # One reusable blob: a soft gray disc on a transparent canvas.
    blob = Image.new('RGBA', (blobSize * 2, blobSize * 2), '#000000')
    blob.putalpha(0)
    # Per-blob intensity shrinks with point count so the sum saturates less.
    colour = 255 / int(math.sqrt(len(xs)))
    draw = ImageDraw.Draw(blob)
    draw.ellipse((blobSize / 2, blobSize / 2, blobSize * 1.5, blobSize * 1.5), fill=(colour, colour, colour))
    blob = blob.filter(ImageFilter.GaussianBlur(radius=blobSize / 2))
    heat = Image.new('RGBA', (imageSize, imageSize), '#000000')
    heat.putalpha(0)
    # Map data coordinates onto pixel coordinates; note the y-axis is flipped
    # (min(ys) - max(ys) is negative, with yOff at max(ys)).
    xScale = float(imageSize - 1) / (max(xs) - min(xs))
    yScale = float(imageSize - 1) / (min(ys) - max(ys))
    xOff = min(xs)
    yOff = max(ys)
    for i in range(len(xs)):
        xPos = int((xs[i] - xOff) * xScale)
        yPos = int((ys[i] - yOff) * yScale)
        # Paste the blob at the point, then accumulate (saturating add).
        blobLoc = Image.new('RGBA', (imageSize, imageSize), '#000000')
        blobLoc.putalpha(0)
        blobLoc.paste(blob, (xPos - blobSize, yPos - blobSize), blob)
        heat = ImageChops.add(heat, blobLoc)
    # Normalise accumulated intensity and colourise via the colormap,
    # preserving the accumulated alpha channel.
    norm = Normalize(vmin=min(min(heat.getdata())), vmax=max(max(heat.getdata())))
    sm = ScalarMappable(norm, cmap)
    heatArray = pil_to_array(heat)
    rgba = sm.to_rgba(heatArray[:, :, 0], bytes=True)
    rgba[:, :, 3] = heatArray[:, :, 3]
    coloured = Image.fromarray(rgba, 'RGBA')
    return coloured
def trim(origin_im, blur=True, pre_percentage=PERCENTAGE_TO_CROP_SCAN_IMG, upper_lower_cut=True):
    """Pre-crop a scan by percentage, then trim it to its content box.

    When *blur* is set the bounding box is computed on a Gaussian-blurred
    copy (with thresholding) for noise robustness. When *upper_lower_cut*
    is False the crop keeps full height and pads the width by 5% each side.
    Returns the input crop unchanged when no content box is found.
    """
    working = crop_by_percentage(origin_im, pre_percentage)
    if blur:
        blurred = working.filter(ImageFilter.GaussianBlur(radius=2))
        flat = Image.new(blurred.mode, blurred.size, working.getpixel((0, 0)))
        mask = ImageChops.difference(blurred, flat)
        mask = ImageChops.add(mask, mask, 2.0, -100)
        box = mask.getbbox()
    else:
        flat = Image.new(working.mode, working.size, working.getpixel((0, 0)))
        box = ImageChops.difference(working, flat).getbbox()
    if not box:
        # raise Exception("Error while cropping image, there is no bounding box to crop")
        return working
    if upper_lower_cut:
        return working.crop(box)
    width, height = working.size
    margin = int(width * 0.05)
    padded = [max(0, box[0] - margin), 0, min(width, box[2] + margin), height]
    return working.crop(padded)
def autoCrop(self, image):
    """Crop *image* to its content box; None when the image is uniform."""
    flat = Image.new(image.mode, image.size, image.getpixel((0, 0)))
    mask = ImageChops.difference(image, flat)
    mask = ImageChops.add(mask, mask, 2.0, -200)
    box = mask.getbbox()
    if not box:
        return None
    return image.crop(box)
def crop_image(self):
    """Download self.image, auto-crop its borders, upload to S3, update self.

    Side effects: uploads a public-read JPEG to the configured S3 bucket,
    rewrites self.image to the S3 URL, and persists the model via self.save().

    Raises:
      ValueError: when no content bounding box can be determined.
      Any download or upload error is re-raised unchanged.
    """
    try:
        image = Image.open(StringIO(urllib.urlopen(self.image).read()))
    except:
        raise
    else:
        image_out = StringIO()
        # Trim against the top-left pixel colour.
        border = Image.new(image.mode, image.size, image.getpixel((0, 0)))
        diff = ImageChops.difference(image, border)
        diff = ImageChops.add(diff, diff, 2.0, -100)
        bbox = diff.getbbox()
        if bbox:
            image = image.crop(bbox)
            image.save(image_out, 'JPEG')
        else:
            raise ValueError('Unable to determine image bounding box')
        s3 = boto3.resource('s3')
        region = os.environ.get('AWS_DEFAULT_REGION')
        bucket = s3.Bucket(os.environ.get('AWS_S3_BUCKET'))
        # Key encodes venue, title and start date with spaces replaced.
        key = self.venue.replace(" ", "_") + '-' + self.title_raw.replace(" ", "_") + '-' + self.start.strftime("%y-%m-%d") + '.jpg'
        try:
            bucket.put_object(Key = key, Body = image_out.getvalue(), ACL='public-read')
        except:
            raise
        else:
            self.image = 'http://s3-{}.amazonaws.com/{}/{}'.format(region, bucket.name, key)
            self.save()
def trim(im, color):
    """Zoom into the largest content area of binary image *im*.

    The bounding box is computed on *im*, but the crop is applied to (and
    returned from) the companion *color* image — not *im* itself.
    """
    blank = Image.new(im.mode, im.size, 0)
    mask = ImageChops.difference(im, blank)
    mask = ImageChops.add(mask, mask, 2.0, -100)
    box = mask.getbbox()
    if box:
        return color.crop(box)
def apply_edge_detector(input_image):
    """Median-denoise, take centre-difference gradients in X and Y, sum, threshold."""
    # 3x3 median filter suppresses speckle before differentiation.
    smoothed = input_image.filter(ImageFilter.MedianFilter(3))
    # Horizontal centre-difference kernel.
    grad_x = smoothed.filter(ImageFilter.Kernel((3, 3), (0, 0, 0, 1, 0, -1, 0, 0, 0), scale=2))
    # Vertical centre-difference kernel.
    grad_y = smoothed.filter(ImageFilter.Kernel((3, 3), (0, 1, 0, 0, 0, 0, 0, -1, 0), scale=2))
    # Combine the two gradient responses, then binarise at 10.
    combined = ImageChops.add(grad_x, grad_y)
    return combined.point(lambda x: 0 if x < 10 else 255)
def trim(im):
    """Trim whitespace from image."""
    base = Image.new(im.mode, im.size, im.getpixel((0, 0)))
    content_mask = ImageChops.difference(im, base)
    content_mask = ImageChops.add(content_mask, content_mask, 2.0, -100)
    content_box = content_mask.getbbox()
    if content_box:
        return im.crop(content_box)
def bounding_box(im, tolerance=0):
    """
    A bounding box algorithm that has some tolerance for fuzziness.
    """
    reference = Image.new(im.mode, im.size, im.getpixel((0, 0)))
    delta = ImageChops.difference(im, reference)
    # Higher tolerance subtracts more from the doubled difference, so faint
    # deviations from the corner colour are ignored.
    return ImageChops.add(delta, delta, 2.0, -tolerance).getbbox()
def trim_img(imageG):
    """Crop *imageG* to its non-background bounding box; None when uniform."""
    flat = Image.new(imageG.mode, imageG.size, imageG.getpixel((0, 0)))
    mask = ImageChops.difference(imageG, flat)
    mask = ImageChops.add(mask, mask, 2.0, -100)
    box = mask.getbbox()
    if not box:
        return None
    return imageG.crop(box)
def trim(im):
    """Automatically trim the uniform borders of a picture."""
    reference = Image.new(im.mode, im.size, im.getpixel((0, 0)))
    mask = ImageChops.difference(im, reference)
    # scale=1.0 here (not the usual 2.0): difference is summed, not halved.
    mask = ImageChops.add(mask, mask, 1.0, -100)
    box = mask.getbbox()
    if box:
        return im.crop(box)
def preprocessing(filename):
    """Load an image file and crop away its uniform border; None when uniform."""
    picture = Image.open(filename)
    reference = Image.new(picture.mode, picture.size, picture.getpixel((0, 0)))
    mask = ImageChops.difference(picture, reference)
    mask = ImageChops.add(mask, mask, 2.0, -100)
    box = mask.getbbox()
    if box:
        return picture.crop(box)
def _trim(im):
    """Crop *im* to content, treating the top-left pixel as the background."""
    corner_colour = im.getpixel((0, 0))
    reference = Image.new(im.mode, im.size, corner_colour)
    mask = ImageChops.difference(im, reference)
    mask = ImageChops.add(mask, mask, 2.0, -100)
    box = mask.getbbox()
    if box:
        return im.crop(box)
def multiply_image(img, file, amount=1.0):
    """Multiply *img* by the filter texture *file*, weakened by *amount*.

    amount < 1.0 lightens the filter towards white before the multiply, so
    the darkening effect is applied only partially.
    """
    path = "%s%s" % (filter_path, file)
    filterImg = Image.open(path)
    if (amount != 1.0):
        # Brighten the filter by a constant (1-amount)*255 before multiplying.
        # NOTE(review): the constant value is a float — presumably PIL accepts
        # it here; confirm against the PIL version in use.
        filterImg = ImageChops.add(filterImg, ImageChops.constant(filterImg, (1-amount)*255).convert("RGB"))
    return ImageChops.multiply(filterImg, img)
def neImage(image1, image2):
    """Returns a boolean image that is True where image1 != image2."""
    result = Image.new('1', image1.size)
    # 255 wherever a channel value differs, 0 elsewhere.
    changed = ImageChops.difference(image1, image2).point(lambda v: v != 0 and 255)
    # OR the per-channel masks together via saturating addition.
    for channel in changed.split():
        result = ImageChops.add(result, channel)
    return result
def trim(image):
    """Crop away the uniform frame matching the top-left pixel; None if blank."""
    flat = Image.new(image.mode, image.size, image.getpixel((0, 0)))
    delta = ImageChops.difference(image, flat)
    delta = ImageChops.add(delta, delta, 2.0, -100)
    content = delta.getbbox()
    if content:
        return image.crop(content)
def trim(self):
    """Return self.image cropped to its content box (unchanged when uniform)."""
    src = self.image
    flat = Image.new(src.mode, src.size, src.getpixel((0, 0)))
    delta = ImageChops.difference(src, flat)
    delta = ImageChops.add(delta, delta, 2.0, -100)
    box = delta.getbbox()
    return src.crop(box) if box else src
def removeFrame(imgArray):
    """Crop the uniform frame around an image; returns the input when uniform."""
    flat = Image.new(imgArray.mode, imgArray.size, imgArray.getpixel((0, 0)))
    delta = ImageChops.difference(imgArray, flat)
    delta = ImageChops.add(delta, delta, 2.0, -100)
    box = delta.getbbox()
    return imgArray.crop(box) if box else imgArray
def RoundCorner(image, radius):
    """
    Generate the rounded corner image for orgimage.

    Draws a mask that is white in the corner regions and black everywhere
    inside the rounded rectangle, then adds it to the image so the corners
    saturate to white while the interior is unchanged.
    """
    image = image.convert('RGBA')
    # generate the mask image
    mask = Image.new('RGBA', image.size, (0,0,0,0))
    draw = aggdraw.Draw(mask)
    brush = aggdraw.Brush('black')
    width, height = mask.size
    # Start from an all-white mask, then carve the rounded rect in black.
    draw.rectangle((0, 0, mask.size[0], mask.size[1]), aggdraw.Brush('white'))
    # north-west corner
    draw.pieslice((0,0,radius*2,radius*2), 90, 180, None, brush)
    # north-east corner
    draw.pieslice((width-radius*2, 0, width, radius*2), 0, 90, None, brush)
    # south-west corner
    draw.pieslice((0, height-radius*2, radius*2, height), 180, 270, None, brush)
    # south-east corner
    draw.pieslice((width-radius*2, height-radius*2, width, height), 270, 360, None, brush)
    # center rectangle
    draw.rectangle((radius, radius, width-radius, height-radius), brush)
    # four edge rectangles
    draw.rectangle((radius, 0, width-radius, radius), brush)
    draw.rectangle((0, radius, radius, height-radius), brush)
    draw.rectangle((radius, height-radius, width-radius, height), brush)
    draw.rectangle((width-radius, radius, width, height-radius), brush)
    draw.flush()
    del draw
    return ImageChops.add(mask, image)
def trim(im):
    """Trim relative to the pixel at (19, 19); always keep at least 5x8 px."""
    reference = Image.new(im.mode, im.size, im.getpixel((19, 19)))
    delta = ImageChops.difference(im, reference)
    delta = ImageChops.add(delta, delta, 1.0, 0)
    box = delta.getbbox()
    if box is None:
        return im.crop((0, 0, 5, 8))
    return im.crop((0, 0, max(box[2], 5), max(box[3], 8)))
def do_add(self):
    """usage: add <image:pic1> <image:pic2> <int:offset> <float:scale>

    Pop the two top images, produce the scaled sum with offset.
    """
    from PIL import ImageChops
    # Stack order: two images first, then scale, then offset.
    top_image = self.do_pop()
    second_image = self.do_pop()
    scale_factor = float(self.do_pop())
    bias = int(self.do_pop())
    self.push(ImageChops.add(top_image, second_image, scale_factor, bias))
def trim(im): pixel = (255, 255, 255) # ориентируемся на белый цвет # pixel = im.getpixel((0,0)) # ориентируемся на пиксель с левого верхнего края bg = Image.new(im.mode, im.size, pixel) diff = ImageChops.difference(im, bg) diff = ImageChops.add(diff, diff, 2.0, -100) bbox = diff.getbbox() logger.info(bbox) if bbox: return im.crop(bbox) else: return im
def black_or_b(diff_image, image, reference, opacity=0.85):
    """Copied from https://stackoverflow.com/a/30307875 """
    # Amplify the diff mask: three self-additions push any non-zero pixel
    # towards full intensity (saturating at 255).
    boosted_mask = diff_image
    for _ in range(3):
        boosted_mask = ImageChops.add(boosted_mask, boosted_mask)
    dims = diff_image.size
    shade_mask = new_gray(dims, int(255 * (opacity)))
    shade = new_gray(dims, 0)
    # Darken the reference by the opacity, then paint the changed pixels back.
    result = reference.copy()
    result.paste(shade, mask=shade_mask)
    result.paste(image, mask=boosted_mask)
    return result
def trim(img):
    """removes border around image (numpy array in, numpy array out)"""
    # Shift all values by one so a zero-valued border still differs from 0.
    shifted = Image.fromarray(img + 1)
    flat = Image.new(shifted.mode, shifted.size, shifted.getpixel((0, 0)))
    delta = ImageChops.difference(shifted, flat)
    delta = ImageChops.add(delta, delta, 2.0, -100)
    box = delta.getbbox()
    if not box:
        return np.array(shifted)
    return np.array(shifted.crop(box))
def gentext(s, h):
    """Render string *s* at pixel height *h*, cropped to its bounding box.

    Returns a grayscale ('L') image tightly cropped around the drawn text.
    """
    # NOTE: an IBM Plex font path was assigned first and immediately
    # overridden in the original; only the Arista path ever took effect.
    fn = "../../.fonts/Arista-Pro-Alternate-Light-trial.ttf"
    font = ImageFont.truetype(fn, h)
    # Oversized canvas; the final crop discards the unused area.
    im = Image.new("L", (8000, 1000))
    draw = ImageDraw.Draw(im)
    draw.text((1, 1), s, font=font, fill=255)
    return im.crop(im.getbbox())
    # BUG FIX (dead code removed): a Gaussian "glow" pass followed this
    # return in the original and could never execute.
def trim(self, im):
    """Crop a uniform border, then widen the crop box by 2px on each side."""
    reference = Image.new(im.mode, im.size, im.getpixel((0, 0)))
    delta = ImageChops.difference(im, reference)
    delta = ImageChops.add(delta, delta, 2.0, -100)  # TODO: Works not with the US format
    box = delta.getbbox()
    if box is None:
        return im
    widened = (box[0] - 2, box[1] - 2, box[2] + 2, box[3] + 2)
    return im.crop(widened)
def glowText(img, text=None, font_size=35, font_set=None, alpha=0.5, blur=2, logo=None):
    """Overlay glowing text (and an optional logo) near the bottom of *img*.

    Renders at 2x resolution, adds a Gaussian-blurred copy to create the
    glow, downsamples, and composites onto *img* with the given *alpha*.
    Mutates and returns *img*.
    """
    width, height = img.size
    ratio = 2  # supersampling factor: render 2x then downscale for smoothness
    width = width * ratio
    height = height * ratio
    font_size = font_size * ratio
    blur = blur * ratio
    if font_set is None:
        _font = ImageFont.truetype("Arial.ttf", font_size)
    else:
        _font = ImageFont.truetype(font_set, font_size)
    canvas = Image.new('RGBA', (width, height), (0, 0, 0, 0))
    draw = ImageDraw.Draw(canvas)
    if text:
        w, h = draw.textsize(text, font=_font)
    else:
        w, h = 0, 0
    xoffset = 0
    if logo is not None:
        # Scale the logo to the font height and reserve horizontal space.
        lg_w, lg_h = logo.size
        hoffset = 1
        lg_nh = round(font_size * hoffset)
        lg_nw = round(lg_w * lg_nh / lg_h)
        logo = logo.resize((lg_nw, lg_nh), Image.ANTIALIAS)
        xoffset = lg_nw + font_size / 4
        w = w + xoffset
        try:
            canvas.paste(logo, (round(
                (width - w) / 2), round(height - 2 * font_size)), logo)
        except Exception:
            # BUG FIX: was a bare `except:`; narrowed so KeyboardInterrupt /
            # SystemExit propagate. Fallback pastes without an alpha mask.
            canvas.paste(logo, (round(
                (width - w) / 2), round(height - 2 * font_size)))
    if text:
        draw.text(((width - w) / 2 + xoffset, height - 2 * font_size),
                  text,
                  fill=(255, 255, 255, 255),
                  font=_font)
    # Glow: add a blurred copy of the canvas onto itself.
    canvas_blur = canvas.filter(ImageFilter.GaussianBlur(radius=blur))
    canvas = ImageChops.add(canvas, canvas_blur)
    canvas = canvas.resize(img.size, Image.ANTIALIAS)
    # Scale the overlay's alpha band by the requested opacity.
    paste_mask = canvas.split()[-1].point(lambda i: i * alpha)
    img.paste(canvas, (0, 0), mask=paste_mask)
    return img
def trim(im):
    """Crop *im* to its content box (top-left pixel taken as background)."""
    reference = Image.new(im.mode, im.size, im.getpixel((0, 0)))
    delta = ImageChops.difference(im, reference)
    # Adds two images, dividing the result by scale and adding the offset:
    # doubles the difference, then subtracts 100 to drop faint noise.
    delta = ImageChops.add(delta, delta, 2.0, -100)
    # Bounding box of the non-zero regions in the image.
    box = delta.getbbox()
    if box:
        # crop takes a (left, upper, right, lower) rectangle.
        return im.crop(box)
def hr_addimage(self,A,B,C,problem):
    """Test whether figure C looks like the (inverted) sum of figures A and B.

    Compares histograms of C and invert(invert(A) + invert(B)) via RMS and
    returns 1 when they match within a fixed threshold, else 0.
    Python 2 only: relies on builtin reduce() and list-returning map().
    """
    # Invert so that drawn (dark) strokes become high values before adding.
    a = ImageChops.invert(Image.open(A.visualFilename))
    b = ImageChops.invert(Image.open(B.visualFilename))
    c = Image.open(C.visualFilename)
    c1 = ImageChops.invert(ImageChops.add(a,b))
    ch = c.histogram()
    c1h = c1.histogram()
    # RMS distance between the two 256-bin histograms.
    rms = np.sqrt(reduce(operator.add,map(lambda a,b: (a-b)**2, ch, c1h))/len(ch))
    # Empirically-chosen match threshold.
    if rms<=15.0:
        return 1
    else:
        return 0
def trim(im):
    """
    https://stackoverflow.com/questions/10615901/trim-whitespace-using-pil
    """
    # Background sampled at (2, 2) rather than the usual (0, 0) corner.
    bg = Image.new(im.mode, im.size, im.getpixel((2, 2)))
    diff = ImageChops.difference(im, bg)
    # NOTE(review): scale=0.0 looks wrong — ImageChops.add divides the sum by
    # scale, and the referenced snippet uses (2.0, -100) here. Confirm intent.
    diff = ImageChops.add(diff, diff, 0.0, 0)
    bbox = diff.getbbox()
    if bbox:
        return im.crop(bbox)
    else:
        return im
def get_thumbnail0(img):
    """Trim uniform borders, then shrink the result to fit 128x128."""
    flat = Image.new(img.mode, img.size, img.getpixel((0, 0)))
    delta = ImageChops.difference(img, flat)
    delta = ImageChops.add(delta, delta, 2.0, -100)
    box = delta.getbbox()
    if not box:
        # Nothing to trim: fall back to the plain thumbnail path.
        return get_thumbnail(img)
    trimmed = img.crop(box)
    trimmed.thumbnail((128, 128))
    return trimmed
def spot_light(pil_img):
    """Brighten a random rectangular region of the image ('spot light')."""
    w, h = pil_img.size
    overlay = np.zeros((h, w, 3))
    # Rectangle half-extent is the image size divided by a random 5..9.
    div_w = random.choice([5, 6, 7, 8, 9])
    div_h = random.choice([5, 6, 7, 8, 9])
    # Centre chosen so the rectangle stays inside the image.
    cx = random.choice(range(w // div_w, w - w // div_w))
    cy = random.choice(range(h // div_h, h - h // div_h))
    brightness = random.choice(range(128, 220))
    overlay[cy - h // div_h:cy + h // div_h,
            cx - w // div_w:cx + w // div_w] = brightness
    spot = PIL.Image.fromarray(overlay.astype(np.uint8))
    return ImageChops.add(pil_img, spot)
def trim_borders(im):
    """Trim uniform image borders; returns the input unchanged when blank.

    From: http://stackoverflow.com/questions/10615901/trim-whitespace-using-pil
    """
    reference = Image.new(im.mode, im.size, im.getpixel((0, 0)))
    delta = ImageChops.difference(im, reference)
    delta = ImageChops.add(delta, delta, 1.1, -100)
    box = delta.getbbox()
    return im.crop(box) if box else im
def auto_crop(self):
    """Crop the wrapped image to its content box (fluent: returns self)."""
    backdrop = PilImage.new(
        self._image.mode, self._image.size, self._image.getpixel((1, 1))
    )
    delta = ImageChops.difference(self._image, backdrop)
    delta = ImageChops.add(delta, delta, 2.0, -100)
    content_box = delta.getbbox()
    delta.close()
    self._image = self._image.crop(content_box)
    return self
def trims(im):
    """Crop *im* to the region that differs from the pixel at (1, 1)."""
    # Create a flat reference image filled with the (1, 1) pixel colour
    bg = Image.new(im.mode, im.size, im.getpixel((1, 1)))
    # Absolute per-pixel difference between the two images
    diff = ImageChops.difference(im, bg)
    # Add the two images, dividing the result by scale and adding the offset
    diff = ImageChops.add(diff, diff, 2.0, -100)
    # Bounding box of the non-background content
    bbox = diff.getbbox()
    if bbox:
        # Crop the image (returns a rectangular region)
        return im.crop(bbox)
def render(self, process_count, occlusion=False, path=False):
    """Ray-trace the scene in *process_count* parallel worker processes.

    Each worker renders a range of columns into 'tmp<i>.png' (and
    'otmp<i>.png' when *occlusion* is set); the strips are then summed back
    into single images. Returns (im, o_im, f_im); o_im/f_im are None unless
    *occlusion* was requested.
    """
    # Call ray trace for each pixel in image
    cam = self.scene.cam
    # Screen geometry derived from the camera's field of view.
    cam_to_screen = np.linalg.norm(cam.cam_to - cam.cam_from)
    screen_width = 2 * math.tan(cam.fov / 2) * cam_to_screen
    screen_height = screen_width * cam.height / cam.width
    increment = screen_width / cam.width
    top_left = cam.cam_to - ((screen_width / 2) * cam.u) - (
        (screen_height / 2) * cam.v)
    ranges = split_range(cam.width, process_count)
    processes = []
    for i, (start, end) in enumerate(ranges):
        im = Image.new('RGB', (cam.width, cam.height))
        o_im = Image.new('RGB', (cam.width, cam.height)) if occlusion else None
        process = Process(target=self.render_part,
                          args=(cam, top_left, increment, start, end, im,
                                o_im, path, 'tmp' + str(i) + '.png'))
        processes.append(process)
        process.start()
    for process in processes:
        process.join()
    # Recombine the strips: presumably each worker leaves pixels outside its
    # own column range black, so image addition stitches them — confirm in
    # render_part.
    im = Image.new('RGB', (cam.width, cam.height))
    for i in range(process_count):
        im_tmp = Image.open('tmp' + str(i) + '.png')
        im = ImageChops.add(im, im_tmp)
        os.remove('tmp' + str(i) + '.png')
    o_im, f_im = None, None
    if occlusion:
        # Same stitching for the ambient-occlusion strips.
        o_im = Image.new('RGB', (cam.width, cam.height))
        for i in range(process_count):
            o_im_tmp = Image.open('otmp' + str(i) + '.png')
            o_im = ImageChops.add(o_im, o_im_tmp)
            os.remove('otmp' + str(i) + '.png')
        # Final image: base render plus the occlusion term scaled by k_a.
        f_im = ImageChops.add(
            im, Image.fromarray(np.uint8(self.k_a * np.array(o_im))))
    return im, o_im, f_im
def test_sanity(self):
    """Smoke-test every ImageChops operation on a grayscale hopper image."""
    im = hopper("L")
    # Single-image operations.
    ImageChops.constant(im, 128)
    ImageChops.duplicate(im)
    ImageChops.invert(im)
    # Two-image combiners.
    ImageChops.lighter(im, im)
    ImageChops.darker(im, im)
    ImageChops.difference(im, im)
    ImageChops.multiply(im, im)
    ImageChops.screen(im, im)
    # add/subtract with default and explicit scale/offset arguments.
    ImageChops.add(im, im)
    ImageChops.add(im, im, 2.0)
    ImageChops.add(im, im, 2.0, 128)
    ImageChops.subtract(im, im)
    ImageChops.subtract(im, im, 2.0)
    ImageChops.subtract(im, im, 2.0, 128)
    ImageChops.add_modulo(im, im)
    ImageChops.subtract_modulo(im, im)
    ImageChops.blend(im, im, 0.5)
    ImageChops.composite(im, im, im)
    # Offsetting (single and per-axis).
    ImageChops.offset(im, 10)
    ImageChops.offset(im, 10, 20)
def __call__(self, img):
    """Randomly shift the hue channel (90% of calls) by up to 30 levels."""
    if random.random() >= 0.9:
        return img
    hsv = img.convert("HSV")
    # Flat array whose hue plane carries the random shift amount.
    shift_arr = np.zeros((self.H, self.W, 3))
    shift_arr[:, :, 0] += random.random() * 30
    shift_img = Image.fromarray(shift_arr.astype('uint8'))
    # Shift up or down with equal probability (saturating arithmetic).
    op = ImageChops.add if random.random() < 0.5 else ImageChops.subtract
    return op(hsv, shift_img).convert("RGB")
def widespace_remover(self, image):
    """Remove the whitespace before and after text.

    Returns the cropped image, or None when the input is invalid or blank.
    """
    img = self.checkImageOrObject(image)
    if not img:
        return None
    reference = Image.new(img.mode, img.size, img.getpixel((0, 0)))
    delta = ImageChops.difference(img, reference)
    delta = ImageChops.add(delta, delta, 2.0, -100)
    box = delta.getbbox()
    if box:
        return img.crop(box)
def trim(im):
    """Crop *im* to a square around its content, with a 16px minimum side.

    The square is anchored at the content box's top-left corner; its side is
    the larger of the box's width and height (at least 16).
    Returns *im* unchanged when the image is uniform.
    """
    bg = Image.new(im.mode, im.size, im.getpixel((0, 0)))
    diff = ImageChops.difference(im, bg)
    diff = ImageChops.add(diff, diff, 2.0, -100)
    bbox = diff.getbbox()
    # BUG FIX: getbbox() returns None for a uniform image, which used to
    # crash the subscripting below.
    if bbox is None:
        return im
    # Renamed: the original shadowed the builtin `len` (and misspelt `lenth`).
    box_width = bbox[2] - bbox[0]
    box_height = bbox[3] - bbox[1]
    side = max(box_width, box_height)
    if side < 16:
        side = 16
    return im.crop((bbox[0], bbox[1], bbox[0] + side, bbox[1] + side))
def trim(img):
    """Crop *img* to its content bounding box; unchanged when uniform."""
    frame = Image.new(img.mode, img.size, img.getpixel((0, 0)))
    delta = ImageChops.difference(img, frame)
    delta = ImageChops.add(delta, delta, 2.0, -100)
    box = delta.getbbox()
    return img.crop(box) if box else img
def trim(im):
    """Trim the uniform background tightly around the mesh.

    Cropping close to the corners lets xmax/xmin/ymax/ymin be inferred from
    the mesh geometry as the image boundaries. Deliberately a module-level
    function (not a method) so the multiprocessing package can use it and
    exploit multiple cores.
    """
    backdrop = Image.new(im.mode, im.size, im.getpixel((0, 0)))
    delta = ImageChops.difference(im, backdrop)
    delta = ImageChops.add(delta, delta, 2.0, -100)
    box = delta.getbbox()
    if box:
        return im.crop(box)
def trim(im):
    """Crop to non-white content plus a 20px margin, clamped to the image."""
    white = Image.new(im.mode, im.size, 'white')
    delta = ImageChops.difference(im, white)
    delta = ImageChops.add(delta, delta, 2.0, -55)
    box = delta.getbbox()
    if not box:
        return im
    left = max(0, box[0] - 20)
    top = max(0, box[1] - 20)
    right = min(im.size[0], box[2] + 20)
    bottom = min(im.size[1], box[3] + 20)
    return im.crop((left, top, right, bottom))
def resize_trim_PIL(input_PIL, width, height, border):
    """Scale to (width, height), pad with a white border, then trim to content."""
    # resize and pad
    padded = ImageOps.expand(scale(input_PIL, [width, height]), border=border, fill='white')
    # trim to the non-background bounding box
    flat = Image.new(padded.mode, padded.size, padded.getpixel((0, 0)))
    delta = ImageChops.difference(padded, flat)
    delta = ImageChops.add(delta, delta, 2.0, -100)
    return padded.crop(delta.getbbox())
def trim(im):
    """Open ./input/<im>, delete the source file, return the trimmed crop.

    On failure prints a diagnostic (in Dutch, as before) and returns None,
    leaving the source file in place.
    """
    try:
        img = Image.open("./input/" + im)
        bg = Image.new(img.mode, img.size, img.getpixel((0, 0)))
        diff = ImageChops.difference(img, bg)
        diff = ImageChops.add(diff, diff, 2.0, -100)
        bbox = diff.getbbox()
        os.remove("./input/" + im)
        return img.crop(bbox)
    except Exception:
        # BUG FIX: was a bare `except:`; narrowed so KeyboardInterrupt /
        # SystemExit still propagate. Best-effort behaviour is preserved.
        print("kan " + str(im) + " niet doen")
def trim(im):
    """Trim the whitespace border of an image.

    :param im: image
    :return: trimmed image, or None when the image is uniform
    """
    backdrop = Image.new(im.mode, im.size, im.getpixel((0, 0)))
    delta = ImageChops.difference(im, backdrop)
    delta = ImageChops.add(delta, delta, 2.0, -100)
    box = delta.getbbox()
    if box:
        return im.crop(box)
def agfa(img):
    """Apply an 'Agfa film' style effect: noise, vignette, contrast and a
    cyan-shifted colour balance (green/blue up, red down then lifted)."""
    img = multiply_image(img, "noise.png")
    img = apply_vignette(img, 0.75)
    img = contrast(img)
    # Force the lazy image data to load before splitting channels.
    img.load()
    r,g,b = img.split()
    g = brightness(g, 1.2)
    b = brightness(b, 1.2)
    r = brightness(r, 0.7)
    # Lift the darkened red channel by a constant 30% of full scale.
    r = ImageChops.add(r, ImageChops.constant(r, 0.3*255))
    img = Image.merge("RGB", (r,g,b))
    # Desaturate towards gray.
    img = color(img, 0.7)
    return img
def trim(path):
    """Open *path* and crop away any pure-white margins."""
    im = Image.open(path)
    white = Image.new(im.mode, im.size, (255, 255, 255))
    delta = ImageChops.difference(im, white)
    delta = ImageChops.add(delta, delta, 2.0, -100)
    # Bounding box given as a 4-tuple defining the left, upper, right, and
    # lower pixel coordinates.
    # If the image is completely empty, this method returns None.
    box = delta.getbbox()
    return im.crop(box) if box else im
def trim(im):
    """Trim black borders from an image.

    :param im:
    :type im: Image
    :return: the cropped image, or None when the image is uniform
    """
    backdrop = Image.new(im.mode, im.size, im.getpixel((0, 0)))
    delta = ImageChops.difference(im, backdrop)
    delta = ImageChops.add(delta, delta, 2.0, -100)
    box = delta.getbbox()
    if box:
        return im.crop(box)
def compositeChromeKey(img1, img2, bg=None):
    """Composite img1 over img2 using a bluescreen chroma-key mask.

    When *bg* is given, img2 is first chroma-keyed over *bg*. Wherever both
    inputs of a merge carry alpha bands, the bands are summed (saturating)
    into the output. NOTE(review): nesting reconstructed from a collapsed
    source line — confirm the bg branch's scope against the repo.
    """
    # merge img1 over img2 using a transparency mask generated by chroma key
    img2_ch = splitImageChannels('F', img2)
    if bg:
        # get chroma key of img2 to merge with bg
        bg_ch = splitImageChannels('F', bg)
        if img2_ch[3] and bg_ch[3]:
            alpha = ImageChops.add(img2_ch[3].convert('L'), bg_ch[3].convert('L'))
            img2_mask = chromeKeyBluescreen(img2_ch[0], img2_ch[1], img2_ch[2])
            img2 = Image.composite(img2, bg, img2_mask.convert(
                'L'))  # Image.composite doesn't work with an 'F' mode mask
            img2.putalpha(alpha)
    img1_ch = splitImageChannels('F', img1)
    img1_mask = chromeKeyBluescreen(img1_ch[0], img1_ch[1], img1_ch[2])
    out = Image.composite(img1, img2, img1_mask.convert('L'))
    if img2_ch[3] and img1_ch[3]:
        alpha = ImageChops.add(img1_ch[3].convert('L'), img2_ch[3].convert('L'))
        out.putalpha(alpha)
    return out