def zealous_crop(page_groups):
    # Zealous-crop all of the pages. Vertical margins can be cropped
    # per page, but be sure to crop all pages the same horizontally.
    for idx in (0, 1):
        # find horizontal extremes
        minx = None
        maxx = None
        width = None
        for grp in page_groups:
            for pdf in grp[idx].values():
                bbox = ImageOps.invert(pdf.convert("L")).getbbox()
                if bbox is None:
                    continue  # empty
                minx = min(bbox[0], minx) if minx is not None else bbox[0]
                maxx = max(bbox[2], maxx) if maxx is not None else bbox[2]
                width = max(width, pdf.size[0]) if width is not None else pdf.size[0]
        if width is not None:
            minx = max(0, minx - int(.02 * width))  # add back some margins
            maxx = min(width, maxx + int(.02 * width))
        # do crop
        for grp in page_groups:
            for pg in grp[idx]:
                im = grp[idx][pg]
                bbox = ImageOps.invert(im.convert("L")).getbbox()  # .invert() requires a grayscale image
                if bbox is None:
                    bbox = [0, 0, im.size[0], im.size[1]]  # empty page
                vpad = int(.02 * im.size[1])
                im = im.crop((0, max(0, bbox[1] - vpad), im.size[0], min(im.size[1], bbox[3] + vpad)))
                if os.environ.get("HORZCROP", "1") != "0":
                    im = im.crop((minx, 0, maxx, im.size[1]))
                grp[idx][pg] = im
def strip_processing(path, blur_radius=7, iter_steps=1, binarize_threshold=10):
    """Strip processing function.

    Inputs:
        path: path of the image
        blur_radius: radius of the Gaussian blur
        iter_steps: number of denoise iterations
        binarize_threshold: threshold passed to lut()
    Outputs:
        Image: the processed image.
    """
    gray = ImageOps.invert(ImageOps.grayscale(Image.open(path)))
    gray_blur = gray.filter(ImageFilter.GaussianBlur(blur_radius))
    high_freq = ImageMath.eval("25*(a-b)", a=gray, b=gray_blur).convert("L")
    binarized = ImageOps.invert(high_freq.point(lut(binarize_threshold))).convert("1")
    binarized_data = binarized.load()
    # binarized.show()
    height = binarized.height
    width = binarized.width
    pix = np.array([[np.uint8(binarized_data[j, i]) for j in range(width)] for i in range(height)])
    for i in range(iter_steps):
        pix = denoise(pix)
    answer = Image.fromarray(pix)
    # answer.show()
    # answer.save("test.bmp")
    return answer
def make_inverted(self, image):
    if image == 1:
        self.img1.image = ImageOps.invert(self.img1.image)
    elif image == 2:
        self.img2.image = ImageOps.invert(self.img2.image)
    elif image == 3:
        self.img3.image = ImageOps.invert(self.img3.image)
def invertimage(image):
    if image.mode == 'RGBA':
        r, g, b, a = image.split()
        rgb_image = Image.merge('RGB', (r, g, b))
        inverted_image = ImageOps.invert(rgb_image)
        r2, g2, b2 = inverted_image.split()
        final_image = Image.merge('RGBA', (r2, g2, b2, a))
    else:
        final_image = ImageOps.invert(image)
    return final_image
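# A minimal usage sketch for the RGBA branch above; historically ImageOps.invert
# rejected images with an alpha channel, which is why the helper recombines the
# inverted RGB planes with the original alpha. The image contents here are
# hypothetical, for illustration only.
from PIL import Image

rgba = Image.new('RGBA', (64, 64), (255, 0, 0, 128))  # red, half-transparent
inverted = invertimage(rgba)
assert inverted.mode == 'RGBA'
assert inverted.getpixel((0, 0)) == (0, 255, 255, 128)  # RGB inverted, alpha preserved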
def get_RGB_split_inverted(image):
    if not is_RGB(image):
        raise Exception("Not RGB mode")
    image_r, image_g, image_b = image.split()
    image_rn = ImageOps.invert(image_r)
    image_gn = ImageOps.invert(image_g)
    image_bn = ImageOps.invert(image_b)
    return image_rn, image_gn, image_bn
def threshold(img, thresh):
    """Threshold an image"""
    pilIMG1 = Image.fromarray(img)
    pilInverted1 = ImageOps.invert(pilIMG1)
    inverted = numpy.asarray(pilInverted1)
    # The old cv.CV_THRESH_TOZERO constant corresponds to cv2.THRESH_TOZERO
    r, t = cv2.threshold(inverted, thresh, 0, cv2.THRESH_TOZERO)
    pilIMG2 = Image.fromarray(t)
    pilInverted2 = ImageOps.invert(pilIMG2)
    thresholded = numpy.asarray(pilInverted2)
    return thresholded
def overlay_im_orig(images):
    if not images:
        return None
    min_im = None
    max_im = None
    for im in images:
        min_im = im if min_im is None else ImageChops.darker(im, min_im)
        max_im = im if max_im is None else ImageChops.lighter(im, max_im)
    overlay = Image.new("RGBA", images[0].size, (255, 255, 255, 255))
    overlay.paste(Image.new("RGB", images[0].size, COLOR1), None, ImageOps.invert(min_im))
    overlay.paste(Image.new("RGB", images[0].size, COLOR2), None, ImageOps.invert(max_im))
    return overlay
def make_all_night_image(day):
    # Normalize to midnight
    day_start = datetime(day.year, day.month, day.day, tzinfo=est)
    # Find that day's sunset
    obs = get_observer()
    sunset_time = sunset(obs, day_start)
    # And next day's sunrise
    sunrise_time = sunrise(obs, sunset_time)
    # One hour after and before to remove all light
    start_time = normalize_time(sunset_time + timedelta(hours=1))
    end_time = normalize_time(sunrise_time - timedelta(hours=1))
    normals = Normal.objects.filter(timestamp__gte=start_time,
                                    timestamp__lte=end_time,
                                    info__isnull=False)
    img_light = None
    for normal in normals:
        source = Image.open(normal.info.filepath)
        # Make light image
        if img_light is None:
            img_light = source
        else:
            img_light = ImageChops.lighter(img_light, source)
    # Put a date on the image
    daystr = day.strftime('%Y-%m-%d')
    canvas = ImageDraw.Draw(img_light)
    canvas.text((20, 1500), daystr)
    # And save
    filename = daystr + '.png'
    filename_neg = daystr + '_neg.png'
    imagepath = os.path.join(TIMELAPSE_DIR, APP_DIR, 'allnight')
    # Make directory if it doesn't exist
    if not os.path.exists(imagepath):
        os.makedirs(imagepath)
    img_filepath = os.path.join(imagepath, filename)
    img_light.save(img_filepath)
    image_record = get_image_product(day_start, start_time, end_time,
                                     Product.ALLNIGHT, imagepath, filename)
    record_size(img_filepath, image_record)
    img_neg_filepath = os.path.join(imagepath, filename_neg)
    ImageOps.invert(img_light).save(img_neg_filepath)
    image_record_neg = get_image_product(day_start, start_time, end_time,
                                         Product.ALLNIGHT_NEG, imagepath, filename_neg)
    record_size(img_neg_filepath, image_record_neg)
def find_image_centroid(target_image, colour_threshold=166, use_mask=False,
                        use_mask_gradient=False, verbose=False):
    if verbose:
        print('Converting image to greyscale, and detecting edges...')
    edge_detected_image = ImageOps.invert(target_image.copy().filter(ImageFilter.FIND_EDGES))
    # If we're using a mask (to try to direct detection to the edges of the image)
    if use_mask:
        # Gradient mask affects detection less, but is more "fair".
        if use_mask_gradient:
            mask = Image.new('L', (1, 511))
            for y in range(511):
                mask.putpixel((0, 510 - y), 254 - int(fabs(254 - y)))
            mask = mask.resize(target_image.size, Image.NEAREST)
        # Our normal mask is just a rectangle taking up twice the difference
        # between the two image sizes
        else:
            mask = Image.new('L', target_image.size)
            drawmask = ImageDraw.Draw(mask)
            drawmask.rectangle(
                [
                    (fabs(target_image.size[0] - target_width) * 2,
                     fabs(target_image.size[1] - target_height) * 2),
                    (edge_detected_image.size[0] - fabs(target_image.size[0] - target_width) * 2,
                     edge_detected_image.size[1] - fabs(target_image.size[1] - target_height) * 2)
                ],
                fill=(255)
            )
            del drawmask
        # Overlaying the mask
        draw = ImageDraw.Draw(edge_detected_image)
        draw.bitmap([0, 0], mask)
        del draw
    # Convert to greyscale, and apply a hard threshold to weed out less stark contrast
    edge_detected_image = ImageOps.invert(edge_detected_image).convert("L").point(
        lambda i: i > colour_threshold and 255 or 0)
    # Centroid logic contributed by Ben Stewart
    if verbose:
        print('Finding centroid...')
    # Sum of colours in image
    image_sum = sum([sum(row) for row in asarray(edge_detected_image)])
    weighted_rows = sum([y * sum(row) for y, row in enumerate(asarray(edge_detected_image))])
    centroid_y = weighted_rows / image_sum
    weighted_cols = sum([sum([x * val for x, val in enumerate(row)])
                         for row in asarray(edge_detected_image)])
    centroid_x = weighted_cols / image_sum
    return (centroid_x, centroid_y)
def stitchedReverse(self, inputImageName, lookupImageNames):
    matches = []
    a = Image.open(self.getImagePathByName('A')).convert(mode='L', dither=Image.NONE)
    originalWidth, originalHeight = a.size
    halfWidth = originalWidth // 2
    left = 0
    top = 0
    right = halfWidth
    bottom = originalHeight
    leftHalf = a.crop((left, top, right, bottom))
    left = right
    right = originalWidth
    rightHalf = a.crop((left, top, right, bottom))
    stitched = Image.new('RGB', (originalWidth, originalHeight))
    stitched.paste(im=rightHalf, box=(0, 0))
    stitched.paste(im=leftHalf, box=(halfWidth, 0))
    stitched = stitched.convert(mode='L', dither=Image.NONE)
    for problemImageName in lookupImageNames:
        x = Image.open(self.getImagePathByName(problemImageName)).convert(mode='L', dither=Image.NONE)
        stitched1 = ImageOps.invert(stitched)
        try:
            p1, q1, r1, s1 = stitched1.getbbox()
            # note: mixes x and y coordinates; width * height would be abs(p1 - r1) * abs(q1 - s1)
            a1 = abs(p1 - r1) * abs(r1 - s1)
            x1 = ImageOps.invert(x)
            try:
                p2, q2, r2, s2 = x1.getbbox()
                a2 = abs(p2 - r2) * abs(r2 - s2)
                p = q = r = s = None
                if a1 <= a2:
                    p, q, r, s = p1, q1, r1, s1
                else:
                    p, q, r, s = p2, q2, r2, s2
                stitched1 = stitched.crop((p, q, r, s))
                x1 = x.crop((p, q, r, s))
                diff = self.getImageDifference(stitched1, x1)
                log.info('{} diff: {}'.format(problemImageName, diff))
                matches.append([problemImageName, diff])
            except TypeError:
                # empty image
                pass
        except TypeError:
            # empty image
            pass
    matches.sort(key=lambda m: m[1])
    return matches
def main():
    im = Image.open('3.png')
    new_im = normalize_image(im)
    new_im.save('9_new.png')
    for path in glob.glob('Photo*.jpg'):
        user_im = Image.open(path)
        user_im = ImageOps.invert(user_im)
        user_im = user_im.crop(user_im.getbbox())
        user_im = ImageOps.invert(user_im)
        user_im = normalize_image(user_im)
        if correct_pixels_ratio(new_im, user_im) > 0.1:
            user_im.save('new_' + path)
def add_distortion(img, rotation=30, jitter=10, resize=0.05):
    dx = random.randint(-jitter, +jitter)
    dy = random.randint(-jitter, +jitter)
    dr = random.uniform(-rotation, +rotation)
    ds = random.uniform(1.0 - resize, 1.0 + resize)
    width, height = img.width, img.height
    img = ImageOps.invert(img)
    img = img.rotate(dr)
    img = ImageChops.offset(img, dx, dy)
    img = ImageOps.invert(img)
    return img
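# The invert-before/invert-after pattern above makes the black fill that
# Image.rotate introduces at the corners come out white after the final
# inversion, which suits dark-text-on-white inputs. A hedged sketch of the
# same trick in isolation, on a hypothetical test image:
from PIL import Image, ImageOps

img = Image.new('L', (100, 100), 255)
img.paste(0, (40, 10, 60, 90))  # black bar on a white canvas

naive = img.rotate(20)  # corners are filled with black
safe = ImageOps.invert(ImageOps.invert(img).rotate(20))  # corners come out white

assert naive.getpixel((0, 0)) == 0    # black corner artifact
assert safe.getpixel((0, 0)) == 255   # clean white corner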
def test_sanity():
    ImageOps.autocontrast(lena("L"))
    ImageOps.autocontrast(lena("RGB"))
    ImageOps.autocontrast(lena("L"), cutoff=10)
    ImageOps.autocontrast(lena("L"), ignore=[0, 255])
    ImageOps.colorize(lena("L"), (0, 0, 0), (255, 255, 255))
    ImageOps.colorize(lena("L"), "black", "white")
    ImageOps.crop(lena("L"), 1)
    ImageOps.crop(lena("RGB"), 1)
    ImageOps.deform(lena("L"), deformer)
    ImageOps.deform(lena("RGB"), deformer)
    ImageOps.equalize(lena("L"))
    ImageOps.equalize(lena("RGB"))
    ImageOps.expand(lena("L"), 1)
    ImageOps.expand(lena("RGB"), 1)
    ImageOps.expand(lena("L"), 2, "blue")
    ImageOps.expand(lena("RGB"), 2, "blue")
    ImageOps.fit(lena("L"), (128, 128))
    ImageOps.fit(lena("RGB"), (128, 128))
    ImageOps.fit(lena("RGB").resize((1, 1)), (35, 35))
    ImageOps.flip(lena("L"))
    ImageOps.flip(lena("RGB"))
    ImageOps.grayscale(lena("L"))
    ImageOps.grayscale(lena("RGB"))
    ImageOps.invert(lena("L"))
    ImageOps.invert(lena("RGB"))
    ImageOps.mirror(lena("L"))
    ImageOps.mirror(lena("RGB"))
    ImageOps.posterize(lena("L"), 4)
    ImageOps.posterize(lena("RGB"), 4)
    ImageOps.solarize(lena("L"))
    ImageOps.solarize(lena("RGB"))
    success()
def doImage(path, rot):
    try:
        from PIL import Image, ImageOps
    except:
        err("please install Python Image Library")
        return
    try:
        img = Image.open(path)
    except:
        err("file " + path + " not found!")
        return
    # convert to greyscale, invert
    img = ImageOps.invert(img.convert('L'))
    if rot:
        img = img.rotate(90)
    # figure out the desired W and H
    if img.size[0] <= 384:
        # if the image is less than the max,
        # round the width up to a multiple of 8
        img = img.crop((0, 0, int(ceil(img.size[0] / 8.) * 8), img.size[1]))
    else:
        # if the image is larger than the max,
        # scale it down to the max
        img = img.resize((384, img.size[1] * 384 / img.size[0]))
    # if verbose, show the image.
    if myIO.verbose:
        ImageOps.invert(img).convert('1').show()
    # stringify
    imgStr = img.convert('1').tostring()
    info(path + ' ' + str(img.size))
    # (GS v)
    devSend('\x1D\x76\0\0' + twoBytes(img.size[0] / 8) + twoBytes(img.size[1]) + imgStr)
    # I think we should wait for the data to send/print
    time.sleep(6)
def startRecognition(infile, rotation=False, z=0):
    global newImage
    prefix = infile[:8]
    im = Image.open(infile)
    if rotation:
        im = im.rotate(rotation)
    width, height = im.size
    resized = im.resize((width / 25, height / 30), Image.BICUBIC)
    resized = ImageOps.invert(resized)
    imgData = barImage(resized)
    foundBarcode, newImage, newBar = False, False, False
    for probable_barcode in imgData.findBarcode():
        z += 1
        # Try the first 20 barcodes, and see if one of them is legit.
        if foundBarcode:
            continue
        try:
            x1, y1 = (probable_barcode[1] - 3) * 25, (probable_barcode[2]) * 30
            x2, y2 = x1 + 635, y1 + 265
            if x2 > im.size[0] or y2 > im.size[1]:
                x2, y2 = im.size[0], im.size[1]
                x1, y1 = im.size[0] - 800, im.size[1] - 265
            newImage = im.crop((x1, y1, x2, y2))
            newBar = barImage(newImage)
            foundBarcode = straightenBarcode(newBar, "Hard", prefix=prefix)
            if DEBUG and not foundBarcode:
                smoo = im.crop((x1, y1, x2, y2))
                smoo.save("%s.fail.%03d.barcode.png" % (prefix, z))
                print "Z: ", z
        except:
            foundBarcode = False
            raise
    if foundBarcode:
        log.info("Found barcode for %s." % prefix)
        newImage.putdata(newBar.im.reshape(newBar.stride * newBar.height))
        newImage = ImageOps.invert(newImage)
        newImage.save("%s.barcode.pre.png" % prefix)
        try:
            (x1, y1), (x2, y2) = newBar.findBarcodeLimits("Hard")
            doPostBarcodeAnalysis(newImage.crop((x1 - 40, y1 + 1, x1 + 520, y1 + 90)), prefix)
        except:
            pass
    elif not rotation:
        startRecognition(infile, rotation=90, z=z)
    else:
        log.info("No barcode found for %s.", prefix)
def main():
    if not os.path.exists('emoji'):
        os.mkdir('emoji')
    for font, glyphs in fonts.items():
        for name, glyph in glyphs.items():
            try:
                colours = glyph.colours
                font_size = glyph.size
                glyph = glyph.letter
            except AttributeError:
                colours = [Icon.default_colour]
                font_size = 128
            imfont = ImageFont.truetype(font, font_size)
            for colour_name, colour in colours:
                im = Image.new("RGBA", (300, 300), (255, 255, 255, 0))
                draw = ImageDraw.Draw(im)
                draw.text((50, 50), glyph, font=imfont, fill=colour)
                # remove unnecessary whitespace if needed
                im = im.crop(ImageOps.invert(im.convert('RGB')).getbbox())
                # im = ImageOps.invert(im)
                im.thumbnail(size, Image.ANTIALIAS)
                background = Image.new('RGBA', size, (255, 255, 255, 0))
                background.paste(
                    im, ((size[0] - im.size[0]) // 2, (size[1] - im.size[1]) // 2))
                # write into file
                background.save(f"emoji/{colour_name}{name}.png")
def update(self, speed):
    result = None
    if self.updateFlag:
        self.im = None
        self.updateFlag = False
        self.loadGif(self.gifList[self.gifIndex])
    if self.im is not None:
        if self.newFlag:
            self.newFlag = False
            result = self.frame
        else:
            currentTime = int(round(time.time() * 1000))
            elapsed = currentTime - self.lastTime
            if elapsed >= self.delay and self.delay > 0:
                self.lastTime = currentTime
                self.nextFrame()
                result = self.frame
    if result is not None:
        result = result.convert("RGB")
        if self.invertFlag:
            result = ImageOps.invert(result)
        if self.mirrorFlag:
            result = result.transpose(Image.FLIP_LEFT_RIGHT)
        result = result.getdata()
    return result
def segment(image_filepath, **kwargs):
    input_img = Image.open(image_filepath)
    # make a '1' bit image with the same size as the input_img
    mask_img = Image.new('1', input_img.size)
    # instantiate an ImageDraw object using the mask_img object
    mask_drawer = ImageDraw.Draw(mask_img)
    try:
        json_poly_data = json.loads(kwargs['JSON'])
    except ValueError:
        # There's a problem in the JSON - it may be malformed, or empty
        json_poly_data = []
    for polygon in json_poly_data:
        flattened_poly = [j for i in polygon for j in i]
        mask_drawer.polygon(flattened_poly, outline=1, fill=1)
    output_img = ImageMath.eval('b - a', a=input_img, b=mask_img)
    output_img = ImageOps.invert(output_img.convert('RGB'))
    encoded = json.dumps(json_poly_data)
    return {
        'tiff': output_img,
        'json': encoded
    }
def _extract_percents(self, image, y_ticks, left_line_x) -> list:
    percents = []
    for tick in y_ticks:
        # todo: 25 here is a hardcoded and bug-prone estimation.
        # In fact we need to look for white stripes from left to right.
        # The first such stripe will be the border of the image and the second
        # the border of the percents; its right-x coordinate is what we need here.
        box = (25, tick - 5, left_line_x - 3, tick + 5)
        region = image.crop(box)
        text = region.crop(ImageOps.invert(region.convert('RGB')).getbbox())
        # debug
        # region.save(str(tick) + '.bmp')
        word = ''
        leftmost_unread_pixel = 0
        while leftmost_unread_pixel < text.size[0]:
            x_left = leftmost_unread_pixel
            x_right = leftmost_unread_pixel + self.tesser.max_width
            if x_right > text.size[0]:
                x_right = text.size[0]
            glyph = Image.new('RGBA', (x_right - x_left, 8), (255, 255, 255))
            glyph.paste(text.crop((x_left, 0, x_right, 8)))
            letter, width = self.tesser.tess(glyph)
            word += letter
            leftmost_unread_pixel += width
        percents.append((float(word), tick))
    return sorted(percents)
def createDigitsModel(fontfile, digitheight):
    # Note: cv2.KNearest and bare integer division date this snippet to
    # OpenCV 2.x / Python 2.
    ttfont = ImageFont.truetype(fontfile, digitheight)
    samples = np.empty((0, digitheight * (digitheight / 2)))
    responses = []
    for n in range(10):
        pil_im = Image.new("RGB", (digitheight, digitheight * 2))
        ImageDraw.Draw(pil_im).text((0, 0), str(n), font=ttfont)
        pil_im = pil_im.crop(pil_im.getbbox())
        pil_im = ImageOps.invert(pil_im)
        # pil_im.save(str(n) + ".png")
        # convert to cv image
        cv_image = cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGBA2BGRA)
        gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
        blur = cv2.GaussianBlur(gray, (5, 5), 0)
        thresh = cv2.adaptiveThreshold(blur, 255, 1, 1, 11, 2)
        roi = cv2.resize(thresh, (digitheight, digitheight / 2))
        responses.append(n)
        sample = roi.reshape((1, digitheight * (digitheight / 2)))
        samples = np.append(samples, sample, 0)
    samples = np.array(samples, np.float32)
    responses = np.array(responses, np.float32)
    model = cv2.KNearest()
    model.train(samples, responses)
    return model
def __init__(self, img_source):
    """
    Load in an image

    :param img_source: PIL.Image, or filename to load one from.
    """
    if isinstance(img_source, Image.Image):
        img_original = img_source
    else:
        img_original = Image.open(img_source)
    # store image for eventual further processing (splitting)
    self.img_original = img_original
    # Convert to white RGB background, paste over white background
    # to strip alpha.
    img_original = img_original.convert('RGBA')
    im = Image.new("RGB", img_original.size, (255, 255, 255))
    im.paste(img_original, mask=img_original.split()[3])
    # Convert down to greyscale
    im = im.convert("L")
    # Invert: Only works on 'L' images
    im = ImageOps.invert(im)
    # Pure black and white
    self._im = im.convert("1")
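# A hedged sketch of the alpha-flattening step used above, in isolation:
# pasting with the alpha band as the mask composites the image onto white
# before the 'L'-only invert. The pixel values are illustrative.
from PIL import Image, ImageOps

rgba = Image.new('RGBA', (10, 10), (0, 0, 0, 0))  # fully transparent black
flat = Image.new('RGB', rgba.size, (255, 255, 255))
flat.paste(rgba, mask=rgba.split()[3])            # transparent areas stay white
inverted = ImageOps.invert(flat.convert('L'))
assert inverted.getpixel((0, 0)) == 0             # white background inverts to black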
def apply_invert(pixbuf):
    '''negative of an image (darkest --> lightest | lightest --> darkest)'''
    width, height = pixbuf.get_width(), pixbuf.get_height()
    y = ImageOps.invert(Image.frombytes(K.ImageConstants.RGB_SHORT_NAME,
                                        (width, height),
                                        pixbuf.get_pixels()))
    return I.fromImageToPixbuf(y)
def invert_and_resize(self, newsize=600):
    '''
    Given a position and uncertainty, create a finding chart from the DSS
    (Move this to another module)

    Requires PIL
    http://pythonmac.org/packages/py25-fat/index.html
    http://www.p16blog.com/p16/2008/05/appengine-installing-pil-on-os-x-1053.html
    '''
    # SDSS - later http://casjobs.sdss.org/ImgCutoutDR5/getjpeg.aspx?ra=264.191&dec=-25.212&scale=0.8&width=750&height=750
    self.img_size = newsize  # in pixels
    try:
        im1 = Image.open(self.image_path)
    except:
        errstring = 'Could not open %s; image may be malformed.' % (self.image_path)
        raise IOError(errstring)
    # Invert Colors
    im1 = ImageOps.invert(im1)
    # Convert to color mode
    im1 = im1.convert('RGB')
    # Resize with a cubic spline - it looks nice.
    # Other options are NEAREST, BILINEAR, ANTIALIAS
    im1 = im1.resize((newsize, newsize), Image.BICUBIC)
    self.save_str = storepath + 'FCBase.png'
    im1.save(self.save_str)
def process_rasters_in_dir(self, rootdir):
    '''
    descends through a TMS tile structure and converts the images to a
    matrix of dimensions: num_images * width * height, dtype=numpy.uint8
    '''
    height = self.thumb_size
    width = self.thumb_size
    num_images = self.count_rasters_in_dir(rootdir)
    images = numpy.zeros(num_images * width * height, dtype=numpy.uint8)
    images = images.reshape(num_images, height, width)
    index = 0
    for folder, subs, files in os.walk(rootdir):
        for filename in files:
            if not filename.endswith('.jpg'):
                continue
            tile = self.tile_for_folder_and_filename(folder, filename, rootdir)
            image_filename = os.path.join(folder, filename)
            with open(image_filename, 'rb') as img_file:
                with Image.open(img_file) as open_pil_img:
                    pil_image = open_pil_img.convert("L")
                    pil_image = ImageOps.invert(pil_image)
            image_matrix = numpy.asarray(pil_image, dtype=numpy.uint8)
            images[index] = image_matrix
            index += 1
    print("Packing {} images to a matrix of size num_images * width * height, dtype=numpy.uint8".format(index))
    # Reshape to add a depth dimension
    return images.reshape(num_images, width, height, 1)
def crop(image):
    # Invert the image so the bounding box calculation works
    inverted = ImageOps.invert(image.convert('L'))
    # Find a bounding box for the non-white parts of the image
    box = inverted.getbbox()
    # Return the cropped image and bounding box
    return image.crop(box), box
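# A quick hedged sketch of how this crop helper behaves: getbbox() returns
# the extent of the non-zero pixels, so inverting first makes white margins
# count as background. The image content here is illustrative.
from PIL import Image

img = Image.new('RGB', (100, 100), 'white')
img.paste((0, 0, 0), (30, 30, 70, 70))  # black square on a white canvas

cropped, box = crop(img)
print(box)           # (30, 30, 70, 70)
print(cropped.size)  # (40, 40)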
def convertImage(self, inputPath, outputPath):
    r = inputPath
    path = outputPath
    for root, dirs, files in os.walk(r):
        for f in files:
            if f.endswith('.gif'):
                if not os.path.exists(path):
                    # Create the output folder
                    os.makedirs(path)
                # These two lines can cause problems, which is why it is
                # important that the images to convert live in subfolders
                newroot = root.split(r)[1]
                newroot = newroot.split('/')[1]  # Subfolder
                f2 = f.split('.')[0]
                orgImg = Image.open(root + "/" + f)
                orgImg = orgImg.convert('L')
                orgSize = orgImg.size
                # only square images are allowed
                assert orgSize[0] == orgSize[1]
                assert self.newSize[0] == self.newSize[1]
                borderSize = int((self.newSize[0] - orgSize[0]) / 2)
                # add a white border
                newImg = ImageOps.expand(orgImg, borderSize, 0)
                # save the non-inverted version
                newImg.save(path + newroot + f2 + ".pgm")
                # add the inverted version
                newImg = ImageOps.invert(newImg)
                newImg.save(path + newroot + "n" + f2 + ".pgm")
def __call__(self, img_group):
    if self.scale_worker is not None:
        img_group = self.scale_worker(img_group)
    image_w, image_h = img_group[0].size
    crop_w, crop_h = self.crop_size
    offsets = GroupMultiScaleCrop.fill_fix_offset(False, image_w, image_h, crop_w, crop_h)
    oversample_group = list()
    for o_w, o_h in offsets:
        normal_group = list()
        flip_group = list()
        for i, img in enumerate(img_group):
            crop = img.crop((o_w, o_h, o_w + crop_w, o_h + crop_h))
            normal_group.append(crop)
            flip_crop = crop.copy().transpose(Image.FLIP_LEFT_RIGHT)
            if img.mode == 'L' and i % 2 == 0:
                flip_group.append(ImageOps.invert(flip_crop))
            else:
                flip_group.append(flip_crop)
        oversample_group.extend(normal_group)
        oversample_group.extend(flip_group)
    return oversample_group
def filter_contrastToAlpha(image, baseDir):
    alpha = Image.new('L', image.size, 255)
    alpha.paste(image, mask=get_alpha(image))
    alpha = ImageOps.invert(alpha)
    alpha = ImageOps.autocontrast(alpha)
    return Image.merge('LA', [Image.new('L', image.size), alpha])
def tileify(image_data, num_blocks, max_shift):
    """
    Breaks the image up into rectangles and shifts them a random distance.
    Areas not covered by the moved rectangles are the negative of the
    original image.
    """
    width = image_data.size[0]
    height = image_data.size[1]
    row_block_size = round(height / num_blocks)
    col_block_size = round(width / num_blocks)
    negative_image = ImageOps.invert(image_data)
    pixels = array(image_data)
    for row in xrange(num_blocks):
        inner_row_val = int(row_block_size * row)
        outer_row_val = int(row_block_size * row + row_block_size)
        for col in xrange(num_blocks):
            inner_col_val = int(col_block_size * col)
            outer_col_val = int(col_block_size * col + col_block_size)
            block = pixels[
                inner_row_val:outer_row_val,
                inner_col_val:outer_col_val
            ]
            location = calculate_random_location(
                inner_row_val, inner_col_val, max_shift
            )
            negative_image.paste(Image.fromarray(block), location)
    return negative_image
def invert(img):
    img = Image.fromarray(img)
    inverted_img = ImageOps.invert(img)
    inverted_img_array = np.asarray(inverted_img)
    return inverted_img_array
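# For 8-bit arrays, the PIL round-trip above is equivalent to plain numpy
# arithmetic; a hedged sketch of the equivalence with illustrative data:
import numpy as np
from PIL import Image, ImageOps

arr = np.array([[0, 100], [200, 255]], dtype=np.uint8)  # hypothetical grayscale data
via_pil = np.asarray(ImageOps.invert(Image.fromarray(arr)))
via_np = 255 - arr
assert (via_pil == via_np).all()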
def invert(img, magnitude):
    # `magnitude` is unused; presumably kept for a uniform augmentation signature
    img = ImageOps.invert(img)
    return img
def invert(img, **__):
    return ImageOps.invert(img)
import keras
import numpy as np
from PIL import Image, ImageOps
from keras.datasets import mnist
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
from keras.preprocessing import image

print('Loading model...')
model = keras.models.load_model('models')
img_width, img_height = 28, 28
source = Image.open('./images/0_2.png').convert('L').resize((img_width, img_height), 0)
source = ImageOps.invert(source)
# pil_image = Image.fromarray(color_coverted)
# pil_image = pil_image.convert('L')
# pil_image = pil_image.resize((28, 28))
# test_image = image.load_img('./images/5.png', color_mode='grayscale', target_size=(img_width, img_height))
test_image = image.img_to_array(source)
test_image = test_image.astype('float32')
test_image = test_image.reshape(28, 28)
test_image = 255 - test_image  # note: this undoes the ImageOps.invert above
test_image /= 255
test_image = test_image.reshape(1, img_width * img_height)
print(test_image)
# test_image = image.img_to_array(source)
def crop_by_coords(original, coords, inv=False):
    img = original.crop(coords)
    if inv:
        img = ImageOps.invert(img)
    return img.resize((3 * length(coords), 3 * height(coords)))
    os.remove(os.path.join(angle_folder, file))

w = h = args.size
b = h / 2  # Height of the triangle
A = (w / 2, b)  # Tip of the triangle
for angle in np.linspace(15, 160, 30):
    angle_rad = np.deg2rad(angle / 2)
    name = os.path.join(angle_folder, f'angle{angle:03.0f}-{{}}.{args.filetype}')
    B = (w / 2 + b * np.tan(angle_rad), 0)
    C = (w / 2 - b * np.tan(angle_rad), 0)
    im = Image.new('L', (w, h), color=255)
    draw = ImageDraw.Draw(im)
    draw.polygon([C, B, A], fill='black')
    im.save(name.format('0'))
    if args.invert:
        ImageOps.invert(im).save(name.format('0i'))
    # Checked in a separate chain so the combined flags remain reachable;
    # as an `elif` after `args.invert` this branch could never fire.
    if args.rotate and args.invert:
        im = im.rotate(90)
        im.save(name.format('1'))
        ImageOps.invert(im).save(name.format('1i'))
        im = im.rotate(90)
        im.save(name.format('2'))
        ImageOps.invert(im).save(name.format('2i'))
        im = im.rotate(90)
        im.save(name.format('3'))
        ImageOps.invert(im).save(name.format('3i'))
    elif args.rotate:
        im = im.rotate(90)
        im.save(name.format('1'))
        im = im.rotate(90)
        im.save(name.format('2'))
def invert(image: Image.Image, mode: str = "pillow") -> Image.Image:
    if 'pil' in mode.lower():
        return ImageOps.invert(image.convert('L')).convert('1')
    else:
        return Image.fromarray(obj=np.invert(np.array(image)), mode='1')
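# A side note on the numpy branch above: np.invert is a bitwise NOT, which
# equals 255 - x for uint8 data and a logical NOT for booleans (the dtype a
# '1'-mode image converts to). A small hedged sketch of that equivalence:
import numpy as np

u8 = np.array([0, 100, 255], dtype=np.uint8)
assert (np.invert(u8) == 255 - u8).all()   # bitwise NOT on uint8

bits = np.array([True, False])
assert (np.invert(bits) == ~bits).all()    # logical NOT on booleans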
def main():
    args = parse_args()
    with open(args.font, 'r', encoding='utf-8') as f:
        font = json.load(f)
    if args.text == '-':
        text = sys.stdin.read().rstrip('\n')
    else:
        text = args.text
    if not text:
        sys.exit('Text is empty.')
    glyph_lookup = {}
    for glyph in font['glyphs']:
        glyph_lookup[glyph['codePoint']] = glyph
    kerning_lookup = {}
    for kerning_pair in font['kerningPairs']:
        cp1 = kerning_pair['codePoint1']
        cp2 = kerning_pair['codePoint2']
        kerning_lookup[(cp1, cp2)] = kerning_pair['amount']
    page_lookup = load_pages_for_text(
        text, font, os.path.dirname(args.font), glyph_lookup)
    min_canvas_size = get_image_size_for_text(
        text, font, glyph_lookup, kerning_lookup)
    padding_px = round(
        font['bakingOptions']['fontPxSize'] * args.padding)
    canvas_size = (
        min_canvas_size[0] + padding_px * 2,
        min_canvas_size[1] + padding_px * 2
    )
    grayscale_mode = all(p.mode == 'L' for p in page_lookup.values())
    if grayscale_mode:
        canvas_mode = 'L'
        fill_color = 0
    else:
        canvas_mode = 'RGBA'
        fill_color = (255, 255, 255, 0)
    canvas = Image.new(canvas_mode, canvas_size, fill_color)
    render_text(
        text, canvas, (padding_px, padding_px), font,
        glyph_lookup, page_lookup, kerning_lookup)
    if grayscale_mode:
        if args.transparent:
            new_canvas = Image.new('LA', canvas.size, (0, 255))
            new_canvas.putalpha(canvas)
            canvas = new_canvas
        else:
            canvas = ImageOps.invert(canvas)
    elif not args.transparent:
        bg = Image.new('RGB', canvas.size, (255, 255, 255))
        bg.paste(canvas, mask=canvas.split()[-1])
        canvas = bg
    out_image_path = args.out_image
    if os.path.splitext(out_image_path)[1].lower() != '.png':
        out_image_path += '.png'
    out_image_dir = os.path.dirname(out_image_path)
    if out_image_dir:
        os.makedirs(out_image_dir, exist_ok=True)
    canvas.save(out_image_path, optimize=True)
def __init__(self, mapfile, camera=None, light=None,
             width=100.0, depth=100.0, height=10.0,
             divx=0, divy=0, ntiles=1.0, name="",
             x=0.0, y=0.0, z=0.0, rx=0.0, ry=0.0, rz=0.0,
             sx=1.0, sy=1.0, sz=1.0, cx=0.0, cy=0.0, cz=0.0,
             smooth=True, cubic=False):
    """uses standard constructor for Shape

    Arguments:
      *mapfile*
        Greyscale image path/file, string.

    Keyword arguments:
      *width, depth, height*
        Of the map in world units.
      *divx, divy*
        Number of divisions into which the map will be divided
        to create vertices.
      *ntiles*
        Number of repeats for tiling the texture image.
      *smooth*
        Calculate normals with averaging rather than pointing
        straight up; slightly faster if false.
    """
    super(ElevationMap, self).__init__(camera, light, name, x, y, z,
                                       rx, ry, rz, sx, sy, sz, cx, cy, cz)
    if VERBOSE:
        print("Loading height map ...", mapfile)
    if divx > 200 or divy > 200:
        print("... Map size can't be bigger than 200x200 divisions")
        divx = 200
        divy = 200
    im = Image.open(mapfile)
    im = ImageOps.invert(im)
    ix, iy = im.size
    if (ix > 200 and divx == 0) or (divx > 0):
        if divx == 0:
            divx = 200
            divy = 200
        im = im.resize((divx, divy), Image.ANTIALIAS)
        ix, iy = im.size
    if not im.mode == "P":
        im = im.convert('P', palette=Image.ADAPTIVE)
    im = im.transpose(Image.FLIP_TOP_BOTTOM)
    im = im.transpose(Image.FLIP_LEFT_RIGHT)
    self.pixels = im.load()
    self.width = width
    self.depth = depth
    self.height = height
    self.ix = ix
    self.iy = iy
    self.ttype = GL_TRIANGLE_STRIP

    if VERBOSE:
        print("Creating Elevation Map ...", ix, iy)

    wh = width * 0.5
    hh = depth * 0.5
    ws = width / ix
    hs = depth / iy
    ht = height / 255.0
    tx = 1.0 * ntiles / ix
    ty = 1.0 * ntiles / iy

    verts = []
    norms = []
    tex_coords = []
    idx = []

    for y in xrange(0, iy):
        for x in xrange(0, ix):
            hgt = (self.pixels[x, y]) * ht
            this_x = -wh + x * ws
            this_z = -hh + y * hs
            if cubic:
                """ this is a bit experimental. It tries to make the map
                either zero or height high. Vertices are moved 'under'
                adjacent ones if there is a step to make vertical walls.
                Goes wrong in places - mainly because it doesn't check
                diagonals
                """
                if hgt > height / 2:
                    hgt = height
                else:
                    hgt = 0.0
                if hgt == 0 and y > 0 and y < iy - 1 and x > 0 and x < ix - 1:
                    if self.pixels[x - 1, y] > 127:
                        this_x = -wh + (x - 1) * ws
                    elif self.pixels[x + 1, y] > 127:
                        this_x = -wh + (x + 1) * ws
                    elif self.pixels[x, y - 1] > 127:
                        this_z = -hh + (y - 1) * hs
                    elif self.pixels[x, y + 1] > 127:
                        this_z = -hh + (y + 1) * hs
                    elif self.pixels[x - 1, y - 1] > 127:
                        this_x = -wh + (x - 1) * ws
                        this_z = -hh + (y - 1) * hs
                    elif self.pixels[x - 1, y + 1] > 127:
                        this_x = -wh + (x - 1) * ws
                        this_z = -hh + (y + 1) * hs
                    elif self.pixels[x + 1, y - 1] > 127:
                        this_x = -wh + (x + 1) * ws
                        this_z = -hh + (y - 1) * hs
                    elif self.pixels[x + 1, y + 1] > 127:
                        this_x = -wh + (x + 1) * ws
                        this_z = -hh + (y + 1) * hs
            verts.append((this_x, hgt, this_z))
            tex_coords.append(((ix - x) * tx, (iy - y) * ty))

    s = 0
    # create one long triangle_strip by alternating X directions
    for y in range(0, iy - 1):
        for x in range(0, ix - 1):
            i = (y * ix) + x
            idx.append((i, i + ix, i + ix + 1))
            idx.append((i + ix + 1, i + 1, i))
            s += 2

    self.buf = []
    self.buf.append(Buffer(self, verts, tex_coords, idx, None, smooth))
def __init__(self, Numbers=None, max_Magnitude=None, **kwargs):
    '''
    Custom image data generator.
    Behaves like ImageDataGenerator, but allows color augmentation.
    '''
    super().__init__(preprocessing_function=self.__call__, **kwargs)
    self.transforms = ['autocontrast', 'equalize', 'rotate', 'solarize',
                       'color', 'posterize', 'contrast', 'brightness',
                       'sharpness', 'shearX', 'shearY', 'translateX', 'translateY']
    if Numbers is None:
        self.Numbers = len(self.transforms) // 2
    else:
        self.Numbers = Numbers
    if max_Magnitude is None:
        self.max_Magnitude = 10
    else:
        self.max_Magnitude = max_Magnitude
    fillcolor = 128
    self.ranges = {
        # These magnitude ranges should be tested yourself to see what the
        # operations produce; there is no need to follow the values in
        # autoaugment.py
        "shearX": np.linspace(0, 0.3, 10),
        "shearY": np.linspace(0, 0.3, 10),
        "translateX": np.linspace(0, 0.2, 10),
        "translateY": np.linspace(0, 0.2, 10),
        "rotate": np.linspace(0, 360, 10),
        "color": np.linspace(0.0, 0.9, 10),
        "posterize": np.round(np.linspace(8, 4, 10), 0).astype(int),
        "solarize": np.linspace(256, 231, 10),
        "contrast": np.linspace(0.0, 0.5, 10),
        "sharpness": np.linspace(0.0, 0.9, 10),
        "brightness": np.linspace(0.0, 0.3, 10),
        "autocontrast": [0] * 10,
        "equalize": [0] * 10,
        "invert": [0] * 10
    }
    self.func = {
        "shearX": lambda img, magnitude: img.transform(
            img.size, Image.AFFINE,
            (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
            Image.BICUBIC, fill=fillcolor),
        "shearY": lambda img, magnitude: img.transform(
            img.size, Image.AFFINE,
            (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),
            Image.BICUBIC, fill=fillcolor),
        "translateX": lambda img, magnitude: img.transform(
            img.size, Image.AFFINE,
            (1, 0, magnitude * img.size[0] * random.choice([-1, 1]), 0, 1, 0),
            fill=fillcolor),
        "translateY": lambda img, magnitude: img.transform(
            img.size, Image.AFFINE,
            (1, 0, 0, 0, 1, magnitude * img.size[1] * random.choice([-1, 1])),
            fill=fillcolor),
        "rotate": lambda img, magnitude: self.rotate_with_fill(img, magnitude),
        # "rotate": lambda img, magnitude: img.rotate(magnitude * random.choice([-1, 1])),
        "color": lambda img, magnitude: ImageEnhance.Color(img).enhance(
            1 + magnitude * random.choice([-1, 1])),
        "posterize": lambda img, magnitude: ImageOps.posterize(img, magnitude),
        "solarize": lambda img, magnitude: ImageOps.solarize(img, magnitude),
        "contrast": lambda img, magnitude: ImageEnhance.Contrast(img).enhance(
            1 + magnitude * random.choice([-1, 1])),
        "sharpness": lambda img, magnitude: ImageEnhance.Sharpness(img).enhance(
            1 + magnitude * random.choice([-1, 1])),
        "brightness": lambda img, magnitude: ImageEnhance.Brightness(img).enhance(
            1 + magnitude * random.choice([-1, 1])),
        "autocontrast": lambda img, magnitude: ImageOps.autocontrast(img),
        "equalize": lambda img, magnitude: img,  # note: a no-op here, unlike ImageOps.equalize
        "invert": lambda img, magnitude: ImageOps.invert(img)
    }
    life -= ultra_ability_4
    life -= ultra_ability_5
    first_aibility_damage = first_aibility * 601 / (magic_resistance_true + 601)
    life -= first_aibility_damage
    return int(life)


os.system('adb shell screencap -p /sdcard/1.png')
print("Screenshot complete")
os.system('adb pull /sdcard/1.png .')
img = Image.open('C:\\Users\\wkz\\1.png').convert('RGB')
inverted_image = ImageOps.invert(img)
ability_power_range = np.zeros((6, 4))
enemy_life_range = np.zeros((6, 4))
enemy_magic_resistance_range = np.zeros((6, 4))
level_range = np.zeros((6, 4))
for i in range(1, 6):
    ability_power_range[i] = np.array([
        int(474 * screen_width / 1920),
        int((133 * i + 210) * screen_height / 1080),
        int(572 * screen_width / 1920),
        int((133 * i + 273) * screen_height / 1080)
    ])
    enemy_life_range[i] = np.array([
        int(1220 * screen_width / 1920),
        int((133 * i + 210) * screen_height / 1080),
def invertirImagen(imagen):
    invertir = ImageOps.invert(imagen)
    return invertir
import idx2numpy
from numpy import array
from PIL import Image
from PIL import ImageEnhance
from PIL import ImageOps

coll = array([])

# TBD
# 1. Take a file list
# 2. Example Data Images
for n in range(NUM_IMAGES):  # NUM_IMAGES is a placeholder for the number of images
    # Opening the image in monochrome version
    img = Image.open("IMAGE_NAME_{0}.png".format(n)).convert("L")  # placeholder filename pattern
    contr = ImageEnhance.Contrast(img)
    img = contr.enhance(3)
    bright = ImageEnhance.Brightness(img)
    img = bright.enhance(4)
    # Enhancement of contrast and brightness
    img = ImageOps.invert(img)  # Invert image (white on black)
    img = img.resize((28, 28))  # Conversion of resolution to 28 x 28
    arr = array(img)  # Conversion of image to 2D numpy array
    for i in range(28):
        for j in range(28):
            if arr[i][j] == 0:
                arr[i][j] = 1
            else:
                arr[i][j] = 255
    # Conversion to MNIST dataset format
    idx2numpy.convert_to_file(FILE_NAME, arr)  # FILE_NAME is a placeholder; converts the 2D numpy array to idx format
def imgmsg_to_pil(img_msg, rgba=False):
    try:
        if img_msg._type == 'sensor_msgs/CompressedImage':
            pil_img = Image.open(BytesIO(img_msg.data))
            if pil_img.mode.startswith('BGR'):
                pil_img = pil_bgr2rgb(pil_img)
            pil_mode = 'RGB'
        else:
            pil_mode = 'RGB'
            if img_msg.encoding in ['mono8', '8UC1']:
                mode = 'L'
            elif img_msg.encoding == 'rgb8':
                mode = 'RGB'
            elif img_msg.encoding == 'bgr8':
                mode = 'BGR'
            elif img_msg.encoding in ['bayer_rggb8', 'bayer_bggr8',
                                      'bayer_gbrg8', 'bayer_grbg8']:
                mode = 'L'
            elif img_msg.encoding in ['bayer_rggb16', 'bayer_bggr16',
                                      'bayer_gbrg16', 'bayer_grbg16']:
                pil_mode = 'I;16'
                if img_msg.is_bigendian:
                    mode = 'I;16B'
                else:
                    mode = 'I;16L'
            elif img_msg.encoding == 'mono16' or img_msg.encoding == '16UC1':
                pil_mode = 'F'
                if img_msg.is_bigendian:
                    mode = 'F;16B'
                else:
                    mode = 'F;16'
            elif img_msg.encoding == '32FC1':
                pil_mode = 'F'
                if img_msg.is_bigendian:
                    mode = 'F;32BF'
                else:
                    mode = 'F;32F'
            elif img_msg.encoding == 'rgba8':
                mode = 'BGR'
            elif img_msg.encoding == 'bgra8':
                mode = 'RGB'
            else:
                raise Exception("Unsupported image format: %s" % img_msg.encoding)
            pil_img = Image.frombuffer(pil_mode, (img_msg.width, img_msg.height),
                                       img_msg.data, 'raw', mode, 0, 1)
        # 16 bits conversion to 8 bits
        if pil_mode == 'I;16':
            pil_img = pil_img.convert('I').point(lambda i: i * (1. / 256.)).convert('L')
        if pil_img.mode == 'F':
            pil_img = pil_img.point(lambda i: i * (1. / 256.)).convert('L')
            pil_img = ImageOps.autocontrast(pil_img)
            pil_img = ImageOps.invert(pil_img)
        if rgba and pil_img.mode != 'RGBA':
            pil_img = pil_img.convert('RGBA')
        return pil_img
    except Exception as ex:
        print('Can\'t convert image: %s' % ex, file=sys.stderr)
        return None
def invert_alpha(self):
    if self.alpha:
        self.alpha = ImageOps.invert(self.alpha)
    self.clear_cache()
def conv_image_to_module(name, scale_factor):
    module = header % {"name": name.upper()}
    front_image = Image.open("%s_front.png" % name).transpose(Image.FLIP_TOP_BOTTOM)
    print("Reading image from \"%s_front.png\"" % name)
    front_image_red, front_image_green, front_image_blue, front_image_alpha = front_image.split()

    # Soldermask needs to be inverted
    front_image_red = ImageOps.invert(front_image_red)
    front_image_red = Image.composite(front_image_red, front_image_alpha, front_image_alpha)
    front_image_red = front_image_red.point(lambda i: 0 if i < 127 else 1)
    red_array = np.array(front_image_red)
    bmp_red = potrace.Bitmap(red_array)
    path_red = bmp_red.trace(alphamax=0.0, opttolerance=50)

    # Soldermask needs to be inverted
    front_image_green = ImageOps.invert(front_image_green)
    front_image_green = Image.composite(front_image_green, front_image_alpha, front_image_alpha)
    front_image_green = front_image_green.point(lambda i: 0 if i < 127 else 1)
    green_array = np.array(front_image_green)
    bmp_green = potrace.Bitmap(green_array)
    path_green = bmp_green.trace(alphamax=0.0, opttolerance=50)

    front_image_blue = front_image_blue.point(lambda i: 0 if i < 127 else 1)
    blue_array = np.array(front_image_blue)
    bmp_blue = potrace.Bitmap(blue_array)
    path_blue = bmp_blue.trace(alphamax=0.0, opttolerance=50)

    front_image_alpha = front_image_alpha.point(lambda i: 0 if i < 127 else 1)
    front_image_alpha_array = np.array(front_image_alpha)
    bmp_alpha = potrace.Bitmap(front_image_alpha_array)
    path_alpha = bmp_alpha.trace(alphamax=0.0, opttolerance=50)

    w, h = front_image.size

    # print("Generating Outline layer from front alpha channel")
    # module += render_path_to_layer(path_alpha, "line", "20", scale_factor)
    print("Generating tKeepout layer from front red channel")
    module += render_path_to_layer(path_red, "poly", "39", scale_factor)
    print("Generating tStop layer from front green channel")
    module += render_path_to_layer(path_green, "poly", "29", scale_factor)
    print("Generating tPlace layer from front blue channel")
    module += render_path_to_layer(path_blue, "poly", "21", scale_factor)

    try:
        back_image = Image.open("%s_back.png" % name).transpose(Image.FLIP_TOP_BOTTOM)
        back_image = ImageOps.mirror(back_image)
        print("Reading image from \"%s_back.png\"" % name)
        back_image_red, back_image_green, back_image_blue, back_image_alpha = back_image.split()

        back_image_red = back_image_red.point(lambda i: 0 if i < 127 else 1)
        red_array = np.array(back_image_red)
        bmp_red = potrace.Bitmap(red_array)
        path_red = bmp_red.trace(alphamax=0.0, opttolerance=50)

        # Soldermask needs to be inverted
        back_image_green = ImageOps.invert(back_image_green)
        back_image_green = back_image_green.point(lambda i: 0 if i < 127 else 1)
        green_array = np.array(back_image_green)
        bmp_green = potrace.Bitmap(green_array)
        path_green = bmp_green.trace(alphamax=0.0, opttolerance=50)

        back_image_blue = back_image_blue.point(lambda i: 0 if i < 127 else 1)
        blue_array = np.array(back_image_blue)
        bmp_blue = potrace.Bitmap(blue_array)
        path_blue = bmp_blue.trace(alphamax=0.0, opttolerance=50)

        print("Generating bKeepout layer from back red channel")
        module += render_path_to_layer(path_red, "poly", "40", scale_factor)
        print("Generating bStop layer from back green channel")
        module += render_path_to_layer(path_green, "poly", "30", scale_factor)
        print("Generating bPlace layer from back blue channel")
        module += render_path_to_layer(path_blue, "poly", "22", scale_factor)
    except IOError:
        pass

    module += footer % {"name": name.upper()}
    return module, (w * 25.4 / scale_factor, h * 25.4 / scale_factor)
def trim(image):
    inverted = ImageOps.invert(image.convert("RGB"))
    return image.crop(inverted.getbbox())
def start():
    startbutton.config(state='disabled')
    previewbutton.config(state='disabled')
    browsebutton.config(state='disabled')
    mvar = 'Creating wallpaper. Please wait...'
    messvar.set(mvar)

    ### CODE ###
    print('>> Opening image...')
    img = i.open(filename)
    origimg = img  # Original image to be used later
    wid, hei = math.ceil(img.size[1] * (16 / 9)), img.size[1]  # Working out 16:9 width for original height
    # print(f'>> Orig size: {img.size}')
    # print(f'>> New size: {wid, hei}')
    mode = 'RGBA'
    global bkg
    bkg = i.new(mode, (wid, hei))  # Background

    if cmvar.get() == 1:  # Colour map
        if invvar.get() == 1:
            cmblack = cmwvar.get()
            cmwhite = cmbvar.get()
        else:
            cmblack = cmbvar.get()
            cmwhite = cmwvar.get()
        img = img.convert('L')  # Convert to greyscale
        img = ImageOps.colorize(img, black=cmblack, white=cmwhite)  # colour map

    ## Matplotlib colour map
    if plbvar.get() == 0:
        pass
    else:
        print(f'>> Applying {pltvar.get()} colour map...')
        pltcm = matplotlib.cm.get_cmap(pltvar.get())  # color map
        img = img.convert('L')
        img = np.array(img)
        img = pltcm(img)
        img = np.uint8(img * 255)
        img = i.fromarray(img)

    ## Invert colours
    if icvar.get() == 1:
        print('>> Inverting image...')
        img = img.convert('RGB')
        # print(img.mode)
        img = ImageOps.invert(img)

    ## Gaussian Blur
    blurval = int(blurslider.get())  # Slider value
    if blurval == 0:  # Filter options
        pass
    else:
        print('>> Applying Gaussian Blur...')
        # blur = hei/()*8
        img = img.filter(ImageFilter.GaussianBlur(radius=(blurval * 0.1)))  # Gaussian blur

    ## Darken
    if fopt.get() == 2:  # If radio button 2 not chosen
        # https://stackoverflow.com/questions/43618910/pil-drawing-a-semi-transparent-square-overlay-on-image
        img = i.eval(img, lambda x: x / 1.5)

    ## Split image
    def split(pos):
        # Splits image in 2 and puts top half on the left and bottom on the right
        imgcent = math.ceil(hei / 2)  # Image center
        global fopt
        if pos == 't':
            print('>> Creating left background...')
            s = img.crop(box=(0, 0, img.size[0], imgcent))  # Cropping image - 4-tuple: left, top, right, bottom
        elif pos == 'b':
            print('>> Creating right background...')
            s = img.crop(box=(0, imgcent, img.size[0], img.size[1]))
        sr = s.resize(tuple(z * 2 for z in s.size))  # Scaling up by 2 because img was split in two
        return sr

    ## Working out where top and bottom splits are to be pasted
    print('>> Calculating dimensions...')
    sw = math.ceil((wid - img.size[0]) / 2)  # split width
    tspos = (math.ceil((sw / 2) - img.size[0]), 0)  # Top split position
    bspos = (math.ceil((wid - sw / 2) - img.size[0]), 0)  # Bottom split position
    print('>> Creating background...')
    bkg.paste(split('t'), box=tspos)
    bkg.paste(split('b'), box=bspos)
    bkg.paste(origimg, box=(math.ceil((wid / 2) - (img.size[0] / 2)), 0))  # Paste original image in the center of new image

    # Add border
    if bordvar.get() == 1:
        bkg = ImageOps.expand(bkg, border=wid // 100, fill='black')
    elif bordvar.get() == 0:
        pass

    prevbkg = bkg.resize((800, 450))  # Resizing preview image to fit on canvas
    previmg = ImageTk.PhotoImage(image=prevbkg)  # Creating a PhotoImage of image object to load on canvas
    preview.create_image(0, 0, image=previmg, anchor='nw')  # Loading photoimage on canvas
    preview.image = previmg
    # https://web.archive.org/web/20201111190625id_/http://effbot.org/pyfaq/why-do-my-tkinter-images-not-appear.htm
    # preview.itemconfig(setprev)
    # tkinter.Tk()
    # master.mainloop()

    # ~ Resetting GUI ~
    startbutton.config(state='normal')
    previewbutton.config(state='normal')
    browsebutton.config(state='normal')
    bordbutton.config(state='normal')
def inverse_color(image):
    if np.random.random() < 0.4:
        image = ImageOps.invert(image)
    return image
    lambda pil_img, level, _: pil_img.transpose(Image.FLIP_LEFT_RIGHT))
flip_ud = TransformT(
    'FlipUD',
    lambda pil_img, level, _: pil_img.transpose(Image.FLIP_TOP_BOTTOM))
# pylint:disable=g-long-lambda
auto_contrast = TransformT(
    'AutoContrast',
    lambda pil_img, level, _: ImageOps.autocontrast(
        pil_img.convert('RGB')).convert('RGBA'))
equalize = TransformT(
    'Equalize',
    lambda pil_img, level, _: ImageOps.equalize(
        pil_img.convert('RGB')).convert('RGBA'))
invert = TransformT(
    'Invert',
    lambda pil_img, level, _: ImageOps.invert(
        pil_img.convert('RGB')).convert('RGBA'))
# pylint:enable=g-long-lambda
blur = TransformT(
    'Blur', lambda pil_img, level, _: pil_img.filter(ImageFilter.BLUR))
smooth = TransformT(
    'Smooth',
    lambda pil_img, level, _: pil_img.filter(ImageFilter.SMOOTH))


def _rotate_impl(pil_img, level, _):
    """Rotates `pil_img` from -30 to 30 degrees depending on `level`."""
    degrees = int_parameter(level, 30)
    if random.random() > 0.5:
        degrees = -degrees
    return pil_img.rotate(degrees)
import struct
import os

from PIL import Image, ImageOps

target_name = 'ETL9B'
for root, dirs, files in os.walk(target_name):
    for i, file in enumerate(sorted(files)):
        filepath = os.path.join(root, file)
        print(file)
        sum_datasets = 40
        sum_words = 3036
        record_size = 576
        with open(filepath, 'rb') as f:
            for ds_idx in range(1, sum_datasets * sum_words + 1):
                f.seek(ds_idx * record_size)
                s = f.read(record_size)
                r = struct.unpack('>2H4s504s64x', s)
                i1 = Image.frombytes('1', (64, 63), r[3], 'raw')
                img = ImageOps.invert(i1.convert('L'))
                file_name = '{}_{}_{}_{}_{}.png'.format(
                    target_name, i, r[0], hex(r[1])[2:], ds_idx)
                dir_name = "./{}_img/{}".format(target_name, hex(r[1])[2:])
                os.makedirs(dir_name, exist_ok=True)
                img.save(os.path.join(dir_name, file_name), 'PNG')
def char_to_picture(text="", font_name="宋体", background_color=(255, 255, 255), text_color=(0, 0, 0), pictrue_size=400, text_position=(0, 0), in_meddium=False, reverse_color=False, smooth_times=0, noise=0): pictrue_shape = (pictrue_size, pictrue_size) im = Image.new("RGB", pictrue_shape, background_color) dr = ImageDraw.Draw(im) # 由于系统内部不是使用汉字文件名,而是英文名,在此转换 if font_name == "宋体": font_name = "SIMSUN.ttc" if font_name == "楷体": font_name = "SIMKAI.ttf" if font_name == "黑体": font_name = "SIMHEI.ttf" if font_name == "等线": font_name = "DENG.ttf" if font_name == "仿宋": font_name = "SIMFANG.ttf" # 取得字体文件的位置 font_dir = "C:\Windows\Fonts\\" + font_name font_size = int(pictrue_size * 0.8 / len(text)) # 设定文字的大小 font = ImageFont.truetype(font_dir, font_size) # 开始绘图 # 如果设置了居中,那么就居中 # 英文字母的对齐方式并不一样 char_dict = [] for i in range(26): char_dict.append(chr(i + ord('a'))) char_dict.append(chr(i + ord('A'))) if in_meddium: char_num = len(text) text_position = (pictrue_shape[0] / 2 - char_num * font_size / 2, pictrue_shape[1] / 2 - font_size / 2) # 中文 if text in char_dict: text_position = (pictrue_shape[0] / 2 - char_num * font_size / 4, pictrue_shape[1] / 2 - font_size / 2) # 英文 # 开始绘制图像 dr.text(text_position, text, font=font, fill=text_color) if reverse_color: im = ImageOps.invert(im) # 随机扰动 if noise > 0: print("adding noise...") im_array = np.array(im) noise_num = noise * pictrue_size for i in range(noise_num): pos = (random.randint(0, pictrue_size - 1), random.randint(0, pictrue_size - 1)) color = [ random.randint(0, 255), random.randint(0, 255), random.randint(0, 255) ] im_array[pos[0], pos[1], :] = color im = Image.fromarray(im_array) # 模糊化图片 ''' for i in range(smooth_times): im =im.filter(ImageFilter.GaussianBlur) ''' im_array = np.array(im) for i in range(smooth_times): im_array = smooth(im_array) im = Image.fromarray(im_array) # 图片经过模糊后略有缩小 im = im.resize(pictrue_shape) print("文字转换图片成功") return im
def invert_color(image):
    image = image.convert('L')
    image = ImageOps.invert(image)
    image = image.convert('1')
    return image
def __init__(self, dataset, transformations=None, n_classes=8000,
             n_trans=100, max_elms=10, p=0.5):
    """ExemplarNet dataset.

    Args:
        dataset (torch.utils.data.Dataset): The dataset to train on.
        transformations (list, optional): Type of elementary transformations to use.
        n_classes (int, optional): Number of classes, i.e. the subset size of the
            dataset. Defaults to 8000.
        n_trans (int, optional): Number of combined transformations. Defaults to 100.
        max_elms (int, optional): Number of elementary transformations per combined
            transformation. Defaults to 10.
        p (float, optional): Prob. of an elementary transformation to be part of a
            combined transformation. Defaults to 0.5.
    """
    pool = [
        transforms.RandomRotation(  # Rotation
            30, resample=False, expand=False, center=None, fill=None),
        transforms.RandomAffine(  # Shearing
            0, translate=None, scale=None, shear=30, resample=False, fillcolor=0),
        transforms.RandomAffine(  # Translate
            0, translate=(0.3, 0.3), scale=None, shear=None, resample=False, fillcolor=0),
        transforms.Lambda(lambda x: imo.autocontrast(x)),  # Autocontrast
        transforms.Lambda(lambda x: imo.invert(x)),  # Invert
        transforms.Lambda(lambda x: imo.equalize(x)),  # Equalize
        transforms.Lambda(lambda x: imo.solarize(x)),  # Solarize
        transforms.Lambda(lambda x: imo.posterize(
            x, bits=int(np.random.randint(4, 8) + 1))),  # Posterize
        transforms.Lambda(
            lambda x: ime.Color(x).enhance(np.random.uniform())),  # Color
        transforms.Lambda(lambda x: ime.Brightness(x).enhance(
            np.random.uniform())),  # Brightness
        transforms.Lambda(lambda x: ime.Contrast(x).enhance(
            np.random.uniform())),  # Contrast
        transforms.Lambda(lambda x: ime.Sharpness(x).enhance(
            np.random.uniform())),  # Sharpness
        transforms.Compose(  # Set black
            [
                transforms.ToTensor(),
                transforms.RandomErasing(1.0),
                transforms.ToPILImage()
            ]),
        transforms.Lambda(
            lambda x: transforms.functional.to_grayscale(  # Grayscale
                x, num_output_channels=3)),
        transforms.Lambda(lambda x: elastic_transform(x, sigma=10))
    ]

    # Processes full images and applies random cropping instead of
    # gradient-based sampling.
    indices = torch.randint(len(dataset), (n_classes,)).long()
    self.dataset = Subset(dataset, indices)
    self.p = p
    self.n_trans = n_trans
    elm_transformations = transformations if transformations is not None else pool
    self.transformations = []
    for _ in range(self.n_trans):
        transformation = []
        for t in range(max_elms):
            if random.random() < self.p:
                transformation.append(
                    transforms.RandomChoice(elm_transformations))
        self.transformations.append(transforms.Compose(transformation))
def read(location, source):
    img = source.crop(location)
    img = RGBtoBW(img)
    img = ImageOps.invert(img)
    return img
def updateDisplay(config, pricestack, whichcoin, fiat):
    symbolstring = currency.symbol(fiat.upper())
    if fiat == "jpy":
        symbolstring = "¥"
    pricenow = pricestack[-1]
    currencythumbnail = 'currency/' + whichcoin + '.bmp'
    tokenimage = Image.open(os.path.join(picdir, currencythumbnail))
    sparkbitmap = Image.open(os.path.join(picdir, 'spark.bmp'))
    pricechange = str("%+d" % round(
        (pricestack[-1] - pricestack[0]) / pricestack[-1] * 100, 2)) + "%"
    if pricenow > 1000:
        pricenowstring = format(int(pricenow), ",")
    else:
        pricenowstring = str(float('%.5g' % pricenow))
    if config['display']['orientation'] == 0 or config['display']['orientation'] == 180:
        epd = epd2in7.EPD()
        epd.Init_4Gray()
        image = Image.new('L', (epd.width, epd.height), 255)  # 255: clear the image with white
        draw = ImageDraw.Draw(image)
        draw.text((110, 80), "7day :", font=font_date, fill=0)
        draw.text((110, 95), pricechange, font=font_date, fill=0)
        # Print price to 5 significant figures
        draw.text((15, 200), symbolstring + pricenowstring, font=font, fill=0)
        draw.text((10, 10), str(time.strftime("%a %H:%M %d %b %Y")), font=font_date, fill=0)
        image.paste(tokenimage, (10, 25))
        image.paste(sparkbitmap, (10, 125))
        if config['display']['orientation'] == 180:
            image = image.rotate(180, expand=True)
    if config['display']['orientation'] == 90 or config['display']['orientation'] == 270:
        epd = epd2in7.EPD()
        epd.Init_4Gray()
        image = Image.new('L', (epd.height, epd.width), 255)  # 255: clear the image with white
        draw = ImageDraw.Draw(image)
        draw.text((100, 100), "7day : " + pricechange, font=font_date, fill=0)
        # Print price to 5 significant figures
        # draw.text((20,120), symbolstring, font=fonthiddenprice, fill=0)
        draw.text((10, 120), symbolstring + pricenowstring, font=fontHorizontal, fill=0)
        image.paste(sparkbitmap, (80, 50))
        image.paste(tokenimage, (0, 0))
        draw.text((85, 5), str(time.strftime("%a %H:%M %d %b %Y")), font=font_date, fill=0)
        if config['display']['orientation'] == 270:
            image = image.rotate(180, expand=True)
        # This is a hack to deal with the mirroring that goes on in 4Gray Horizontal
        image = ImageOps.mirror(image)
    # If the display is inverted, invert the image using ImageOps
    if config['display']['inverted'] == True:
        image = ImageOps.invert(image)
    # Send the image to the screen
    epd.display_4Gray(epd.getbuffer_4Gray(image))
    epd.sleep()
def invert_img(imdir, outdir):
    im = Image.open(imdir)
    out_filename = outdir
    ImageOps.invert(im).save(out_filename, 'JPEG', quality=100)
def __init__(self, p1, operation1, magnitude_idx1, p2, operation2, magnitude_idx2,
             fillcolor=(128, 128, 128), magnitude_factor=1):
    ranges = {
        "shearX": np.linspace(0, 0.3, 10),
        "shearY": np.linspace(0, 0.3, 10),
        "translateX": np.linspace(0, 150 / 331, 10),
        "translateY": np.linspace(0, 150 / 331, 10),
        "rotate": np.linspace(0, 30, 10),
        "color": np.linspace(0.0, 0.9, 10),
        "posterize": np.round(np.linspace(8, 4, 10), 0).astype(int),
        "solarize": np.linspace(256, 0, 10),
        "contrast": np.linspace(0.0, 0.9, 10),
        "sharpness": np.linspace(0.0, 0.9, 10),
        "brightness": np.linspace(0.0, 0.9, 10),
        "autocontrast": [0] * 10,
        "equalize": [0] * 10,
        "invert": [0] * 10
    }

    # from https://stackoverflow.com/questions/5252170/specify-image-filling-color-when-rotating-in-python-with-pil-and-setting-expand
    def rotate_with_fill(img, magnitude):
        rot = img.convert("RGBA").rotate(magnitude)
        return Image.composite(rot, Image.new("RGBA", rot.size, (128,) * 4),
                               rot).convert(img.mode)

    func = {
        "shearX": lambda img, magnitude: img.transform(
            img.size, Image.AFFINE,
            (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
            Image.BICUBIC, fillcolor=fillcolor),
        "shearY": lambda img, magnitude: img.transform(
            img.size, Image.AFFINE,
            (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),
            Image.BICUBIC, fillcolor=fillcolor),
        "translateX": lambda img, magnitude: img.transform(
            img.size, Image.AFFINE,
            (1, 0, magnitude * img.size[0] * random.choice([-1, 1]), 0, 1, 0),
            fillcolor=fillcolor),
        "translateY": lambda img, magnitude: img.transform(
            img.size, Image.AFFINE,
            (1, 0, 0, 0, 1, magnitude * img.size[1] * random.choice([-1, 1])),
            fillcolor=fillcolor),
        "rotate": lambda img, magnitude: rotate_with_fill(img, magnitude),
        "color": lambda img, magnitude: ImageEnhance.Color(img).enhance(
            1 + magnitude * random.choice([-1, 1])),
        "posterize": lambda img, magnitude: ImageOps.posterize(img, magnitude),
        "solarize": lambda img, magnitude: ImageOps.solarize(img, magnitude),
        "contrast": lambda img, magnitude: ImageEnhance.Contrast(img).enhance(
            1 + magnitude * random.choice([-1, 1])),
        "sharpness": lambda img, magnitude: ImageEnhance.Sharpness(img).enhance(
            1 + magnitude * random.choice([-1, 1])),
        "brightness": lambda img, magnitude: ImageEnhance.Brightness(img).enhance(
            1 + magnitude * random.choice([-1, 1])),
        "autocontrast": lambda img, magnitude: ImageOps.autocontrast(img),
        "equalize": lambda img, magnitude: ImageOps.equalize(img),
        "invert": lambda img, magnitude: ImageOps.invert(img)
    }

    magnitude_1 = max(0, min(9, int(magnitude_factor * magnitude_idx1)))
    magnitude_2 = max(0, min(9, int(magnitude_factor * magnitude_idx2)))

    self.p1 = p1
    self.operation1 = func[operation1]
    self.magnitude1 = ranges[operation1][magnitude_1]
    self.p2 = p2
    self.operation2 = func[operation2]
    self.magnitude2 = ranges[operation2][magnitude_2]
    for l in range(LAYERS):
        l0 = PARTICLES_PER_LAYER * l
        l1 = min(PARTICLES_PER_LAYER * (l + 1), PARTICLES)
        pixelLayer = getPixelData(windData, nx, ny, scaledW, scaledH,
                                  particleProperties[l0:l1], POINTS_PER_PARTICLE,
                                  VELOCITY_MULTIPLIER, MAGNITUDE_RANGE,
                                  LINE_WIDTH_RANGE, ALPHA_RANGE, BRIGHTNESS)
        pixelLayers[:, :, l] = pixelLayer
        sys.stdout.write('\r')
        sys.stdout.write("%s%%" % round(1.0 * (l + 1) / LAYERS * 100, 1))
        sys.stdout.flush()
    pixels = np.amax(pixelLayers, axis=2)
else:
    pixels = getPixelData(windData, nx, ny, scaledW, scaledH,
                          particleProperties, POINTS_PER_PARTICLE,
                          VELOCITY_MULTIPLIER, MAGNITUDE_RANGE,
                          LINE_WIDTH_RANGE, ALPHA_RANGE, BRIGHTNESS)

print("Building image...")
# add pixels and invert
im = Image.fromarray(pixels, mode="L")
im = ImageOps.invert(im)
if SMOOTH_FACTOR > 0.0:
    im = im.resize((contentWidth, contentHeight), Image.LANCZOS)
if BLUR_RADIUS > 0:
    im = im.filter(ImageFilter.GaussianBlur(BLUR_RADIUS))

# crop if necessary
if cropToWidth and cropToHeight:
    im = im.crop((roundInt(OFFSETX), roundInt(OFFSETY),
                  roundInt(OFFSETX + cropToWidth), roundInt(OFFSETY + cropToHeight)))

# add margin
base = Image.new('L', (roundInt(WIDTH), roundInt(HEIGHT)), 255)
cw, ch = im.size
base.paste(im, (contentX, contentY, contentX + cw, contentY + ch))

# add label
def invert(img: Image.Image) -> Image.Image:
    if not _is_pil_image(img):
        raise TypeError(f"img should be PIL Image. Got {type(img)}")
    return ImageOps.invert(img)
def test_sanity(self):
    ImageOps.autocontrast(hopper("L"))
    ImageOps.autocontrast(hopper("RGB"))
    ImageOps.autocontrast(hopper("L"), cutoff=10)
    ImageOps.autocontrast(hopper("L"), ignore=[0, 255])
    ImageOps.autocontrast_preserve(hopper("L"))
    ImageOps.autocontrast_preserve(hopper("RGB"))
    ImageOps.autocontrast_preserve(hopper("L"), cutoff=10)
    ImageOps.autocontrast_preserve(hopper("L"), ignore=[0, 255])
    ImageOps.colorize(hopper("L"), (0, 0, 0), (255, 255, 255))
    ImageOps.colorize(hopper("L"), "black", "white")
    ImageOps.crop(hopper("L"), 1)
    ImageOps.crop(hopper("RGB"), 1)
    ImageOps.deform(hopper("L"), self.deformer)
    ImageOps.deform(hopper("RGB"), self.deformer)
    ImageOps.equalize(hopper("L"))
    ImageOps.equalize(hopper("RGB"))
    ImageOps.expand(hopper("L"), 1)
    ImageOps.expand(hopper("RGB"), 1)
    ImageOps.expand(hopper("L"), 2, "blue")
    ImageOps.expand(hopper("RGB"), 2, "blue")
    ImageOps.fit(hopper("L"), (128, 128))
    ImageOps.fit(hopper("RGB"), (128, 128))
    ImageOps.flip(hopper("L"))
    ImageOps.flip(hopper("RGB"))
    ImageOps.grayscale(hopper("L"))
    ImageOps.grayscale(hopper("RGB"))
    ImageOps.invert(hopper("L"))
    ImageOps.invert(hopper("RGB"))
    ImageOps.mirror(hopper("L"))
    ImageOps.mirror(hopper("RGB"))
    ImageOps.posterize(hopper("L"), 4)
    ImageOps.posterize(hopper("RGB"), 4)
    ImageOps.solarize(hopper("L"))
    ImageOps.solarize(hopper("RGB"))