def locateAll(needleImage, haystackImage, grayscale=False, limit=None):
    needleFileObj = None
    haystackFileObj = None
    if isinstance(needleImage, str):
        # 'image' is a filename, load the Image object
        needleFileObj = open(needleImage, 'rb')
        needleImage = Image.open(needleFileObj)
    if isinstance(haystackImage, str):
        # 'image' is a filename, load the Image object
        haystackFileObj = open(haystackImage, 'rb')
        haystackImage = Image.open(haystackFileObj)

    if grayscale:
        needleImage = ImageOps.grayscale(needleImage)
        haystackImage = ImageOps.grayscale(haystackImage)

    needleWidth, needleHeight = needleImage.size
    haystackWidth, haystackHeight = haystackImage.size

    needleImageData = tuple(needleImage.getdata())
    haystackImageData = tuple(haystackImage.getdata())

    needleImageRows = [needleImageData[y * needleWidth:(y + 1) * needleWidth] for y in range(needleHeight)]
    needleImageFirstRow = needleImageRows[0]

    assert len(needleImageFirstRow) == needleWidth
    assert [len(row) for row in needleImageRows] == [needleWidth] * needleHeight

    numMatchesFound = 0
    for y in range(haystackHeight):
        for matchx in _kmp(needleImageFirstRow, haystackImageData[y * haystackWidth:(y + 1) * haystackWidth]):
            foundMatch = True
            for searchy in range(1, needleHeight):
                haystackStart = (searchy + y) * haystackWidth + matchx
                if needleImageData[searchy * needleWidth:(searchy + 1) * needleWidth] != haystackImageData[haystackStart:haystackStart + needleWidth]:
                    foundMatch = False
                    break
            if foundMatch:
                # Match found, report the x, y, width, height of where the matching region is in haystack.
                numMatchesFound += 1
                yield (matchx, y, needleWidth, needleHeight)
                if limit is not None and numMatchesFound >= limit:
                    # Limit has been reached. Close file handles and stop the generator.
                    if needleFileObj is not None:
                        needleFileObj.close()
                    if haystackFileObj is not None:
                        haystackFileObj.close()
                    return

    # There was no limit or the limit wasn't reached, but close the file handles anyway.
    if needleFileObj is not None:
        needleFileObj.close()
    if haystackFileObj is not None:
        haystackFileObj.close()
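# Illustrative usage sketch (not from the original source): assumes locateAll
# above is importable alongside its PIL imports and its _kmp row-search helper,
# and that 'needle.png' / 'haystack.png' are hypothetical files.
for box in locateAll('needle.png', 'haystack.png', grayscale=True, limit=1):
    x, y, w, h = box
    print('needle found at (%d, %d), size %dx%d' % (x, y, w, h))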
def test_sanity():
    ImageOps.autocontrast(lena("L"))
    ImageOps.autocontrast(lena("RGB"))
    ImageOps.autocontrast(lena("L"), cutoff=10)
    ImageOps.autocontrast(lena("L"), ignore=[0, 255])

    ImageOps.colorize(lena("L"), (0, 0, 0), (255, 255, 255))
    ImageOps.colorize(lena("L"), "black", "white")

    ImageOps.crop(lena("L"), 1)
    ImageOps.crop(lena("RGB"), 1)

    ImageOps.deform(lena("L"), deformer)
    ImageOps.deform(lena("RGB"), deformer)

    ImageOps.equalize(lena("L"))
    ImageOps.equalize(lena("RGB"))

    ImageOps.expand(lena("L"), 1)
    ImageOps.expand(lena("RGB"), 1)
    ImageOps.expand(lena("L"), 2, "blue")
    ImageOps.expand(lena("RGB"), 2, "blue")

    ImageOps.fit(lena("L"), (128, 128))
    ImageOps.fit(lena("RGB"), (128, 128))
    ImageOps.fit(lena("RGB").resize((1, 1)), (35, 35))

    ImageOps.flip(lena("L"))
    ImageOps.flip(lena("RGB"))

    ImageOps.grayscale(lena("L"))
    ImageOps.grayscale(lena("RGB"))

    ImageOps.invert(lena("L"))
    ImageOps.invert(lena("RGB"))

    ImageOps.mirror(lena("L"))
    ImageOps.mirror(lena("RGB"))

    ImageOps.posterize(lena("L"), 4)
    ImageOps.posterize(lena("RGB"), 4)

    ImageOps.solarize(lena("L"))
    ImageOps.solarize(lena("RGB"))

    success()
def image_to_vector(filename):
    img = Image.open(filename)
    img = ImageOps.grayscale(img)
    img = img.resize(IMAGE_SIZE)
    img = list(img.getdata())
    img = np.array(img)
    return img
def load_image(self):
    self.image = Image.open(self.file)
    self.image = ImageOps.grayscale(self.image)
    # apply contrast first, then brightness (the original bound both enhancers
    # to the pre-contrast image, silently discarding the contrast pass)
    self.image = ImageEnhance.Contrast(self.image).enhance(20.0)
    self.image = ImageEnhance.Brightness(self.image).enhance(2.0)

    x_ratio = self.width / float(self.image.size[0])
    y_ratio = self.height / float(self.image.size[1])
    ratio = min(x_ratio, y_ratio)
    # scale down so each image row corresponds to one drawn line of width line_width
    ratio = ratio / self.line_width
    self.image = self.image.resize((int(self.image.size[0] * ratio), int(self.image.size[1] * ratio)))
    self.image.save(self.file + "-resized.png")

    self.pixels = self.image.load()
    self.width = self.image.size[0] * self.line_width
    self.height = self.image.size[1] * self.line_width

    for i in range(self.image.size[0]):
        for j in range(self.image.size[1]):
            if self.test_pixel(i, j):
                self.pixels[i, j] = 0
            else:
                self.pixels[i, j] = 255
    self.image.save(self.file + "-printable.png")
def cheese(z):
    i = 0
    while i < (RESW * RESH * 65 / 100) or i > (RESW * RESH * 95 / 100):
        im1 = cam.get_image()
        time.sleep(0.055)
        p.ChangeDutyCycle(12)
        time.sleep(0.055)
        im2 = cam.get_image()
        time.sleep(0.055)
        p.ChangeDutyCycle(0)
        time.sleep(0.055)
        pygame.image.save(im1, "b%08d.jpg" % z)
        pygame.image.save(im2, "a%08d.jpg" % z)
        im2 = Image.open("b%08d.jpg" % z).rotate(ROT)
        im1 = Image.open("a%08d.jpg" % z).rotate(ROT)
        draw = ImageDraw.Draw(im2)
        draw.rectangle([0, 0, RESW, CROPH], fill=0)
        draw = ImageDraw.Draw(im1)
        draw.rectangle([0, 0, RESW, CROPH], fill=0)
        draw.line((int(RESW / 2), 0, int(RESW / 2), CROPH), fill=255)
        diff = ImageChops.difference(im2, im1)
        diff = ImageOps.grayscale(diff)
        diff = ImageOps.posterize(diff, 6)
        v = diff.getcolors()
        i = v[0][0]
        print(i)
    im1.save("b%08d.jpg" % z, quality=90)
    im1 = Image.new("RGB", (RESW, RESH))
    im1.paste(diff)
    im1.save("%08d.jpg" % z, quality=90)
    im2.save("a%08d.jpg" % z, quality=90)
def loadjpeg(self, filename, threshold=0.5):
    threshold *= 255
    image = Image.open(filename)
    image = ImageOps.grayscale(image)
    # list() is needed on Python 3, where map() returns an iterator
    array = numpy.array(list(map(binarize, image.getdata())))
    array = numpy.reshape(array, image.size)
    return BinaryArrayImage(array)
def grayscale(cls, pic):
    if pic is None:
        raise PictureError("picture does not exist.")
    try:
        return ImageOps.grayscale(pic).convert("RGB")
    except IOError as e:
        cls.L.warning("I/O Error : %s" % str(e))
        raise PictureError("failed to convert %s to grayscale." % pic)
def image_tint(image, tint=None):
    if tint is None:
        return image
    if image.mode not in ['RGB', 'RGBA']:
        image = image.convert('RGBA')

    tr, tg, tb = ImageColor.getrgb(tint)
    tl = ImageColor.getcolor(tint, "L")  # tint color's overall luminosity
    if not tl:
        tl = 1  # avoid division by zero
    tl = float(tl)
    # compute luminosity preserving tint factors
    sr, sg, sb = map(lambda tv: tv / tl, (tr, tg, tb))  # per component adjustments

    # create look-up tables to map luminosity to adjusted tint
    # (using floating-point math only to compute table)
    luts = (tuple(map(lambda lr: int(lr * sr + 0.5), range(256))) +
            tuple(map(lambda lg: int(lg * sg + 0.5), range(256))) +
            tuple(map(lambda lb: int(lb * sb + 0.5), range(256))))
    l = ImageOps.grayscale(image)  # 8-bit luminosity version of whole image
    if Image.getmodebands(image.mode) < 4:
        merge_args = (image.mode, (l, l, l))  # for RGB version of grayscale
    else:
        # include copy of image's alpha layer
        a = Image.new("L", image.size)
        a.putdata(image.getdata(3))
        merge_args = (image.mode, (l, l, l, a))  # for RGBA version of grayscale
        luts += tuple(range(256))  # for 1:1 mapping of copied alpha values
    return Image.merge(*merge_args).point(luts)
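# Illustrative usage sketch (not from the original source): 'photo.png' is a
# hypothetical file. Because the LUTs scale each channel by tint/luminosity,
# the tinted result keeps the original image's brightness distribution.
from PIL import Image

tinted = image_tint(Image.open('photo.png').convert('RGBA'), '#FF0000')
tinted.save('photo_red.png')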
def image_tint(src, tint=None):
    r = lambda: random.randint(0, 255)
    tint = tint or '#{:02X}{:02X}{:02X}'.format(r(), r(), r())
    if isinstance(src, str):  # file path? (Image.isStringType was Python 2 only)
        src = Image.open(src)
    if src.mode not in ['RGB', 'RGBA']:
        raise TypeError('Unsupported source image mode: {}'.format(src.mode))
    src.load()

    tr, tg, tb = ImageColor.getrgb(tint)
    tl = ImageColor.getcolor(tint, "L")  # tint color's overall luminosity
    if not tl:
        tl = 1  # avoid division by zero
    tl = float(tl)
    # compute luminosity preserving tint factors
    sr, sg, sb = map(lambda tv: tv / tl, (tr, tg, tb))  # per component adjustments

    # create look-up tables to map luminosity to adjusted tint
    # (using floating-point math only to compute table; tuple-wrapped so the
    # concatenation works on Python 3, where map() returns an iterator)
    luts = (tuple(map(lambda lr: int(lr * sr + 0.5), range(256))) +
            tuple(map(lambda lg: int(lg * sg + 0.5), range(256))) +
            tuple(map(lambda lb: int(lb * sb + 0.5), range(256))))
    l = ImageOps.grayscale(src)  # 8-bit luminosity version of whole image
    if Image.getmodebands(src.mode) < 4:
        merge_args = (src.mode, (l, l, l))  # for RGB version of grayscale
    else:
        # include copy of src image's alpha layer
        a = Image.new("L", src.size)
        a.putdata(src.getdata(3))
        merge_args = (src.mode, (l, l, l, a))  # for RGBA version of grayscale
        luts += tuple(range(256))  # for 1:1 mapping of copied alpha values
    return (Image.merge(*merge_args).point(luts), tint)
def utilisation_densifie_probabiliste():
    # raw strings avoid invalid escape sequences in the Windows paths
    a = Image.open(r"C:\Users\Clément\Desktop\ny\collection\shen_1.jpg")
    a = ImageOps.grayscale(a)
    # a = algo_complexes.algo_sobel(a)
    a = densifie_probabiliste(a, 200, 50, 50, 50)
    a.save(r"C:\Users\Clément\Desktop\oo.jpg")
def ocr_cell(im, cells, x, y):
    """Return OCRed text from this cell"""
    fbase = PATH_TEMP + "/%d-%d" % (x, y)
    ftif = "%s.tif" % fbase
    ftxt = "%s.txt" % fbase
    cmd = "tesseract -l rus -psm 7 %s %s" % (ftif, fbase)

    # extract cell from whole image, grayscale (1-color channel), monochrome
    region = im.crop(cells[x][y])
    region = ImageOps.grayscale(region)
    region = region.point(lambda p: p > 200 and 255)

    # determine background color (most used color)
    histo = region.histogram()
    if histo[0] > histo[255]:
        bgcolor = 0
    else:
        bgcolor = 255

    # trim borders by finding top-left and bottom-right bg pixels
    pix = region.load()
    x1, y1 = 0, 0
    x2, y2 = region.size
    x2, y2 = x2 - 1, y2 - 1
    while pix[x1, y1] != bgcolor:
        x1 += 1
        y1 += 1
    while pix[x2, y2] != bgcolor:
        x2 -= 1
        y2 -= 1

    # save as TIFF and extract text with Tesseract OCR
    trimmed = region.crop((x1, y1, x2, y2))
    trimmed.save(ftif, "TIFF")
    subprocess.call([cmd], shell=True, stderr=subprocess.PIPE)
    lines = [l.strip() for l in open(ftxt).readlines()]
    if len(lines) == 0:
        return "0"
    else:
        return lines[0]
def get_noise_from_file(file_name):
    original = Image.open(file_name)
    greyscale = ImageOps.grayscale(original)
    # tobytes/frombuffer replace the removed tostring/fromstring APIs
    greyscale_vector = numpy.frombuffer(greyscale.tobytes(), dtype=numpy.uint8)
    greyscale_matrix = numpy.reshape(greyscale_vector, (original.size[1], original.size[0]))
    return get_noise(greyscale_matrix)
def display_file(epd, file_name):
    """display centre of image then resized image"""
    image = Image.open(file_name)
    image = ImageOps.grayscale(image)

    # crop to the middle (integer division keeps the crop box integral)
    w, h = image.size
    x = w // 2 - epd.width // 2
    y = h // 2 - epd.height // 2
    cropped = image.crop((x, y, x + epd.width, y + epd.height))
    bw = cropped.convert("1", dither=Image.FLOYDSTEINBERG)
    epd.display(bw)
    epd.update()
    time.sleep(3)  # delay in seconds

    rs = image.resize((epd.width, epd.height))
    bw = rs.convert("1", dither=Image.FLOYDSTEINBERG)
    epd.display(bw)
    epd.update()
    time.sleep(3)  # delay in seconds
def run_algo(self, s1, s2, pattern, stdout=None, timeout=False):
    """
    the core algo runner
    could also be called by a batch processor; this one needs no parameter
    """
    # RGB => grayscale conversion
    im = Image.open(self.work_dir + 'input_0.sel.png')
    ImageOps.grayscale(im).save(self.work_dir + 'input_0.sel.png')

    # contrast adjustment
    p = self.run_proc(['balance', 'irgb', str(s1), str(s2),
                       'input_0.sel.png', 'input_0.sel_normalized.png'])
    self.wait_proc(p, timeout=timeout)

    # main program call
    if pattern == 'Columns':
        p = self.run_proc(['demo_MIRE', 'input_0.sel_normalized.png',
                           'output.png'], stdout=stdout, stderr=None)
        self.wait_proc(p, timeout)
    else:
        im = Image.open(self.work_dir + 'input_0.sel_normalized.png')
        im.rotate(90).save(self.work_dir + 'input_0.sel_rot.png')
        p = self.run_proc(['demo_MIRE', 'input_0.sel_rot.png',
                           'output.png'], stdout=stdout, stderr=None)
        self.wait_proc(p, timeout)
        im = Image.open(self.work_dir + 'output.png')
        im.rotate(-90).save(self.work_dir + 'output.png')
def hole_detection(image_name, output="output.png", size=(128, 128)):
    image = Image.open(image_name)
    original_image = image.copy()
    image.thumbnail(size, Image.ANTIALIAS)
    image = ImageOps.grayscale(image)
    median_filter(image)

    horizontal_hist = horizontal_histogram(image)
    vertical_hist = vertical_histogram(image)
    horizontal = [y for y in find_local_minimums(horizontal_hist)]
    vertical = [x for x in find_local_minimums(vertical_hist)]
    call(['gnuplot', 'hole.plot'])

    razon = image.size
    image = original_image
    razon = (float(image.size[0]) / razon[0], float(image.size[1]) / razon[1])
    draw = ImageDraw.Draw(image)
    for x in horizontal:
        x = int(x * razon[0])
        draw.line((x, 0, x, image.size[1]), fill=(0, 255, 0))
    for y in vertical:
        y = int(y * razon[1])
        draw.line((0, y, image.size[0], y), fill=(0, 0, 255))
    image.save(output)
def grayscale(image, amount=100):
    grayscaled = ImageOps.grayscale(image)
    if amount < 100:
        grayscaled = imtools.blend(image, grayscaled, amount / 100.0)
    if image.mode == 'RGBA':
        grayscaled.putalpha(imtools.get_alpha(image))
    return grayscaled
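# Illustrative usage sketch (not from the original source): assumes the imtools
# helpers used above are importable and that 'in.png' is a hypothetical file.
from PIL import Image

half_gray = grayscale(Image.open('in.png'), amount=50)  # 50/50 color/gray blend
half_gray.save('half_gray.png')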
def cheese(z):
    i = 0
    while i < (RESW * RESH * 65 / 100) or i > (RESW * RESH * 95 / 100):
        urllib.urlretrieve("http://127.0.0.1:8081/?action=snapshot", "b%08d.jpg" % z)
        time.sleep(0.055)
        p.ChangeDutyCycle(12)
        time.sleep(0.055)
        urllib.urlretrieve("http://127.0.0.1:8081/?action=snapshot", "a%08d.jpg" % z)
        time.sleep(0.055)
        p.ChangeDutyCycle(0)
        time.sleep(0.055)
        im2 = Image.open("b%08d.jpg" % z).rotate(ROT)
        im1 = Image.open("a%08d.jpg" % z).rotate(ROT)
        draw = ImageDraw.Draw(im2)
        draw.rectangle([0, 0, RESW, CROPH], fill=0)
        draw = ImageDraw.Draw(im1)
        draw.rectangle([0, 0, RESW, CROPH], fill=0)
        draw.line((int(RESW / 2), 0, int(RESW / 2), CROPH), fill=128)
        diff = ImageChops.difference(im2, im1)
        diff = ImageOps.grayscale(diff)
        diff = ImageOps.posterize(diff, 6)
        v = diff.getcolors()
        i = v[0][0]
        # print(i)
    im1.save("b%08d.jpg" % z, quality=90)
    im1 = Image.new("RGB", (RESW, RESH))
    im1.paste(diff)
    im1.save("%08d.jpg" % z, quality=90)
    im2.save("a%08d.jpg" % z, quality=90)
def evaluate(request):
    """Attempt a passive evaluation of the image:
    reduce the size if it is larger than 2000 in any direction,
    grayscale all images."""
    if request.method == 'POST':
        IMAGE_RANDOM = hashlib.sha224(datetime.datetime.now().isoformat()).hexdigest()[:8]
        sb = stickybits.Stickybits(apikey=TEST_KEY)
        sb.base_url = 'http://dev.stickybits.com/api/2/'
        current = "%s/%s.jpg" % (TEMP_DIR, IMAGE_RANDOM)
        post = request.POST
        image = request.FILES['img']
        imagen = Image.open(image)
        imagen = ImageOps.grayscale(imagen)
        sz = imagen.size
        while sz[0] > 2000 or sz[1] > 2000:
            # integer division: PIL resize requires integral dimensions
            imagen = imagen.resize([x // 2 for x in sz])
            sz = imagen.size
        imagen.save(current, "JPEG")
        cont = upload_image(sb, current)
        if len(cont["codes"]) > 0:
            result = json.dumps({'success': True, 'codes': cont['codes'],
                                 'method': 'greyscale and scale only'})
            return HttpResponse(result)
        return adjust(request, current)
    else:
        return HttpResponse({'success': False, 'message': 'Post an image to evaluate'})
def main():
    """main program - display list of images"""
    epd = EPD()
    epd.clear()

    print("panel = {p:s} {w:d} x {h:d}  version={v:s} COG={g:d} FILM={f:d}".format(
        p=epd.panel, w=epd.width, h=epd.height, v=epd.version, g=epd.cog, f=epd.film))

    while True:
        for cam in cams:
            fp = urllib2.urlopen(cam)
            file_name = cStringIO.StringIO(fp.read())
            image = Image.open(file_name)
            image = ImageOps.grayscale(image)
            rs = image.resize((epd.width, epd.height))
            bw = rs.convert("1", dither=Image.FLOYDSTEINBERG)
            epd.display(bw)
            epd.update()
            time.sleep(5)  # delay in seconds
def main():
    """main program - display list of images"""
    # Hardware SPI usage:
    disp = LCD.PCD8544(DC, RST, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=4000000))

    # Initialize library.
    disp.begin(contrast=60)

    # Clear display.
    disp.clear()
    disp.display()

    while True:
        for cam in cams:
            fp = urllib2.urlopen(cam)
            file_name = cStringIO.StringIO(fp.read())
            image = Image.open(file_name)
            image = ImageOps.grayscale(image)
            rs = image.resize((84, 48))
            bw = rs.convert("1")
            # bw = rs.convert("1", dither=Image.FLOYDSTEINBERG)
            disp.image(bw)
            disp.display()
            time.sleep(5)  # delay in seconds
def is_valid_image(self, path):
    from PIL import Image
    from PIL import ImageOps
    try:
        # Check if file is readable by PIL.
        trial_image = Image.open(path)
        trial_image.verify()
        trial_image = Image.open(path)
        if trial_image.size[0] != 5001 or trial_image.size[1] != 5001:
            return "image wrong dimensions"
        bands = trial_image.getbands()
        if bands[:3] == ('R', 'G', 'B'):
            # it's an RGB image, so convert it to grayscale
            output_gr = ImageOps.grayscale(trial_image)
            output_gr.save(path, "PNG")
            return "ok"
        elif bands[0] in 'LP':
            # already grayscale (or palette-based)
            return "ok"
        else:
            # image is neither grayscale nor RGB
            return "image is not a grayscale or RGB (?)"
    except ImportError:
        # Under PyPy, it is possible to import PIL. However, the underlying
        # _imaging C module isn't available, so an ImportError will be
        # raised. Catch and re-raise.
        raise
    except Exception as valerror:
        # Python Imaging Library doesn't recognize it as an image
        return "image verification failed" + str(valerror)
def getPixelAv(box):
    im = ImageOps.grayscale(ImageGrab.grab(box))
    im.save(os.getcwd() + '\\Grab001.png', 'PNG')
    a = array(im.getcolors())
    a = a.sum()
    return a
def set_layer(self, image):
    for s in range(self.n_scale):
        size = self.get_size(s)
        gray_img = np.asarray(ImageOps.grayscale(image).resize((size, size))).T * 1.0 / 255
        for x in range(size):
            for y in range(size):
                self.array[s][x, y, 0] = gray_img[x, y]
def det_agujeros(image_name, size=(128, 128)):
    image = Image.open(image_name)
    original_image = image.copy()
    image = ImageOps.grayscale(image)
    filtro(image)

    hist_hor = horizontalh(image)
    hist_vert = verticalh(image)
    horizontal = mins(hist_hor)
    vertical = mins(hist_vert)
    call(["gnuplot", "plot.gnu"])

    r = image.size
    image = original_image
    draw = ImageDraw.Draw(image)
    for x in horizontal:
        # vertical line at column x, spanning the image height
        # (the original used size[0]/size[1] swapped)
        draw.line((x, 0, x, image.size[1]), fill=(255, 0, 0))
    print("pasa")
    for y in vertical:
        # horizontal line at row y, spanning the image width
        draw.line((0, y, image.size[0], y), fill=(0, 0, 255))
    print("pasa")
    image.save("lin.png")
def find_lines(inpath, ulx, uly, lrx, lry, save_file=False, show_file=False):
    # These shouldn't really be global; it could be cleaned up.
    global xsize
    global ysize
    global pix

    # Load into PIL
    im = ImageOps.invert(ImageOps.grayscale(Image.open(inpath)))
    pix = im.load()
    xsize = lrx - ulx
    ysize = lry - uly

    line_height = 73
    fudge = 70
    start_y = uly
    boxes = []
    for i in range(100):
        new_box = line_in_range(start_y, line_height, fudge)
        start_y = new_box[0] + line_height
        if get_box_val(new_box[0], new_box[0] + line_height) == 0:
            break
        boxes.append(new_box[0])

    # keep only boxes whose value is within a factor of two of the median
    box_vals = [get_box_val(y, y + line_height) for y in boxes]
    med = np.median(box_vals)
    filtered_boxes = [y for y in boxes
                      if med / 2.0 < get_box_val(y, y + line_height) < med * 2]

    # left, upper, right, and lower
    final_boxes = [(ulx, y, lrx, y + line_height) for y in filtered_boxes]
    return final_boxes
def generate_achieve_on_image(image, achievements, name, pos):
    ach = Achievement.objects.get(name=name)
    picture = Image.open(ach.imageUrl)
    picture.thumbnail((100, 100))
    if not achievements.filter(achievement=ach).exists():
        picture = ImageOps.colorize(ImageOps.grayscale(picture), (0, 0, 0), (50, 50, 50))
    image.paste(picture, pos)
def run(self):
    while True:
        try:
            camera = WebCamera.objects.get(pk=self._camera.id)
            if camera.motion_control:
                now = datetime.now()
                request = get_pool().request("GET", "%s?action=snapshot" % camera.internal_url)
                try:
                    source = Image.open(BytesIO(request.data))
                    img = ImageOps.equalize(ImageOps.grayscale(source))
                    if self._previous is not None:
                        out = ImageMath.eval("convert(a - b, 'L')", a=img, b=self._previous)
                        out = out.filter(MedianFilter())
                        total = 0
                        for idx, val in enumerate(out.histogram()):
                            total += val * idx
                        if total > 3000000:
                            camera.last_motion = now
                            camera.save()
                            filename = os.path.join(camera.motion_folder, "{:%Y%m%d-%H%M%S}.jpg".format(now))
                            source.save(filename)
                            filesize = os.path.getsize(filename)
                            if filesize < 6700:
                                os.remove(filename)
                    self._previous = img
                finally:
                    request.close()
            else:
                self._previous = None
        except:
            print("Ignore Exception")
        sleep(1)
def generate_picture_from_user_info(username, statistics, achievements):
    image = Image.new("RGB", (450, 470), color=(180, 180, 180))
    draw = ImageDraw.Draw(image)
    color = (94, 73, 15)
    username = ((10, 10), username)
    stats = [
        ((10, 50), "Создано задач: {}".format(statistics['task_count'])),
        ((10, 70), "Решено задач: {}".format(statistics['solved_task_count'])),
        ((10, 90), "Процент правильных ответов: {}%".format(statistics['percentage'])),
        ((10, 110), "Рейтинг: {}".format(statistics['rating'])),
    ]
    header_font_size, statistic_font_size = 30, 15
    header_font = ImageFont.truetype("static/arial.ttf", header_font_size)
    statistic_font = ImageFont.truetype("static/arial.ttf", statistic_font_size)
    draw.text(username[0], username[1], fill=color, font=header_font)
    for stat in stats:
        draw.text(stat[0], stat[1], fill=color, font=statistic_font)

    ach_first = Achievement.objects.get(name='First')
    first = Image.open(ach_first.imageUrl)
    first.thumbnail((100, 100))
    if achievements.filter(achievement=ach_first).exists():
        image.paste(first, (340, 30))
        draw.text((430, 110), str(achievements.get(achievement=ach_first).count),
                  fill=(255, 0, 0), font=statistic_font)
    else:
        com = ImageOps.colorize(ImageOps.grayscale(first), (0, 0, 0), (50, 50, 50))
        image.paste(com, (340, 30))

    pictures_and_positions = (('Creator1', (10, 140)), ('Creator2', (120, 140)),
                              ('Creator3', (230, 140)), ('Creator4', (340, 140)),
                              ('Solver1', (10, 250)), ('Solver2', (120, 250)),
                              ('Solver3', (230, 250)), ('Solver4', (340, 250)),
                              ('Commentator1', (10, 360)), ('Commentator2', (120, 360)),
                              ('Commentator3', (230, 360)), ('Commentator4', (340, 360)))
    for pp in pictures_and_positions:
        generate_achieve_on_image(image, achievements, pp[0], pp[1])
    return image
def tats(image):
    image = image.convert('RGB')
    colours = util.get_dominant_colours(image, 9)
    colours = util.order_colours_by_brightness(colours)
    bg = random.choice(colours[:3])
    light = random.choice(colours[3:6])
    dark = random.choice(colours[6:])

    # tuple-unpacking lambdas are Python 2 only; use a generator expression
    dist = math.sqrt(sum((a - b) ** 2 for a, b in zip(light, dark)))
    if dist < 100:
        light = util.modify_hls(light, l=lambda l: l + 100)
        light = util.modify_hls(light, s=lambda s: s + 100)
        dark = util.modify_hls(dark, s=lambda s: s + 100)

    layer = Image.open(os.path.dirname(os.path.abspath(__file__)) + '/' + 'assets/tats.png')
    layer.load()
    r, g, b, a = layer.split()
    layer = layer.convert('RGB')
    layer = ImageOps.grayscale(layer)
    layer = ImageOps.colorize(layer, tuple(dark), tuple(light))
    layer.putalpha(a)
    im = Image.new('RGB', layer.size, tuple(bg))
    im.paste(layer, mask=layer)
    return im
def decoder(file):
    image = Image.open(file).convert('RGB')
    image = ImageOps.invert(image)
    image = ImageOps.grayscale(image)
    image = ImageOps.autocontrast(image)
    return pytesseract.image_to_string(image)
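# Illustrative usage sketch (not from the original source): 'captcha.png' is a
# hypothetical light-on-dark input; inverting before autocontrast typically
# helps Tesseract with such images.
print(decoder('captcha.png'))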
if __name__ == '__main__':
    # R = Variable(torch.FloatTensor([
    #     [5, 3, 0, 1],
    #     [4, 0, 0, 1],
    #     [1, 1, 0, 5],
    #     [1, 0, 0, 4],
    #     [0, 1, 5, 4],
    # ]
    # ))
    from PIL import Image
    from PIL import ImageOps
    import torchvision.transforms as transforms

    img = Image.open('./Buson_Nopperabo.jpg')
    img = ImageOps.grayscale(img)
    img = transforms.ToTensor()(img)
    R = Variable(img.squeeze(0))

    mf = MF(R.size(0), R.size(1), 50)
    adam_params = {"lr": 0.0005}
    optimizer = Adam(adam_params)
    svi = SVI(mf.model, mf.guide, optimizer, loss='ELBO', num_particles=5)

    losses = []
    for epoch in range(10000):
        loss = svi.step(R)
        losses.append(loss)
        if epoch % 100 == 0:
            print(epoch, loss)
import os

import numpy as np
from PIL import Image
from PIL import ImageOps
import cntk.io.transforms as xforms
import cntk as C
from cntk import cntk_py
from cntk.layers import Convolution, MaxPooling, Dense
from cntk.initializer import glorot_uniform
from cntk.ops import relu, sigmoid, input_variable

# https://github.com/usuyama/cntk_unet/blob/master/cntk_unet.py
np.set_printoptions(threshold=np.inf)

AnsPath = "/home/ys/Share/7_DL_model_set/ver20170413/15Z32/DL_Ans_half"
BasePath = "/home/ys/Share/7_DL_model_set/ver20170413/15Z32/trn_half"

file_list = os.listdir(AnsPath)  # renamed from 'list' to avoid shadowing the builtin
testimg = Image.open(AnsPath + "/" + file_list[0])
testimg = testimg.resize((572, 572))
testimg = ImageOps.grayscale(testimg)
test_image = np.array(testimg)  # .transpose(2,0,1)
test_image = np.array([test_image])
shape = test_image.shape
data_size = test_image.shape[0]

x = C.input_variable(shape)
y = C.input_variable(shape)

print(test_image.shape)
z0 = C.splice(test_image, test_image, axis=0)
z1 = C.splice(test_image, test_image, -3)
z2 = C.splice(test_image, test_image, 2)
z = np.array([test_image, test_image])
def duck_obstacle():
    box = (Coord.dino_duck[0] - 54, Coord.dino_duck[1],
           Coord.dino_duck[0] + 30, Coord.dino_duck[1] + 10)
    image = ImageGrab.grab(box)
    grayimage = ImageOps.grayscale(image)
    a = array(grayimage.getcolors())
    return a.sum()
def locateAll(needleImage, haystackImage, grayscale=False, limit=None, region=None):
    # NOTE: the region argument is accepted here but not yet used by this version.
    needleFileObj = None
    haystackFileObj = None
    if isinstance(needleImage, str):
        # 'image' is a filename, load the Image object
        needleFileObj = open(needleImage, 'rb')
        needleImage = Image.open(needleFileObj)
    if isinstance(haystackImage, str):
        # 'image' is a filename, load the Image object
        haystackFileObj = open(haystackImage, 'rb')
        haystackImage = Image.open(haystackFileObj)

    if grayscale:
        needleImage = ImageOps.grayscale(needleImage)
        haystackImage = ImageOps.grayscale(haystackImage)

    needleWidth, needleHeight = needleImage.size
    haystackWidth, haystackHeight = haystackImage.size

    needleImageData = tuple(needleImage.getdata())
    haystackImageData = tuple(haystackImage.getdata())

    needleImageRows = [needleImageData[y * needleWidth:(y + 1) * needleWidth] for y in range(needleHeight)]
    needleImageFirstRow = needleImageRows[0]

    assert len(needleImageFirstRow) == needleWidth
    assert [len(row) for row in needleImageRows] == [needleWidth] * needleHeight

    numMatchesFound = 0
    for y in range(haystackHeight):
        for matchx in _kmp(needleImageFirstRow, haystackImageData[y * haystackWidth:(y + 1) * haystackWidth]):
            foundMatch = True
            for searchy in range(1, needleHeight):
                haystackStart = (searchy + y) * haystackWidth + matchx
                if needleImageData[searchy * needleWidth:(searchy + 1) * needleWidth] != haystackImageData[haystackStart:haystackStart + needleWidth]:
                    foundMatch = False
                    break
            if foundMatch:
                # Match found, report the x, y, width, height of where the matching region is in haystack.
                numMatchesFound += 1
                yield (matchx, y, needleWidth, needleHeight)
                if limit is not None and numMatchesFound >= limit:
                    # Limit has been reached. Close file handles and stop the generator.
                    if needleFileObj is not None:
                        needleFileObj.close()
                    if haystackFileObj is not None:
                        haystackFileObj.close()
                    return

    # There was no limit or the limit wasn't reached, but close the file handles anyway.
    if needleFileObj is not None:
        needleFileObj.close()
    if haystackFileObj is not None:
        haystackFileObj.close()
def get_gray_image(rgb_image):
    # return gray-scaled image
    return ImageOps.grayscale(rgb_image)
def detection_area():
    image = ImageGrab.grab(area)
    gray_img = ImageOps.grayscale(image)
    arr = np.array(gray_img.getcolors())
    return arr.mean()
def sketch(img):
    edge_img = img.filter(ImageFilter.CONTOUR)
    return ImageOps.grayscale(edge_img)
def emboss(img):
    edge_img = img.filter(ImageFilter.EMBOSS)
    return ImageOps.grayscale(edge_img)
async def grayscale(imagefile, endname):
    image = Image.open(imagefile)
    # renamed from 'inverted_image': the result is grayscale, not inverted
    grayscale_image = ImageOps.grayscale(image)
    grayscale_image.save(endname)
classs = {1: "Left", 2: "Center", 3: "Right"}

# Retrieving the images and their labels
print("Obtaining Images & its Labels..............")
for i in range(classes):
    path = os.path.join(cur_path, 'data/train/', str(i))
    images = os.listdir(path)
    for a in images:
        try:
            image = Image.open(os.path.join(path, a))
            image = ImageOps.grayscale(image)
            image = image.resize((64, 64))
            image = np.array(image)
            data.append(image)
            labels.append(i)
            print("{0} Loaded".format(a))
        except Exception:
            print("Error loading image")
print("Dataset Loaded")

# Converting lists into numpy arrays
data = np.array(data)
labels = np.array(labels)
print(data.shape, labels.shape)

# Splitting training and testing dataset
def detect(image, confidence_threshold=0.5, debug=False, debug_image=False):
    """Find marker glyphs in the image.
    Try different thresholds, accumulate the results and return the best.
    TODO: Look at local neighborhoods to find the best per-pixel threshold.
    Note: ARToolkitPlus autothresh is lame."""
    # Make grayscale if necessary
    grayimage = image if image.mode == "L" else ImageOps.grayscale(image)

    # Create the detector
    detector = _create(grayimage.size[0], grayimage.size[1], debug)

    # Build a map from marker-id to a list of markers with that id
    # detected at each successive threshold.
    allmarkers = {}

    # Systematically try different thresholds
    data = grayimage.tobytes()
    for thresh in range(16, 255, 16):
        # Set the current threshold and extract the markers
        _set_thresholds(detector, thresh, False, 0)
        num = _detect(detector, data)
        markers = [_get_marker(detector, m).contents for m in range(num)]
        if debug:
            msg = str(sorted([(m.id, m.confidence) for m in markers if m.confidence > 0]))
            logging.debug("Thresh {0} found {1} {2}".format(thresh, num, msg))

        # Add markers with high enough confidence to the map
        for marker in markers:
            if marker.confidence >= confidence_threshold:
                # Copy because it will be overwritten on the next detection
                marker = copy.deepcopy(marker)
                xform = Mat4()
                _get_marker_transform(detector, marker, xform.ctypes)
                xform.transpose()
                marker.init(xform)
                # If confidence is higher than current highest, replace list
                if (marker.id not in allmarkers or
                        marker.confidence > allmarkers[marker.id][0].confidence):
                    allmarkers[marker.id] = [marker]
                # Otherwise append to the list
                else:
                    allmarkers[marker.id].append(marker)

    # At this point, the markers in each individual list have the same
    # confidence. To pick the 'best' marker, first throw out any
    # markers that don't share the median direction, then pick the
    # marker with the median area. This should exclude bad detections.
    # For clarity, use a loop rather than list comprehensions.
    markerlists = allmarkers.values()
    markers = []
    for markerlist in markerlists:
        markerlist = sorted(markerlist, key=lambda m: m.direction)
        direction = markerlist[len(markerlist) // 2].direction
        markerlist = [marker for marker in markerlist if marker.direction == direction]
        markerlist = sorted(markerlist, key=lambda m: m.area)
        markers.append(markerlist[len(markerlist) // 2])

    # Print found markers and draw on the original image.
    if debug:
        logging.debug("Final markers:")
        logging.debug(str_markers(markers))
    if debug_image:
        draw_markers(markers, image)

    return markers
def _locateAll_python(needleImage, haystackImage, grayscale=None, limit=None, region=None, step=1):
    # setup all the arguments
    if grayscale is None:
        grayscale = GRAYSCALE_DEFAULT

    needleFileObj = None
    if isinstance(needleImage, str):
        # 'image' is a filename, load the Image object
        needleFileObj = open(needleImage, 'rb')
        needleImage = Image.open(needleFileObj)

    haystackFileObj = None
    if isinstance(haystackImage, str):
        # 'image' is a filename, load the Image object
        haystackFileObj = open(haystackImage, 'rb')
        haystackImage = Image.open(haystackFileObj)

    if region is not None:
        haystackImage = haystackImage.crop((region[0], region[1],
                                            region[0] + region[2], region[1] + region[3]))
    else:
        region = (0, 0)  # set to 0 because the code always accounts for a region

    if grayscale:
        # if grayscale mode is on, convert the needle and haystack images to grayscale
        needleImage = ImageOps.grayscale(needleImage)
        haystackImage = ImageOps.grayscale(haystackImage)
    else:
        # if not using grayscale, make sure we are comparing RGB images, not RGBA images.
        if needleImage.mode == 'RGBA':
            needleImage = needleImage.convert('RGB')
        if haystackImage.mode == 'RGBA':
            haystackImage = haystackImage.convert('RGB')

    # setup some constants we'll be using in this function
    needleWidth, needleHeight = needleImage.size
    haystackWidth, haystackHeight = haystackImage.size

    needleImageData = tuple(needleImage.getdata())
    haystackImageData = tuple(haystackImage.getdata())

    needleImageRows = [needleImageData[y * needleWidth:(y + 1) * needleWidth] for y in range(needleHeight)]
    needleImageFirstRow = needleImageRows[0]

    assert len(needleImageFirstRow) == needleWidth, 'For some reason, the calculated width of first row of the needle image is not the same as the width of the image.'
    assert [len(row) for row in needleImageRows] == [needleWidth] * needleHeight, "For some reason, the needleImageRows aren't the same size as the original image."

    numMatchesFound = 0

    # NOTE: After running tests/benchmarks.py on the following code, it seems that having a step
    # value greater than 1 does not give *any* significant performance improvements.
    # Since using a step higher than 1 makes for less accurate matches, it will be
    # set to 1.
    step = 1  # hard-code step as 1 until a way to improve it can be figured out.

    if step == 1:
        firstFindFunc = _kmp
    else:
        firstFindFunc = _steppingFind

    for y in range(haystackHeight):  # start at the leftmost column
        for matchx in firstFindFunc(needleImageFirstRow,
                                    haystackImageData[y * haystackWidth:(y + 1) * haystackWidth],
                                    step):
            foundMatch = True
            for searchy in range(1, needleHeight, step):
                haystackStart = (searchy + y) * haystackWidth + matchx
                if needleImageData[searchy * needleWidth:(searchy + 1) * needleWidth] != haystackImageData[haystackStart:haystackStart + needleWidth]:
                    foundMatch = False
                    break
            if foundMatch:
                # Match found, report the x, y, width, height of where the matching region is in haystack.
                numMatchesFound += 1
                yield (matchx + region[0], y + region[1], needleWidth, needleHeight)
                if limit is not None and numMatchesFound >= limit:
                    # Limit has been reached. Close file handles.
                    if needleFileObj is not None:
                        needleFileObj.close()
                    if haystackFileObj is not None:
                        haystackFileObj.close()
                    # raising StopIteration inside a generator is an error
                    # since PEP 479; a plain return stops the generator cleanly
                    return

    # There was no limit or the limit wasn't reached, but close the file handles anyway.
    if needleFileObj is not None:
        needleFileObj.close()
    if haystackFileObj is not None:
        haystackFileObj.close()

    if RAISE_IF_NOT_FOUND and numMatchesFound == 0:
        raise ImageNotFoundException('Could not locate the image.')
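# Illustrative usage sketch (not from the original source): file names are
# hypothetical. region is (left, top, width, height) within the haystack, and
# the yielded boxes are already translated back into haystack coordinates.
for x, y, w, h in _locateAll_python('needle.png', 'screenshot.png',
                                    grayscale=True, region=(0, 0, 800, 600)):
    print('match at', x, y)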
#!/usr/bin/env python
# coding: utf-8

# # Fractal dimension regression

import numpy as np
import numpy.linalg as linalg
import matplotlib.pyplot as plt
from PIL import Image, ImageFilter, ImageOps
from scipy import interpolate
from scipy import integrate

from src.intensity_entropy import *
from src.kernels import *

plt.rcParams['image.cmap'] = 'inferno'

img = ImageOps.grayscale(Image.open('test.jpg'))
scale = max(np.shape(img))
data = np.array(img)
img  # (notebook cell output: displays the image)

# ## Box-counting dimension

def boxdim(data):
    εs = np.linspace(2, min(np.shape(data)))
    boxes = [
        np.log(np.sum(mapblocks(ε, ε, lambda x: 1 if np.any(x) else 0, data)))
        for ε in εs
    ]
    logεs = np.log(εs)
    endεs = logεs[[0, -1]]
# (tail of a plotting helper whose definition is not included in this snippet)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')

# map each array index to its type
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

my_labels = []
my_images = []

sleeveless = Image.open("sleeveless.png")
sleeveless_grey = ImageOps.invert(ImageOps.grayscale(sleeveless))
sleeveless_grey = sleeveless_grey.resize((28, 28))
arr = np.asarray(sleeveless_grey) / 255.0
sleeveless_grey = ImageOps.invert(sleeveless_grey)
sleeveless_grey.save("sleeveless_grey.png", "PNG")
my_images.append(arr)
# The top is technically a top, but I expect it to be mapped to a dress.
my_labels.append(0)
sleeveless.close()

yeezy = Image.open("yeezy.png")
yeezy_grey = ImageOps.invert(ImageOps.grayscale(yeezy))
yeezy_grey = yeezy_grey.resize((28, 28))
arr = np.asarray(yeezy_grey) / 255.0
yeezy_grey = ImageOps.invert(yeezy_grey)
yeezy_grey.save("yeezy_grey.png", "PNG")
def grayscale(f):
    im = Image.open(f)
    im_convert = ImageOps.grayscale(im)
    im_convert.save(get_uploaded_file(f))
def gray_scale(self):
    image = Image.fromarray(self.array)
    image = ImageOps.grayscale(image)
    return np.asarray(image)
def filter(self, kind, param=None):
    """Filter the image.

    :param kind: The kind of filter to use on the image. Should be one of
        { 'threshold', 'gray', 'opaque', 'invert', 'posterize', 'blur',
        'erode', 'dilate' }
    :type kind: str

    :param param: optional parameter for the filter in use (defaults to
        None). Only required for 'threshold' (the threshold to use, param
        should be a value between 0 and 1; defaults to 0.5), 'posterize'
        (limiting value for each channel should be between 2 and 255), and
        'blur' (gaussian blur radius, defaults to 1.0).
    :type param: int | float | None
    """
    filter_name = kind.lower()
    if param is None:
        default_values = {
            'threshold': 0.5,
            'blur': 1.0,
            'gaussian_blur': 1.0,
            'box_blur': 1.0,
            'posterize': 1,
            'opacity': 0.5,
        }
        param = default_values.get(filter_name, None)

    if filter_name in ['blur', 'gaussian_blur']:
        fim = self._img.filter(ImageFilter.GaussianBlur(radius=param))
        self._img = fim
    elif filter_name == 'box_blur':
        self._img = self._img.filter(ImageFilter.BoxBlur(param))
    elif filter_name in ['gray', 'grey', 'grayscale']:
        self._img = ImageOps.grayscale(self._img)
    elif filter_name == 'opaque':
        self._img.putalpha(255)
    elif filter_name == 'opacity':
        self._img.putalpha(int(param * 255))
    elif filter_name == 'invert':
        self._img = ImageOps.invert(self._img)
    elif filter_name == 'posterize':
        nbits = 0
        while int(param) != 0:
            param = param >> 1
            nbits = nbits + 1
        nbits = constrain(nbits, 1, 8)
        self._img = ImageOps.posterize(self._img, nbits)
    elif filter_name == 'threshold':
        dat = np.asarray(ImageOps.grayscale(self._img)).copy()
        dat[dat < int(128 * param)] = 0
        dat[dat >= int(128 * param)] = 255
        self._img = Image.fromarray(dat)
    elif filter_name in ['erode', 'dilate']:
        raise NotImplementedError
    else:
        raise ValueError("Unknown filter")

    self._reload = True
def toGrayscale(imgs):
    return [ImageOps.grayscale(img) for img in imgs]
sharpen.enhance(2).save('pic_sharpen_enhance.png')

print('Stage 3:')
try:
    # lowercase extension matches the file saved above (case matters on Linux)
    s3 = pytesseract.image_to_string(Image.open('pic_sharpen_enhance.png'))
except UnicodeError:
    print('The input text contains a character that is not supported by the '
          'currently installed codec; please upload a better image')
print('<', s3, '>')

##############################################################

inverted_image = ImageOps.invert(Image.open('pic_sharpen_enhance.png'))
for i in range(10):
    ImageEnhance.Sharpness(inverted_image).enhance(2).save('pic_sharpen_enhance_sharpen.png')

k = Image.open('pic.jpg')
k = ImageOps.grayscale(k)
k = ImageOps.invert(k)
k = ImageEnhance.Contrast(k).enhance(3)
k.save('pic_sharpen_enhance_sharpen.png')
ImageEnhance.Sharpness(inverted_image).enhance(2).save('pic_sharpen_enhance_sharpen.png')

print('Stage 4:')
try:
    s4 = pytesseract.image_to_string(Image.open('pic_sharpen_enhance_sharpen.png'))
except UnicodeError:
    print('The input text contains a character that is not supported by the '
          'currently installed codec; please upload a better image')
print('<', s4, '>')
import pyautogui
import time
import os
from PIL import ImageGrab, ImageOps
from numpy import *

pyautogui.click(341, 210)

while True:
    box = (242, 477, 523, 559)
    im = ImageOps.grayscale(ImageGrab.grab(box))
    t = array(im.getcolors())
    t = t.sum()
    if t == 44084 or t == 26742:
        pyautogui.press("f")
        print("F")
    elif t == 45745 or t == 32727:
        pyautogui.press("d")
        print("D")
    elif t == 47745 or t == 35466:
        pyautogui.press("s")
        print("S")
    elif t == 47410 or t == 35639:
        pyautogui.press("a")
        print("A")
def image_to_numpy(image, order='F'):
    image = ImageOps.grayscale(image)
    array = np.array(image.getdata(), np.float64) / 255
    array = array.reshape(image.size[0], image.size[1], order='C')
    return array.reshape(1, array.size, order=order)
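# Illustrative usage sketch (not from the original source): 'sample.png' is a
# hypothetical file. The result is a 1 x (width*height) row vector in [0, 1].
from PIL import Image

vec = image_to_numpy(Image.open('sample.png'))
print(vec.shape)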
def grab(box):
    im = ImageOps.grayscale(ImageGrab.grab(box))
    a = array(im.getcolors())
    a = a.sum()
    # print(a)
    return a
def predict():
    imageArray = []
    if request.method == 'POST':
        canvasData = request.form['canvasimg']
        binaryImg = b64decode(canvasData.split(',')[1])
        filename = "digito.png"

        # write the image data to disk
        with open(filename, 'wb') as f:
            f.write(binaryImg)

        # img = cv2.imread(filename, cv2.IMREAD_UNCHANGED)
        image_file = Image.open(filename)  # opens image
        resized_im = image_file.resize((28, 28))

        # if resized_im.mode == 'RGBA':
        # Create a blank background image
        bg = Image.new('RGB', resized_im.size, (255, 255, 255))
        # Paste image onto the background image
        bg.paste(resized_im, (0, 0), resized_im)
        g = bg.convert('L')
        gg = ImageOps.invert(g)
        gg = ImageOps.grayscale(gg)
        gg = ImageOps.equalize(gg, mask=None)
        resImg = np.array(gg)
        print(resImg.shape)

        # plt.figure(figsize=(10, 10))
        # plt.hist(resImg.ravel(), 256, [0, 256])
        # plt.show()
        plt.imshow(gg, cmap='gray')
        plt.show()

        imageArray = np.array(gg)
        print(imageArray.shape)

        # <<< If using a PyTorch model >>>
        # Add the extra axes the model input expects
        # ia = np.array(imageArray[1000, None, :, :], copy=True)
        # print(ia.shape)
        # convert the numpy array to a tensor
        # imgTensor = torch.from_numpy(ia)
        # with torch.no_grad():
        #     output = network(imgTensor.to(device))
        #     print(output.shape)

        # <<< If using a Keras model >>>
        prediccion = model.predict(imageArray[None, :, :, None])
        df = pd.DataFrame({
            "Digito": list(range(0, 10)),
            "Probabilidad": prediccion.ravel()
        })
        df["Digito"] = df["Digito"].astype(str)
        df = df.sort_values(by="Probabilidad", ascending=False)
        sns.barplot(x="Probabilidad", y="Digito", data=df)
        plt.title("EL digito es un " + df.iloc[0, 0] + " con " +
                  str(round(float(df.iloc[0, 1]) * 100, 1)) + "% de Probabilidad")
        plt.show()
    return render_template('index.html', name=None, pred=None, w=260, h=260, lw=15)
def DrawWordcloud(read_name):
    image = Image.open('timg.jpg')  # image used as the background shape
    graph = np.array(image)
    # Parameters: font, background color, maximum word size, and the given
    # image as the background shape.
    # max_words: maximum number of words to display
    # scale: float (default=1); scales the canvas, e.g. 1.5 makes both width
    #     and height 1.5x the original canvas
    # mask: if empty, a 2D mask is used to draw the word cloud; if non-empty,
    #     the mask replaces the canvas shape
    wc = WordCloud(
        font_path='C:\\windows\\Fonts\\msyhbd.ttc',  # 'C:\\windows\\Fonts\\simhei.ttf'
        background_color='White',
        max_words=3000,
        mask=graph,
        random_state=30,
        scale=1)
    tb = pd.read_csv(read_name)  # read the word-frequency CSV into a DataFrame
    words = list(tb.word)  # word list
    values = tb.val  # frequency list
    dic = {}
    for key, val in zip(words, values):
        dic[key] = val
    wc.generate_from_frequencies(dic)  # generate the word cloud from the given frequencies
    image_color = ImageColorGenerator(graph)
    plt.imshow(wc)
    plt.axis("off")  # hide the axes
    plt.show()

    img3 = wc.to_image()
    img3 = img3.convert('RGBA')
    # img3.show()
    wc.to_file('Wordcloud.png')  # save the image as Wordcloud.png

    img1 = Image.open('timg.jpg').convert('L')  # read the image and convert to grayscale
    edges = filters.sobel(img1)
    e_img = Image.fromarray(edges)
    e_img.show()
    # plt.imshow(edges, plt.cm.gray)
    # img1.show()

    img_array = np.array(img1)  # convert to an array
    w, h = img_array.shape
    img_border = np.zeros((w + 1, h + 1))
    for x in range(1, w - 1):
        for y in range(1, h - 1):
            # Sobel operator: horizontal and vertical gradients
            Sx = img_array[x + 1][y - 1] + 2 * img_array[x + 1][y] + img_array[x + 1][y + 1] - \
                 img_array[x - 1][y - 1] - 2 * img_array[x - 1][y] - img_array[x - 1][y + 1]
            Sy = img_array[x - 1][y + 1] + 2 * img_array[x][y + 1] + img_array[x + 1][y + 1] - \
                 img_array[x - 1][y - 1] - 2 * img_array[x][y - 1] - img_array[x + 1][y - 1]
            img_border[x][y] = (Sx * Sx + Sy * Sy) ** 0.5
            # img_border[x-1][y-1] = img_border[x][y]

    img2 = Image.fromarray(img_border)
    # img2.convert('RGBA')
    # img2.show()
    i_img2 = ImageOps.grayscale(img2)
    # i_img2.show()
    i_img3 = ImageOps.invert(i_img2)
    i_img3 = i_img3.convert('RGBA')
    # r, g, b, alpha = i_img3.split()
    # i_img3.show()
    # alpha = alpha.point(lambda i: i > 0 and 204)
    # i_img3.size = img3.size
    # img4 = img3.paste(i_img3, (0, 0))
    # img4 = Image.composite(img3, i_img3, alpha)
    i_img3 = i_img3.resize(img3.size)
    img4 = Image.blend(img3, i_img3, 0.3)
    img4.show()
async def _process_commands(self, command: str, path: str):
    ar = [x.split(" ") for x in [a.strip(" ") for a in command.split(',')]]
    im: Image = PIL.Image.open(path)
    # im.load()
    im = im.convert("RGBA")
    print(ar)
    fmt = "png"
    for e in ar:
        # pad each command with Nones so optional arguments can be tested
        for i in range(3):
            e.append(None)
        if e[0] == "convert":
            mode = e[1]
            # split into a list: a bare substring test would accept e.g. "b"
            if not mode or mode not in "bw,rgb,rgba,luma,web,adaptive".split(","):
                raise commands.ConversionError("Unknown conversion mode", original=None)
            if mode == "bw":
                im = im.convert("1")
            if mode in ["rgb", "rgba"]:
                im = im.convert(mode.upper())
            if mode == "luma":
                im = im.convert("L")
            if mode == "web":
                im = im.convert("P")
            if mode == "adaptive":
                im = im.convert("P", palette=PIL.Image.ADAPTIVE,
                                colors=min(int(e[2]), 256) if e[2] else 256)
        if e[0] == "format":
            if not e[1] or e[1] not in "png,jpg,gif".split(","):
                raise commands.ConversionError(f"Invalid output format {e[1]}", original=None)
            fmt = e[1]
        if e[0] == "bw":
            im = im.convert("1").convert("RGBA")
        if e[0] == "luma":
            im = im.convert("L").convert("RGBA")
        if e[0] == "autocontrast":
            im = im.convert("RGB")
            im = ImageOps.autocontrast(im, int(e[1]) if e[1] else 0)
            im = im.convert("RGBA")
        if e[0] == "rotate":
            im = im.rotate(int(e[1]))
        if e[0] == "invert":
            im = ImageOps.invert(im)
        if e[0] == "grayscale":
            im = ImageOps.grayscale(im)
        if e[0] == "equalize":
            im = ImageOps.equalize(im)
        if e[0] == "sepia":
            im = ImageOps.colorize(ImageOps.grayscale(im), (0, 0, 0), (255, 255, 255), mid=(112, 66, 20))
        if e[0] == "colorize":
            im = ImageOps.colorize(ImageOps.grayscale(im), (0, 0, 0), (255, 255, 255), mid=self._color(e[1]))
        if e[0] == "posterize":
            im = ImageOps.posterize(im, int(e[1]) if e[1] else 128)
        if e[0] == "solarize":
            im = im.convert("RGB")
            a = int(e[1]) if e[1] else 128
            im = ImageOps.solarize(im, a)
            im = im.convert("RGBA")
        if e[0] == "flip":
            im = ImageOps.flip(im)
        if e[0] == "mirror":
            im = ImageOps.mirror(im)
        if e[0] == "blur":
            a = int(e[1]) if e[1] else 2
            im = im.filter(ImageFilter.GaussianBlur(a))
        if e[0] == "boxblur":
            a = int(e[1]) if e[1] else 2
            im = im.filter(ImageFilter.BoxBlur(a))
        if e[0] == "sharpen":
            im = im.filter(ImageFilter.UnsharpMask(int(e[1]) if e[1] else 2,
                                                   int(e[2]) if e[2] else 150,
                                                   int(e[3]) if e[3] else 3))
        if e[0] == "scale":
            im = ImageOps.scale(im, float(e[1]))
        if e[0] == "pscale":
            im = ImageOps.scale(im, float(e[1]), PIL.Image.NEAREST)
        if e[0] == "scalexy":
            im = im.resize((int(im.width * float(e[1])), int(im.height * float(e[2]))))
        if e[0] == "pscalexy":
            im = im.resize((int(im.width * float(e[1])), int(im.height * float(e[2]))), PIL.Image.NEAREST)
        if e[0] == "scaleto":
            im = im.resize((int(e[1]), int(e[2])), PIL.Image.BICUBIC)
        if e[0] == "pscaleto":
            im = im.resize((int(e[1]), int(e[2])), PIL.Image.NEAREST)
        if e[0] == "potografy":
            im = im.resize((int(im.width / 20), int(im.height / 4)), PIL.Image.NEAREST)
            im = im.resize((int(im.width * 20), int(im.height * 4)), PIL.Image.NEAREST)
            im = ImageOps.posterize(im, 2)
            im = ImageOps.colorize(ImageOps.grayscale(im), (0, 0, 0), (255, 255, 255), mid=(112, 66, 20))
            im = im.rotate(25)
        if e[0] == "matrix":
            size = (im.width, im.height)
            im = im.resize((int(e[1]), int(e[2])), PIL.Image.NEAREST)
            im = im.resize(size, PIL.Image.NEAREST)

    a = path.split(".")
    async with self.lock:
        self.counter += 1
        b = str(self.counter) + "." + fmt  # + a[-1]
    im.save(b)
    return True, b, os.path.getsize(b)
def received_message(self, m):
    payload = m
    dat = msgpack.unpackb(payload, raw=False)

    image = []
    depth = []
    agent_count = len(dat['image'])
    for i in range(agent_count):
        img = Image.open(io.BytesIO(bytearray(dat['image'][i])))
        img = img.convert('L')
        img = img.resize((84, 84))
        img = np.array(img, dtype=np.float32)
        image.append(img)
        if self.depth_image_count == 1:
            depth_dim = len(dat['depth'][0])
            temp = Image.open(io.BytesIO(bytearray(dat['depth'][i])))
            depth.append(np.array(ImageOps.grayscale(temp)).reshape(self.depth_image_dim))

    if self.ir_count == 1:
        ir = dat['ir']
        ir_dim = len(ir[0])
    else:
        ir = []
        ir_dim = 0

    if self.ground_count == 1:
        ground = dat['ground']
        ground_dim = len(ground[0])
    else:
        ground = []
        ground_dim = 0

    if self.compass_count == 1:
        compass = dat['compass']
        compass_dim = len(compass[0])
    else:
        compass = []
        compass_dim = 0

    if self.target_count == 1:
        target = dat['target']
        target_dim = len(target[0])
    else:
        target = []
        target_dim = 0

    observation = {
        "image": image,
        "depth": depth,
        "ir": ir,
        "ground": ground,
        "compass": compass,
        "target": target
    }
    reward = np.array(dat['reward'], dtype=np.float32)
    # plain bool: the np.bool alias was removed from recent NumPy releases
    end_episode = np.array(dat['endEpisode'], dtype=bool)

    if args.model == 'None':
        action = self.agent.select_action(observation, end_episode)
        self.agent.step(observation, action, reward, end_episode, 0.5)
        print(action, '\n', reward)
        self.send_action(action)
# variable names (set here)
image = "q4.jpg"
text_file = "text"
new_image = "formatted1.jpg"

# capture an image from the camera
pygame.camera.init()
cam = pygame.camera.Camera(pygame.camera.list_cameras()[0])
cam.start()
img = cam.get_image()
pygame.image.save(img, "q4.jpg")
pygame.camera.quit()

# image formatting
im = Image.open(image)
im_g = ImageOps.grayscale(im)
im_gc = ImageOps.autocontrast(im_g, cutoff=10, ignore=0)
im_gc.save(new_image)

# image to text
print("tesseract " + new_image + " " + text_file)
call("tesseract " + new_image + " " + text_file, shell=True)
print("OCR complete")

# text to speech
# Open the text file and split the paragraph into sentences
fname = text_file + ".txt"
f = open(fname)
content = f.read()
print(content)
# path_model = '/app/models/5b96af9c0354c9000b0aea36_VGG6_20181207_151757.h5'
print('loading model')
# model = tf.keras.models.load_model(path_model)
tic = time.time()
model = load_model(path_model)
toc = time.time()
print(f'done. took {toc-tic} seconds')

model_input_shape = model.input_shape[1:3]

# Compute ML scores one by one:
tic = time.time()
for path_streak in path_streaks:
    # use the loop variable here: the original indexed path_streaks[0],
    # which scored the same (first) image on every iteration
    x = np.array(ImageOps.grayscale(Image.open(path_streak)).resize(
        model_input_shape, Image.BILINEAR)) / 255.
    x = np.expand_dims(x, 2)
    x = np.expand_dims(x, 0)
    score = model.predict(x)
    # print(os.path.basename(path_streak), score)
toc = time.time()
print(f'running prediction one by one took {toc-tic} seconds.')

batch_size = 32
tic = time.time()
scores = model.predict(images, batch_size=batch_size)
toc = time.time()
print(f'running prediction with batch_size={batch_size} took {toc-tic} seconds.')
def sum_of_rect(a, b, c, d):
    img = ImageGrab.grab((a, b, c, d))
    grayImg = ImageOps.grayscale(img)
    arr = array(grayImg.getcolors())
    return arr.sum()
def test_sanity(self):
    ImageOps.autocontrast(hopper("L"))
    ImageOps.autocontrast(hopper("RGB"))
    ImageOps.autocontrast(hopper("L"), cutoff=10)
    ImageOps.autocontrast(hopper("L"), ignore=[0, 255])

    ImageOps.autocontrast_preserve(hopper("L"))
    ImageOps.autocontrast_preserve(hopper("RGB"))
    ImageOps.autocontrast_preserve(hopper("L"), cutoff=10)
    ImageOps.autocontrast_preserve(hopper("L"), ignore=[0, 255])

    ImageOps.colorize(hopper("L"), (0, 0, 0), (255, 255, 255))
    ImageOps.colorize(hopper("L"), "black", "white")

    ImageOps.crop(hopper("L"), 1)
    ImageOps.crop(hopper("RGB"), 1)

    ImageOps.deform(hopper("L"), self.deformer)
    ImageOps.deform(hopper("RGB"), self.deformer)

    ImageOps.equalize(hopper("L"))
    ImageOps.equalize(hopper("RGB"))

    ImageOps.expand(hopper("L"), 1)
    ImageOps.expand(hopper("RGB"), 1)
    ImageOps.expand(hopper("L"), 2, "blue")
    ImageOps.expand(hopper("RGB"), 2, "blue")

    ImageOps.fit(hopper("L"), (128, 128))
    ImageOps.fit(hopper("RGB"), (128, 128))

    ImageOps.flip(hopper("L"))
    ImageOps.flip(hopper("RGB"))

    ImageOps.grayscale(hopper("L"))
    ImageOps.grayscale(hopper("RGB"))

    ImageOps.invert(hopper("L"))
    ImageOps.invert(hopper("RGB"))

    ImageOps.mirror(hopper("L"))
    ImageOps.mirror(hopper("RGB"))

    ImageOps.posterize(hopper("L"), 4)
    ImageOps.posterize(hopper("RGB"), 4)

    ImageOps.solarize(hopper("L"))
    ImageOps.solarize(hopper("RGB"))