def enhance(pixbuf, brightness=1.0, contrast=1.0, saturation=1.0,
            sharpness=1.0, autocontrast=False):
    """Return a modified pixbuf from <pixbuf> where the enhancement
    operations corresponding to each argument have been performed. A value
    of 1.0 means no change. If <autocontrast> is True it overrides the
    <contrast> value, but only if the image mode is supported by
    ImageOps.autocontrast (i.e. it is L or RGB).
    """
    im = pixbuf_to_pil(pixbuf)
    if brightness != 1.0:
        im = ImageEnhance.Brightness(im).enhance(brightness)
    if autocontrast and im.mode in ('L', 'RGB'):
        im = ImageOps.autocontrast(im, cutoff=0.1)
    elif contrast != 1.0:
        im = ImageEnhance.Contrast(im).enhance(contrast)
    if saturation != 1.0:
        im = ImageEnhance.Color(im).enhance(saturation)
    if sharpness != 1.0:
        im = ImageEnhance.Sharpness(im).enhance(sharpness)
    return pil_to_pixbuf(im)
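A minimal self-contained sketch of the same chaining pattern on a plain PIL image, without the pixbuf_to_pil/pil_to_pixbuf conversion helpers the function above relies on; the file names are placeholders.

from PIL import Image, ImageEnhance, ImageOps

im = Image.open('photo.jpg')                   # placeholder input path
im = ImageEnhance.Brightness(im).enhance(1.2)  # 20% brighter
if im.mode in ('L', 'RGB'):                    # autocontrast supports L/RGB only
    im = ImageOps.autocontrast(im, cutoff=0.1)
im = ImageEnhance.Color(im).enhance(0.9)       # slightly desaturate
im = ImageEnhance.Sharpness(im).enhance(1.5)   # sharpen
im.save('photo_enhanced.jpg')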
def composeForTwitter(self, images):
    print("composing for twitter")
    strip = Image.new('RGB', (self.twitterLayout["width"], self.twitterLayout["photoDim"]), (0, 0, 0))
    count = 0
    dim = self.twitterLayout["photoDim"]
    #for inFile in glob.glob(os.path.join(imagedir, '*.JPG')):
    for img in images:
        if count > 1:
            break
        #print("\t" + str(inFile))
        #img = Image.open(inFile)
        posX = 5
        if count > 0:
            posX = 855
        posY = dim / 2  # unused in the original
        # Crop a horizontally centered square and resize it to fit the slot.
        bbox = img.getbbox()
        img = img.crop(((bbox[2] / 2) - (bbox[3] / 2), 0, (bbox[2] / 2) + (bbox[3] / 2), bbox[3]))
        img = img.resize((dim - 10, dim - 10))
        #img = ImageOps.autocontrast(img, cutoff=2)
        if self.grey:
            img = ImageOps.grayscale(img)
        enh = ImageEnhance.Brightness(img)
        img = enh.enhance(0.8)
        enh = ImageEnhance.Contrast(img)
        img = enh.enhance(1.3)
        strip.paste(img, (posX, 5))
        count = count + 1
    overlay = self.twitterLayout["overlay"]
    strip.paste(overlay, None, overlay)
    #path = os.path.join(imagedir, 'twitterStrip.PNG')
    #path = self.saveImageToOutgoing(strip, "twitter")
    #dateString = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')
    #path = os.path.join(self.outgoingPath, dateString + '_twitter.PNG')
    #strip.save(path, 'PNG')
    print("\n")
    return [strip, "twitter"]
#coding=utf-8
import Image, ImageEnhance

pic = Image.open("../images/test2.jpg")

# Brightness enhancement
brightness = ImageEnhance.Brightness(pic)
bright_pic = brightness.enhance(2.0)
bright_pic.show()
bright_pic.save("../images/test6.jpg")

# Image sharpening
sharpness = ImageEnhance.Sharpness(pic)
sharp_pic = sharpness.enhance(5.0)
sharp_pic.show()
sharp_pic.save("../images/test7.jpg")

# Contrast enhancement
contrast = ImageEnhance.Contrast(pic)
contrast_pic = contrast.enhance(3.0)
contrast_pic.show()
contrast_pic.save("../images/test8.jpg")
def update(self, frame, events):
    if self.running:
        for key, data in events.iteritems():
            if key not in ('dt',):  # fixed: ('dt') is a plain string, not a tuple
                try:
                    self.data[key] += data
                    if key == 'pupil_positions':
                        try:
                            pupilDict = data[0]
                            self.pupil_diameter.append(pupilDict.get('diameter'))
                        except:
                            print('error')
                    elif key == 'gaze_positions':
                        try:
                            gazeDict = data[0]
                            self.gaze_point_x.append(gazeDict.get('norm_pos')[0])
                            self.gaze_point_y.append(gazeDict.get('norm_pos')[1])
                            self.gaze_point_z.append(gazeDict.get('gaze_point_3d')[2])
                        except:
                            print('error')
                    # image process (OCR): find the condition that should trigger OCR
                except KeyError:
                    self.data[key] = []
                    self.data[key] += data
        print str(self.cnum)
        self.cnum = self.cnum + 1
        if len(self.gaze_point_x) > 0 and self.cnum == 30:
            print('Enter TO the block')
            # Map the normalized gaze y coordinate to pixel space (720p frame).
            height = (1 - self.gaze_point_y[-1]) * 720
            height = int(height)
            image = frame.img
            if height < 100:
                image = frame.img[0:height + 200, 300:1000]
            elif height > 520:
                image = frame.img[height - 100:720, 300:1000]
            else:
                image = frame.img[height - 100:height + 200, 300:1000]
            cv2.imwrite('/home/eyetracking/pupil/' + "frame%d.jpg" % self.cnum, image)
            image = Image.open('/home/eyetracking/pupil/' + "frame%d.jpg" % self.cnum)
            enhancer = ImageEnhance.Contrast(image)
            image = enhancer.enhance(4)
            # Redirect stdout to a file to save the OCR output.
            totemp = sys.stdout
            sys.stdout = codecs.open('/home/eyetracking/pupil/ocr.txt', encoding='utf-8', mode='w+')
            print image_to_string(image)
            sys.stdout.close()
            sys.stdout = totemp
            try:
                # NOTE: 'dst' is not defined in this snippet; a NameError here
                # is caught and printed by the except clause below.
                i = 300
                for line in open('/home/eyetracking/pupil/ocr.txt'):
                    i = i + 20
                    cv2.putText(dst, line, (0, i), cv2.FONT_HERSHEY_COMPLEX, 0.6, (0, 0, 255))
                cv2.imshow("window", dst)
                cv2.waitKey(33)
            except Exception as error:
                print(error)
        self.timestamps.append(frame.timestamp)
        self.writer.write_video_frame(frame)
        self.frame_count += 1
        # cv2.putText(frame.img, "Frame %s" % self.frame_count, (200, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 100, 100))
        self.button.status_text = self.get_rec_time_str()
#fbase="/media/1A8F-0227/Landsat/LT50150282011282EDC00/LT50150282011282EDC00" #fbase="/cygdrive/d/Landsat/LT50190272011278EDC00/LT50190272011278EDC00" #fbase="/media/Acer/cygwin/home/Petey/data/landsat/LE70160282011169EDC00" #outfile="image2.pgm" fbase = sys.argv[1] outfile = sys.argv[2] factor = 1 fblue = fbase + "_B1.TIF" fgreen = fbase + "_B2.TIF" fred = fbase + "_B3.TIF" iblue = Image.open(fblue) contr = ImageEnhance.Contrast(iblue) iblue = contr.enhance(factor) igreen = Image.open(fgreen) contr = ImageEnhance.Contrast(igreen) igreen = contr.enhance(factor) ired = Image.open(fred) contr = ImageEnhance.Contrast(ired) ired = contr.enhance(factor) irgb = Image.merge("RGB", (ired, igreen, iblue)) #color=ImageEnhance.Color(irgb) #final=color.enhance(5) #final.save(outfile) irgb.save(outfile)
# 'blueF' and the loop index 'i' are presumably defined earlier; the snippet
# is truncated here.
greenF = gl.glob('/home/jghao/research/data/des_realimage/des-google/testrun_stripe82_fieldF_r/*.fits')
redF = gl.glob('/home/jghao/research/data/des_realimage/des-google/testrun_stripe82_fieldF_i/*.fits')
blueF.sort()
greenF.sort()
redF.sort()

scale = [0.025 / 8., 0.025 / 8., 0.05 / 8.]
img = colorImg(redF[i], greenF[i], blueF[i], scale=[0.025 / 8., 0.025 / 8., 0.05 / 8.])
ehImg = ImageEnhance.Contrast(img)
newImg = ehImg.enhance(1.2)
newImg = newImg.crop((5, 10, 2044, 4090))  # fixed: crop() returns a new image
newImg.save('descolor.png', format='PNG')
#img.save('/home/jghao/research/data/des_realimage/des-google/testrun_stripe82_color/' + blueF[i][-30:-21] + '.PNG', format="PNG")

#-----sdss image ----
redF = 'fpC-100006-i1-0062.fit.gz'
greenF = 'fpC-100006-r1-0062.fit.gz'
blueF = 'fpC-100006-g1-0062.fit.gz'
scale = [0.025, 0.025, 0.045]
img = colorImg(redF, greenF, blueF, scale=[0.025, 0.025, 0.045])
#---des image ---
def __call__(self, im):
    contrast = ImageEnhance.Contrast(im)
    im = contrast.enhance(np.random.normal(1, self.sigma))
    return im
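A hedged sketch of the full transform class this method presumably belongs to, assuming the containing class only stores sigma; the class name RandomContrast and the usage line are hypothetical.

import numpy as np
from PIL import Image, ImageEnhance

class RandomContrast(object):
    """Contrast jitter: scale contrast by a factor drawn from N(1, sigma)."""

    def __init__(self, sigma=0.1):
        self.sigma = sigma

    def __call__(self, im):
        contrast = ImageEnhance.Contrast(im)
        return contrast.enhance(np.random.normal(1, self.sigma))

# e.g. aug = RandomContrast(sigma=0.05); out = aug(Image.open('sample.jpg'))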
def visualize_image(self, importances, img_name, image_title, vmin, vmax):
    """Render the visualization for a single image and save it to a file.

    Args:
        self: The Visualization instance to use.
        importances: Array of importance values.
        img_name: Name of the image file to visualize.
        image_title: Title that gets displayed above the visualization.
        vmin: Importances with an absolute value below vmin won't get rendered.
        vmax: Importances with an absolute value above vmax won't get rendered.
    """
    img_path = os.path.join(self.datamanager.PATHS["IMG"], img_name)
    if not os.path.isfile(img_path):
        return
    img = Image.open(os.path.join(self.datamanager.PATHS["IMG"], img_name))

    # convert image to grayscale
    if img.mode != "L":
        grayscale = img.convert('L').convert('RGB')
    else:
        img = img.convert('RGB')
        grayscale = img

    # reduce contrast of grayscale image
    contrast = ImageEnhance.Contrast(grayscale)
    grayscale = contrast.enhance(0.5)

    heatmap = self.heatmap_data(img_name, importances, (img.size[1], img.size[0]))
    has_negative = (heatmap < 0.0).any()  # is this an image with negative weights?
    pos_heat = np.ma.masked_less(heatmap, vmin)
    if has_negative:
        pos_colormap = cm.Reds
        neg_heat = np.ma.masked_greater(heatmap, -vmin)
        neg_colormap = self.reverse_color_map(cm.Blues)
    else:
        pos_colormap = cm.Reds

    # create visualization
    gridspec = GridSpec(2, 2, height_ratios=[1, 3])

    # original thumbnail
    img_subplot = pl.subplot(gridspec[0, 0])
    img_subplot.axis('off')
    img_subplot.imshow(img)

    # heatmap thumbnail
    heat_subplot = pl.subplot(gridspec[0, 1])
    heat_subplot.axis('off')
    axes_heat = heat_subplot.imshow(pos_heat, cmap=pos_colormap, alpha=1.0, vmin=vmin, vmax=vmax)
    ticks = np.linspace(vmin, vmax, 10)
    if has_negative:
        axes_neg = heat_subplot.imshow(neg_heat, cmap=neg_colormap, alpha=1.0, vmin=-vmax, vmax=-vmin)

    # combined subplot of grayscale image and heatmap
    combined_subplot = pl.subplot(gridspec[1, :])
    combined_subplot.axis('off')
    combined_subplot.imshow(grayscale)
    combined_subplot.imshow(pos_heat, cmap=pos_colormap, alpha=0.55)
    divider = make_axes_locatable(combined_subplot)
    cax = divider.append_axes("right", size=0.25, pad=0.33)
    pl.colorbar(axes_heat, cax=cax, ticks=ticks)
    if has_negative:
        combined_subplot.imshow(neg_heat, cmap=neg_colormap, alpha=0.55)
        cax = divider.append_axes("left", size=0.25, pad=0.33)
        neg_cb = pl.colorbar(axes_neg, cax=cax, ticks=-ticks)
        neg_cb.ax.yaxis.set_ticks_position("left")

    # save figure
    fig_path = os.path.join(self.datamanager.PATHS["RESULTS"], "_".join(image_title.lower().split()[0:2]))
    if not os.path.isdir(fig_path):
        os.makedirs(fig_path)
    pl.suptitle(image_title)
    pl.savefig(os.path.join(fig_path, img_name + ".png"), format="png")
    pl.clf()
def enhance(eimage):
    enh = ImageEnhance.Contrast(eimage)
    enh.enhance(1.3).show("30% more contrast")
from pytesser import *
import ImageEnhance

image = Image.open('C:/Users/Administrator/Desktop/snap.png')
enhancer = ImageEnhance.Contrast(image)
image_enhancer = enhancer.enhance(4)
text = image_to_string(image_enhancer)  # renamed from 'str' to avoid shadowing the builtin
print(text)
# (the head of this line-drawing helper is truncated in the source snippet)
    elif y + linewidth / 2 >= im.shape[1]:
        im[:, y:im.shape[1]] = color
    else:
        im[:, (y - linewidth / 2):(y + linewidth / 2)] = color
    return im


if __name__ == '__main__':
    im1 = cv2.cvtColor(cv2.imread(sys.argv[1]), cv2.COLOR_BGR2RGB)
    SIZE_X = 740
    SIZE_Y = 880
    # matrix = perspectiveTransform([(628, 163), (1666, 147), (2716, 128), \
    #                                (632, 1237), (1675, 1266), (2734, 1219), \
    #                                (632, 2346), (1683, 2337), (2745, 2341)], \
    #                               [(0, 0), (SIZE/2, 0), (SIZE, 0), \
    #                                (0, SIZE/2), (SIZE/2, SIZE/2), (SIZE, SIZE/2), \
    #                                (0, SIZE), (SIZE/2, SIZE), (SIZE, SIZE)])
    # (-3, -6)
    matrix = perspectiveTransform([(51, 18), (734, 42),
                                   (9, 878), (745, 871)],
                                  [(0, 0), (SIZE_X, 0),
                                   (0, SIZE_Y), (SIZE_X, SIZE_Y)])
    #im2 = cv2.warpPerspective(im1, linalg.inv(matrix), (800, 800))
    im2 = cv2.warpPerspective(im1, matrix, (SIZE_X, SIZE_Y))
    #im2 = drawLines(im2, linewidth=6, interval=SIZE/10, color=np.array([127, 255, 127]))
    im_pil = Image.fromarray(im2)
    im_pil = ImageEnhance.Contrast(im_pil).enhance(1.5)
    im_pil = ImageEnhance.Brightness(im_pil).enhance(1.1)
    # Rotate
    # im_pil.show()
    im_pil.save('armyCard.png')
def application(environ, start_response):
    request = environ['QUERY_STRING']
    left, right, top, bottom, background, print_type = request.split(';', 6)
    if print_type == 'big':
        zoom = 14  # 35 tiles
    else:
        zoom = 15  # 63 tiles
    left = float(left)
    right = float(right)
    top = float(top)
    bottom = float(bottom)
    # we print only pistes, here
    db = 'pistes-mapnik'
    conn = psycopg2.connect("dbname=" + db + " user=mapnik")
    cur = conn.cursor()
    # (unused string-built query kept from the original; the parameterized
    # execute() below is what actually runs)
    sql = " SELECT count(*) FROM planet_osm_line WHERE st_intersects(planet_osm_line.way,st_transform( ST_MakeEnvelope(" + str(left) + "," + str(bottom) + "," + str(right) + "," + str(top) + ", 4326),900913));"
    cur.execute(" \
        SELECT count(*) \
        FROM planet_osm_line WHERE \
        st_intersects(\
        planet_osm_line.way,\
        st_transform( ST_MakeEnvelope(%s,%s,%s,%s, 4326),900913)) and \"piste:type\" = 'nordic'; ",
        (left, bottom, right, top))
    result = cur.fetchall()
    cur.close()
    conn.close()
    if result[0][0] == 0:
        response_body = str(result[0][0]) + ' pistes found'
        status = '200 OK'
        response_headers = [('Content-Type', 'text/plain'), ('Content-Length', str(len(response_body)))]
        start_response(status, response_headers)
        return [response_body]
    #~ if background == 'osm':
    #~     url = 'http://tile.openstreetmap.org/'
    #~     ext = 'png'
    #~ else:
    #~     url = 'http://otile1.mqcdn.com/tiles/1.0.0/osm/'
    #~     ext = 'jpg'
    url = 'http://tile.openstreetmap.org/'
    ext = 'png'
    bg = getImage(url, left, right, top, bottom, zoom, 'osm', ext)
    enhancer = ImageEnhance.Brightness(bg)
    bg = enhancer.enhance(1.2)
    enhancer = ImageEnhance.Contrast(bg)
    bg = enhancer.enhance(0.8)

    # overlay contour lines at 50% opacity
    contours = getImage('http://tiles.pistes-nordiques.org/tiles-contours/', left, right, top, bottom, zoom, 'osm', ext)
    r, g, b, a = contours.split()
    mask = Image.merge("L", (a,))
    v = numpy.asarray(mask) * 0.5
    v = numpy.uint8(v)
    value = Image.fromarray(v)
    bg.paste(contours.convert('RGB'), None, value)

    # overlay hillshading at 50% opacity
    hs = getImage('http://tiles2.pistes-nordiques.org/hillshading/', left, right, top, bottom, zoom, 'tms', ext)
    r, g, b, a = hs.split()
    mask = Image.merge("L", (a,))
    v = numpy.asarray(mask) * 0.5
    v = numpy.uint8(v)
    value = Image.fromarray(v)
    bg.paste(hs.convert('RGB'), None, value)

    # overlay the pistes themselves at full opacity
    pistes = getImage('http://tiles.pistes-nordiques.org/tiles-pistes2/', left, right, top, bottom, zoom, 'osm', ext)
    r, g, b, a = pistes.split()
    mask = Image.merge("L", (a,))
    bg.paste(pistes.convert('RGB'), None, mask)
    bg = bg.convert('RGB')

    cartouche = Image.open(os.path.dirname(__file__) + '/cartouche.png')
    cartouche.load()
    r, g, b, a = cartouche.split()
    mask = Image.merge("L", (a,))
    v = numpy.asarray(mask)
    v = numpy.uint8(v)
    value = Image.fromarray(v)
    bg.paste(cartouche.convert('RGB'), (0, 0), value)

    s = bg.size
    printout = Image.new('RGB', (s[0] + 40, s[1] + 40), 'white')
    printout.paste(bg, (20, 20))
    randFilename = random.randrange(0, 100001, 2)
    PIL_images_dir = '/var/www/tmp/'
    printout_filename = 'printout' + str(randFilename) + '.pdf'
    outname = PIL_images_dir + printout_filename
    #bg.save(outname, 'png', ppi=pistes.info['ppi'])
    printout.save(outname, 'PDF', resolution=200.0, quality=90)
    response_body = '/tmp/' + printout_filename
    status = '200 OK'
    response_headers = [('Content-Type', 'text/plain'), ('Content-Length', str(len(response_body)))]
    start_response(status, response_headers)
    return [response_body]
def contrast(image, factor):
    enh = ImageEnhance.Contrast(image)
    return enh.enhance(factor)
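A hedged usage sketch for the contrast() helper above; the file names are placeholders. In PIL's ImageEnhance, a factor of 1.0 returns the image unchanged, values below 1.0 wash it out toward solid grey, and values above 1.0 exaggerate it.

from PIL import Image

img = Image.open('input.jpg')    # placeholder path
washed_out = contrast(img, 0.5)  # halve the contrast
punchy = contrast(img, 1.8)      # 80% more contrast
punchy.save('output.jpg')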
def processImageFile(im_file, out_file):
    im = Image.open(im_file)
    enh = ImageEnhance.Contrast(im)
    eim = enh.enhance(1.25)  #.show("25% more contrast")
    eim.save(out_file)
def contrast_img(img_name, new_img='new_img.jpg'):
    img = Image.open(img_name)
    contrast = ImageEnhance.Contrast(img)
    contrast_img = contrast.enhance(2.0)
    contrast_img.save(new_img)
def cont(imagefile, factor=10):
    im = Image.open(imagefile)
    enhancer = ImageEnhance.Contrast(im)
    enhancer.enhance(factor).show("Contrast %f" % factor)
    return
# Calculate the shaded relief
shaded = np.sin(altitude * deg2rad) * np.sin(slope * deg2rad) \
    + np.cos(altitude * deg2rad) * np.cos(slope * deg2rad) \
    * np.cos((azimuth - 90.0) * deg2rad - aspect)
shaded = shaded * 255

# Convert the numpy array back to an image
relief = Image.fromarray(shaded).convert("L")

# Smooth the image several times so it's not pixelated
for i in range(10):
    relief = relief.filter(ImageFilter.SMOOTH_MORE)

log.info("Creating map image")

# Increase the hillshade contrast to make it stand out more
e = ImageEnhance.Contrast(relief)
relief = e.enhance(2)

# Crop the image to match the SRTM image. We lose
# 2 pixels during the hillshade process
base = Image.open(osm_img + ".jpg").crop((0, 0, w - 2, h - 2))

# Enhance base map contrast before blending
e = ImageEnhance.Contrast(base)
base = e.enhance(1)

# Blend the map and hillshade at 90% opacity
topo = Image.blend(relief.convert("RGB"), base, .9)

## Draw the GPX tracks
# Convert the coordinates to pixels
points = []
for x, y in zip(lons, lats):
enhancer = ImageEnhance.Color(im)
imGray = enhancer.enhance(0.).convert("L").resize(
    (options.res_size, options.res_size),
    Image.ANTIALIAS).filter(ImageFilter.MedianFilter(options.kernel_size))
print "done."

print "applying threshold... ",
hist = scipy.ndimage.filters.gaussian_filter1d(
    np.array(imGray.histogram()[100:200]), sigma=10)
val = np.argmin(hist) + options.threshold

contrast = ImageEnhance.Contrast(imGray)
imGray = contrast.enhance(2.5)
sharper = ImageEnhance.Sharpness(imGray)
imGray = sharper.enhance(0.5)
imGray.save("temp.bmp")
im.convert("L").resize((options.res_size, options.res_size),
                       Image.ANTIALIAS).save("temp2.bmp")

Img = plt.imread("temp.bmp")[::-1]
#Img -= val
#Img = Img.clip(min=val)
Img2 = plt.imread("temp2.bmp")[::-1]
plt.imshow(Img, cmap='gray')
plt.draw()
from PIL import Image
import ImageEnhance

im = Image.open("captcha.jpg")
nx, ny = im.size
im2 = im.resize((int(nx * 5), int(ny * 5)), Image.BICUBIC)
im2.save("final_pic.png")
enh = ImageEnhance.Contrast(im)
enh.enhance(1.3).show("30% more contrast")
import base64   # added: used below but missing from the original
import Image    # added: used below but missing from the original
import ImageEnhance
import urllib2

pcurl = 'http://www.pythonchallenge.com/pc/return/'
img_url = 'http://www.pythonchallenge.com/pc/return/cave.jpg'
request = urllib2.Request(img_url)
base64string = base64.encodestring('{0}:{1}'.format('huge', 'file')).replace('\n', '')
request.add_header("Authorization", "Basic {0}".format(base64string))
img = urllib2.urlopen(request)
with open('cave.jpg', 'wb') as img_file:
    img_file.write(img.read())

image = Image.open('cave.jpg')
# Black out every other pixel to isolate the interleaved hidden image.
for x in xrange(image.size[0]):
    for y in xrange(image.size[1]):
        if (x + y) % 2 != 0:
            image.putpixel((x, y), (0, 0, 0))

bright = ImageEnhance.Brightness(image)
image = bright.enhance(2.5)
contrast = ImageEnhance.Contrast(image)
image = contrast.enhance(3.0)
image.show()
image.save('cave.png')
# the image shows the word 'evil'
print "The next url is {0}{1}.html".format(pcurl, 'evil')
                toint=False)  # tail of a call truncated in the source snippet
kss_red = kss
#kss_hsv = rgb_to_hsv(kss)
#kss_hsv[:, :, 0] = 0 / 360.
#kss_red = hsv_to_rgb(kss_hsv)
print "Downsample: ", downsample, " size: ", size, " style: ", txt
print "Memory Check (ps): ", get_mem() / 1024.**3

redblueorange = kss_red + h2s_orange + fes_blue
redblueorange[redblueorange > 1] = 1
im = PIL.Image.fromarray((redblueorange * 255).astype('uint8')[::-1, :])
im.save(prefix + 'Trapezium_GEMS_mosaic_redblueorange_%s%s.png' % (txt, size))
im = ImageEnhance.Contrast(im).enhance(1.5)
im.save(prefix + 'Trapezium_GEMS_mosaic_redblueorange_%s%s_contrast.png' % (txt, size))
im = ImageEnhance.Brightness(im).enhance(1.5)
im.save(prefix + 'Trapezium_GEMS_mosaic_redblueorange_%s%s_contrast_bright.png' % (txt, size))

print "Downsample: ", downsample, " size: ", size, " style: ", txt
print "Memory Check (ps): ", get_mem() / 1024.**3
output = prefix + 'Trapezium_GEMS_mosaic_redblueorange_%s%s.png' % (txt, size)
avm.embed(output, output)
# -*- coding:utf-8 -*-
import Image
import ImageEnhance
import ImageFilter
import sys

image_name = "./testpic/1.jpg"

# Remove interference dots (noise)
im = Image.open(image_name)
im = im.filter(ImageFilter.MedianFilter())
enhancer = ImageEnhance.Contrast(im)  # note: created but never applied in the original
im = im.convert('1')
im.show()

s = 12  # starting cut point, x
t = 2   # starting cut point, y
w = 10  # cut width, +y
h = 15  # cut length, +x
im_new = []
for i in range(4):  # slice the captcha into single characters
    im1 = im.crop((s + w * i + i * 2, t, s + w * (i + 1) + i * 2, h))
    im_new.append(im1)
im_new[0].show()  # inspect the first slice
xsize, ysize = im_new[0].size
def _contrast_fired(self):
    print "Contrast enhancing the image!"
    im = Image.open("image.jpg")
    im_contrast_enhanced = ImageEnhance.Contrast(im).enhance(2)
    im_contrast_enhanced.save("image.jpg", "JPEG")
    self.display_image()
# (the opening of the enclosing try block is truncated in the source snippet)
        print s
        if do_view == True:
            ratio = float(MAXWIDTH) / s[0]
            print ratio
            wi = s[0] * ratio
            print wi
            hi = s[1] * ratio
            print int(hi)
            im.thumbnail((int(wi), int(hi)), Image.ANTIALIAS)
            brightness = 1.15
            enhancer = ImageEnhance.Brightness(im)
            bright = enhancer.enhance(brightness)
            contrast = 1.7
            enhancer = ImageEnhance.Contrast(bright)
            im = enhancer.enhance(contrast)
            #im = ImageOps.autocontrast(im)
            im.save(outfile, "JPEG")
    except IOError:
        print "cannot create thumbnail for '%s'" % zipdest
    #fm.unzipFile(ziploc, location_to + "/" + os.path.splitext(zip)[0])

print "done!"
print fm.list_dir(location)
sys.exit(0)

for infile in sys.argv[1:]:
    outfile = os.path.splitext(infile)[0] + ".thumbnail"
def enhance(image, factor):
    image = ImageEnhance.Color(image).enhance(factor / 5)
    image = ImageEnhance.Contrast(image).enhance(factor)
    return image
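A hedged usage sketch for the enhance() helper above. Note that the color factor is the contrast factor divided by 5, so only factor > 5 actually boosts saturation; under Python 2, passing an int would make factor / 5 integer division, hence the float here. 'photo.jpg' is a placeholder file name.

from PIL import Image

img = Image.open('photo.jpg')
result = enhance(img, 2.0)  # color factor 0.4 (muted), contrast factor 2.0
result.save('photo_enhanced.jpg')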
def ocr_word(mouse_x, mouse_y):
    # Return None if the source language is unsupported.
    src_lang = setting_config.get_translate_config("src_lang")
    if not LANGUAGE_OCR_DICT.has_key(src_lang):
        show_message(_("Sorry, select-and-translate does not support %s yet") % _(src_lang),
                     _("Cancel"),
                     _("Ok, I understand"),
                     lambda: ocr_log(src_lang))
        return None

    # Return None if any OCR package needs to be installed before continuing.
    ocr_pkg_name = LANGUAGE_OCR_DICT[src_lang]
    pkg_names = get_install_packages([ocr_pkg_name])
    if len(pkg_names):
        show_message(_("An OCR package is required to enable word recognition"),
                     _("Cancel"),
                     _("Install"),
                     lambda: install_packages(pkg_names))
        return None

    # Return None if the mouse is in the trayicon area.
    if constant.TRAYAREA_TOP < mouse_y < constant.TRAYAREA_BOTTOM:
        return None

    # OCR the word under the cursor.
    lang = ocr_pkg_name.split("tesseract-ocr-")[1].replace("-", "_")
    x = max(mouse_x - screenshot_width / 2, 0)
    y = max(mouse_y - screenshot_height / 2, 0)
    width = min(mouse_x + screenshot_width / 2, screen_width) - x
    height = min(mouse_y + screenshot_height / 2, screen_height) - y
    scale = 2
    tool = pyocr.get_available_tools()[0]
    output_format = xcb.xproto.ImageFormat.ZPixmap
    plane_mask = 2**32 - 1
    reply = conn.core.GetImage(output_format, root, x, y, width, height, plane_mask).reply()

    # Get screenshot image data.
    image_data = reply.data.buf()
    image = Image.frombuffer("RGBX", (width, height), image_data, "raw", "BGRX").convert("RGB")

    # First make the image grey and scale it bigger.
    image = image.convert("L").resize((width * scale, height * scale))
    # image.save("old.png")  # debug

    # Second, enhance the image with contrast and sharpness.
    image = ImageEnhance.Contrast(image).enhance(1.5)
    # Uncommenting the line below gave better results in testing. ;)
    # image = ImageEnhance.Sharpness(image).enhance(2.0)
    # image.save("new.png")  # debug

    word_boxes = tool.image_to_string(image, lang=lang, builder=pyocr.builders.WordBoxBuilder())
    cursor_x = (mouse_x - x) * scale
    cursor_y = (mouse_y - y) * scale
    for word_box in word_boxes[::-1]:
        ((left_x, left_y), (right_x, right_y)) = word_box.position
        if (left_x <= cursor_x <= right_x and left_y <= cursor_y <= right_y):
            word = filter_punctuation(word_box.content)
            # Return None if the ocr word is a space string.
            if word.isspace():
                return None
            else:
                return word
    return None
                   logexp=logii)  # tail of a truncated logscale() call from the source snippet
    rgb_small[:, :, 3] = 255
    rgb_small_pil = rgb_small[::-1, :, :]  # reverse y axis because PIL is backwards
    #rgb_small_pil[np.max(rgb_small_pil, axis=2) >= 255, :] = 255
    #rgb_small_pil[:, :, 3] = np.uint8(256) - rgb_small_pil[:, :, 3]
    im = PIL.Image.fromarray(rgb_small_pil)
    print "Saving GEMS mosaic ", time.time() - t0
    im.save(prefix + 'OrionBNKL_GEMS_mosaic_logexp%ii_small.png' % logii)
    print "Fin small image %i. t=%0.1f s" % (logii, time.time() - t0)

    kbackground = PIL.Image.new("RGB", im.size, (0, 0, 0))
    kbackground.paste(im, mask=im.split()[3])
    print "Saving GEMS mosaic with black bg ", time.time() - t0
    #kbackground.save(prefix + 'OrionBNKL_GEMS_mosaic_logexp%ii_blackbg_small.png' % (logii))
    kbackground_contrast = ImageEnhance.Contrast(kbackground).enhance(1.5)
    kbackground_contrast.save(prefix + 'OrionBNKL_GEMS_mosaic_logexp%ii_blackbg_contrast_small.png' % (logii))
    kbackground_bright = ImageEnhance.Brightness(kbackground_contrast).enhance(1.5)
    kbackground_bright.save(prefix + 'OrionBNKL_GEMS_mosaic_logexp%ii_blackbg_contrast_bright_small.png' % (logii))

for logii in xrange(0, 2):
    print "Making large image %i. t=%0.1f s" % (logii, time.time() - t0)
    rgb_large = np.ones([myshape[0], myshape[1], 4], dtype='uint8')
    rgb_large[:, :, 1] = logscale(ks, xmin=ksmin, xmax=ksmax, logexp=logii)
    rgb_large[:, :, 0] = logscale(h2, xmin=h2min, xmax=h2max, logexp=logii)
def brightness(self, windowCenter):
    # note: despite its name, this method adjusts contrast, using the
    # window center as the enhancement factor
    enhancer = ImageEnhance.Contrast(self.img)
    self.img = enhancer.enhance(float(windowCenter))
def other(im):
    #out = im.rotate(45)
    #out = im.filter(ImageFilter.DETAIL)
    out = ImageEnhance.Contrast(im)
    out.enhance(1.5).show('50% more contrast')
#coding:utf-8
import Image
import pytesseract
import ImageEnhance

img = Image.open('11.png')
img = img.convert('RGBA')
img = img.convert('L')
img.save('end_9.png')

sharpness = ImageEnhance.Sharpness(img)  # sharpen
img = sharpness.enhance(7.0)
img.save('end_0.png')

#img = ImageEnhance.Color(img)  # black and white
#img = img.enhance(0)
img.save('end_1.png')

#img = ImageEnhance.Brightness(img)  # increase brightness
#img = img.enhance(3)
img.save('end_2.png')

img = ImageEnhance.Contrast(img)  # high contrast
img = img.enhance(8)
img.save('end_3.png')

#img = ImageEnhance.Color(img)
#img = img.convert('L')