def Contraste(caminho, name):
    """Create two contrast-enhanced grayscale variants of an image.

    Reads the image at ``caminho``, writes a working copy and an inverted
    (negative) copy, boosts the contrast of each, converts them to
    grayscale, and saves the results as ``Imagens/<name>/<name>.jpg`` and
    ``Imagens/<name>/<name>2.jpg``.

    Args:
        caminho: path of the source image.
        name: subdirectory / base file name for the outputs.
    """
    img = cv2.imread(caminho)
    cv2.imwrite('Imagens/contraste.jpg', img)

    # Inverted (negative) copy of the same source image.
    img2 = cv2.imread(caminho)
    img2 = (255 - img2)
    cv2.imwrite('Imagens/contraste2.jpg', img2)

    # The original inlined this pipeline twice, byte for byte; it is now a
    # single helper applied to both working files.
    _contrast_to_gray('Imagens/contraste.jpg',
                      'Imagens/' + name + '/' + str(name) + '.jpg')
    _contrast_to_gray('Imagens/contraste2.jpg',
                      'Imagens/' + name + '/' + str(name) + '2.jpg')


def _contrast_to_gray(src, dst):
    """Boost contrast of ``src`` in place, then save a grayscale copy to ``dst``."""
    im = Image.open(src)                       # open the target image
    enh = ImageEnhance.Contrast(im)
    enh.enhance(10.5).save(src, quality=100)   # strong contrast boost, saved back
    img = cv2.imread(src)                      # reopen the saved image
    cinza = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    cv2.imwrite(dst, cinza)                    # save the grayscale result
def __init__(self, **kwargs):
    """Load the image, prime the PIL enhancers at identity (1.0), and build
    the pyglet sprite used for display."""
    # Default to the bundled sample photo when no filename is supplied.
    kwargs.setdefault('filename', os.path.join('..', 'photoo', 'photo1.jpg'))
    if kwargs.get('filename') is None:
        raise Exception('No filename given to MTScatterImage')
    kwargs.setdefault('loader', None)
    super(ImageScatter, self).__init__(**kwargs)
    self.touch_positions = {}
    self.pim = Image.open(kwargs.get('filename'))
    # Run each enhancer once with factor 1.0 (a no-op) so the enhancer
    # objects exist for later interactive adjustment.
    self.contrast_enh = ImageEnhance.Contrast(self.pim)
    self.pim = self.contrast_enh.enhance(1.0)
    self.bright_enh = ImageEnhance.Brightness(self.pim)
    self.pim = self.bright_enh.enhance(1.0)
    self.color_enh = ImageEnhance.Color(self.pim)
    self.pim = self.color_enh.enhance(1.0)
    self.sharp_enh = ImageEnhance.Sharpness(self.pim)
    self.pim = self.sharp_enh.enhance(1.0)
    self.bdata = self.pim.tostring()
    # Negative pitch flips the rows for pyglet's bottom-up coordinate system;
    # 3 bytes per pixel matches the 'RGB' format given here.
    self.img = ImageData(self.pim.size[0], self.pim.size[1], 'RGB', self.bdata,
                         pitch=-self.pim.size[0] * 3)
    self.image = pyglet.sprite.Sprite(self.img)
    self.width = self.pim.size[0]
    self.height = self.pim.size[1]
def CodeFilter(fromfile='code.gif', tofile='code_temp.png'):
    """Pre-process a captcha image for OCR.

    Converts ``fromfile`` to grayscale, blanks its 1px border frame, raises
    brightness and contrast, binarizes and median-filters it, then saves the
    result to ``tofile`` and returns the processed image.
    """
    import Image, ImageEnhance, ImageFilter, ImageDraw
    # Bug fix: the original ignored the `fromfile` parameter and always
    # opened the hard-coded 'code.gif'.
    im = Image.open(fromfile)
    im_new = im.convert('L')  # grayscale
    # Erase the border frame by drawing white lines along the four edges.
    draw = ImageDraw.Draw(im_new)
    draw.line((0, 0, im.size[0], 0), fill=255)
    draw.line((0, 0, 0, im.size[1]), fill=255)
    draw.line((0, im.size[1] - 1, im.size[0], im.size[1] - 1), fill=255)
    draw.line((im.size[0] - 1, 0, im.size[0] - 1, im.size[1] - 1), fill=255)
    enhancer = ImageEnhance.Brightness(im_new)
    im_new = enhancer.enhance(2.0)  # brighten
    enhancer = ImageEnhance.Contrast(im_new)
    im_new = enhancer.enhance(4)    # raise contrast
    # Bug fix: PIL's convert()/filter() return new images; the original
    # discarded both results, so the saved file was never binarized or
    # median-filtered.
    im_new = im_new.convert('1')
    im_new = im_new.filter(ImageFilter.MedianFilter)
    im_new.save(tofile)
    return im_new
def composeForFacebook(self, images):
    """Compose up to four images into a square 2x2 strip for Facebook.

    Returns ``[strip, "facebook"]`` where ``strip`` is the composed PIL image.
    """
    print("composing for facebook")
    # Square black canvas; note both dimensions deliberately use the
    # layout's *width*.
    strip = Image.new('RGB', (self.facebookLayout["width"], self.facebookLayout["width"]), (0,0,0))
    count=0
    dim=self.facebookLayout["photoDim"]
    # Top-left corner of each of the four quadrants (5px margin).
    positions={0:[5,5],1:[dim,5],2:[5,dim],3:[dim,dim]}
    #for inFile in glob.glob(os.path.join(imageDir, '*.JPG')):
    for img in images:
        if count>3:break  # only the first four images are used
        # print("\t"+str(inFile))
        #img=Image.open(inFile)
        posX,posY=positions[count]
        # Center-crop horizontally to a square whose side is the image height.
        bbox=img.getbbox()
        img=img.crop(((bbox[2]/2)-(bbox[3]/2),0,(bbox[2]/2)+(bbox[3]/2),bbox[3]))
        img=img.resize((dim-10,dim-10))
        #img = ImageOps.autocontrast(img, cutoff=2)
        if self.grey:
            img=ImageOps.grayscale(img)
    # Slightly darken, then raise contrast.
        enh=ImageEnhance.Brightness(img)
        img=enh.enhance(0.8)
        enh=ImageEnhance.Contrast(img)
        img=enh.enhance(1.3)
        strip.paste(img,(posX,posY))
        count=count+1
    # Paste the layout overlay using its own alpha channel as the mask.
    overlay=self.facebookLayout["overlay"]
    strip.paste(overlay,None,overlay)
    #path=os.path.join(imageDir, 'facebookStrip.PNG')
    #path=self.saveImageToOutgoing(strip,"facebook")
    #dateString=datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')
    #path=os.path.join(self.outgoingPath,dateString+'_facebook.PNG')
    #strip.save(path, 'PNG')
    print("\n")
    return [strip,"facebook"]
def setup(self):
    """Render 'normal', 'hover' and 'click' button images for every state
    key in self._imgs, storing the raw image data for each."""
    for st in self._imgs:
        w, h = self.pos['width'], self.pos['height']
        # Offsets that center the source icon on the button.
        x = (w - self._img_source.size[0]) / 2
        y = (h - self._img_source.size[1]) / 2
        buttonbg = image.hex_to_rgb(self.frame.colors[st]['buttonbg'])
        buttonfg = image.hex_to_rgb(self.frame.colors[st]['buttonfg'])
        # Create the "base" image
        normal = Image.new('RGBA', (w, h), color=buttonbg)
        normal.paste(self._img_source, box=(x, y), mask=self._img_source)
        # Re-draw the icon as a bitmap so it takes the foreground color.
        imgd = ImageDraw.Draw(normal)
        imgd.bitmap((x, y), self._img_source, fill=buttonfg)
        # Make other two states before beveling
        hover = normal.copy()
        click = normal.copy()
        # Now add effects that differentiate the states:
        # normal = bevel up; click = brightened + bevel down (pressed look);
        # hover = brightened + bevel up.
        rendering.bevel_up(normal)
        bright = ImageEnhance.Brightness(click)
        click = bright.enhance(1.2)
        rendering.bevel_down(click)
        bright = ImageEnhance.Brightness(hover)
        hover = bright.enhance(1.2)
        rendering.bevel_up(hover)
        self._imgs[st]['normal'] = image.get_data(normal)
        self._imgs[st]['click'] = image.get_data(click)
        self._imgs[st]['hover'] = image.get_data(hover)
def equalize(self):
    """Apply the configured contrast/brightness, rescale, and store a
    1-bit (black & white) version of the image in self.wif."""
    if self.image1 is None:
        return
    img = ImageEnhance.Contrast(self.image1).enhance(self.contrast)
    img = ImageEnhance.Brightness(img).enhance(self.brightness)
    new_size = (int(img.size[0] * self.scale), int(img.size[1] * self.scale))
    self.wif = img.resize(new_size).convert('1')
def equalize(image1, contrast, brightness):
    """Apply contrast then brightness enhancement to *image1* and return
    a 1-bit (black & white) copy."""
    img = ImageEnhance.Contrast(image1).enhance(contrast)
    img = ImageEnhance.Brightness(img).enhance(brightness)
    # The resize is a same-size copy, kept for parity with the original.
    img = img.resize((int(img.size[0]), int(img.size[1])))
    return img.convert('1')
def myEqualize(im, contrast=1, brightness=1):
    """Grayscale *im* and apply the given contrast and brightness factors.

    Returns the adjusted image, or None unchanged if *im* is None.
    """
    if im is None:
        return im
    gray = im.convert('L')
    gray = ImageEnhance.Contrast(gray).enhance(contrast)
    gray = ImageEnhance.Brightness(gray).enhance(brightness)
    return gray
def adjustImage(infile):
    """Write a sweep of brightness variants of *infile*.

    Brightness factor runs from 2.0 down to 1.05 in steps of 0.05; each
    variant also gets contrast and color boosted by 1.15 and is saved as
    '<name>-<factor>.png'.
    """
    base = os.path.splitext(infile)[0]
    im = Image.open(infile)
    for step in range(40, 20, -1):
        factor = step / 20.0
        outfile = base + ('-%f.png' % factor)
        tmp = ImageEnhance.Brightness(im).enhance(factor)
        tmp = ImageEnhance.Contrast(tmp).enhance(1.15)
        ImageEnhance.Color(tmp).enhance(1.15).save(outfile)
def from_filename(name, width=None, brightness=None, contrast=None):
    '''Open an image file and return a string of its ASCII Art.'''
    img = Image.open(name)
    # Scale so the output is `width` characters wide; 1 means "as-is".
    scale = 1 if width is None else float(width) / img.size[0]
    if contrast is not None:
        img = ImageEnhance.Contrast(img).enhance(contrast)
    if brightness is not None:
        img = ImageEnhance.Brightness(img).enhance(brightness)
    cols = int(img.size[0] * scale)
    rows = int(img.size[1] * scale)
    return ''.join(generate_art(img, cols, rows))
def get_code_str_from_image_qq(image_name):
    """Boost contrast, strip interference lines, and OCR the captcha at
    *image_name*; return the recognized string."""
    captcha = Image.open(image_name)
    captcha = ImageEnhance.Contrast(captcha).enhance(3)
    filter_line(captcha)
    return image_to_string(captcha)
def add_watermark_text(im, text, angle=23, opacity=0.25, font='/Library/Fonts/Copperplate.ttc'):
    """Stamp semi-transparent rotated *text* across *im* and return the composite.

    The font size is grown until the rendered text roughly spans the image
    width; *opacity* scales the watermark's alpha channel (0..1).
    """
    if im.mode != 'RGBA':
        im = im.convert('RGBA')
    # Transparent overlay the same size as the input image.
    watermark = Image.new('RGBA', im.size, (0, 0, 0, 0))
    size = 2
    n_font = ImageFont.truetype(font, size)
    n_width, n_height = n_font.getsize(text)
    # grow size until the limit is reached (text width + height ~ image width)
    while n_width + n_height < watermark.size[0]:
        size += 2
        n_font = ImageFont.truetype(font, size)
        n_width, n_height = n_font.getsize(text)
    draw = ImageDraw.Draw(watermark, 'RGBA')
    # Center the text on the overlay.
    draw.text(((watermark.size[0] - n_width) / 2, (watermark.size[1] - n_height) / 2), text, (0, 0, 0), font=n_font)
    watermark = watermark.rotate(angle, Image.BICUBIC)
    # Scale the alpha band down to the requested opacity.
    alpha = watermark.split()[3]
    alpha = ImageEnhance.Brightness(alpha).enhance(opacity)
    watermark.putalpha(alpha)
    return Image.composite(watermark, im, watermark)
def Pic_Reg(image_name=None):
    """OCR a 4-character numeric captcha image.

    Median-filters, contrast-boosts and binarizes the image, slices out the
    four character cells at fixed pixel offsets, converts each cell to a
    flat 0/1 list (1 = dark pixel), and returns the digits recognized by
    ``Get_Num`` concatenated into a string.
    """
    im = Image.open(image_name)
    im = im.filter(ImageFilter.MedianFilter())
    enhancer = ImageEnhance.Contrast(im)
    im = enhancer.enhance(2)
    im = im.convert('1')
    # NOTE(review): the original called im.show() here — removed as a debug
    # leftover (the sibling split_pic helpers keep the same call commented out).
    # all by pixel
    s = 12  # start position of first number
    w = 10  # width of each number
    h = 15  # end position from top
    t = 2   # start position of top
    im_new = []
    # split four numbers in the picture (2px gap between cells)
    for i in range(4):
        im1 = im.crop((s + w * i + i * 2, t, s + w * (i + 1) + i * 2, h))
        im_new.append(im1)
    # Renamed from `s`: the original reused that name for both the pixel
    # offset above and the output string.
    result = ""
    for k in range(4):
        l = []
        for i in range(13):
            for j in range(10):
                if (im_new[k].getpixel((j, i)) == 255):
                    l.append(0)
                else:
                    l.append(1)
        result += str(Get_Num(l))
    return result
def ehance_picture(img): try: with open_image(img) as image: new_img = ImageEnhance.Contrast(image.im) new_img.enhance(1.8).show("30% more contrast") except Exception as e: print e
def image_manipulate(image_url):
    """Write rotated (45°), doubled-size and contrast-boosted (1.5x) copies
    of the image at *image_url*, next to the original, each with a
    descriptive filename prefix. IO failures are printed, not raised."""
    file_path, file_name = os.path.split(image_url)
    rotated_file_name = 'rotated_' + file_name
    resized_file_name = 'resized_' + file_name
    # Bug fix: was 'contrasted' + file_name — missing the underscore
    # separator used by the two prefixes above.
    contrasted_file_name = 'contrasted_' + file_name
    rotated_file_path = os.path.join(file_path, rotated_file_name)
    resized_file_path = os.path.join(file_path, resized_file_name)
    contrasted_file_path = os.path.join(file_path, contrasted_file_name)
    try:
        image = Image.open(image_url)
        print('Image type: {}'.format(image.format))
        enh = ImageEnhance.Contrast(image)
        contrasted_image = enh.enhance(1.5)
        image_size = image.size
        doubled_size = double_size(image_size)
        resized_image = image.resize(doubled_size)
        rotated_image = image.rotate(45)
        # Preserve the source format for every output file.
        rotated_image.save(rotated_file_path, image.format)
        resized_image.save(resized_file_path, image.format)
        contrasted_image.save(contrasted_file_path, image.format)
    except IOError as e:
        print('Can\'t manipulate with image: {}'.format(e))
def main():
    """Grayscale, denoise and OCR (chi_sim) the module-level `image_name`,
    printing both the raw and the post-processed text."""
    Im = Image.open(image_name)
    print Im.mode, Im.size, Im.format
    # 1.GrayScale
    Im = Image.open(image_name).convert('L')  #Convert to grayscale
    Im.save('g_' + image_name)
    # 2. Filter noise
    #filterNoise(Im,70,2,4) #slightly [1] //Prefered Value: G = 50,N = 4,Z = 4
    #Im = Im.filter(ImageFilter.MedianFilter()) #//Dis: Slow
    # Threshold via the module-level `table` lookup.
    Im = Im.point(table, '1')  #strongly //Ad: Fast Dis: Over Filter
    Im.save('b_' + image_name)
    # 3. Enhance contrast (enhancer currently unused; enhance() is commented out)
    enhancer = ImageEnhance.Contrast(Im)  #Comment this while use Strongly Filter
    #Im = enhancer.enhance(1.5) #slightly [2]
    #Im.save('En_'+image_name)
    # 4. Skew correction (not implemented).
    # Output
    Im.save('Treated.jpg')
    text = pytesseract.image_to_string(Im, 'chi_sim')
    print text
    print "_________________Original Text___________________"
    processed_string = string_process(text)
    print simplejson.dumps(processed_string, encoding="UTF-8", ensure_ascii=False)
    print "_________________Processed Text___________________"
def adjustImage(infile, factor): outfile = os.path.splitext(infile)[0] + '-updated.png' im = Image.open(infile) width, height = im.size print im.size, im.mode enhancer = ImageEnhance.Brightness(im) enhancer.enhance(factor).save(outfile)
def split_pic_save(image_name):
    """Split a 4-character captcha into per-character 0/1 pixel lists and
    append them to data.txt as Python-literal `l=[...]` lines (1 = dark)."""
    im = Image.open(image_name)
    im = im.filter(ImageFilter.MedianFilter())
    enhancer = ImageEnhance.Contrast(im)
    im = enhancer.enhance(2)
    im = im.convert('1')
    #im.show()
    #all by pixel
    s = 4   #start postion of first number
    w = 7   #width of each number
    h = 15  #end postion from top
    im_new = []
    #split four numbers in the picture
    for i in range(4):
        im1 = im.crop((s + w * i, 0, s + w * (i + 1), 15))
        im_new.append(im1)
    # Bug fix: the handle came from the removed `file()` builtin and was
    # never closed; a context manager guarantees the flush/close.
    with open("data.txt", "a") as f:
        for k in range(4):
            l = []
            for i in range(15):
                for j in range(7):
                    if (im_new[k].getpixel((j, i)) == 255):
                        l.append(0)
                    else:
                        l.append(1)
            f.write("l=[")
            n = 0
            # Wrap the literal every 10 values for readability.
            for i in l:
                if (n % 10 == 0):
                    f.write("\n")
                f.write(str(i) + ",")
                n += 1
            f.write("]\n")
def HighlightRects(self, pngfile, highlightrects):
    """Return PNG bytes of *pngfile* dimmed to 70% brightness, with the
    regions named in *highlightrects* kept at full brightness.

    Each entry of *highlightrects* is a string 'rect_x1,y1_x2,y2' with
    coordinates in a 0-1000 normalized space; malformed entries are skipped.
    """
    # large libraries. load only if necessary
    import Image, ImageDraw, ImageEnhance, ImageChops
    dkpercent = 70
    # Removed dead code: an unused 500x500 scratch image and an unused
    # first StringIO() were created here in the original.
    pfp = Image.open(pngfile)
    swid, shig = pfp.getbbox()[2:]
    # Darkened copy; highlight rects are punched back in as white below.
    dpfp = ImageEnhance.Brightness(pfp).enhance(dkpercent / 100.0)
    ddpfp = ImageDraw.Draw(dpfp)
    for highlightrect in highlightrects:
        mrect = re.match("rect_(\d+),(\d+)_(\d+),(\d+)$", highlightrect)
        if mrect:
            rect = (int(mrect.group(1)), int(mrect.group(2)),
                    int(mrect.group(3)), int(mrect.group(4)))
            # NOTE(review): the y-coordinates are also scaled by the image
            # *width* (swid); if rects are normalized per-axis this should
            # use shig — left unchanged, confirm against the caller.
            srect = (rect[0] * swid / 1000, rect[1] * swid / 1000,
                     rect[2] * swid / 1000, rect[3] * swid / 1000)
            ddpfp.rectangle(srect, (255, 255, 255))
    # darker() keeps the original pixels inside the (white) rects and the
    # dimmed pixels everywhere else.
    cpfp = ImageChops.darker(pfp, dpfp)
    ff = StringIO()
    cpfp.save(ff, "png")
    return ff.getvalue()
def brig(imagefile, factor=10, brightfile=None):
    """Save a brightness-enhanced (by *factor*) copy of *imagefile* as PNG.

    NOTE(review): a caller-supplied ``brightfile`` value is never used as-is
    — both branches rebuild the output path from ``imagefile``; the argument
    only selects the naming scheme. Confirm this is intended.
    """
    im = Image.open(imagefile)
    enhancer = ImageEnhance.Brightness(im)
    if brightfile == None:
        # '<name>_br<factor>.png' next to the original file.
        brightfile = imagefile[:-4] + "_br" + str(factor) + ".png"
    else:
        # Same naming, but under an 'Enhanced_Original/' subdirectory.
        brightfile = os.path.dirname(imagefile) + "/Enhanced_Original/" + os.path.basename(imagefile)[:-4] + "_br" + str(factor) + ".png"
    enhancer.enhance(factor).save(brightfile)
    return
def run(self): ''' Limpia las imagenes empleando PIL ''' # notificar el inicio de operaciones self.notificador.inicio_operacion.emit() import Image, ImageChops, ImageOps, ImageEnhance for i in range(len(self.imagenesIn)): im = Image.open(self.imagenesIn[i]) if im: im1 = ImageChops.invert(im) im = ImageChops.subtract(im1, im) im = im.convert('L') im = ImageChops.invert(im) im = ImageEnhance.Brightness(im).enhance(2.0) im = ImageOps.colorize(im, (0, 0, 0), (255, 255, 255)) im.save(self.imagenesOut[i]) else: self.notificador.error.emit('Error al procesar la imagen: ' + self.imagenesIn[i]) return # notificar a la aplicacion el resultado de la operacion self.notificador.correcto.emit(self.plugin.nombre()) # notificar el fin de operaciones self.notificador.fin_operacion.emit() return
def pic_rect(content, x, y, w, h, max_length=120, min_length=70, quality=88): im = open_pic(content) try: #cannot less than min_length w = max(w, min_length) h = max(h, min_length) max_l = max(w, h) #crop a rect new = im.crop((x, y, x+w, y+h)) #zoom_out if max_l > max_length: ww = w hh = h # w > h if max_l > h: hh = hh * max_length // ww ww = max_length # h > w elif max_l > w: ww = ww * max_l // hh hh = max_length else: ww = hh = max_length new = new.resize((ww, hh), Image.ANTIALIAS) ie = ImageEnhance.Sharpness(new) new = ie.enhance(1.2) f = StringIO() new.save(f, 'JPEG', quality=quality) return f.getvalue() except IOError, e: if e.message == "cannot read interlaced PNG files": raise UnknownPictureError() else: raise
def ctpn(sess, net, image_name):
    """Run CTPN text-line detection on *image_name*, sharpening blurry
    inputs first, and draw/report the detected boxes."""
    timer = Timer()
    timer.tic()
    img = cv2.imread(image_name)
    img, scale = resize_im(img, scale=TextLineCfg.SCALE, max_scale=TextLineCfg.MAX_SCALE)
    # Convert the OpenCV (BGR) image to a PIL (RGB) image.
    pil_img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    # Measure image sharpness via the variance of the Laplacian.
    imageVar = cv2.Laplacian(img, cv2.CV_64F).var()
    if imageVar <= 5000:
        # Low variance => blurry; sharpen before detection.
        pil_img = ImageEnhance.Sharpness(pil_img).enhance(3.0)
    # Convert the PIL image back to an OpenCV image.
    img = cv2.cvtColor(np.asarray(pil_img), cv2.COLOR_RGB2BGR)
    scores, boxes = test_ctpn(sess, net, img)
    textdetector = TextDetector()
    boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2])
    draw_boxes(img, image_name, boxes, scale)
    timer.toc()
    print(('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0]))
def watermark(self, img):
    """Composite self.watermarkimage (at self.watermarkopacity) over the
    tile image bytes *img* and return the encoded result bytes."""
    import StringIO, Image, ImageEnhance
    tileImage = Image.open(StringIO.StringIO(img))
    wmark = Image.open(self.watermarkimage)
    assert self.watermarkopacity >= 0 and self.watermarkopacity <= 1
    # Ensure an alpha channel, then scale it down to the configured opacity.
    if wmark.mode != 'RGBA':
        wmark = wmark.convert('RGBA')
    else:
        wmark = wmark.copy()
    alpha = wmark.split()[3]
    alpha = ImageEnhance.Brightness(alpha).enhance(self.watermarkopacity)
    wmark.putalpha(alpha)
    if tileImage.mode != 'RGBA':
        tileImage = tileImage.convert('RGBA')
    watermarkedImage = Image.new('RGBA', tileImage.size, (0, 0, 0, 0))
    watermarkedImage.paste(wmark, (0, 0))
    watermarkedImage = Image.composite(watermarkedImage, tileImage, watermarkedImage)
    buffer = StringIO.StringIO()
    if watermarkedImage.info.has_key('transparency'):
        # Bug fix: the original read `compositeImage.info['transparency']`
        # — an undefined name, so any image carrying transparency info
        # raised NameError instead of being saved.
        watermarkedImage.save(
            buffer, self.extension,
            transparency=watermarkedImage.info['transparency'])
    else:
        watermarkedImage.save(buffer, self.extension)
    buffer.seek(0)
    return buffer.read()
def tesser_ocr(inputfile):
    """Upscale, conditionally sharpen, binarize and OCR *inputfile*;
    return the recognized single-line text (Chinese + English)."""
    im = Image.open(inputfile)
    width, height = im.size
    # Upscale small images so the shorter dimension reaches 500x100.
    if width >= 500 or height >= 100:
        scale = 1.0
    else:
        scale = max(500.0 / width, 100.0 / height)
    im_resized = im.resize((int(scale * width), int(scale * height)), Image.ANTIALIAS)
    # Convert the PIL image to an OpenCV image to measure sharpness.
    cv2_img = cv2.cvtColor(np.asarray(im_resized), cv2.COLOR_RGB2BGR)
    # Variance of the Laplacian as a blur metric.
    imageVar = cv2.Laplacian(cv2_img, cv2.CV_64F).var()
    if imageVar <= 2000:
        im_resized = ImageEnhance.Sharpness(im_resized).enhance(3.0)
    im_gray = im_resized.convert("L")
    im_binary = binarizing(im_gray, 127)
    return pytesseract.image_to_string(im_binary, lang="chi_sim+eng", config="-psm 7")
def get_face_encodings(path_to_image):
    """Detect faces in the image at *path_to_image* and return a list of
    face-encoding arrays, one per detected face."""
    #Equalise image using Histogram equalization in cv2
    path_to_image = IE.avg_brightness(path_to_image)
    # Load image using scipy
    image = scipy.misc.imread(path_to_image)
    # Detect faces using the face detector (the CNN and Haar alternatives
    # are kept below, commented out)
    detected_faces_using_SVM = face_detector(image, 1)
    #detected_faces_using_CNN = cnn_face_detector(image, 1)
    #gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    #detected_faces_using_Haar = haar_detector.detectMultiScale(gray, 1.8, 2)
    # Get pose/landmarks of those faces
    # Will be used as an input to the function that computes face encodings
    # This allows the neural network to be able to produce similar numbers
    # for faces of the same people, regardless of camera angle and/or face
    # positioning in the image
    shapes_faces = [
        shape_predictor(image, face) for face in detected_faces_using_SVM
    ]
    #shapes_faces = [shape_predictor(image, dlib.rectangle(face.rect.left(),face.rect.top(),face.rect.right(),face.rect.bottom())) for face in detected_faces_using_CNN]
    #shapes_faces = [shape_predictor(image,dlib.rectangle(x,y,x+w,y+h)) for (x,y,w,h) in detected_faces_using_Haar]
    # For every face detected, compute the face encodings
    face_encoding = [
        np.array(
            face_recognition_model.compute_face_descriptor(
                image, face_pose, 1)) for face_pose in shapes_faces
    ]
    return face_encoding
def reduce_opacity(mark, opacity):
    """Return a copy of *mark* in RGBA mode with its alpha channel scaled
    by *opacity* (a float in [0, 1]).

    Raises:
        ValueError: if *opacity* is outside [0, 1].
    """
    # Validate explicitly instead of `assert`, which is stripped under -O.
    if not (0 <= opacity <= 1):
        raise ValueError('opacity must be between 0 and 1')
    mark = mark.convert('RGBA') if mark.mode != 'RGBA' else mark.copy()
    alpha = mark.split()[3]
    alpha = ImageEnhance.Brightness(alpha).enhance(opacity)
    mark.putalpha(alpha)
    return mark
def CrackCaptcha(imgpath):
    """Save a 5x bicubic upscale of *imgpath* to temp2.jpg and return a
    contrast-boosted (5x) copy of the original image.

    Bug fix: the original computed ``enh.enhance(5.0)`` and discarded the
    result, implicitly returning None; the enhanced image is now returned
    (backward compatible for callers that ignored the old return value).
    """
    im = Image.open(imgpath)
    nx, ny = im.size
    im2 = im.resize((int(nx * 5), int(ny * 5)), Image.BICUBIC)
    im2.save("temp2.jpg")
    enh = ImageEnhance.Contrast(im)
    return enh.enhance(5.0)
def split_pic(image_name):
    """Pre-process a 4-character captcha and return, for each character
    cell, a flat 0/1 pixel list (1 = dark pixel)."""
    # Run the getverify pre-processing pass on the file first.
    getverify(image_name)
    im = Image.open(image_name)
    im = im.filter(ImageFilter.MedianFilter())
    im = ImageEnhance.Contrast(im).enhance(2)
    im = im.convert('1')
    # Layout (pixels): first char starts at x=4, each char is 7 wide, 15 tall.
    start = 4
    char_w = 7
    char_h = 15
    cells = [im.crop((start + char_w * i, 0, start + char_w * (i + 1), 15))
             for i in range(4)]
    code_data = []
    for cell in cells:
        bits = []
        for row in range(15):
            for col in range(7):
                bits.append(0 if cell.getpixel((col, row)) == 255 else 1)
        code_data.append(bits)
    return code_data
def getValicode(self):
    """Refresh the on-page captcha, screenshot and crop it, OCR the crop,
    and return a 4-character alphanumeric code (recursing until the OCR
    output matches)."""
    element = self.driver.find_element_by_id("change_cas")
    element.click()
    time.sleep(0.5)  # give the refreshed captcha time to render
    self.driver.get_screenshot_as_file("screenshot.png")
    img = IMG.open('screenshot.png')
    width = img.size[0]
    height = img.size[1]
    # Hard-coded fractional crop box locating the captcha in the screenshot.
    region = (int(width * 0.50699677), int(height * 0.52849162),
              int(width * 0.593110872), int(height * 0.57318436))
    cropImg = img.crop(region)
    cropImg.save('1.png')
    image = IMG.open('1.png')
    # Boost contrast before OCR.
    enhancer = ImageEnhance.Contrast(image)
    image_enhancer = enhancer.enhance(2)
    valicode = image_to_string(image_enhancer)
    if len(valicode) == 0:
        # Empty OCR result: refresh and retry.
        return self.getValicode()
    else:
        pattern = re.compile(r'[0-9,a-z,A-Z]{4}')
        match = pattern.match(valicode)
        if match:
            print valicode
            return valicode
        else:
            return self.getValicode()