def __getitem__(self, index):
    """Return an (img0, img1, label) triplet for Siamese training.

    label is 1.0 when the two images come from different classes and
    0.0 when they share a class.

    Fixes: the identical gamma-correction + filtering pipeline was
    duplicated verbatim for both images (now a shared helper), and the
    no-op ``else: img0 = img0`` branch is gone.
    """

    def _preprocess(path):
        """Load *path* as grayscale, gamma-correct it so the mean gray
        level maps to mid-tone 0.5, then sharpen (DETAIL) and denoise
        with a 3x3 median filter."""
        img = Image.open(path).convert("L")
        mean = np.mean(img)
        # Formula for calculating gamma.
        # NOTE(review): divides by log10(mean / 255) — this blows up when
        # the image mean is exactly 0 or 255; confirm inputs exclude that.
        gamma = math.log10(0.5) / math.log10(mean / 255)
        corrected = np.power(np.array(img) / 255.0, gamma) * 255.0
        img = Image.fromarray(corrected.astype('uint8')).convert('L')
        img = img.filter(ImageFilter.DETAIL)
        return img.filter(ImageFilter.MedianFilter(size=3))

    img0_tuple = random.choice(self.imageFolderDataset.imgs)
    should_get_same_class = random.randint(0, 1)
    # Rejection-sample a partner image with the required class relation.
    while True:
        img1_tuple = random.choice(self.imageFolderDataset.imgs)
        if (img0_tuple[1] == img1_tuple[1]) == bool(should_get_same_class):
            break

    img0 = _preprocess(img0_tuple[0])
    img1 = _preprocess(img1_tuple[0])

    if self.should_transform:
        img0 = self.transform(img0)
        img1 = self.transform(img1)

    return img0, img1, torch.from_numpy(
        np.array([int(img1_tuple[1] != img0_tuple[1])], dtype=np.float32))
def py_tesseract(message, mode):
    """OCR an image coming from a chat message or a direct URL.

    mode == 'message': *message* is a chat message object; OCR the last
    attachment carrying a 'url'.  mode == 'image': *message* is an image
    URL itself.  Returns the recognized text, or None when there is
    nothing usable.

    Fixes: Pillow's ``filter``/``convert`` return new images — the
    original discarded those results, so the cleanup never reached the
    OCR step; the duplicated pipeline is now a single helper.
    """

    def _ocr_url(url):
        """Download *url*, clean the image up, and run tesseract."""
        data = requests.get(url).content
        image = Image.open(io.BytesIO(data))
        try:
            image = image.filter(ImageFilter.MedianFilter())
            image = ImageEnhance.Contrast(image).enhance(2)
            image = image.convert('1')
        except ValueError:
            # Some modes can't be converted/enhanced; OCR the raw image.
            pass
        image = image.filter(ImageFilter.SHARPEN)
        return pytesseract.image_to_string(image)

    if mode == 'message':
        if message is not None and message.server is not None:
            url = None
            for attach in message.attachments:
                # Attachment repr uses single quotes; coerce it to JSON.
                attachment_info = json.loads(str(attach).replace('\'', '"'))
                url = attachment_info['url']
            if url is not None:
                return _ocr_url(url)
        return None
    elif mode == 'image':
        # Scans a direct image URL.
        if message is not None:
            return _ocr_url(message)
        return None
    return None
def imgTransfer(img):
    """Denoise *img* with a median filter, run a factor-1 (identity)
    contrast enhancement, and return the grayscale ('L') result."""
    filtered = img.filter(ImageFilter.MedianFilter())
    contrasted = ImageEnhance.Contrast(filtered).enhance(1)
    return contrasted.convert('L')
def get_sobel_histogram(image):
    """Build a normalized Sobel-gradient histogram for *image*.

    The X and Y gradient histograms are normalized and concatenated
    (all X bins first, then all Y bins), and a plot is saved.
    See 8-edge-detection/3.sobel.

    Returns (histogram, saved_plot_path).
    """
    # Grayscale + 3x3 median filter suppresses noise before differencing.
    smoothed = image.convert('L').filter(ImageFilter.MedianFilter(size=3))

    # Horizontal and vertical Sobel kernels.
    kernel_x = ImageFilter.Kernel((3, 3), (-1, 0, 1, -2, 0, 2, -1, 0, 1), scale=1)
    kernel_y = ImageFilter.Kernel((3, 3), (1, 2, 1, 0, 0, 0, -1, -2, -1), scale=1)

    histogram_x = normalize_histogram(smoothed.filter(kernel_x).histogram())
    histogram_y = normalize_histogram(smoothed.filter(kernel_y).histogram())

    # Concatenate into a single flat histogram.
    combined = histogram_x + histogram_y

    path = save_histogram(combined, 'sobel_' + os.path.basename(image.filename))
    return combined, path
def get_concat_v_blank(im1, im2, im3, pollutant_name, scale, number_of_lines=5,
                       color=(255, 255, 255), filter_value=0):
    """Stack the pollution map *im1* above a legend row built from the
    logo *im2* and the color bar *im3*, draw tick labels taken from
    *scale* = (min, max), and return the composite image.

    Fix: the legend caption referenced an undefined global ``pollutant``
    (NameError at runtime); it now uses the ``pollutant_name`` parameter.
    """
    # Gap between the pollution image and the legend (fraction of im1 height).
    separation_image = 0.005
    # Legend height ratio relative to the logo.
    ratio_legende_logo_h = 0.15
    # Legend width ratio relative to the logo.
    ratio_legende_logo_l = 0.1
    font_type_number = ImageFont.truetype('arial.ttf', int(round(20 * im1.height / 800)))
    font_type_legend = ImageFont.truetype('arial.ttf', int(round(18 * im1.height / 800)))
    if filter_value > 0:
        im1 = im1.filter(ImageFilter.MedianFilter(size=filter_value))
    dst = Image.new('RGB',
                    (max(im1.width, im2.width),
                     im1.height + int(im1.height * separation_image) + im2.height),
                    color)
    dst.paste(im1, (0, 0))
    dst.paste(im2, (0, im1.height + int(im1.height * separation_image)))
    dst.paste(im3, (int(im2.width * (1 + ratio_legende_logo_l)),
                    im1.height + int(im1.height * separation_image)
                    + int(im2.height * ratio_legende_logo_h)))
    draw = ImageDraw.Draw(dst)
    # Black separator line between the map and the legend.
    draw.line((0, im1.height) + im1.size,
              width=int(im1.height * separation_image), fill=(0, 0, 0))
    # Numeric tick labels, evenly spaced along the color bar.
    for i in range(0, number_of_lines):
        draw.text(
            xy=(int(im2.width * (1 + ratio_legende_logo_l * 0.3))
                + int(i * im3.width / (number_of_lines - 1)),
                im1.height + int(im1.height * separation_image)
                + int(im2.height * ratio_legende_logo_h) + int(im3.height * 1.05)),
            text=str(round(scale[0] + i * (scale[1] - scale[0]) / (number_of_lines - 1), 1)),
            fill=(0, 0, 0), font=font_type_number)
    draw.text(
        xy=(int(im2.width * (1 + ratio_legende_logo_l)) + int(im3.width / (number_of_lines - 1)),
            im1.height + int(im1.height * separation_image)
            + int(im2.height * ratio_legende_logo_h) + int(im3.height * 1.9)),
        text=str("Concentration en " + str(pollutant_name) + " (µg/m3)"),
        fill=(0, 0, 0), font=font_type_legend)
    return dst
def img_to_numpy(img, threshold=115): img = img.convert('L') # 先转成灰度值 bin_img = img.point(configs.BIN_TABLE, '1') # 得到二值化后的黑白图片 # bin_img.show() fil_img = bin_img.filter(ImageFilter.MedianFilter(size=9)) # fil_img.show() return np.array(fil_img, dtype=np.int32)
def cartoonify_pt1(img):
    """First cartoonify stage: flatten fine detail by chaining median
    filters of kernel sizes 3, 5, 5, 7 and finally 5."""
    smoothed = img
    for kernel_size in (3, 5, 5, 7, 5):
        smoothed = smoothed.filter(ImageFilter.MedianFilter(kernel_size))
    return smoothed
def image_clean(path):
    """Clean a captcha-style image.

    Loads *path*, converts to grayscale, binarizes against an adaptive
    threshold derived from the mean gray level, then median-filters the
    remaining "freckles" away.

    :param path: image file path
    :return: (cleaned 1-bit image, bare file name without extension)
    """
    source = Image.open(path)
    gray = source.convert('L')
    # The mean gray level drives the binarization threshold.
    mean_level = np.array(gray).mean()
    if mean_level > 227:
        # Very bright images need a higher cut-off.
        mean_level = mean_level + 30
    # print('means:', mean_level)
    threshold = mean_level - 40
    binarized = gray.point(lambda px: px > threshold, mode='1')
    # 3x3 median filter removes the remaining salt-and-pepper noise.
    cleaned = binarized.filter(ImageFilter.MedianFilter(size=3))
    # e.g. './dir/name.png' -> 'name'
    name = source.filename.split('.')[1].rsplit('/')[-1]
    return cleaned, name
def __getitem__(self, idx):
    """Load, threshold-filter, and transform one (input, target) image pair.

    Row *idx* of self.frame holds the input and target file names relative
    to self.rootDir.  Returns (inputImage, targetImage) after the optional
    transforms, which are seeded identically so random crops/flips match.
    """
    # input and target images
    inputName = os.path.join(self.rootDir, self.frame.iloc[idx, 0])
    targetName = os.path.join(self.rootDir, self.frame.iloc[idx, 1])
    # process and filtering the images
    # inputImage = Image.open(inputName).convert('RGB')
    S_THRESHOLD = self.thresh  # gray levels >= this are clipped to white
    inputImage = Image.open(inputName).convert('L')
    median_filter = ImageFilter.MedianFilter(self.kernel)
    median_image = inputImage.filter(median_filter)
    inputImage = np.array(median_image)
    inputImage[inputImage >= S_THRESHOLD] = 255
    # Replicate the single gray channel into 3 channels (H, W, 3).
    inputImage = np.stack((inputImage, ) * 3, axis=-1)
    # NOTE(review): writes a .jpeg next to the source on *every* fetch —
    # a surprising side effect for a Dataset __getitem__; confirm intended.
    io.imsave(inputName.split('.jpg')[0] + '.jpeg', inputImage)
    # i, j, h, w = transforms.RandomCrop.get_params(inputImage, output_size=(256, 256))
    # inputImage = transforms.functional.crop(inputImage, i, j, h, w)
    if self.transform is not None:
        # Same seed for input and target so random transforms stay aligned.
        seed = random.randint(0, 2**32)
        self._set_seed(seed)
        inputImage = self.transform(inputImage)
    targetImage = Image.open(targetName).convert('L')
    # targetImage = transforms.functional.crop(targetImage, i, j, h, w)
    if self.targetTransform is not None:
        # NOTE(review): 'seed' is only bound when self.transform is not
        # None — this raises NameError if only targetTransform is set.
        self._set_seed(seed)
        targetImage = self.targetTransform(targetImage)
    # save_image(inputImage, 'inputimg3/inp{}.jpg'.format(idx))
    # save_image(targetImage, 'inputimg3/inp{}.png'.format(idx))
    return inputImage, targetImage
def process_img(im_obj):
    """Binarize a captcha image.

    Pixels bright in all three channels (R >= 150, G >= 160, B >= 160)
    are treated as background: first whitened, then — after a median
    filter — mapped to 0; everything else maps to 1.

    Fix: the original walked every pixel in nested Python loops (twice);
    both passes are now vectorized numpy operations with identical output.

    :param im_obj: PIL image object (RGB)
    :return: 2-D numpy array of 0/1 values
    """
    img_array = numpy.array(im_obj)
    # Background mask: bright in all three channels.
    bright = ((img_array[:, :, 0] >= 150)
              & (img_array[:, :, 1] >= 160)
              & (img_array[:, :, 2] >= 160))
    # Whiten only the first three channels (matches the original loop).
    img_array[bright, :3] = 255
    smoothed = Image.fromarray(img_array).filter(ImageFilter.MedianFilter())
    # im_obj3.save(r'G:\PycharmProjects\machineLearning\imageCodeKNN\CT_jiangsu\imagesArray\0.jpg')
    smoothed_array = numpy.array(smoothed)
    background = ((smoothed_array[:, :, 0] >= 150)
                  & (smoothed_array[:, :, 1] >= 160)
                  & (smoothed_array[:, :, 2] >= 160))
    # Background -> 0, character strokes -> 1.
    return numpy.where(background, 0, 1)
def parse_answer_area(source_file, text_area_file, compress_level, crop_area,
                      backup_file="text_area_2.png"):
    """Crop the answer region out of a screenshot and save two variants:
    an edge-enhanced backup (*backup_file*) and a contrast-enhanced text
    area (*text_area_file*) for OCR.

    Fix: with compress_level != 1 the original never defined ``image1`` /
    ``image2`` and crashed with NameError at the crop calls below; both
    now fall back to the (possibly 1-bit converted) source image.
    """
    image = Image.open(source_file)
    if compress_level == 1:
        image1 = image.convert("L")
        image2 = image.convert("RGB")
    else:
        if compress_level == 2:
            image = image.convert("1")
        image1 = image
        image2 = image
    width, height = image.size[0], image.size[1]
    print("屏幕宽度: {0}, 屏幕高度: {1}".format(width, height))
    # Crop box given as fractions of the full screen.
    box = (width * crop_area[0], height * crop_area[1],
           width * crop_area[2], height * crop_area[3])
    region1 = image1.crop(box)
    region2 = image2.crop(box)
    # Edge-enhanced backup copy.
    im = enhance_image(region2, 0.8, 1.3)
    im = im.filter(ImageFilter.DETAIL)
    im = im.filter(ImageFilter.MedianFilter(5))
    im = im.filter(ImageFilter.FIND_EDGES)
    im = im.filter(ImageFilter.CONTOUR)
    im.save(backup_file)
    # Brightness/contrast-enhanced text area.
    im2 = enhance_image(region1, 1.22, 1.2)
    im2.save(text_area_file)
def image_pre_process(img_name):
    """Open *img_name* and prepare it for OCR: median-filter the noise,
    run a factor-1 (identity) contrast enhancement, and return the
    grayscale result."""
    picture = Image.open(img_name)
    picture = picture.filter(ImageFilter.MedianFilter())
    picture = ImageEnhance.Contrast(picture).enhance(1)
    return picture.convert('L')
def img_transfer(f_name):
    """Load *f_name*, convert to grayscale, median-filter it, and apply a
    factor-1 (identity) contrast enhancement before returning."""
    gray = Image.open(f_name).convert('L')
    smoothed = gray.filter(ImageFilter.MedianFilter())
    return ImageEnhance.Contrast(smoothed).enhance(1)
def img_transfer(img):
    """Prepare a captcha image for OCR.

    Pipeline: upscale 1.5x, median filter, contrast and sharpness
    enhancement, grayscale conversion, denoising, binarization.

    :param img: image to process
    :return: processed image
    """
    width, height = img.size
    # Enlarge by 1.5x so strokes survive the later filtering.
    img = img.resize((int(width * 1.5), int(height * 1.5)))
    img = img.filter(ImageFilter.MedianFilter(1))   # median filter
    img = ImageEnhance.Contrast(img).enhance(1.5)   # contrast boost
    img = ImageEnhance.Sharpness(img).enhance(1.5)  # sharpen strokes
    img = img.convert('L')                          # grayscale conversion
    img = denoisy(img)                              # noise removal
    img = binary(img, 200)                          # binarize at threshold 200
    # NOTE(review): computed but never used (original behavior preserved).
    img_name = str(time.time()) + ".jpg"
    return img
def _blow_up_image():
    """Open args.image, upscale it to 2500px wide (preserving aspect
    ratio), then median-filter and double the contrast for OCR.

    Returns the processed PIL image, or False when the file is missing
    or is not an image.

    Fix: the original re-opened args.image a second time *outside* the
    try/except, defeating the error handling above it.
    """
    try:
        img = Image.open(args.image)
    except FileNotFoundError:
        print(colored("[!] I couldn't find a file by that name. Fake you!", 'red'))
        return False
    except OSError:
        print(colored("[!] {} is not an image file!".format(args.image), 'red'))
        return False
    basewidth = 2500
    wpercent = basewidth / float(img.size[0])
    hsize = int(float(img.size[1]) * wpercent)
    # Resize happens here
    img = img.resize((basewidth, hsize), Image.ANTIALIAS)
    # Thanks Stack Overflow <3 : https://stackoverflow.com/a/37750605/5486120
    img = img.filter(ImageFilter.MedianFilter())
    img = ImageEnhance.Contrast(img).enhance(2)
    # Return the sexy image object
    return img
def __init__(self, folder, size):
    """
    Arguments:
        folder: a string, the path to a folder with images.
        size: a tuple of integers (h, w).
    """
    self.folder = folder
    self.names = os.listdir(folder)

    # One of these color perturbations is chosen at random per image.
    color_transforms = [
        transforms.Lambda(lambda x: x.filter(ImageFilter.MedianFilter(3))),
        transforms.ColorJitter(brightness=0, contrast=0, saturation=2.0, hue=0.5),
        transforms.RandomGrayscale(p=0.3),
    ]
    # Resize -> random color perturbation -> random flip -> tensor.
    self.transform = transforms.Compose([
        transforms.Resize(size),
        transforms.RandomChoice(color_transforms),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ])
def ConvertAll():
    """Tile cropped, binarized strips from jpgfiles/ side by side (10 per
    sheet) and save each full sheet as a tesseract training .tif under
    train/.  (Python 2 code: uses print statements.)"""
    WIDTH = 320
    HEIGHT = 159
    MAXCOUNT = 10  # strips per output sheet
    region = (0, 0, WIDTH, HEIGHT)
    pos = [0, 0, WIDTH, HEIGHT]  # paste box, advanced after each strip
    count = 0  # output sheet counter
    newim = Image.new("L", ((WIDTH) * MAXCOUNT, HEIGHT))
    for f in os.listdir('jpgfiles'):
        #if f != '19.jpg':
        #continue
        if not f.endswith('.jpg'):
            continue
        oldfile = r'jpgfiles\%s' % f
        im = Image.open(
            oldfile)  #.convert('L').point(lambda x: 255 if x > 127 else 0)
        # Skip images that are not the expected capture size.
        if im.size != (320, 240):
            print oldfile
            continue
        im = im.crop(region)
        im = im.filter(ImageFilter.MedianFilter())
        #enhancer = ImageEnhance.Contrast(im)
        #im = enhancer.enhance(2)
        im = ImageEnhance.Contrast(im).enhance(5)
        # Hard-threshold to pure black/white at mid-gray.
        im = im.convert('L').point(lambda x: 255 if x > 127 else 0)
        newim.paste(im, pos)
        pos[0] += region[2] - region[0]
        pos[2] += region[2] - region[0]
        # Sheet full: save it and reset the paste box to the left edge.
        if (pos[0] >= newim.size[0]):
            #print pos, newim.size
            newfile = r'train\eng.xcar.exp%s.tif' % count
            count += 1
            newim.save(newfile)
            pos = list(region)
            print newfile
def run(self): im = Image.open("1148.png") #im.show() #cv2.waitKey(0) #lab?? im = im.filter(ImageFilter.MedianFilter()) #cv2.waitKey(0) #lab?? enhancer = ImageEnhance.Contrast(im) #im.show() #cv2.waitKey(0) #lab?? im.save("1148.png".replace('.png', '_1.png')) #im.show() image = cv2.imread("1148_1.png") #cv2.imshow("Original",image) #cv2.waitKey(0) #lab?? labs = cv2.cvtColor(image, cv2.COLOR_BGR2LAB) print len(labs) print len(labs[0]) print type(labs) data = [] #for row in labs: # for col in row: # data.append(col) print len(data) return data
def generate(model, _imageArray, _gpu = -1, _padding=50, _medianFilter=3):
    """Run *model* over an HWC image array and return the transformed
    HWC uint8 result.

    _gpu >= 0 runs the forward pass on CUDA; _padding > 0 adds symmetric
    border padding (cropped off afterwards); _medianFilter > 0 applies a
    final median filter of that kernel size.
    """
    xp = cuda.cupy if _gpu >= 0 else np
    started_at = time.time()
    # HWC -> CHW for the network.
    batch = _imageArray.astype(dtype=np.float32).transpose(2, 0, 1)
    print("Started transforming image")
    # Prepend a batch axis.
    batch = batch.reshape((1,) + batch.shape)
    if _padding > 0:
        pad_spec = [[0, 0], [0, 0], [_padding, _padding], [_padding, _padding]]
        batch = np.pad(batch, pad_spec, 'symmetric')
    out = model(Variable(xp.asarray(batch)))
    result = cuda.to_cpu(out.data)
    if _padding > 0:
        # Crop the padded border back off.
        result = result[:, :, _padding:-_padding, _padding:-_padding]
    # Drop batch axis, CHW -> HWC, cast to uint8.
    result = np.uint8(result[0].transpose((1, 2, 0)))
    if _medianFilter > 0:
        filtered = Image.fromarray(result).filter(
            ImageFilter.MedianFilter(_medianFilter))
        result = np.array(filtered, dtype=np.uint8)
    print('Transforming took ', time.time() - started_at, 'sec')
    return result
def ocr_image(self, image, save):
    """OCR *image*: whiten light background pixels, clean the image up,
    binarize it, run tesseract, and return the text joined onto a single
    line.  When *save* is falsy the intermediate temp image is deleted.

    Raises FileNotFoundException when the resolved source path is missing.
    """
    image_source = self._get_source(image)
    if not os.path.isfile(image_source):
        raise FileNotFoundException("File not found: " + str(image_source))

    im = Image.open(image_source).convert("RGBA")
    # Keep dark pixels (any channel < 112); blank everything else to white.
    im.putdata([
        px if px[0] < 112 or px[1] < 112 or px[2] < 112 else (255, 255, 255)
        for px in im.getdata()
    ])
    im = im.filter(ImageFilter.MedianFilter())
    im = ImageEnhance.Contrast(im).enhance(2)
    im = im.convert('1')

    temp_image = os.path.join(self._get_output_folder(image_source), "temp_.jpg")
    im.save(temp_image)

    # hungarian - hun
    # english - eng
    text = pytesseract.image_to_string(
        Image.open(temp_image),
        config=
        '-c tessedit_char_whitelist=0123456789abcdefghijklmnopqrstuvwxyz -psm 6',
        lang='eng')
    if not save:
        os.remove(temp_image)
    return " ".join(text.splitlines())
def imgTransfer(im):
    """Smooth, boost contrast, grayscale, denoise, and binarize *im*."""
    smoothed = im.filter(ImageFilter.MedianFilter(1))
    enhanced = ImageEnhance.Contrast(smoothed).enhance(1.5)
    gray = enhanced.convert('L')   # grayscale conversion
    clean = denoising(gray)        # noise removal
    return binarizing(clean, 200)  # binarize with threshold 200
def img_transfer(im):
    """Sharpen detail, median-filter the noise away, and return the image
    converted to grayscale ('L') mode."""
    detailed = im.convert('RGB').filter(ImageFilter.DETAIL)
    # MedianFilter is a median smoother that reduces noise.
    smoothed = detailed.filter(ImageFilter.MedianFilter())
    return smoothed.convert('L')
def filter(self, **kwargs) -> Image:
    """Apply a median filter to self.image (converted to RGB first).

    Keyword 'size' selects the kernel size; defaults to 3.
    """
    size = int(kwargs['size']) if 'size' in kwargs else 3
    rgb = self.image.convert('RGB')
    return rgb.filter(ImageFilter.MedianFilter(size=size))
def image_enhance(img):
    """Sharpen and desaturate *img*, save it as 'image_enhanced.jpg'
    inside ./enhanced_test, and return the OCR'd text.

    Fixes: no longer shadows the builtin ``dir``; the contrast enhancer
    that was constructed but never applied (its result was discarded)
    has been removed as dead code.
    """
    image = Image.open(img)
    out_dir = os.getcwd() + "/enhanced_test"
    # NOTE(review): chdir is a process-wide side effect; callers beware.
    os.chdir(out_dir)
    #image.save('image_600.jpg',dpi=(600,600))
    # Denoise, then sharpen.
    image = image.filter(ImageFilter.MedianFilter())
    image = ImageEnhance.Sharpness(image).enhance(2.0)
    # Grayscale via zero color saturation.
    image = ImageEnhance.Color(image).enhance(0.0)
    image.save('image_enhanced.jpg')
    code = image_to_string(Image.open('image_enhanced.jpg'))
    return code
def median_subtracting_img(img, filter_size, img_name, interm):
    """Median-filter *img*, find pixels whose filtered red channel is 0,
    and black those positions out in a copy of the original image.

    :param img: source PIL image (RGB expected — confirm)
    :param filter_size: median filter kernel size
    :param img_name: base name used for intermediate output files
    :param interm: when True, save the intermediate images under Outputs/
    :return: the modified image

    NOTE(review): the ``k == 3`` branch blacks out all matches only when
    there are *exactly three* matching pixels; otherwise only the first
    match's red channel is zeroed, and the function raises IndexError
    when there are no matches at all.  This looks wrong — confirm intent.
    """
    median_img = img.filter(ImageFilter.MedianFilter(size=filter_size))
    if interm == True:
        filename = 'Outputs/median_image/' + img_name + '.png'
        median_img.save(filename)
    np_median = np.asarray(median_img)
    # Coordinates where the filtered red channel is exactly 0.
    xy = np.where(np_median[:, :, 0] == 0)
    np_enhance = np.array(img)
    e = np.array(xy)
    k = e.shape[1]  # number of matching pixels
    if k == 3:
        for i in range(k):
            np_enhance[e[0, i], e[1, i], 0] = 0
            np_enhance[e[0, i], e[1, i], 1] = 0
            np_enhance[e[0, i], e[1, i], 2] = 0
    else:
        np_enhance[e[0, 0], e[1, 0], 0] = 0
    enhanced_changed_img = Image.fromarray(np_enhance)
    if interm == True:
        filename_e = 'Outputs/enhanced_change_image/' + img_name + '.png'
        enhanced_changed_img.save(filename_e)
    return enhanced_changed_img
def phenotype(self):
    """Express every geno unit; every 5th unit, OCR the on-screen score
    and store it as fitness.  (Python 2 code: two-argument
    string.maketrans / str.translate.)"""
    pyautogui.PAUSE = 1  # global delay between pyautogui actions
    counter = 0
    for i in self.__genoUnitCollection:
        i.phenotype()
        if counter % 5 == 0:
            # Grab screenshot and crop score area
            screenshot = ImageGrab.grab()
            scoreRectangle = (560, 650, 660, 680)  # left, top, right, bottom
            cropped_rectangle = screenshot.crop(scoreRectangle)
            # Filter and enhance area to better recognize digits
            cropped_rectangle = cropped_rectangle.filter(
                ImageFilter.MedianFilter())
            enhancer = ImageEnhance.Contrast(cropped_rectangle)
            cropped_rectangle = enhancer.enhance(2)
            cropped_rectangle = cropped_rectangle.convert('1')
            text = image_to_string(cropped_rectangle)
            # Remove non-digits from score
            # NOTE(review): 'all' shadows the builtin all().
            all = string.maketrans('', '')
            nodigits = all.translate(all, string.digits)
            # Convert to Int and save as Fitness
            # No digits recognized: stop expressing further units.
            if text.translate(all, nodigits) == '':
                break
            self.__fitness = int(text.translate(all, nodigits))
        counter += 1
def run(self):
    """Median-filter self.fileName, save it with a _1 suffix, convert it
    to CIELAB with OpenCV, and dump every pixel's L/a/b values (tab
    separated, one pixel per line) to testData.txt, returning the same
    text.  (Python 2 code: print statements and the file() builtin.)"""
    #fileName="test.jpg"
    fileName = self.fileName
    fileName_filter = fileName.replace(".jpg", "_1.jpg").replace(".png", "_1.png")
    im = Image.open(fileName)
    #im.show()
    #cv2.waitKey(0) #lab??
    im = im.filter(ImageFilter.MedianFilter())
    #cv2.waitKey(0) #lab??
    # NOTE(review): enhancer is constructed but never applied.
    enhancer = ImageEnhance.Contrast(im)
    #im.show()
    #cv2.waitKey(0) #lab??
    im.save(fileName_filter)
    #im.show()
    image = cv2.imread(fileName_filter)
    #cv2.imshow("Original",image)
    #cv2.waitKey(0) #lab??
    labs = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
    print len(labs)
    print len(labs[0])
    print type(labs)
    data = np.array([0, 0, 0])  # NOTE(review): unused below
    st = ""
    # NOTE(review): += string building is quadratic for large images.
    for row in labs:
        for col in row:
            st = st + "%s\t%s\t%s\n" % (col[0], col[1], col[2])
            #data.append(col)
    f = file("testData.txt", "w+")
    f.writelines(st)
    f.close()
    #print len(data)
    return st
def imageGrab():
    """Grab the question and three option regions from the screen, OCR
    them, print the results, and open a Bing search for the question.

    Fixes: the median filter and contrast enhancement were originally
    computed *after* the question image was saved (and the Contrast
    enhancer was never invoked), so they had no effect on the OCR input;
    they are now applied — with factor 2, matching the enhancer's clear
    intent — before saving.
    """
    # Screen regions (left, top, right, bottom).
    boxques = (48, 370, 410, 430)
    boxop1 = (48, 505, 400, 535)
    boxop2 = (48, 595, 400, 625)
    boxop3 = (48, 688, 400, 715)
    imgques = ImageGrab.grab(boxques)
    imgques = imgques.filter(ImageFilter.MedianFilter())
    imgques = ImageEnhance.Contrast(imgques).enhance(2)
    imgques.save('imgques.jpeg')
    imgop1 = ImageGrab.grab(boxop1)
    imgop1.save('imgop1.jpeg')
    imgop2 = ImageGrab.grab(boxop2)
    imgop2.save('imgop2.jpeg')
    imgop3 = ImageGrab.grab(boxop3)
    imgop3.save('imgop3.jpeg')
    #time.sleep(1)
    text_ques = pytesseract.image_to_string(Image.open('imgques.jpeg'))
    print(text_ques)
    text_op1 = pytesseract.image_to_string(Image.open('imgop1.jpeg'))
    print(text_op1)
    text_op2 = pytesseract.image_to_string(Image.open('imgop2.jpeg'))
    print(text_op2)
    text_op3 = pytesseract.image_to_string(Image.open('imgop3.jpeg'))
    print(text_op3)
    url = 'https://www.bing.com/search?q='
    url += text_ques
    print(url)
    search(url)
def foto_crop(_foto): try: image = Image.open(_foto) except IOError: print "don't treat" #continue #image = image.resize((541, 538)) width = image.size[0] #Определяем ширину. height = image.size[1] #Определяем высоту. sum = Image.new('RGB', (width, width), (255, 255, 255)) # create a new white image if height >= width: #sum = Image.new( 'RGB', (width, width) , (255,255,255)) # create a new white image print u"height is higher" diff = height - width for i in xrange(width): for j in xrange(width): pixel = image.getpixel((i, j + (diff / 2))) sum.putpixel((i, j), pixel) #sum.save(_foto) # перезаписываем первоначальный файл else: # height < width: #sum = Image.new( 'RGB', (height, height) , (255,255,255)) # create a new white image print u"width is higher" diff = width - height for i in xrange(height): for j in xrange(height): pixel = image.getpixel((i + (diff / 2), j)) sum.putpixel((i, j), pixel) sum.filter(ImageFilter.MedianFilter(size=3)) sum.save(_foto)
def FinalProcess(img):
    """Final captcha cleanup: boost contrast 3x, convert to grayscale,
    binarize, strip isolated hot pixels, then apply a 5x5 median filter.

    NOTE(review): the return values of binarizing() and
    remove_hot_point() are discarded — presumably they mutate *img* in
    place, but the sibling helper at imgTransfer uses ``binarizing(im,
    200)``'s return value; confirm these calls actually take effect.
    """
    img = ImageEnhance.Contrast(img).enhance(3.0)
    img = img.convert('L')
    binarizing(img)
    remove_hot_point(img)
    img = img.filter(ImageFilter.MedianFilter(size = 5))
    return img