def draw_maze(self):
    wall_block_image = Image.open("../IMAGE/wall_block.png")
    wall_block_image = wall_block_image.resize((size_of_block, size_of_block), Image.ANTIALIAS)
    wall_block_image = ImageTk.PhotoImage(wall_block_image)
    food_image = Image.open("../IMAGE/food.png")
    food_image = food_image.resize((size_of_block, size_of_block), Image.ANTIALIAS)
    food_image = ImageTk.PhotoImage(food_image)
    ghost_image = Image.open("../IMAGE/ghost.png")
    ghost_image = ghost_image.resize((size_of_block, size_of_block), Image.ANTIALIAS)
    ghost_image = ImageTk.PhotoImage(ghost_image)
    score_image = Image.open("../IMAGE/score.png")
    score_image = score_image.resize((180, 130), Image.ANTIALIAS)
    score_image = ImageTk.PhotoImage(score_image)
    score_image_frame = self.maze_frame.create_image(
        self.size[1] * size_of_block + 20,
        (self.size[0] * size_of_block) // 2 - 50,
        anchor='nw', image=score_image)
    for row in range(self.size[0]):
        for column in range(self.size[1]):
            if self.maze[row][column] == 1:
                self.maze_frame.create_image(
                    column * size_of_block, row * size_of_block,
                    anchor='nw', image=wall_block_image)
            if self.maze[row][column] == 2:
                self.food.append([
                    self.maze_frame.create_image(
                        column * size_of_block, row * size_of_block,
                        anchor='nw', image=food_image),
                    (row, column)])
    for row in range(self.size[0]):
        for column in range(self.size[1]):
            if self.maze[row][column] == 3:
                self.monster.append([
                    self.maze_frame.create_image(
                        column * size_of_block, row * size_of_block,
                        anchor='nw', image=ghost_image),
                    (row, column)])
    self.maze_frame.image = [wall_block_image, food_image, ghost_image, score_image]

def input():
    print("enter the file name")
    from inputproject import wq
    qw = wq
    print(qw)
    im = Image.open(qw, "r")
    im.show()

def draw_pacman(self):
    pacman_image = Image.open("../IMAGE/pacman.png")
    pacman_image = pacman_image.resize((size_of_block, size_of_block), Image.ANTIALIAS)
    pacman_image = ImageTk.PhotoImage(pacman_image)
    self.pacman = self.maze_frame.create_image(
        self.pacman_pos[1] * size_of_block, self.pacman_pos[0] * size_of_block,
        anchor='nw', image=pacman_image)
    self.maze_frame.image.append(pacman_image)

def edit():  # function to edit images
    count = 0
    for item in dirs:  # traversing through all directory entries
        percentage = (count / len(dirs)) * 100  # just to show percentage of conversion
        print("Editing images ==> " + str(percentage) + '%')
        count += 1
        if os.path.isfile(source + item):  # check whether the entry is a file; ignore it if not
            img = Image.open(source + item)  # loading the image
            file_path, splited_text = os.path.splitext(source + item)  # file_path holds the full path without extension, splited_text holds the extension
            file_name = file_path.split('/')[-1]  # splitting the file name from the full path
            img_resize = img.resize((128, 128), Image.ANTIALIAS)  # resizing images from 192x192 to 128x128
            img_rotate = img_resize.rotate(90)  # rotating images 90 degrees (PIL rotates counter-clockwise for positive angles)
            if img_rotate.mode != 'RGB':  # converting images to RGB so they save without error
                img_rotate = img_rotate.convert('RGB')
            img_rotate.save(destination + file_name + '.jpeg', 'JPEG', quality=90)  # saving edited images to the destination path

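# Added note (not from the original source): edit() relies on module-level names
# `dirs`, `source`, and `destination` defined elsewhere in the script. A hypothetical
# setup might look like this; the folder names are purely illustrative.
import os
source = 'input_images/'        # hypothetical folder holding the originals
destination = 'output_images/'  # hypothetical folder for the edited copies
dirs = os.listdir(source)
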
def getImagesAndLabels(path):  # method to get the images and label data
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]  # get all file paths
    faceSamples = []  # create empty face sample list
    ids = []  # create empty id list
    for imagePath in imagePaths:  # loop over all the file paths
        PIL_img = Image.open(imagePath).convert('L')  # get the image and convert it to grayscale
        img_numpy = np.array(PIL_img, 'uint8')  # PIL image to numpy array
        id = int(os.path.split(imagePath)[-1].split(".")[1])  # get the image id
        faces = detector.detectMultiScale(img_numpy)  # get the face from the training image
        for (x, y, w, h) in faces:  # loop over each face and append it with its ID
            faceSamples.append(img_numpy[y:y + h, x:x + w])  # add the face crop to the samples
            ids.append(id)  # add the ID to the id list
    return faceSamples, ids

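# A minimal usage sketch (added, not from the original source): it assumes the
# module-level Haar cascade `detector` used above is already defined and that the
# training images sit in a hypothetical "dataset" folder. cv2.face requires the
# opencv-contrib-python package.
def train_lbph_example(path='dataset'):
    import cv2
    import numpy as np
    faces, ids = getImagesAndLabels(path)
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.train(faces, np.array(ids))  # labels must be an integer numpy array
    recognizer.write('trainer.yml')         # persist the trained model
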
def get_image(path):
    img = np.asarray(Image.open(path))
    if len(img.shape) == 3:
        if img.shape[2] > 3:
            return np.array([[pixel[:3] for pixel in row] for row in img])
    elif len(img.shape) == 2:
        return np.array([[[pixel] * 3 for pixel in row] for row in img])
    return img

def abhi():
    import tkinter as qw
    root = qw.Tk()
    # raw string so '\a' in the path is not read as an escape character
    im = Image.open(r'F:\wallpaper\Wallpaperz\arizona_waterfalls-wide.jpg')
    # im = im.resize((950, 1050))
    img = ImageTk.PhotoImage(im)  # Tkinter labels need a PhotoImage, not a PIL Image
    panel = Label(root, image=img)
    panel.image = img  # keep a reference so the image is not garbage-collected
    panel.pack(side="bottom", fill="both", expand="yes")
    root.mainloop()

def GOBoardElement(self, window):
    imgWidth = 500
    imgHeight = 500
    img = Image.open("GO9by9.png")
    img = img.resize((imgWidth, imgHeight))  # resize takes a (width, height) tuple of ints
    img = ImageTk.PhotoImage(img)
    panel = Label(window, image=img)
    panel.image = img
    panel.place(relheight=.095, relwidth=0.25, relx=0.7, rely=0.03)

def colorfilter():
    global panel
    x = openfn()
    u = color123()
    color_image = Image.open(x)
    bw = color_image.convert('L')
    img = bw.resize((1250, 900), Image.ANTIALIAS)
    img = ImageTk.PhotoImage(img)
    panel = Label(root, image=img)
    panel.image = img
    panel.pack(side="top", fill="both", expand="NO")

def resize(imgFile):
    im1 = Image.open(imgFile)
    width, height = im1.size
    ratio = (width * 100 / height)
    #if ratio < 60 or ratio > 150:
    #    print(imgFile)
    #im1.close()
    print(imgFile, "(original):", width, "x", height)
    im2 = im1.resize((150, 150), Image.ANTIALIAS)
    im2.save(imgFile)
    width, height = im2.size
    print(imgFile, "(new):", width, "x", height)

def open_img():
    global panel
    x = openfn()
    img = Image.open(x)
    img = img.resize((1250, 900), Image.ANTIALIAS)
    img = ImageTk.PhotoImage(img)
    panel = Label(root, image=img)
    panel.image = img
    #panel.image = NONE
    panel.pack(side="top", fill="both", expand="NO")

def decrypt():
    # load the image, convert it into a numpy array and display it on the GUI.
    load = Image.open("Syngenta.jpg")
    load.thumbnail(image_display_size, Image.ANTIALIAS)
    load = np.asarray(load)
    load = Image.fromarray(np.uint8(load))
    render = ImageTk.PhotoImage(load)
    img = Label(app, image=render)
    img.image = render
    img.place(x=100, y=50)

    # Algorithm to decrypt the data from the image
    img = cv2.imread("Syngenta.jpg")
    data = []
    stop = False
    for index_i, i in enumerate(img):
        i.tolist()
        for index_j, j in enumerate(i):
            if index_j % 3 == 2:
                # first pixel
                data.append(bin(j[0])[-1])
                # second pixel
                data.append(bin(j[1])[-1])
                # third pixel
                if bin(j[2])[-1] == '1':
                    stop = True
                    break
            else:
                # first pixel
                data.append(bin(j[0])[-1])
                # second pixel
                data.append(bin(j[1])[-1])
                data.append(bin(j[2])[-1])
        if stop:
            break

    message = []
    # join all the bits to form letters (ASCII representation)
    for i in range(int((len(data) + 1) / 8)):
        message.append(data[i * 8:(i * 8 + 8)])
    # join all the letters to form the message.
    message = [chr(int(''.join(i), 2)) for i in message]
    message = ''.join(message)
    message_label = Label(app, text=message, bg='lavender', font=("Times New Roman", 10))
    message_label.place(x=30, y=400)

def train_classifier(data_dir):
    path = [os.path.join(data_dir, f) for f in os.listdir(data_dir)]
    faces = []
    ids = []
    for imgs in path:
        img = Image.open(imgs).convert("L")
        imgeNp = np.array(img, 'uint8')
        id = int(os.path.split(imgs)[1].split(".")[1])
        faces.append(imgeNp)
        ids.append(id)
    ids = np.array(ids)
    clf = cv2.face.LBPHFaceRecognizer_create()
    clf.train(faces, ids)
    clf.write("aomclassifier.xml")

def input_fn(request_body, content_type='application/json'):
    logger.info('Deserializing the input data.')
    if content_type == 'application/json':
        input_data = json.loads(request_body)
        url = input_data['url']
        logger.info(f'Image url: {url}')
        image_data = Image.open(requests.get(url, stream=True).raw)
        image_transform = transforms.Compose([
            transforms.Resize(size=256),
            transforms.CenterCrop(size=224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406],
                                 [0.229, 0.224, 0.225])
        ])
        return image_transform(image_data)
    raise Exception(f'Requested unsupported ContentType in content_type {content_type}')

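# A hedged usage sketch (added, not from the original source): input_fn expects a
# JSON body carrying an image URL; the URL below is purely illustrative.
def input_fn_example():
    import json
    body = json.dumps({'url': 'https://example.com/sample.jpg'})
    tensor = input_fn(body, content_type='application/json')
    print(tensor.shape)  # torch.Size([3, 224, 224]) for an RGB image
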
def Recognize(self, fn):
    im = Image.open(fn)
    im = util.CenterExtend(im, radius=20)

    vec = np.asarray(im.convert('L')).copy()
    Y = []
    for i in range(vec.shape[0]):
        for j in range(vec.shape[1]):
            if vec[i][j] <= 200:
                Y.append([i, j])

    gmm = GaussianMixture(n_components=7, covariance_type='tied', reg_covar=1e2, tol=1e3, n_init=9)
    gmm.fit(Y)
    centers = gmm.means_

    points = []
    for i in range(7):
        scoring = 0.0
        for w_i in range(3):
            for w_j in range(3):
                p_x = centers[i][0] - 1 + w_i
                p_y = centers[i][1] - 1 + w_j

                cr = util.crop(im, p_x, p_y, radius=20)
                cr = cr.resize((40, 40), Image.ANTIALIAS)

                X = np.asarray(cr.convert('L'), dtype='float')
                X = (X.astype("float") - 180) / 200

                x0 = np.expand_dims(X, axis=0)
                x1 = np.expand_dims(x0, axis=3)

                global model
                if self.model.predict(x1)[0][0] < 0.5:
                    scoring += 1

        if scoring > 4:
            points.append((centers[i][0] - 20, centers[i][1] - 20))

    return points

def main(file):
    #file = 'substinces.PNG'
    # opening an image from the source path
    img = Image.open(file)
    dot = file.index(".")
    name = file[0:dot]
    # path where the tesseract module is installed
    pytesseract.pytesseract.tesseract_cmd = 'C:/Program Files (x86)/Tesseract-OCR/tesseract.exe'
    # converts the image to text and saves it into the result variable
    result = pytesseract.image_to_string(img)
    # write the text to a text file saved alongside the source
    with open(name + '.txt', mode='w') as file:
        file.write(result)
    with open(name + '.txt', mode='r') as file:
        orig = file.read()

def processPic(self):
    """
    Looks for geodata in EXIF.

    Args:
        -
    Returns:
        None: when either the pic is not open or no geodata is found.
        data: data with geodata represented as
              {"latitude": value, "longitude": value, "datetime": value}.
    """
    try:
        image = Image.open(self.lclPath)
    except:
        return None
    else:
        exif_data = exif.get_exif_data(image)
        data = exif.get_lat_lon_datatime(exif_data)
        if data and (data["latitude"] != "" and data["longitude"] != ""):
            return data

def UploadInfo(self):
    #ticket = str(self.ticket_number.get())+".PNG"
    #print(ticket)
    try:
        self.path = "D:/Apache Software Foundation/Tomcat 9.0/webapps/cenrayid/WEB-INF/classes/photo/" + self.ticket_number.get() + ".jpg"
        #path="/Photo/"+str(self.ticket_number.get())+".png"
        #self.img = ImageTk.PhotoImage(Image.open(path))
        self.img = Image.open(self.path)
        self.img = self.img.resize((100, 100), Image.ANTIALIAS)
        self.img = ImageTk.PhotoImage(self.img)
        '''
        self.img=PhotoImage(file="D:/TimeKeeper_stuffs/Photo/"+self.ticket_number.get()+".png")
        self.img=self.img.zoom(25,25)
        self.img=self.img.subsample(50)
        '''
        Label(self.root, image=self.img).place(x=50, y=450)
    except Exception as e:
        #print(e)
        Label(self.root, text="unable to load image").place(x=50, y=450)

    self.dictionarydata = {}
    #***********************************************************************
    # created object of class DataBase;
    # if we want to use a different database then just change the DataBase class
    self.db = DataBase()
    #************************************************************************
    #print(self.ticket_number.get())
    self.dictionarydata = self.db.load(self.ticket_number.get())
    #print(self.dictionarydata)
    if self.dictionarydata:
        self.fullname.set(self.dictionarydata['full_name'])
        self.department.set(self.dictionarydata['department'])
        #self.cardLost.set(self.dictionarydata['card_lost'])
        self.memo.set(self.dictionarydata['reason_for_memo'])
        self.remark.insert('end', self.dictionarydata['remark'])
        self.shift.set(self.dictionarydata['shift'])
        self.dept_code.set(self.dictionarydata['department_code'])
        return True
    else:
        return False

def mirror():
    global panel
    x = openfn()
    image_obj = Image.open(x)
    rotated_image = image_obj.transpose(Image.FLIP_LEFT_RIGHT)
    img = rotated_image.resize((1250, 900), Image.ANTIALIAS)
    img = ImageTk.PhotoImage(img)
    panel = Label(root, image=img)
    panel.image = img
    panel.pack(side="top", fill="both", expand="NO")

    def saveb():
        MsgBox = tk.messagebox.askquestion('SAVE', 'do you want to save the img', icon='warning')
        if MsgBox == 'yes':
            rotated_image.save('mirror.JPEG')

    threading.Timer(5.0, saveb).start()

def cropy():
    global panel
    x = openfn()
    image_obj = Image.open(x)
    cropped_image = image_obj.crop((161, 166, 706, 1050))
    img = cropped_image.resize((400, 500), Image.ANTIALIAS)
    img = ImageTk.PhotoImage(img)
    panel = Label(root, image=img)
    panel.image = img
    panel.pack(side="top", fill="both", expand="NO")

    def saveb():
        MsgBox = tk.messagebox.askquestion('SAVE', 'do you want to save the img', icon='warning')
        if MsgBox == 'yes':
            cropped_image.save('crop.JPEG')

    threading.Timer(5.0, saveb).start()

def blackwhite():
    global panel
    x = openfn()
    color_image = Image.open(x)
    bw = color_image.convert('L')
    img = bw.resize((1250, 900), Image.ANTIALIAS)
    img = ImageTk.PhotoImage(img)
    panel = Label(root, image=img)
    panel.image = img
    panel.pack(side="top", fill="both", expand="NO")

    def saveb():
        MsgBox = tk.messagebox.askquestion('SAVE', 'do you want to save the img', icon='warning')
        if MsgBox == 'yes':
            bw.save('blackandwhite.jpg')

    threading.Timer(5.0, saveb).start()

def rotate():
    global panel
    x = openfn()
    image_obj = Image.open(x)
    rotated_image = image_obj.rotate(90)
    img = rotated_image.resize((1250, 900), Image.ANTIALIAS)
    img = ImageTk.PhotoImage(img)
    panel = Label(root, image=img)
    panel.image = img
    panel.pack(side="top", fill="both", expand="NO")
    #rotated_image.save(saved_location)
    #rotated_image.show()

    def saveb():
        MsgBox = tk.messagebox.askquestion('SAVE', 'do you want to save the img', icon='warning')
        if MsgBox == 'yes':
            rotated_image.save('rotate.JPEG')

    threading.Timer(5.0, saveb).start()

def sharpen():
    global panel
    x = openfn()
    im = Image.open(x)
    im_sharp = im.filter(ImageFilter.SHARPEN)
    img = im_sharp.resize((1250, 900), Image.ANTIALIAS)
    img = ImageTk.PhotoImage(img)
    panel = Label(root, image=img)
    panel.image = img
    panel.pack(side="top", fill="both", expand="NO")
    #im_sharp.save('image_sharpened.jpg', 'JPEG')
    #im_sharp.show()

    def saveb():
        MsgBox = tk.messagebox.askquestion('SAVE', 'do you want to save the img', icon='warning')
        if MsgBox == 'yes':
            im_sharp.save('sharpen.jpg')

    threading.Timer(5.0, saveb).start()

def blur():
    global panel
    x = openfn()
    img = Image.open(x)
    im = img.filter(ImageFilter.BLUR)
    img = im.resize((1250, 900), Image.ANTIALIAS)
    img = ImageTk.PhotoImage(img)
    panel = Label(root, image=img)
    panel.image = img
    #panel.image = NONE
    panel.pack(side="top", fill="both", expand="NO")

    def saveb():
        MsgBox = tk.messagebox.askquestion('SAVE', 'do you want to save the img', icon='warning')
        if MsgBox == 'yes':
            im.save('blur.jpg')

    threading.Timer(5.0, saveb).start()

def brightness():
    global panel
    x = openfn()
    im = Image.open(x)
    im3 = ImageEnhance.Brightness(im)
    im2 = im3.enhance(2.0)
    img = im2.resize((1000, 800), Image.ANTIALIAS)
    img = ImageTk.PhotoImage(img)
    panel = Label(root, image=img)
    panel.image = img
    panel.pack()
    #panel.image = NONE

    def saveb():
        MsgBox = tk.messagebox.askquestion('SAVE', 'do you want to save the img', icon='warning')
        if MsgBox == 'yes':
            im3.enhance(2.0).save("brightness.jpg")

    threading.Timer(5.0, saveb).start()

def getImagesAndLabels(path):
    # get the paths of all the files in the folder
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    # create empty face list
    faceSamples = []
    # create empty ID list
    Ids = []
    # now looping through all the image paths and loading the Ids and the images
    for imagePath in imagePaths:
        # loading the image and converting it to grayscale
        pilImage = Image.open(imagePath).convert('L')
        # now we are converting the PIL image into a numpy array
        imageNp = np.array(pilImage, 'uint8')
        # getting the Id from the image
        Id = int(os.path.split(imagePath)[-1].split(".")[1])
        # extract the face from the training image sample
        faces = detector.detectMultiScale(imageNp)
        # if a face is there then append it to the list along with its Id
        for (x, y, w, h) in faces:
            faceSamples.append(imageNp[y:y + h, x:x + w])
            Ids.append(Id)
    return faceSamples, Ids

def getImagesWithID(path):
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    #print (imagePaths)
    #getImagesWithID(path)
    faces = []
    IDs = []
    for imagePath in imagePaths:
        faceImg = Image.open(imagePath).convert('L')
        # this is a PIL image, so convert it into a numpy array so that OpenCV can work with it
        faceNp = np.array(faceImg, 'uint8')
        # now we want the face ids
        ID = int(os.path.split(imagePath)[-1].split('.')[1])
        # now that we have the id and image, store them directly into faces and IDs
        faces.append(faceNp)
        IDs.append(ID)
        cv2.imshow("training", faceNp)  # show which images are being processed
        cv2.waitKey(70)
    return np.array(IDs), faces

def __getitem__(self, item):
    image = Image.open(self.image_paths[item])
    image = image.convert('RGB')
    targets = self.targets[item]
    if self.resize is not None:
        image = image.resize((self.resize[1], self.resize[0]), resample=Image.BILINEAR)
    image = np.array(image)
    if self.augmentations is not None:
        augmented = self.augmentations(image=image)
        image = augmented['image']
    image = np.transpose(image, (2, 0, 1)).astype(np.float64)
    return {
        'image': torch.tensor(image, dtype=torch.float),
        'targets': torch.tensor(targets, dtype=torch.long)
    }

def getIImage(b64_img):
    valid_types = ('png', 'jpg', 'jpeg')
    imgBytes = base64.decodebytes(b64_img.encode('UTF-8'))
    imgType = imghdr.what(None, imgBytes)
    if imgType not in valid_types:
        # we should convert it!
        imgSI = BytesIO(imgBytes)
        imgSO = BytesIO()
        img = Image.open(imgSI)
        if img.format.lower() in valid_types:
            imgB64 = b64_img
            imgType = img.format
        else:
            img.save(imgSO, 'png')
            imgSO.seek(0)
            imgB64 = base64.encodebytes(imgSO.read()).decode('UTF-8').replace("\n", "")
            imgType = 'png'
        img.close()
        imgSO.close()
        imgSI.close()
    else:
        imgB64 = b64_img
    return imgB64, imgType

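# A minimal usage sketch (added, not from the original source): base64-encode a
# file and normalise it with getIImage; anything that is not already PNG/JPEG
# comes back re-encoded as PNG. "input.bmp" is a hypothetical path.
def getIImage_example():
    import base64
    with open('input.bmp', 'rb') as fh:
        b64 = base64.encodebytes(fh.read()).decode('UTF-8')
    img_b64, img_type = getIImage(b64)
    print(img_type)  # 'png' for formats outside the accepted list
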
def load_images(file):
    """
    Loads a black-and-white image and converts it to CIE LAB.

    Parameters
    ----------
    file : str
        Path to image file

    Returns
    -------
    ndarray
        3D array with image
    """
    try:
        img = Image.open(file)
    except (OSError, ValueError, IOError, ZeroDivisionError) as e:
        print("Can not open file", file, "Error: ", e)
        return None
    img = img.convert(mode="RGB")  # ensure that the image is RGB
    rgb = np.array(img)
    return color.rgb2lab(rgb)

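# A short usage sketch (added, not from the original source): load_images returns
# CIE LAB data, so a round trip back to RGB goes through color.lab2rgb, assuming
# `color` is skimage.color as the rgb2lab call above implies; "photo.jpg" is a
# hypothetical path.
def load_images_example():
    lab = load_images('photo.jpg')
    if lab is not None:
        rgb = color.lab2rgb(lab)  # float array in [0, 1], shape (H, W, 3)
        print(lab.shape, rgb.shape)
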
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 14 16:07:12 2020

@author: Sourav
"""
# Import required image libraries
import cv2
import os
from PIL import Image, ImageFilter

im = Image.open(r'G:\chk2.jpg')
#im.show()
cropped = im.crop((472, 147, 703, 241))
gaussImage = cropped.filter(ImageFilter.GaussianBlur(20))
im.paste(gaussImage, (472, 147, 703, 241))
im.save(r'G:\masked1.jpg')

# Reading an image in default mode
image = cv2.imread(r'G:\masked1.jpg')
directory = r'G:\Chequemasking'
os.chdir(directory)
print(os.listdir(directory))

# font
font = cv2.FONT_HERSHEY_PLAIN
org = (472, 220)
# fontScale

def open_file_as_pil_image(source_file):
    """Return a new Image object from source file."""
    return Image.open(source_file)

from PIL import Image, ImageDraw, ImageFont


def add_num(img):
    draw = ImageDraw.Draw(img)
    myfont = ImageFont.truetype('C:/windows/fonts/Arial.ttf', size=40)
    fillcolor = "#ff0000"
    width, height = img.size
    draw.text((width - 40, 0), '99', font=myfont, fill=fillcolor)
    img.save('result.jpg', 'jpeg')
    return 0


if __name__ == '__main__':
    # raw string so '\U' in the path is not parsed as a unicode escape
    image = Image.open(r'C:\Users\Administrator\Pictures\zoro.jpg')
    add_num(image)