def generate_picture_from_user_info(username, statistics, achievements):
    """Render a 450x470 profile card showing the user's name, statistics and achievement icons."""
    card = Image.new("RGB", (450, 470), color=(180, 180, 180))
    draw = ImageDraw.Draw(card)
    text_color = (94, 73, 15)

    header = ((10, 10), username)
    stat_lines = [
        ((10, 50), "Создано задач: {}".format(statistics['task_count'])),
        ((10, 70), "Решено задач: {}".format(statistics['solved_task_count'])),
        ((10, 90), "Процент правильных ответов: {}%".format(statistics['percentage'])),
        ((10, 110), "Рейтинг: {}".format(statistics['rating'])),
    ]

    header_font = ImageFont.truetype("static/arial.ttf", 30)
    statistic_font = ImageFont.truetype("static/arial.ttf", 15)

    draw.text(header[0], header[1], fill=text_color, font=header_font)
    for position, line in stat_lines:
        draw.text(position, line, fill=text_color, font=statistic_font)

    # The 'First' achievement is special-cased: drawn in color with a counter
    # when earned, otherwise pasted as a darkened grayscale placeholder.
    ach_first = Achievement.objects.get(name='First')
    first = Image.open(ach_first.imageUrl)
    first.thumbnail((100, 100))
    if achievements.filter(achievement=ach_first).exists():
        card.paste(first, (340, 30))
        draw.text((430, 110), str(achievements.get(achievement=ach_first).count),
                  fill=(255, 0, 0), font=statistic_font)
    else:
        darkened = ImageOps.colorize(ImageOps.grayscale(first), (0, 0, 0), (50, 50, 50))
        card.paste(darkened, (340, 30))

    # Remaining achievements are laid out on a 4-column grid.
    pictures_and_positions = (
        ('Creator1', (10, 140)), ('Creator2', (120, 140)),
        ('Creator3', (230, 140)), ('Creator4', (340, 140)),
        ('Solver1', (10, 250)), ('Solver2', (120, 250)),
        ('Solver3', (230, 250)), ('Solver4', (340, 250)),
        ('Commentator1', (10, 360)), ('Commentator2', (120, 360)),
        ('Commentator3', (230, 360)), ('Commentator4', (340, 360)),
    )
    for achievement_name, position in pictures_and_positions:
        generate_achieve_on_image(card, achievements, achievement_name, position)
    return card
def transform(cls, key_type, source):
    """
    Return a BytesIO object with the transformed image.

    `key_type` selects an entry from the module-level `specs` dict; the spec
    may request cropping ('crop', 'width', 'height') and a JPEG 'quality'.
    The result is always an RGB JPEG.
    """
    # Context manager ensures the source file is closed even if Pillow raises
    # while decoding — the original leaked the handle on the error path.
    with open(source, 'rb') as fp:
        im = Image.open(fp)
        # Let Pillow buffer the whole image in one block for the save below.
        ImageFile.MAXBLOCK = im.size[0] * im.size[1]
        if im.mode != 'RGB':
            im = im.convert('RGB')
        spec = specs[key_type]
        if spec.get('crop'):
            w, h = im.size[0], im.size[1]
            if w <= spec['width'] or h <= spec['height']:
                # Source is smaller than the target in some dimension: crop to
                # the target aspect ratio without upscaling.
                target_ratio = spec['width'] / spec['height']
                source_ratio = w / h
                if source_ratio >= target_ratio:
                    w = h * target_ratio
                else:
                    h = w / target_ratio
                w, h = int(w), int(h)
                im = ImageOps.fit(im, (w, h), Image.ANTIALIAS)
            else:
                im = ImageOps.fit(im, (spec['width'], spec['height']), Image.ANTIALIAS)
        else:
            # No cropping requested: shrink in place, preserving aspect ratio.
            im.thumbnail((spec['width'], spec['height']), Image.ANTIALIAS)
        output = BytesIO()
        im.save(output, format='JPEG', quality=spec.get('quality', 75),
                optimize=True, progressive=False)
    return output
def put(self, file_obj, **kwargs):
    """
    Insert a image in database
    applying field properties (size, thumbnail_size)
    """
    field = self.instance._fields[self.key]
    try:
        img = Image.open(file_obj)
        img_format = img.format
    except Exception:
        # Narrowed from a bare `except:` — anything Pillow raises while
        # parsing means the upload is not a usable image, but control-flow
        # exceptions (KeyboardInterrupt, SystemExit) must propagate.
        raise ValidationError('Invalid image')
    # Downscale (or force-crop) the main image if it exceeds the configured size.
    if (field.size and (img.size[0] > field.size['width'] or
                        img.size[1] > field.size['height'])):
        size = field.size
        if size['force']:
            img = ImageOps.fit(img, (size['width'], size['height']), Image.ANTIALIAS)
        else:
            img.thumbnail((size['width'], size['height']), Image.ANTIALIAS)
    # Optionally build a thumbnail rendition and store it first.
    thumbnail = None
    if field.thumbnail_size:
        size = field.thumbnail_size
        if size['force']:
            thumbnail = ImageOps.fit(img, (size['width'], size['height']), Image.ANTIALIAS)
        else:
            thumbnail = img.copy()
            thumbnail.thumbnail((size['width'], size['height']), Image.ANTIALIAS)
    if thumbnail:
        thumb_id = self._put_thumbnail(thumbnail, img_format)
    else:
        thumb_id = None
    w, h = img.size
    # Re-encode in the original format and hand off to the parent proxy.
    io = StringIO()
    img.save(io, img_format)
    io.seek(0)
    return super(ImageGridFsProxy, self).put(io,
                                             width=w,
                                             height=h,
                                             format=img_format,
                                             thumbnail_id=thumb_id,
                                             **kwargs)
def generate_achieve_on_image(image, achievements, name, pos):
    """Paste the named achievement icon onto `image` at `pos`; gray it out when not earned."""
    ach = Achievement.objects.get(name=name)
    icon = Image.open(ach.imageUrl)
    icon.thumbnail((100, 100))
    earned = achievements.filter(achievement=ach).exists()
    if not earned:
        # Darkened grayscale placeholder for achievements the user lacks.
        icon = ImageOps.colorize(ImageOps.grayscale(icon), (0, 0, 0), (50, 50, 50))
    image.paste(icon, pos)
def generate_tile_image(img, tile):
    """Extract a card's tile bar from `img` using the material's texture transform."""
    # Tile the image horizontally (x2 is enough) — some cards need to wrap
    # around to create a bar (e.g. Muster for Battle). Building an RGB canvas
    # also discards the alpha channel (e.g. Soulfire, Mortal Coil).
    doubled = Image.new("RGB", (img.width * 2, img.height))
    doubled.paste(img, (0, 0))
    doubled.paste(img, (img.width, 0))

    main_tex = tile["m_TexEnvs"]["_MainTex"]
    floats = tile["m_Floats"]
    x, y, width, height = get_rect(
        main_tex["m_Offset"]["x"],
        main_tex["m_Offset"]["y"],
        main_tex["m_Scale"]["x"],
        main_tex["m_Scale"]["y"],
        floats.get("_OffsetX", 0.0),
        floats.get("_OffsetY", 0.0),
        floats.get("_Scale", 1.0),
        img.width
    )

    bar = ImageOps.flip(doubled.crop((x, y, x + width, y + height)))
    # Negative x scale means horizontal flip.
    if main_tex["m_Scale"]["x"] < 0:
        bar = ImageOps.mirror(bar)
    return bar.resize((OUT_WIDTH, OUT_HEIGHT), Image.LANCZOS)
def cheese(z):
    # Capture a flash/no-flash image pair from the pygame camera, difference
    # them, and keep retrying until the changed-pixel count lands inside a
    # 65%-95% window of the frame area (RESW*RESH pixels).
    # NOTE: Python 2 code (print statement, integer division in the bounds).
    i = 0
    while (i < (RESW*RESH*65/100) or i > (RESW*RESH*95/100) ):
        im1 = cam.get_image()
        time.sleep(0.055)
        p.ChangeDutyCycle(12)  # presumably pulses a light via PWM -- TODO confirm
        time.sleep(0.055)
        im2 = cam.get_image()
        time.sleep(0.055)
        p.ChangeDutyCycle(0)
        time.sleep(0.055)
        pygame.image.save(im1, "b%08d.jpg" % z)
        pygame.image.save(im2, "a%08d.jpg" % z)
        # Reload the frames through PIL; note the names are deliberately
        # swapped here ("b..." becomes im2, "a..." becomes im1).
        im2 = Image.open("b%08d.jpg" % z).rotate(ROT)
        im1 = Image.open("a%08d.jpg" % z).rotate(ROT)
        # Black out the top CROPH rows so that region never contributes to
        # the difference; draw a center marker line on im1.
        draw = ImageDraw.Draw(im2)
        draw.rectangle([0,0,RESW,CROPH], fill=0)
        draw = ImageDraw.Draw(im1)
        draw.rectangle([0,0,RESW,CROPH], fill=0)
        draw.line((int(RESW/2), 0,int(RESW/2),CROPH),fill=255)
        diff = ImageChops.difference(im2, im1)
        diff = ImageOps.grayscale(diff)
        diff = ImageOps.posterize(diff, 6)
        v = diff.getcolors()
        # getcolors() returns (count, color) pairs; v[0][0] is the count of
        # the first entry -- presumably used as the "unchanged" pixel count.
        i= v[0][0]
        print i
        # Persist the pair plus the difference frame for this capture index.
        im1.save("b%08d.jpg" % z, quality= 90)
        im1 = Image.new("RGB", (RESW,RESH))
        im1.paste(diff)
        im1.save("%08d.jpg" % z, quality= 90)
        im2.save("a%08d.jpg" % z, quality= 90)
def optimizeImage(self, gamma):
    """Auto-contrast self.image, applying gamma correction first unless gamma == 1.0."""
    if gamma < 0.1:
        # Treat tiny/unset values as "use the instance default".
        gamma = self.gamma
    if gamma == 1.0:
        corrected = self.image
    else:
        # Per-pixel power-law correction on the 0-255 range.
        corrected = Image.eval(self.image, lambda a: 255 * (a / 255.) ** gamma)
    self.image = ImageOps.autocontrast(corrected)
def apply_polaroid(pixbuf,imageText):
    # Compose a GdkPixbuf photo into a 300x320 polaroid frame with a caption.
    # NOTE(review): the bare `/` divisions below rely on Python 2 integer
    # division; under Python 3 the paste offsets would become floats and fail.
    width,height = pixbuf.get_width(),pixbuf.get_height()
    frameSize = (300,320)
    imageOutputSize = (270,245)
    imgModified = Image.open('images/frame.jpg')
    #cropped image to the requested framesize
    imgModified = ImageOps.fit(imgModified, frameSize, Image.ANTIALIAS, 0, (0.5,0.5))
    # Wrap the raw pixbuf bytes in a PIL image.
    y = Image.frombytes(K.ImageConstants.RGB_SHORT_NAME,(width,height),pixbuf.get_pixels())
    #cropped image to the requested size
    y = ImageOps.fit(y, imageOutputSize, Image.ANTIALIAS, 0, (0.5,0.5))
    y = ImageOps.autocontrast(y, cutoff=2)
    y = ImageEnhance.Sharpness(y).enhance(2.0)
    # Paste the photo into the frame's window.
    boxOnImage = (12,18)
    imgModified.paste(y, boxOnImage)
    #text on image: center the caption horizontally at y=278
    textWidget = ImageDraw.Draw(imgModified).textsize(imageText)
    fontxy = (frameSize[0]/2 - textWidget[0]/2, 278)
    ImageDraw.Draw(imgModified).text(fontxy, imageText,fill=(40,40,40))
    # Center the framed result on a fresh canvas and convert back to a pixbuf.
    imgOutput = Image.new(imgModified.mode, (300,320))
    imgOutput.paste(imgModified, (imgOutput.size[0]/2-imgModified.size[0]/2, imgOutput.size[1]/2-imgModified.size[1]/2))
    return I.fromImageToPixbuf(imgOutput)
def resizeImage(self):
    """Resize self.image to the device size per output options (stretch / letterbox / crop-fit)."""
    # Bicubic when the image must grow, Lanczos when it must shrink.
    if self.image.size[0] <= self.size[0] and self.image.size[1] <= self.size[1]:
        method = Image.BICUBIC
    else:
        method = Image.LANCZOS
    if self.opt.stretch:
        # Ignore aspect ratio entirely and fill the target.
        self.image = self.image.resize(self.size, method)
    elif self.image.size[0] <= self.size[0] and self.image.size[1] <= self.size[1] and not self.opt.upscale:
        # Image already fits and upscaling is disabled.
        if self.opt.format == 'CBZ':
            # Letterbox with a filled border to reach the exact target size.
            borderw = int((self.size[0] - self.image.size[0]) / 2)
            borderh = int((self.size[1] - self.image.size[1]) / 2)
            self.image = ImageOps.expand(self.image, border=(borderw, borderh), fill=self.fill)
            # Border rounding can leave the canvas a pixel short; crop-fit to fix.
            if self.image.size[0] != self.size[0] or self.image.size[1] != self.size[1]:
                self.image = ImageOps.fit(self.image, self.size, method=Image.BICUBIC, centering=(0.5, 0.5))
    else:
        if self.opt.format == 'CBZ':
            # Pad to the target aspect ratio first, then crop-fit to the size.
            ratioDev = float(self.size[0]) / float(self.size[1])
            if (float(self.image.size[0]) / float(self.image.size[1])) < ratioDev:
                # Too narrow: pad left/right.
                diff = int(self.image.size[1] * ratioDev) - self.image.size[0]
                self.image = ImageOps.expand(self.image, border=(int(diff / 2), 0), fill=self.fill)
            elif (float(self.image.size[0]) / float(self.image.size[1])) > ratioDev:
                # Too wide: pad top/bottom.
                diff = int(self.image.size[0] / ratioDev) - self.image.size[1]
                self.image = ImageOps.expand(self.image, border=(0, int(diff / 2)), fill=self.fill)
            self.image = ImageOps.fit(self.image, self.size, method=method, centering=(0.5, 0.5))
        else:
            # Scale to the target height, then thumbnail down if width overflows.
            hpercent = self.size[1] / float(self.image.size[1])
            wsize = int((float(self.image.size[0]) * float(hpercent)))
            self.image = self.image.resize((wsize, self.size[1]), method)
            if self.image.size[0] > self.size[0] or self.image.size[1] > self.size[1]:
                self.image.thumbnail(self.size, Image.LANCZOS)
def display_start_frame(self):
    """Load, normalize and display the stereo pair for the user-chosen start frame."""
    frame_num = self.starting_frame.get()
    l, r = self.get_frame_location(self.left_cam)
    # Splice the frame number into both camera filename templates.
    self.left_cam = self.left_cam[:l] + str(frame_num) + self.left_cam[r:]
    self.right_cam = self.right_cam[:l] + str(frame_num) + self.right_cam[r:]
    try:
        # Load and contrast-normalize both frames.
        self.left_frame = Image.open(self.left_cam)
        self.right_frame = Image.open(self.right_cam)
        self.left_frame = ImageOps.autocontrast(self.left_frame)
        self.right_frame = ImageOps.autocontrast(self.right_frame)
        left_gif = ImageTk.PhotoImage(self.left_frame)
        right_gif = ImageTk.PhotoImage(self.right_frame)
        # Push the new frames to the widgets, keeping references so Tkinter
        # does not garbage-collect the PhotoImages.
        for widget, gif in ((self.left_image, left_gif), (self.right_image, right_gif)):
            widget.configure(image = gif)
            widget.image = gif
        caption = "Current frame = " + self.left_cam[l:r]
        self.frame_number.configure(text = caption)
        self.frame_number.text = caption
        self.master.update()
    except:
        showerror("Invalid start frame", "Please pick a valid start frame to display")
def changeImage(slice_num):
    # Update the slice preview window. Mode 1 shows the slice as-is; mode 2
    # additionally blends in the previous slice (current in red, previous in
    # white) so overhangs are visible. NOTE: Python 2 code (print statement).
    global PreViewImage, PreviewName, stlfilename
    global image_tk
    PreviewName.set("Preview Images - "+stlfilename[:-4]+str(slice_num)+".png")
    OperationValue = OperationVar.get()
    # Blank fallback used when the previous slice image is missing.
    imageBlank = Image.new("RGB", (768,480),0)
    image_im_m1 = imageBlank
    if (OperationValue == 1):
        imageFile = FileputPath+stlfilename[:-4]+str(int(slice_num)) +".png"
        try:
            image_im = Image.open(imageFile)
        except:
            print imageFile+" error"
            showinfo("Error:", imageFile+" Open Error!")
            #checkslice_ui.destroy()
            return
    if (OperationValue == 2):
        imageFile = FileputPath+stlfilename[:-4]+str(int(slice_num)) +".png"
        try:
            image_im = Image.open(imageFile)
        except:
            print imageFile+" error"
            showinfo("Error:", imageFile+" Open Error!")
            #checkslice_ui.destroy()
            return
        imageFilem1 = FileputPath+stlfilename[:-4]+str(int(slice_num)-1)+".png"
        try:
            image_im_m1 = Image.open(imageFilem1)
        except:
            image_im_m1 = imageBlank
    # Colorize current slice red and previous slice white, then blend.
    # NOTE(review): reconstructed indentation places this at function level
    # (the imageBlank default above only matters if mode 1 reaches the blend)
    # -- confirm against the original layout.
    image_im = image_im.convert("L")
    image_im = ImageOps.colorize(image_im, (0,0,0), (255,0,0))
    image_im = image_im.convert("RGB")
    image_im_m1 = image_im_m1.convert("L")
    image_im_m1 = ImageOps.colorize(image_im_m1, (0,0,0), (255,255,255))
    image_im_m1 = image_im_m1.convert("RGB")
    try:
        image_im = Image.blend(image_im, image_im_m1, 0.3)
    except:
        null()
    image_im_enhance = ImageEnhance.Brightness(image_im)
    image_im = image_im_enhance.enhance(2.0)
    # Hand the result to the Tk preview label (image_tk kept global so it is
    # not garbage-collected).
    image_tk = ImageTk.PhotoImage(image_im)
    PreViewImage.configure(image = image_tk)
    return
def get_frame_number(self, working_frame, testing_frame):
    '''
    Determines if a user entered frame value is valid or if we
    should revert to a previously working frame.
    '''
    try:
        # try new starting frame that user chose
        l, r = self.get_frame_location(self.left_cam)
        left_cam = self.left_cam[:l] + str(testing_frame) + self.left_cam[r:]
        right_cam = self.right_cam[:l] + str(testing_frame) + self.right_cam[r:]
        # try to open image
        self.left_frame = Image.open(left_cam)
        self.right_frame = Image.open(right_cam)
        # The four results below are never used; the calls act purely as
        # probes — any failure (bad file, Tk not ready) jumps to the except
        # path, which is exactly what this validity check relies on.
        left_frame = ImageOps.autocontrast(self.left_frame)
        right_frame = ImageOps.autocontrast(self.right_frame)
        left_frame_gif = ImageTk.PhotoImage(self.left_frame)
        right_frame_gif = ImageTk.PhotoImage(self.right_frame)
        return testing_frame
    except:
        # picture doesn't exist, start at previously working start frame
        self.start_frame = working_frame
        # get frame and filenames
        l, r = self.get_frame_location(self.left_cam)
        frame = self.start_frame
        self.left_cam = self.left_cam[:l] + str(working_frame) + self.left_cam[r:]
        self.right_cam = self.right_cam[:l] + str(working_frame) + self.right_cam[r:]
        return working_frame
def display_frame(self):
    '''Displays the frame that the user selected via the slider'''
    frame_num = self.slider.get()
    l, r = self.get_frame_location(self.left_cam)
    # Splice the frame number into both camera filename templates.
    self.left_cam = self.left_cam[:l] + str(frame_num) + self.left_cam[r:]
    self.right_cam = self.right_cam[:l] + str(frame_num) + self.right_cam[r:]
    # Load and contrast-normalize both frames.
    self.left_frame = Image.open(self.left_cam)
    self.right_frame = Image.open(self.right_cam)
    self.left_frame = ImageOps.autocontrast(self.left_frame)
    self.right_frame = ImageOps.autocontrast(self.right_frame)
    left_gif = ImageTk.PhotoImage(self.left_frame)
    right_gif = ImageTk.PhotoImage(self.right_frame)
    # Push the frames to the widgets, keeping references so Tkinter does not
    # garbage-collect the PhotoImages.
    for widget, gif in ((self.left_image, left_gif), (self.right_image, right_gif)):
        widget.configure(image = gif)
        widget.image = gif
    caption = "Current frame = " + self.left_cam[l:r]
    self.frame_number.configure(text = caption)
    self.frame_number.text = caption
    self.master.update()
def create_test(imgage_dir, img, imgs, resize_image):
    # Build a sliding-puzzle captcha: cut a puzzle piece out of `img` and
    # fill the hole with pixels from a different random image.
    # NOTE: Python 2 code — the bare `/` divisions below are integer division.
    global img_size
    global puzzle_size
    # Pick a different image to source the hole filling from.
    another_image = ''
    while another_image == '' or another_image == img:
        another_image = imgs[random.randint(0, len(imgs) - 1)]
    subimage_position = (random.randint(0, img_size[0] - puzzle_size[0]),
        random.randint(0, img_size[1] - puzzle_size[1]))
    subimage = Image.open(os.path.join(imgage_dir, another_image))
    if resize_image:
        subimage = ImageOps.fit(subimage, (img_size[0], img_size[1]), method = Image.ANTIALIAS, centering = (0.5,0.5))
    # Random puzzle-sized patch from the other image.
    subimage_puzzle_piece_filling = subimage.crop((subimage_position[0], subimage_position[1], subimage_position[0] + puzzle_size[0], subimage_position[1] + puzzle_size[1]))
    challenge_background = Image.open(os.path.join(imgage_dir, img))
    # crop to img_size centered
    (width, height) = challenge_background.size
    # NOTE(review): Py2 integer division; under Py3 crop() would get floats.
    x_start, y_start = ((width - img_size[0])/2, (height - img_size[1])/2)
    if resize_image:
        # resize full image to size, keeping aspect ratio
        centered_challenge_background = ImageOps.fit(challenge_background, (img_size[0], img_size[1]), method = Image.ANTIALIAS, centering = (0.5,0.5))
    else:
        # or just crop a portion from the center
        centered_challenge_background = challenge_background.crop((x_start, y_start, x_start + img_size[0], y_start + img_size[1]))
    # Snap the piece position to a multiple of 10 via integer divide/multiply.
    puzzle_piece_position = (random.randint(0, img_size[0] - puzzle_size[0]) / 10,
        random.randint(0, img_size[1] - puzzle_size[1]) / 10)
    puzzle_piece_position = (puzzle_piece_position[0] * 10, puzzle_piece_position[1] * 10)
    puzzle_piece = centered_challenge_background.crop((puzzle_piece_position[0], puzzle_piece_position[1], puzzle_piece_position[0] + puzzle_size[0], puzzle_piece_position[1] + puzzle_size[1]))
    # Fill the hole in the background with the foreign patch.
    centered_challenge_background = mergePNG(centered_challenge_background, subimage_puzzle_piece_filling, puzzle_piece_position)
    return centered_challenge_background, puzzle_piece, puzzle_piece_position
def save(self):
    """Save the model, then derive 'medium' and 'thumbnail' JPEG renditions of self.image."""
    sizes = {'thumbnail': {'height': 340, 'width': 300},
             'medium': {'height': 370, 'width': 635}}
    super(Post_related_images, self).save()
    photopath = str(self.image.path)  # this returns the full system path
    # to the original file
    im = Image.open(photopath)  # open the image using PIL
    # pull a few variables out of that full path
    extension = photopath.rsplit('.', 1)[1]  # the file extension
    filename = photopath.rsplit('/', 1)[-1].rsplit('.', 1)[:-1][0]  # the
    # file name only (minus path or extension)
    fullpath = photopath.rsplit('/', 1)[:-1][0]  # the path only (minus
    # the filename.extension)
    # use the file extension to determine if the image is valid before
    # proceeding. Compare case-insensitively so uploads named e.g.
    # 'photo.JPG' are no longer wrongly rejected.
    if extension.lower() not in ['jpg', 'jpeg', 'gif', 'png']:
        # NOTE(review): killing the whole process here is drastic; consider
        # raising a ValidationError instead.
        sys.exit()
    # create medium image
    ins = ImageOps.fit(im, (sizes['medium']['width'], sizes['medium']['height']), Image.ANTIALIAS)
    medname = str(filename) + "_" + str(sizes['medium']['width']) + "x" + str(sizes['medium']['height']) + ".jpg"
    ins.save(str(fullpath) + '/' + medname)
    self.largeimage = self.image.url.rsplit('/', 1)[:-1][0] + '/' + medname
    # create thumbnail
    ins = ImageOps.fit(im, (sizes['thumbnail']['width'], sizes['thumbnail']['height']), Image.ANTIALIAS)
    thumbname = filename + "_" + str(sizes['thumbnail']['width']) + "x" + str(sizes['thumbnail']['height']) + ".jpg"
    ins.save(fullpath + '/' + thumbname)
    self.smallimage = self.image.url.rsplit('/', 1)[:-1][0] + '/' + thumbname
    # Persist the rendition URLs set above.
    super(Post_related_images, self).save()
def draw_illustration(card, illustration_name, y, vertical_space, width_minus_border, outline_width, outline_color):
    """Crop the named illustration to fit, outline it, and paste it onto the card at height y."""
    image_spacing = 20
    art = Image.open("illustrations/" + illustration_name + ".png")
    # Target box leaves room for the outline on all sides plus horizontal spacing.
    target = (width_minus_border - (outline_width * 2) - image_spacing * 2,
              vertical_space - (outline_width * 2))
    fitted = ImageOps.fit(art, target, Image.ANTIALIAS, 0.01, (0.5, 0.5))
    framed = ImageOps.expand(fitted, border=outline_width, fill=outline_color)
    card.paste(framed, (image_spacing, y))
def put_contour(image, size=1, offset=0, contour_color=0, fill_color=0,
        opacity=100, include_image=True):
    """Draw a contour of `size` px around the alpha silhouette of `image`,
    `offset` px away from it, optionally compositing the image back on top.

    Falls back to put_border() for images without transparency.
    """
    if not has_transparency(image):
        return put_border(
            image, size, offset, contour_color, fill_color,
            opacity, include_image)
    image = image.convert('RGBA')
    mask = imtools.get_alpha(image)
    w, h = image.size
    # Outer silhouette: alpha mask grown by (size + offset) on every side.
    outer_mask = mask.resize(
        (w + 2 * (size + offset), h + 2 * (size + offset)),
        Image.ANTIALIAS)
    # Inner silhouette: grown by offset only, then padded so it aligns with
    # the outer mask; the ring between them is the contour.
    inner_mask = mask.resize(
        (w + 2 * offset, h + 2 * offset),
        Image.ANTIALIAS)
    inner_mask = ImageOps.expand(inner_mask, border=size, fill=0)
    # NOTE(review): `(255 * opacity) / 100` relies on Python 2 integer
    # division; `paste` here is a module-level helper, not Image.paste —
    # its exact semantics are defined elsewhere. Confirm before porting.
    paste(outer_mask, (255 * opacity) / 100, mask=inner_mask)
    if include_image:
        image = ImageOps.expand(image, border=size + offset, fill=(0, 0, 0, 0))
        mask = ImageOps.expand(mask, border=size + offset, fill=0)
        paste(outer_mask, 255, mask=mask)
    contour = ImageOps.colorize(outer_mask, (255, 255, 255), contour_color)
    paste(contour, fill_color, mask=inner_mask)
    if include_image:
        paste(contour, image, mask=image)
    contour.putalpha(outer_mask)
    return contour
def classify_DCT(image1,image2,size=(32,32),part_size=(8,8)):
    """
    'image1' and 'image2' is a Image Object.
    You can build it by 'Image.open(path)'.
    'Size' is parameter what the image will resize to it and then image will be compared by the pHash.
    It's 32 * 32 when it default.
    'part_size' is a size of a part of the matrix after Discrete Cosine Transform,which need to next steps.
    It's 8 * 8 when it default.
    The function will return the hamming code,less is correct.
    """
    assert size[0]==size[1],"size error"
    assert part_size[0]==part_size[1],"part_size error"

    def _dct_code(image):
        # Shared pHash pipeline (was duplicated verbatim for both images):
        # normalize (downscale, grayscale, blur, equalize), DCT, then
        # threshold the low-frequency block against its median.
        image = image.resize(size).convert('L').filter(ImageFilter.BLUR)
        image = ImageOps.equalize(image)
        matrix = get_matrix(image)
        DCT_matrix = DCT(matrix)
        List = sub_matrix_to_list(DCT_matrix, part_size)
        middle = get_middle(List)
        return get_code(List, middle)

    # Hamming distance between the two perceptual hashes.
    return comp_code(_dct_code(image1), _dct_code(image2))
def checkImage( self, expected, actual, tol, msg ):
    '''Compare two image files.

    = INPUT VARIABLES
    - expected  The filename of the expected image.
    - actual    The filename of the actual image.
    - tol       The tolerance (a unitless float).  This is used to
                determine the 'fuzziness' to use when comparing images.
    '''
    from PIL import Image, ImageOps, ImageFilter

    # Open both files and drop any alpha channel.
    expectedImage = Image.open( expected ).convert("RGB")
    actualImage = Image.open( actual ).convert("RGB")

    # Normalize contrast before comparing.
    expectedImage = ImageOps.autocontrast( expectedImage, 2 )
    actualImage = ImageOps.autocontrast( actualImage, 2 )

    # RMS difference between the two histograms.
    h1 = expectedImage.histogram()
    h2 = actualImage.histogram()
    rms = math.sqrt( sum( ( a - b )**2 for a, b in zip( h1, h2 ) ) / len( h1 ) )
    diff = rms / 10000.0

    msg += "\nError: Image files did not match.\n" \
           " RMS Value: %22.15e\n" \
           " Expected: %s\n" \
           " Actual : %s\n" \
           " Tolerance: %22.15e\n" % ( diff, expected, actual, tol )
    self.assertLessEqual( diff, tol, msg )
def cheese(z):
    # Capture a flash/no-flash snapshot pair from an mjpg-streamer endpoint,
    # difference them, and keep retrying until the pixel count falls inside a
    # 65%-95% window of the frame area (RESW*RESH pixels).
    # NOTE: Python 2 code (urllib.urlretrieve, integer division in bounds).
    i = 0
    while (i < (RESW*RESH*65/100) or i > (RESW*RESH*95/100) ):
        urllib.urlretrieve("http://127.0.0.1:8081/?action=snapshot", "b%08d.jpg" % z)
        time.sleep(0.055)
        p.ChangeDutyCycle(12)  # presumably pulses a light via PWM -- TODO confirm
        time.sleep(0.055)
        urllib.urlretrieve("http://127.0.0.1:8081/?action=snapshot", "a%08d.jpg" % z)
        time.sleep(0.055)
        p.ChangeDutyCycle(0)
        time.sleep(0.055)
        # Reload the frames through PIL; note the names are deliberately
        # swapped ("b..." becomes im2, "a..." becomes im1).
        im2 = Image.open("b%08d.jpg" % z).rotate(ROT)
        im1 = Image.open("a%08d.jpg" % z).rotate(ROT)
        # Black out the top CROPH rows so that region never contributes to
        # the difference; draw a center marker line on im1.
        draw = ImageDraw.Draw(im2)
        draw.rectangle([0,0,RESW,CROPH], fill=0)
        draw = ImageDraw.Draw(im1)
        draw.rectangle([0,0,RESW,CROPH], fill=0)
        draw.line((int(RESW/2), 0,int(RESW/2),CROPH),fill=128)
        diff = ImageChops.difference(im2, im1)
        diff = ImageOps.grayscale(diff)
        diff = ImageOps.posterize(diff, 6)
        v = diff.getcolors()
        # getcolors() returns (count, color) pairs; v[0][0] is the count of
        # the first entry -- presumably used as the "unchanged" pixel count.
        i= v[0][0]
        #print i
        # Persist the pair plus the difference frame for this capture index.
        im1.save("b%08d.jpg" % z, quality= 90)
        im1 = Image.new("RGB", (RESW,RESH))
        im1.paste(diff)
        im1.save("%08d.jpg" % z, quality= 90)
        im2.save("a%08d.jpg" % z, quality= 90)
def compare(ab, ac, image_b, image_c, problem, options):
    """Pick the stronger of the two similarity scores and search for a solution
    using the mirror of the corresponding image; below 0.5 report no answer."""
    best = max(ab, ac)
    if best < .5:
        # Neither direction is confident enough — no answer.
        return best, (best, -1)
    # Mirror whichever image matches the stronger relationship.
    source = image_c if ab >= ac else image_b
    return best, searchForSolution(problem, ImageOps.mirror(source), options)
def tats(image):
    """Overlay the 'tats' layer onto a solid background, recoloring it with the
    image's dominant palette (light/dark accents on a bright background)."""
    image = image.convert('RGB')
    colours = util.get_dominant_colours(image, 9)
    colours = util.order_colours_by_brightness(colours)
    # Background from the brightest third, plus one light and one dark accent.
    bg = random.choice(colours[:3])
    light = random.choice(colours[3:6])
    dark = random.choice(colours[6:])
    # Euclidean RGB distance between the accents. Rewritten without the
    # Python 2-only tuple-parameter lambda (`lambda (a, b): ...`) so the
    # function also runs on Python 3; the computed value is unchanged.
    dist = math.sqrt(sum((a - b) ** 2 for a, b in zip(light, dark)))
    if dist < 100:
        # Accents are too similar — push them apart in lightness/saturation.
        light = util.modify_hls(light, l=lambda l: l + 100)
        light = util.modify_hls(light, s=lambda s: s + 100)
        dark = util.modify_hls(dark, s=lambda s: s + 100)
    layer = Image.open(os.path.dirname(os.path.abspath(__file__)) + '/' + 'assets/tats.png')
    layer.load()
    r, g, b, a = layer.split()
    # Recolor the artwork: grayscale luminance mapped dark->light, then
    # restore the original alpha channel.
    layer = layer.convert('RGB')
    layer = ImageOps.grayscale(layer)
    layer = ImageOps.colorize(layer, tuple(dark), tuple(light))
    layer.putalpha(a)
    im = Image.new('RGB', layer.size, tuple(bg))
    im.paste(layer, mask=layer)
    return im
def convertImage(self, inputPath, outputPath):
    # Convert every .gif under inputPath into a bordered grayscale .pgm in
    # outputPath, plus an inverted copy prefixed with 'n'.
    r = inputPath
    path = outputPath
    for root, dirs, files in os.walk(r):
        for f in files:
            if f.endswith('.gif'):
                if not os.path.exists(path):
                    # Create the output folder on first use.
                    os.makedirs(path)
                # These two lines can cause problems, so it is important that
                # the images to convert live in subfolders of inputPath.
                newroot = root.split(r)[1]
                newroot = newroot.split('/')[1]  # subfolder name
                f2 = f.split('.')[0]
                orgImg = Image.open(root + "/" + f)
                orgImg = orgImg.convert('L')
                orgSize = orgImg.size
                # Only square images are allowed (both source and target).
                assert orgSize[0] == orgSize[1]
                assert self.newSize[0] == self.newSize[1]
                borderSize = int((self.newSize[0] - orgSize[0]) / 2)
                # Add a border up to newSize. NOTE(review): the original
                # comment said "white border", but fill=0 paints it black.
                newImg = ImageOps.expand(orgImg, borderSize, 0)
                # Save the non-inverted version.
                newImg.save(path + newroot + f2 + ".pgm")
                # Add the inverted version (prefixed with 'n').
                newImg = ImageOps.invert(newImg)
                newImg.save(path + newroot + "n" + f2 + ".pgm")
def convert(action,image_name):
    """Apply the named PIL transformation to an uploaded image, save the result,
    and render the index template pointing at the new file."""
    if not image_name:
        return (redirect('/'))
    # Open once instead of re-opening inside every branch; this also avoids
    # the original's NameError at save time when `action` was unrecognized
    # (now an unknown action simply saves the image unchanged).
    img = Image.open(UPLOAD_FOLDER + '/' + image_name)
    if action == "gray":
        img = img.convert('L')
    elif action == "invert":
        img = ImageChops.invert(img)
    elif action == "sharpen":
        img = img.filter(ImageFilter.UnsharpMask(radius=2, percent=150, threshold=3))
    elif action == "contrast":
        img = ImageOps.autocontrast(img, cutoff=5, ignore=None)
    elif action == "equalize":
        img = ImageOps.equalize(img, mask=None)
    elif action == "solarize":
        img = ImageOps.solarize(img, threshold=128)
    url = "/convert/"+action+"/"+image_name
    # Timestamp prefix keeps successive results from overwriting each other.
    filename = str(time.time()) + image_name
    img.save(SAVE_FOLDER + '/' + filename)
    image_path = 'results/' + filename
    return (render_template('core/index.html', path=image_path, name=image_name, url=url))
def filter_contrastToAlpha(image, baseDir):
    """Build an 'LA' image whose alpha is the inverted, contrast-stretched
    luminance of `image` composited over white (baseDir is unused here)."""
    luminance = Image.new('L', image.size, 255)
    luminance.paste(image, mask=get_alpha(image))
    alpha = ImageOps.autocontrast(ImageOps.invert(luminance))
    # Luminance band is all-black; only the alpha channel carries information.
    return Image.merge('LA', [Image.new('L', image.size), alpha])
def pil_to_ascii(img, scalefactor=0.2, invert=False, equalize=True, lut='simple',
                 aspect_correction_factor=None
                 ):
    """
    Generates an ascii string from a PIL image.

    Parameters
    ----------
    img : PIL.Image
        PIL image to transform.
    scalefactor : float
        ASCII characters per pixel.
    invert : bool
        Invert luminance?
    equalize : bool
        Equalize histogram (for best results do this).
    lut : str
        Name of the lookup table to use. Currently supports 'simple' and
        'binary'.

    Returns
    -------
    str

    Examples
    --------

    >>> from asciisciit.misc import open_pil_img
    >>> img = open_pil_img("http://i.imgur.com/l2FU2J0.jpg")
    >>> text_img = pil_to_ascii(img, scalefactor=0.3)
    >>> print(text_img)

    >>> from PIL import Image
    >>> img = Image.open("some_image.png")
    >>> text_img = pil_to_ascii(img)
    >>> print(text_img)

    """
    lookup = get_lut(lut)
    if aspect_correction_factor is None:
        # Characters are taller than they are wide; compensate vertically.
        aspect_correction_factor = get_aspect_correction_factor(lookup.exemplar)

    target_size = (int(img.size[0] * scalefactor),
                   int(img.size[1] * scalefactor * aspect_correction_factor))
    img = img.resize(target_size, Image.BILINEAR).convert("L")  # mono

    if equalize:
        img = ImageOps.equalize(img)
    if invert:
        img = ImageOps.invert(img)

    pixels = np.array(img, dtype=np.uint8)
    return u"\n" + u"".join(lookup.apply(pixels).flatten().tolist())
def run(self):
    """Poll the camera snapshot URL roughly once per second and save frames
    when enough inter-frame difference (motion) is detected."""
    while True:
        try:
            camera = WebCamera.objects.get(pk = self._camera.id)
            if camera.motion_control:
                now = datetime.now()
                request = get_pool().request("GET", "%s?action=snapshot" % camera.internal_url)
                try:
                    source = Image.open(BytesIO(request.data))
                    # Normalize lighting so the diff reflects real motion.
                    img = ImageOps.equalize(ImageOps.grayscale(source))
                    if self._previous is not None:
                        out = ImageMath.eval("convert(a - b, 'L')", a = img, b = self._previous)
                        out = out.filter(MedianFilter())
                        # Intensity-weighted histogram sum: brighter diff
                        # pixels count more toward the motion score.
                        total = 0
                        for idx, val in enumerate(out.histogram()):
                            total += val * idx
                        if total > 3000000:  # empirically chosen threshold
                            camera.last_motion = now
                            camera.save()
                            filename = os.path.join(camera.motion_folder, "{:%Y%m%d-%H%M%S}.jpg".format(now))
                            source.save(filename)
                            # Discard near-empty JPEGs (noise-only frames).
                            filesize = os.path.getsize(filename)
                            if filesize < 6700:
                                os.remove(filename)
                    self._previous = img
                finally:
                    request.close()
            else:
                self._previous = None
        except Exception as exc:
            # Keep the monitor thread alive, but record what actually went
            # wrong instead of swallowing all errors (including
            # KeyboardInterrupt) with a fixed message as before.
            print("Ignoring exception in motion loop: %r" % exc)
        sleep(1)
def __call__(self, sample):
    """Randomly scale, zero-pad, and crop an (image, label) pair to crop_size."""
    img, mask = sample['image'], sample['label']
    # Random short-edge length in [0.5, 2.0] x base_size.
    short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
    w, h = img.size
    if h > w:
        ow = short_size
        oh = int(1.0 * h * ow / w)
    else:
        oh = short_size
        ow = int(1.0 * w * oh / h)
    img = img.resize((ow, oh), Image.BILINEAR)
    mask = mask.resize((ow, oh), Image.NEAREST)
    # Pad on the right/bottom when the scaled image is smaller than the crop.
    if short_size < self.crop_size:
        pad_h = max(self.crop_size - oh, 0)
        pad_w = max(self.crop_size - ow, 0)
        img = ImageOps.expand(img, border=(0, 0, pad_w, pad_h), fill=0)
        mask = ImageOps.expand(mask, border=(0, 0, pad_w, pad_h), fill=0)
    # Random crop of exactly crop_size x crop_size.
    w, h = img.size
    x1 = random.randint(0, w - self.crop_size)
    y1 = random.randint(0, h - self.crop_size)
    box = (x1, y1, x1 + self.crop_size, y1 + self.crop_size)
    return {'image': img.crop(box), 'label': mask.crop(box)}
def find_lines(inpath, ulx, uly, lrx, lry, save_file=False, show_file=False):
    # Detect text-line boxes within the (ulx,uly)-(lrx,lry) region of the
    # image at inpath; returns (left, upper, right, lower) tuples.
    # These shouldn't really be global; it could be cleaned up.
    global xsize
    global ysize
    global pix
    # Load into PIL, inverted grayscale so ink is bright on dark.
    im = ImageOps.invert(ImageOps.grayscale(Image.open(inpath)))
    pix = im.load()
    xsize = lrx - ulx
    ysize = lry - uly
    line_height = 73  # expected line height in pixels
    fudge = 70        # search slack passed to line_in_range
    start_y = uly
    boxes = []
    # Walk down the page one line at a time (at most 100 lines); stop when a
    # candidate line contains no ink at all.
    for i in range(100):
        new_box = line_in_range(start_y, line_height, fudge)
        start_y = new_box[0] + line_height
        if get_box_val(new_box[0], new_box[0] + line_height) == 0:
            break
        boxes.append(new_box[0])
    # Keep only lines whose ink density is within [med/2, med*2] of the
    # median, discarding outliers (headers, smudges).
    box_vals = [get_box_val(y, y+line_height) for y in boxes]
    med = np.median(box_vals)
    filtered_boxes = filter(
        lambda y: get_box_val(y,y+line_height) > med/2.0 and get_box_val(y,y+line_height) < med*2,
        boxes)
    # left, upper, right, and lower
    final_boxes = [(ulx, y, lrx, y+line_height) for y in filtered_boxes]
    return final_boxes
def zealous_crop(page_groups):
    # Zealous crop all of the pages. Vertical margins can be cropped
    # however, but be sure to crop all pages the same horizontally.
    for idx in (0, 1):
        # min horizontal extremes
        minx = None
        maxx = None
        width = None
        for grp in page_groups:
            # Scan every page's content bounding box (via inverted grayscale)
            # to accumulate the horizontal extremes and the max page width.
            for pdf in grp[idx].values():
                bbox = ImageOps.invert(pdf.convert("L")).getbbox()
                if bbox is None:
                    continue  # empty
                minx = min(bbox[0], minx) if minx is not None else bbox[0]
                maxx = max(bbox[2], maxx) if maxx is not None else bbox[2]
                width = max(width, pdf.size[0]) if width is not None else pdf.size[0]
            if width != None:
                # add back some margins (2% of page width on each side)
                minx = max(0, minx-int(.02*width))
                maxx = min(width, maxx+int(.02*width))
            # do crop
            # NOTE(review): indentation reconstructed from a flattened source;
            # this places the margin adjustment and the crop loop inside the
            # per-group loop — confirm against the original layout.
            for pg in grp[idx]:
                im = grp[idx][pg]
                bbox = ImageOps.invert(im.convert("L")).getbbox()  # .invert() requires a grayscale image
                if bbox is None:
                    bbox = [0, 0, im.size[0], im.size[1]]  # empty page
                # Crop vertically to the content plus a 2%-of-height pad.
                vpad = int(.02*im.size[1])
                im = im.crop( (0, max(0, bbox[1]-vpad), im.size[0], min(im.size[1], bbox[3]+vpad) ) )
                # Horizontal crop is shared across pages; can be disabled
                # via the HORZCROP environment variable.
                if os.environ.get("HORZCROP", "1") != "0":
                    im = im.crop( (minx, 0, maxx, im.size[1]) )
                grp[idx][pg] = im
def posterize(img, bits_to_keep, **__):
    """Reduce each channel of `img` to `bits_to_keep` bits; no-op for 8 or more."""
    if bits_to_keep >= 8:
        return img
    # Clamp to at least one bit so the image is not zeroed out entirely.
    return ImageOps.posterize(img, max(1, bits_to_keep))
''' Calcul du gradient '''
# Convolution kernels for the discrete derivative (Sobel-like, normalized by 1/8).
# NOTE(review): `1/8` assumes Python 3 true division; under Python 2 these
# kernels would be all zeros -- confirm the interpreter version.
HX = np.array([[-1/8,0,1/8],[-2/8,0,2/8],[-1/8,0,1/8]])
HY = np.array([[-1/8,-2/8,-1/8],[0,0,0],[1/8,2/8,1/8]])
deriveX = convolve(mat,HX)  # convolution products give the gradient along X and Y
deriveY = convolve(mat,HY)
# Gradient stored as one complex matrix (*1j) so a single array carries both components.
Grad = deriveX + deriveY*1j
G = np.absolute(Grad)   # gradient magnitude
Theta = np.angle(Grad)  # gradient angle
'''Post traitement du gradient'''
# Turn the gradient matrix into an image for inspection.
img_G = Image.fromarray(G).convert('L')
mat_G = np.array(img_G)
# Stretch the contrast for a clearer picture: the raw gradient stays within a
# narrow brightness interval, so expand it to [0, 255].
img_G_ = ImageOps.autocontrast(img_G,cutoff=1)
mat_G_ = np.array(img_G_)
#plt.hist(mat_G.flatten(),bins = range(256),density=True,cumulative=True,histtype='step')
#plt.hist(mat_G_.flatten(),bins = range(256),density=True,cumulative=True,histtype='step')
# Threshold: zero every pixel below `seuil` to keep only strong edges.
G_seuil = np.copy(mat_G_)
s = G_seuil.shape
seuil = 50
for i in range(s[0]):
    for j in range(s[1]):
        if G_seuil[i][j] < seuil:
            G_seuil[i][j] = 0.0
Image_Gradient_Seuil = Image.fromarray(G_seuil)
'''img.show()
def _lazy_load(self):
    """Open the image at self._path and cache an EXIF-orientation-corrected copy."""
    raw = PILImage.open(str(self._path))
    # Apply any EXIF rotation/flip so pixels match the intended orientation.
    self._data = ImageOps.exif_transpose(raw)
def create_tfrecord(self, image_paths, labels, idx_start, idx_end, output_path):
    """Serialize images/labels with indices [idx_start, idx_end) to a TFRecord.

    Each image is center-cropped/resized to the configured TFRecord size,
    saved next to the output file for inspection, and written as raw bytes
    together with its integer label.
    """
    # Open a TFRecordWriter for the output-file.
    with tf.python_io.TFRecordWriter(output_path) as writer:
        for i in range(idx_start, idx_end):
            utils.print_progress(count=i, total=(idx_end - idx_start))
            image_path = image_paths[i]
            label = labels[i]

            # Load image, then center crop and resize.
            # size: The requested size in pixels, as a 2-tuple: (width, height)
            img = Image.open(image_path)
            img = ImageOps.fit(img,
                               (self.config.tfr_image_width, self.config.tfr_image_height),
                               Image.LANCZOS, 0, (0.5, 0.5))
            img = np.array(img)

            # Save a copy of the processed image next to the TFRecord for inspection.
            if output_path is not None:
                img_path_name = os.path.join(os.path.dirname(output_path),
                                             os.path.basename(image_path))
                utils_image.save_image(img, img_path_name)

            # Convert the image to raw bytes.
            # FIX: ndarray.tostring() was deprecated and removed in modern NumPy;
            # tobytes() is the byte-for-byte identical replacement.
            img_bytes = img.tobytes()
            data = {
                'image': self.wrap_bytes(img_bytes),
                'label': self.wrap_int64(label)
            }
            # Wrap the data as TensorFlow Features.
            feature = tf.train.Features(feature=data)
            # Wrap again as a TensorFlow Example.
            example = tf.train.Example(features=feature)
            # Serialize the data.
            serialized = example.SerializeToString()
            # Write the serialized data to the TFRecords file.
            writer.write(serialized)
# Driver: compute, render, display, and optionally save a Mandelbrot image,
# timing each phase.
start = time.time()
buffer = mandel(xmax=xmax, xmin=xmin, ymax=ymax, ymin=ymin, max_iter=mi, xres=w, yres=h)
stop = time.time()
print("Time taken [calculation]: " + str(stop - start) + " seconds")
# Map iteration counts to colors and render the RGB image.
rgb = gencolmap(mi, cmname)
image = genimage(buffer, rgb)
stop2 = time.time()
print("Time taken [render]: " + str(stop2 - stop) + " seconds")
plotimage(image)
stop3 = time.time()
print("Time taken [display]: " + str(stop3 - stop2) + " seconds")
win.mainloop()
outfile = getdefaultfilename("Filename")
if outfile != "":
    stop4 = time.time()
    print(">>> Writing file: " + outfile)
    # Buffer appears to be (x, y, rgb); transpose to PIL's (row, col, rgb)
    # order. The vertical flip presumably matches the on-screen orientation
    # of plotimage — TODO confirm against the renderer.
    imdat = Image.fromarray(
        np.transpose(image, axes=[1, 0, 2]).astype('uint8'), 'RGB')
    ImageOps.flip(imdat).save(outfile)
    stop5 = time.time()
    print("Time taken [write]: " + str(stop5 - stop4) + " seconds")
else:
    print(">>> Not writing file.")
def __call__(self, img):
    """Horizontally flip *img* with probability 0.5."""
    # np.random.randint(2) draws 0 or 1; flip only on 1.
    return ImageOps.mirror(img) if np.random.randint(2) else img
def __call__(self, img, liver_mask, kidney_mask):
    """Histogram-equalize the image; both masks pass through unchanged."""
    equalized = ImageOps.equalize(img)
    return equalized, liver_mask, kidney_mask
def equalize(img, **__):
    """Histogram-equalize *img*; extra keyword args are accepted and ignored."""
    result = ImageOps.equalize(img)
    return result
def auto_contrast(img, **__):
    """Maximize *img*'s contrast; extra keyword args are accepted and ignored."""
    result = ImageOps.autocontrast(img)
    return result
def solarize(img, thresh, **__):
    """Invert pixel values above *thresh*; extra keyword args are ignored."""
    result = ImageOps.solarize(img, thresh)
    return result
def resize(self, full_path, size):
    """Load the image at *full_path* and fit it to *size* (width, height)."""
    img = Image.open(full_path)
    if img.size == size:
        # Already the target size: NEAREST is a cheap identity resample.
        method = Image.NEAREST
    else:
        # NOTE: ANTIALIAS is deprecated in Pillow >= 9 (removed in 10);
        # its successor is Image.LANCZOS.
        method = Image.ANTIALIAS
    return ImageOps.fit(img, size, method=method)
def invert(img, **__):
    """Return the negative of *img*; extra keyword args are ignored."""
    result = ImageOps.invert(img)
    return result
def __call__(self, img: Image):
    """Center-crop *img* to its largest inscribed square."""
    side = min(img.size[0], img.size[1])
    return ImageOps.fit(img, (side, side))
def __init__(self, p1, operation1, magnitude_idx1, p2, operation2, magnitude_idx2, fillcolor=(128, 128, 128)):
    """AutoAugment-style sub-policy: two operations applied with probabilities.

    Args:
        p1, p2: probabilities of applying operation1 / operation2.
        operation1, operation2: op names (keys of ``ranges``/``func``).
        magnitude_idx1, magnitude_idx2: index 0-9 into the op's magnitude range.
        fillcolor: RGB fill used by the affine transforms.
    """
    ranges = {
        "shearX": np.linspace(0, 0.3, 10),
        "shearY": np.linspace(0, 0.3, 10),
        "translateX": np.linspace(0, 150 / 331, 10),
        "translateY": np.linspace(0, 150 / 331, 10),
        "rotate": np.linspace(0, 30, 10),
        "color": np.linspace(0.0, 0.9, 10),
        # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the documented replacement.
        "posterize": np.round(np.linspace(8, 4, 10), 0).astype(int),
        "solarize": np.linspace(256, 0, 10),
        "contrast": np.linspace(0.0, 0.9, 10),
        "sharpness": np.linspace(0.0, 0.9, 10),
        "brightness": np.linspace(0.0, 0.9, 10),
        "autocontrast": [0] * 10,
        "equalize": [0] * 10,
        "invert": [0] * 10
    }

    # from https://stackoverflow.com/questions/5252170/specify-image-filling-color-when-rotating-in-python-with-pil-and-setting-expand
    def rotate_with_fill(img, magnitude):
        # Rotate in RGBA and composite over a gray background so the
        # uncovered corners are filled instead of black.
        rot = img.convert("RGBA").rotate(magnitude)
        return Image.composite(rot, Image.new("RGBA", rot.size, (128, ) * 4), rot).convert(img.mode)

    # Each op randomly picks a sign for its magnitude where applicable.
    func = {
        "shearX": lambda img, magnitude: img.transform(
            img.size, Image.AFFINE,
            (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
            Image.BICUBIC, fillcolor=fillcolor),
        "shearY": lambda img, magnitude: img.transform(
            img.size, Image.AFFINE,
            (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),
            Image.BICUBIC, fillcolor=fillcolor),
        "translateX": lambda img, magnitude: img.transform(
            img.size, Image.AFFINE,
            (1, 0, magnitude * img.size[0] * random.choice([-1, 1]), 0, 1, 0),
            fillcolor=fillcolor),
        "translateY": lambda img, magnitude: img.transform(
            img.size, Image.AFFINE,
            (1, 0, 0, 0, 1, magnitude * img.size[1] * random.choice([-1, 1])),
            fillcolor=fillcolor),
        "rotate": lambda img, magnitude: rotate_with_fill(img, magnitude),
        "color": lambda img, magnitude: ImageEnhance.Color(img).enhance(
            1 + magnitude * random.choice([-1, 1])),
        "posterize": lambda img, magnitude: ImageOps.posterize(img, magnitude),
        "solarize": lambda img, magnitude: ImageOps.solarize(img, magnitude),
        "contrast": lambda img, magnitude: ImageEnhance.Contrast(img).enhance(
            1 + magnitude * random.choice([-1, 1])),
        "sharpness": lambda img, magnitude: ImageEnhance.Sharpness(img).enhance(
            1 + magnitude * random.choice([-1, 1])),
        "brightness": lambda img, magnitude: ImageEnhance.Brightness(img).enhance(
            1 + magnitude * random.choice([-1, 1])),
        "autocontrast": lambda img, magnitude: ImageOps.autocontrast(img),
        "equalize": lambda img, magnitude: ImageOps.equalize(img),
        "invert": lambda img, magnitude: ImageOps.invert(img)
    }
    self.p1 = p1
    self.operation1 = func[operation1]
    self.magnitude1 = ranges[operation1][magnitude_idx1]
    self.p2 = p2
    self.operation2 = func[operation2]
    self.magnitude2 = ranges[operation2][magnitude_idx2]
def test_usm_accuracy(self):
    """Unsharp mask must not alter an image holding only 0 and 255 levels."""
    src = snakes.convert('RGB')
    result = src._new(ImageOps.unsharp_mask(src, 5, 1024, 0))
    # With only extreme levels present, sharpening has nothing to amplify.
    self.assertEqual(result.tobytes(), src.tobytes())
def _pad_crop_and_resize(self):
    """Pad, crop and resize the template/detection image pair.

    Populates ``self.ret`` with: mean-padded images, square crops centered on
    the target, fixed-size resized crops (127x127 template, 256x256
    detection), the resize ratios, and the target box re-expressed in
    resized-detection coordinates. When ``self.check`` is set, debug
    visualizations are saved under ``self.tmp_dir``.
    """
    template_img_path = self.ret['template_img_path']
    template_img = Image.open(template_img_path)
    detection_img_path = self.ret['detection_img_path']
    detection_img = Image.open(detection_img_path)
    w, h = template_img.size
    cx, cy, tw, th = self.ret['template_target_xywh']
    # Context margin p = (w + h) / 2; template square side a = sqrt((w+p)(h+p)).
    p = round((tw + th) / 2, 2)
    template_square_size = np.sqrt((tw + p) * (th + p)) #a
    detection_square_size = template_square_size * 2 #A
    # pad: how far the detection square spills over each image edge
    detection_lt_x, detection_lt_y = cx - detection_square_size // 2, cy - detection_square_size // 2
    detection_rb_x, detection_rb_y = cx + detection_square_size // 2, cy + detection_square_size // 2
    left = -detection_lt_x if detection_lt_x < 0 else 0
    top = -detection_lt_y if detection_lt_y < 0 else 0
    right = detection_rb_x - w if detection_rb_x > w else 0
    bottom = detection_rb_y - h if detection_rb_y > h else 0
    padding = (int(left), int(top), int(right), int(bottom))
    self.ret['new_template_img_padding'] = ImageOps.expand(
        template_img, border=padding, fill=self.ret['mean_template'])
    self.ret['new_detection_img_padding'] = ImageOps.expand(
        detection_img, border=padding, fill=self.ret['mean_detection'])
    new_w, new_h = left + right + w, top + bottom + h
    # crop part -- ImageOps.crop takes border widths (left, top, right, bottom)
    ## template part
    tl = cx + left - template_square_size // 2
    tt = cy + top - template_square_size // 2
    tr = new_w - tl - template_square_size
    tb = new_h - tt - template_square_size
    self.ret['template_cropped'] = ImageOps.crop(
        self.ret['new_template_img_padding'], (tl, tt, tr, tb))
    #self.ret['template_cropped'].save('/home/songyu/djsong/srpn/srpn/tmp/visualization/tmp/{}_0_template_.jpg'.format(self.count))
    ## detection part
    dl = cx + left - detection_square_size // 2
    dt = cy + top - detection_square_size // 2
    dr = new_w - dl - detection_square_size
    db = new_h - dt - detection_square_size
    self.ret['detection_cropped'] = ImageOps.crop(
        self.ret['new_detection_img_padding'], (dl, dt, dr, db))
    #self.ret['detection_cropped'].save('/home/songyu/djsong/srpn/srpn/tmp/visualization/tmp/{}_1_detection.jpg'.format(self.count))
    self.ret['detection_tlcords_of_original_image'] = (
        cx - detection_square_size // 2, cy - detection_square_size // 2)
    self.ret['detection_tlcords_of_padding_image'] = (
        cx - detection_square_size // 2 + left, cy - detection_square_size // 2 + top)
    self.ret['detection_rbcords_of_padding_image'] = (
        cx + detection_square_size // 2 + left, cy + detection_square_size // 2 + top)
    self.ret['template_cropped_resized'] = self.ret[
        'template_cropped'].resize((127, 127))
    self.ret['detection_cropped_resized'] = self.ret[
        'detection_cropped'].resize((256, 256))
    self.ret['template_cropprd_resized_ratio'] = round(
        127 / template_square_size, 2)
    self.ret['detection_cropped_resized_ratio'] = round(
        256 / detection_square_size, 2)
    # compute target in detection, and then we will compute IOU
    # whether target in detection part
    x, y, w, h = self.ret['detection_target_xywh']
    self.ret['target_tlcords_of_padding_image'] = (x + left - w // 2, y + top - h // 2)
    self.ret['target_rbcords_of_padding_image'] = (x + left + w // 2, y + top + h // 2)
    if self.check:
        # Debug: draw the target and detection boxes on the padded image.
        s = osp.join(self.tmp_dir, '1_padding_img_with_detection_and_target')
        if not os.path.exists(s):
            os.makedirs(s)
        im = self.ret['new_detection_img_padding']
        draw = ImageDraw.Draw(im)
        x1, y1 = self.ret['target_tlcords_of_padding_image']
        x2, y2 = self.ret['target_rbcords_of_padding_image']
        draw.line([(x1, y1), (x2, y1), (x2, y2), (x1, y2), (x1, y1)], width=1, fill='red') # target in padding
        x1, y1 = self.ret['detection_tlcords_of_padding_image']
        x2, y2 = self.ret['detection_rbcords_of_padding_image']
        draw.line([(x1, y1), (x2, y1), (x2, y2), (x1, y2), (x1, y1)], width=1, fill='green') # detection in padding
        save_path = osp.join(s, '{:04d}.jpg'.format(self.count))
        im.save(save_path)
    ### use cords about padding to compute cords about detection
    x11, y11 = self.ret['detection_tlcords_of_padding_image']
    x12, y12 = self.ret['detection_rbcords_of_padding_image']
    x21, y21 = self.ret['target_tlcords_of_padding_image']
    x22, y22 = self.ret['target_rbcords_of_padding_image']
    x1_of_d = x21 - x11
    y1_of_d = y21 - y11
    x3_of_d = x22 - x11
    y3_of_d = y22 - y11
    # Clamp the target box to the extent of the detection crop.
    x1 = np.clip(x1_of_d, 0, x12 - x11)
    y1 = np.clip(y1_of_d, 0, y12 - y11)
    x2 = np.clip(x3_of_d, 0, x12 - x11)
    y2 = np.clip(y3_of_d, 0, y12 - y11)
    if self.check:
        # Debug: draw the clamped target box on the cropped detection image.
        s = osp.join(self.tmp_dir, '2_cropped_detection')
        if not os.path.exists(s):
            os.makedirs(s)
        im = self.ret['detection_cropped']
        draw = ImageDraw.Draw(im)
        draw.line([(x1, y1), (x2, y1), (x2, y2), (x1, y2), (x1, y1)], width=1, fill='red')
        save_path = osp.join(s, '{:04d}.jpg'.format(self.count))
        im.save(save_path)
    cords_in_cropped_detection = np.array((x1, y1, x2, y2))
    # Scale the box into the 256x256 resized-detection coordinate system.
    cords_in_cropped_resized_detection = (
        cords_in_cropped_detection * self.ret['detection_cropped_resized_ratio']).astype(np.int32)
    x1, y1, x2, y2 = cords_in_cropped_resized_detection
    cx, cy, w, h = (x1 + x2) // 2, (y1 + y2) // 2, x2 - x1, y2 - y1
    self.ret['target_in_resized_detection_x1y1x2y2'] = np.array(
        (x1, y1, x2, y2)).astype(np.int32)
    self.ret['target_in_resized_detection_xywh'] = np.array(
        (cx, cy, w, h)).astype(np.int32)
    self.ret['area_target_in_resized_detection'] = w * h
    if self.check:
        # Debug: draw the target box on the resized detection image.
        s = osp.join(self.tmp_dir, '3_resized_detection')
        if not os.path.exists(s):
            os.makedirs(s)
        im = self.ret['detection_cropped_resized']
        draw = ImageDraw.Draw(im)
        draw.line([(x1, y1), (x2, y1), (x2, y2), (x1, y2), (x1, y1)], width=1, fill='red')
        save_path = osp.join(s, '{:04d}.jpg'.format(self.count))
        im.save(save_path)
def process(self, data: Image.Image) -> Image.Image:
    """Stretch the image's contrast to span the full value range."""
    result = ImageOps.autocontrast(data)
    return result
def _get_dataset(split, centered=False, normalize=False):
    ''' Gets the adapted dataset for the experiments

    Args :
        split (str): 'train', 'test' or 'valid'
        normalize (bool): (Default=False) normalize data to [0, 1]
        centered (bool): (Default=False) data centered to [-1, 1]

    Returns :
        (tuple): <images, labels, filenames> for the requested split
    '''
    import matplotlib.pyplot as plt
    import pandas as pd
    import numpy as np
    from glob import glob
    from PIL import Image
    from PIL import ImageOps
    PATH = '/notebooks/userdata/teamE/MrBank/Pipistrel/'
    if 'train' == split:
        path = PATH + "Train/nature/*.png"
    elif 'test' == split:
        path = "/notebooks/data/datasets/pipistrel/Hackathon/SingleFrame_ObjectProposalClassification/test/*/*.png"
    elif 'valid' == split:
        path = PATH + "Validation/Nature/*.png"
    # default case should not happen
    else:
        assert (False)
    data_list = []
    label_list = []
    filename_list = []
    for img_i, img_path in enumerate(glob(path), 1):
        img = Image.open(img_path)
        img = img.resize((32, 32))
        data_list.append(np.array(img))
        # Label 1 iff "boat" appears in the path.
        label_list.append(int("boat" in img_path.lower()))
        filename_list.append(img_path.split('/')[-1])
        if split in ['train', 'valid']:
            # Augment train/valid with a horizontally mirrored copy.
            img = ImageOps.mirror(img)
            data_list.append(np.array(img))
            label_list.append(int("boat" in img_path.lower()))
            filename_list.append(img_path.split('/')[-1])
        else:
            assert ("test" == split)
    assert (len(data_list) == len(label_list))
    assert (len(data_list) == len(filename_list))
    assert (len(label_list) == len(filename_list))
    assert (0 < img_i)
    assert (0 < len(data_list))
    if 'train' == split:
        # Add up to ~3000 negative (label 0) nature patches.
        for patch_i, patch_path in enumerate(
                glob("/notebooks/userdata/teamE/NATURE_PATCHES/*"), 1):
            img = Image.open(patch_path)
            img = img.resize((32, 32))
            data_list.append(np.array(img))
            label_list.append(0)
            filename_list.append(patch_path.split('/')[-1])
            if patch_i > 3000:
                break
        assert (0 < patch_i)
        for patch_i, patch_path in enumerate(
                glob("/notebooks/userdata/teamE/OCEAN_PATCHES_TRAIN/*"), 1):
            img = Image.open(patch_path)
            img = img.resize((32, 32))
            data_list.append(np.array(img))
            label_list.append(0)
            filename_list.append(patch_path.split('/')[-1])
        assert (0 < patch_i)
    elif 'test' == split:
        for patch_i, patch_path in enumerate(
                glob("/notebooks/userdata/teamE/OCEAN_PATCHES_TEST/*"), 1):
            img = Image.open(patch_path)
            img = img.resize((32, 32))
            data_list.append(np.array(img))
            label_list.append(0)
            filename_list.append(patch_path.split('/')[-1])
        assert (0 < patch_i)
    else:
        assert ("valid" == split)
    assert (len(data_list) == len(label_list))
    assert (len(data_list) == len(filename_list))
    assert (len(label_list) == len(filename_list))
    data = np.array(data_list)
    labels = np.array(label_list)
    if split in ['train', 'valid']:
        shuffle_ind = np.random.permutation(len(data))
        data = data[shuffle_ind]
        labels = labels[shuffle_ind]
    else:
        # BUG FIX: this branch is reached only when split == 'test'
        # (train/valid were shuffled above); the original asserted
        # "valid" == split, which always failed for the test split.
        assert ("test" == split)
    # Convert images to [0..1] range
    if normalize or centered:
        #normalize
        # NOTE(review): these sanity checks use .any(); .all() was likely intended.
        max_val = np.empty_like(data)
        max_val.fill(255.1)
        assert (max_val > data).any()
        min_val = np.empty_like(data)
        min_val.fill(-0.1)
        assert (min_val < data).any()
        data = data.astype(np.float32) / 255.0
        #center
        data = data.astype(np.float32) * 2. - 1.
        max_val = np.empty_like(data)
        max_val.fill(1.1)
        assert (max_val > data).any()
        min_val = np.empty_like(data)
        min_val.fill(-1.1)
        assert (min_val < data).any()
    print(split, data.shape, labels, sum(labels))
    return data.astype(np.float32), labels, filename_list
def process(self, data: Image.Image) -> Image.Image:
    """Posterize *data*, keeping ``8 - n`` bits where n is drawn from self.nbs_bits."""
    low, high = self.nbs_bits[0], self.nbs_bits[1]
    if low == high:
        n_removed = low
    else:
        n_removed = np.random.randint(*self.nbs_bits)
    # The configured range counts bits to drop; PIL wants bits to keep.
    return ImageOps.posterize(data, 8 - n_removed)
def __init__(self, p1, operation1, magnitude_idx1, p2, operation2, magnitude_idx2, fillcolor=(128, 128, 128)):
    """AutoAugment-style sub-policy: two operations applied with probabilities.

    Args:
        p1, p2: probabilities of applying operation1 / operation2.
        operation1, operation2: op names (keys of ``ranges``/``func``).
        magnitude_idx1, magnitude_idx2: index 0-9 into the op's magnitude range.
        fillcolor: RGB fill used by the affine transforms.
    """
    ranges = {
        'shearX': np.linspace(0, 0.3, 10),
        'shearY': np.linspace(0, 0.3, 10),
        'translateX': np.linspace(0, 150 / 331, 10),
        'translateY': np.linspace(0, 150 / 331, 10),
        'rotate': np.linspace(0, 30, 10),
        'color': np.linspace(0.0, 0.9, 10),
        # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the documented replacement.
        'posterize': np.round(np.linspace(8, 4, 10), 0).astype(int),
        'solarize': np.linspace(256, 0, 10),
        'contrast': np.linspace(0.0, 0.9, 10),
        'sharpness': np.linspace(0.0, 0.9, 10),
        'brightness': np.linspace(0.0, 0.9, 10),
        'autocontrast': [0] * 10,
        'equalize': [0] * 10,
        'invert': [0] * 10
    }

    def rotate_with_fill(img, magnitude):
        # Rotate in RGBA and composite over a gray background so the
        # uncovered corners are filled instead of black.
        rot = img.convert('RGBA').rotate(magnitude)
        return Image.composite(rot, Image.new('RGBA', rot.size, (128, ) * 4), rot).convert(img.mode)

    # Each op randomly picks a sign for its magnitude where applicable.
    func = {
        'shearX': lambda img, magnitude: img.transform(
            img.size, Image.AFFINE,
            (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
            Image.BICUBIC, fillcolor=fillcolor),
        'shearY': lambda img, magnitude: img.transform(
            img.size, Image.AFFINE,
            (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),
            Image.BICUBIC, fillcolor=fillcolor),
        'translateX': lambda img, magnitude: img.transform(
            img.size, Image.AFFINE,
            (1, 0, magnitude * img.size[0] * random.choice([-1, 1]), 0, 1, 0),
            fillcolor=fillcolor),
        'translateY': lambda img, magnitude: img.transform(
            img.size, Image.AFFINE,
            (1, 0, 0, 0, 1, magnitude * img.size[1] * random.choice([-1, 1])),
            fillcolor=fillcolor),
        'rotate': lambda img, magnitude: rotate_with_fill(img, magnitude),
        'color': lambda img, magnitude: ImageEnhance.Color(img).enhance(
            1 + magnitude * random.choice([-1, 1])),
        'posterize': lambda img, magnitude: ImageOps.posterize(img, magnitude),
        'solarize': lambda img, magnitude: ImageOps.solarize(img, magnitude),
        'contrast': lambda img, magnitude: ImageEnhance.Contrast(img).enhance(
            1 + magnitude * random.choice([-1, 1])),
        'sharpness': lambda img, magnitude: ImageEnhance.Sharpness(img).enhance(
            1 + magnitude * random.choice([-1, 1])),
        'brightness': lambda img, magnitude: ImageEnhance.Brightness(img).enhance(
            1 + magnitude * random.choice([-1, 1])),
        'autocontrast': lambda img, magnitude: ImageOps.autocontrast(img),
        'equalize': lambda img, magnitude: ImageOps.equalize(img),
        'invert': lambda img, magnitude: ImageOps.invert(img)
    }
    self.p1 = p1
    self.operation1 = func[operation1]
    self.magnitude1 = ranges[operation1][magnitude_idx1]
    self.p2 = p2
    self.operation2 = func[operation2]
    self.magnitude2 = ranges[operation2][magnitude_idx2]
def process(self, data: Image.Image) -> Image.Image:
    """Histogram-equalize the image."""
    result = ImageOps.equalize(data)
    return result
def process(self, data: Image.Image) -> Image.Image:
    """Solarize *data* at ``256 - t`` where t is drawn from self.thresholds."""
    low, high = self.thresholds[0], self.thresholds[1]
    if low == high:
        t = low
    else:
        t = np.random.randint(*self.thresholds)
    # The configured range counts down from 256; PIL wants the threshold itself.
    return ImageOps.solarize(data, 256 - t)
def mirror(file):
    """Return a left-right flipped copy of the image stored at *file*."""
    return ImageOps.mirror(Image.open(file))
def process(self, data: Image.Image) -> Image.Image:
    """Return the negative of the image."""
    inverted = ImageOps.invert(data)
    return inverted
def _imequalize(img):
    """Histogram-equalize a numpy image via PIL, returning a numpy array."""
    # equalize the image using PIL.ImageOps.equalize
    from PIL import ImageOps, Image
    pil_img = Image.fromarray(img)
    return np.asarray(ImageOps.equalize(pil_img))
def negative(file):
    """Open the image at *file* and return its color-inverted copy."""
    return ImageOps.invert(Image.open(file))
def scrape_prices(url, image_width, image_height):
    """
    Extract dates and prices from a camelcamelcamel item URL.

    Works by downloading the price-history chart image and reverse-engineering
    it: color masks locate the plotted lines, OCR (pytesseract) reads the axis
    labels, and pixel positions are converted to dollars/dates.

    Parameters:
    url (str): camelcamelcamel URL for a product.
    image_width (int): width of the image to be used, in pixels
    image_height (int): height of the image to be used, in pixels

    Returns:
    dates: numpy array of dates at 12-hour intervals
    prices: a numpy array of prices
    (both are None when the chart is empty or cannot be parsed)
    """
    ################
    # Collect data #
    ################
    # Show a message indicating progress
    progress_string = st.text('Collecting data...')
    # Define colors of elements of the plot (RGB)
    # Plotted lines
    plot_colors = np.array([[194, 68, 68], [119, 195, 107], [51, 51, 102]])
    # Gray axis lines
    gray = np.array([215, 215, 214])
    # Black axis lines
    black = np.array([75, 75, 75])
    # Download the image
    response = requests.get(url)
    image_temp = Image.open(BytesIO(response.content))
    # Convert image to float
    im = np.array(image_temp)
    # Get masks for each plot color
    masks = list()
    for i in range(3):
        masks.append(np.all(im == plot_colors[i], axis=-1))
    # Check if the image is empty (camel has no data)
    if not np.any(masks[1]):
        return None, None
    ######################
    # Find x and y scale #
    ######################
    progress_string.text('Aligning data...')
    # Find the y axis upper limit
    # Crop a portion of the image containing the top of the grid
    top_line_crop = im[:, round(image_width * .5) - 5:round(image_width * .5) + 6, :]
    # Get the position of the line
    line_y_value = find_line(top_line_crop, gray)
    # If it wasn't found, quit
    # Checks of this nature are rarely needed, as long
    # as camel keeps their plotting code the same
    if line_y_value is None:
        return None, None
    else:
        line_y_value = int(line_y_value)
    # Find x axis limits
    # Crop the left-most and right-most vertical lines in the grid
    left_line_crop = np.transpose(
        im[round(image_height * .5) - 8:round(image_height * .5) + 9, :round(image_width * .1), :],
        axes=[1, 0, 2])
    right_line_crop = np.transpose(
        im[round(image_height * .5) - 8:round(image_height * .5) + 9, round(image_width * .7):, :],
        axes=[1, 0, 2])
    lo_x_value = find_line(left_line_crop, black)
    hi_x_value = find_line(right_line_crop[::-1, :, :], gray)
    if lo_x_value is None or hi_x_value is None:
        return None, None
    else:
        lo_x_value = int(lo_x_value)
        hi_x_value = int(hi_x_value)
    # Find price corresponding to the y axis upper limit
    # First, crop the price text
    upper_price_crop = im[line_y_value - 8:line_y_value + 10, 0:lo_x_value - 9, :]
    upper_price_crop = Image.fromarray(upper_price_crop)
    # Resize and apply OCR
    upper_price_crop = upper_price_crop.resize(
        (upper_price_crop.width * 10, upper_price_crop.height * 10))
    upper_price_string = pytesseract.image_to_string(upper_price_crop, config='--psm 7')
    # Strip the currency symbol and thousands separators.
    upper_price = float(upper_price_string[1:].replace(',', ''))
    # Store y position of price limits
    # The position and price of the lower limit are constant
    limit_y_positions = np.array([line_y_value, image_height - 49])
    # Calculate dollars per pixel
    dollarspp = upper_price / (np.max(limit_y_positions) - np.min(limit_y_positions))
    # Crop year text from bottom of image so that we
    # can find the date of the first timepoint
    year_crop = im[-14:, 0:round(image_width / 8), :]
    year_crop = Image.fromarray(year_crop)
    # Resize and apply OCR
    year_crop = year_crop.resize((year_crop.width * 5, year_crop.height * 5))
    year_string = pytesseract.image_to_string(year_crop, config='--psm 7')
    year_string = year_string[:4]
    # Crop month and day from bottom left corner
    date_crop = im[-49:-14, (lo_x_value - 40):(lo_x_value + 6), :]
    # Convert to image
    date_crop = Image.fromarray(date_crop)
    # Invert, so that rotation works
    date_crop = ImageOps.invert(date_crop)
    # Pad the image
    date_crop_padded = Image.new(
        'RGB', (round(date_crop.width * 1.5), round(date_crop.height * 1.5)), (0, 0, 0))
    date_crop_padded.paste(date_crop, box=(0, round(date_crop.height * .5)))
    # Resize
    date_crop_padded = date_crop_padded.resize(
        (date_crop_padded.width * 7, date_crop_padded.height * 7),
        resample=Image.LANCZOS)
    # Rotate and invert (the axis labels are drawn at 45 degrees)
    date_crop_padded = ImageOps.invert(date_crop_padded.rotate(-45))
    # Crop
    date_crop_padded = date_crop_padded.crop((1, 85, 297, 260))
    # Apply OCR
    date_string = pytesseract.image_to_string(date_crop_padded)
    # Find closest match to a month
    start_month = difflib.get_close_matches(date_string, [
        'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct',
        'Nov', 'Dec'
    ], n=1, cutoff=0.2)
    # Quit if no month was found
    if np.size(start_month) < 1:
        return None, None
    start_month = start_month[0]
    # Get the day of the first timepoint
    # Try to fix mixups between 'o' and 0
    if date_string[-1] == 'o':
        date_string = date_string[:-1] + '0'
    # Remove all whitespace
    date_string_stripped = "".join(date_string.split())
    # Take last 2 digits if the second-to-last is reasonable
    if date_string_stripped[-2].isdigit() and 0 < int(
            date_string_stripped[-2]) < 4:
        start_day = date_string_stripped[-2:]
    else:
        start_day = '0' + date_string_stripped[-1]
    # Store x axis locations of time limits
    limit_x_positions = [lo_x_value, image_width - hi_x_value]
    # For debugging purposes
    # Useful if CCC changes their plotting code
    #st.image(year_crop)
    #st.write(year_string)
    #st.image(date_crop)
    #st.image(date_crop_padded)
    # Check if our date is valid
    try:
        start_time = datetime.datetime.strptime(
            start_month + start_day + year_string, '%b%d%Y')
    except ValueError:
        return None, None
    # Get current time
    end_time = datetime.datetime.now()
    # Calculate days per pixel
    time_delta = end_time - start_time
    dayspp = time_delta.days / int(1 + np.diff(limit_x_positions))
    # Get number of observations
    num_obs = int(np.diff(limit_x_positions))
    # Preallocate prices as nan
    prices = np.ones(num_obs) * np.nan
    ##################
    # Extract prices #
    ##################
    progress_string.text('Extracting prices...')
    # Find y-axis value of blue pixels in each time step -
    # these are the prices we're looking for
    y = [[i for i, x in enumerate(q) if x] for q in np.transpose(
        masks[2][:, limit_x_positions[0]:limit_x_positions[1]])]
    # Adjust values if necessary, then convert to prices
    # Missing data are set to nan
    for i in range(num_obs):
        # Check if the bottom of the blue line is covered by a red or green line
        if np.size(y[i]) == 1:
            if masks[0][int(y[i][0]) + 1, limit_x_positions[0] + i] or masks[1][int(y[i][0]) + 1, limit_x_positions[0] + i, ]:
                y[i][0] += 1
        # Check if the blue line is covered by both red and green lines
        if np.size(y[i]) == 0:
            red_idx = [
                q for q, x in enumerate(masks[0][:, limit_x_positions[0] + i]) if x
            ]
            grn_idx = [
                q for q, x in enumerate(masks[1][:, limit_x_positions[0] + i]) if x
            ]
            if np.size(red_idx) == 1 and np.size(grn_idx) == 1 and np.abs(
                    int(red_idx[0]) - int(grn_idx[0])) == 1:
                y[i] = grn_idx
            else:
                y[i] = np.nan
        prices[i] = dollarspp * (image_height - np.max(y[i]) - 50)
    # Adjust periods with no data
    # First, find nans and convert to a str for regex searching
    nans = ''.join([str(int(np.isnan(i))) for i in prices])
    # Ensure the beginnings of empty periods are correct
    matches = [m.span() for m in re.finditer('000110011', nans)]
    for match in matches:
        prices[match[0] + 3:match[0] + 5] = prices[match[0] + 5]
    # Then remove empty periods
    nans = ''.join([str(int(np.isnan(i))) for i in prices])
    matches = [m.span() for m in re.finditer('1100', nans)]
    for match in matches:
        prices[match[0] + 2:match[0] + 4] = np.nan
    ###################
    # Resample prices #
    ###################
    progress_string.text('Resampling prices...')
    # Resample to 2x daily observations at 6:00 and 18:00
    # First, get the dates of our observations
    dates = pd.date_range(start_time, end_time, periods=num_obs).to_pydatetime()
    # Initialize new dates and prices at the desired interval
    dates_2x_daily = pd.date_range(datetime.datetime(start_time.year,
                                                     start_time.month,
                                                     start_time.day, 6),
                                   datetime.datetime(end_time.year,
                                                     end_time.month,
                                                     end_time.day, 18),
                                   freq='12H').to_pydatetime()
    prices_2x_daily = np.ones(np.size(dates_2x_daily)) * np.nan
    # Find price at the closest date to each timepoint
    for i in range(np.size(dates_2x_daily)):
        prices_2x_daily[i] = prices[take_closest_date(dates - dates_2x_daily[i])]
    # Make sure most recent price is correct
    prices_2x_daily[-1] = prices[-1]
    # Round prices to 2 decimal places
    prices_2x_daily = np.around(prices_2x_daily, 2)
    # Clear the message
    progress_string.empty()
    return dates_2x_daily, prices_2x_daily
#!/usr/bin/env python3
"""Edge-detect myself.jpg and binarize it: near-white pixels are kept,
everything else is forced to black."""
from PIL import Image, ImageFilter
from PIL import ImageOps
import numpy as np

img = Image.open("myself.jpg")
img = img.convert('L')                    # grayscale
img = img.filter(ImageFilter.FIND_EDGES)  # edge magnitude
img = ImageOps.invert(img)                # dark edges on a light background
img = img.convert('RGB')
source = np.array(img)
bound = np.array(img)
for hnum, row in enumerate(source):
    for wnum, pixel in enumerate(row):
        if pixel[0] > 250:
            # keep near-white pixels as-is
            pass
        else:
            # BUG FIX: the original wrote bound[i][j], indexing with the
            # pixel *values* (numpy arrays) instead of the row/column
            # positions, which scrambles the output via fancy indexing.
            bound[hnum][wnum] = [0, 0, 0]
img = np.array(bound)
img = Image.fromarray(img)
def solarize(pil_img, level):
    """Solarize with strength *level*: a higher level lowers the threshold."""
    magnitude = int_parameter(sample_level(level), 256)
    return ImageOps.solarize(pil_img, 256 - magnitude)
def get_thumbnail(photo_file=None, photo=None, width=256, height=256, crop='cover', quality=75, return_type='path', force_regenerate=False, force_accurate=False):
    """Return a JPEG thumbnail for a photo, generating and caching it on disk.

    Either ``photo_file`` or ``photo`` may be given as a model instance or an
    id. ``crop='cover'`` center-crops to exactly width x height; any other
    value preserves aspect ratio. ``return_type`` selects 'path', 'url' or
    'bytes'. ``force_accurate`` uses the gamma-correct srgbResize path.
    """
    if not photo_file:
        # Resolve the Photo (id or instance) to its base file.
        if not isinstance(photo, Photo):
            photo = Photo.objects.get(id=photo)
        photo_file = photo.base_file
    elif not isinstance(photo_file, PhotoFile):
        photo_file = PhotoFile.objects.get(id=photo_file)
    # If thumbnail image was previously generated and we weren't told to re-generate, return that one
    output_path = get_thumbnail_path(photo_file.id, width, height, crop, quality)
    output_url = get_thumbnail_url(photo_file.id, width, height, crop, quality)
    if os.path.exists(output_path):
        if return_type == 'bytes':
            return open(output_path, 'rb').read()
        elif return_type == 'url':
            return output_url
        else:
            return output_path
    # Read base image and metadata
    input_path = photo_file.base_image_path
    ImageFile.LOAD_TRUNCATED_IMAGES = True
    im = Image.open(input_path)
    if im.mode != 'RGB':
        im = im.convert('RGB')
    metadata = PhotoMetadata(input_path)
    # Perform rotations if declared in metadata
    if force_regenerate:
        im = im.rotate(photo_file.rotation, expand=True)
    elif metadata.get('Orientation') in ['Rotate 90 CW', 'Rotate 270 CCW']:
        im = im.rotate(-90, expand=True)
    elif metadata.get('Orientation') in ['Rotate 90 CCW', 'Rotate 270 CW']:
        im = im.rotate(90, expand=True)
    # Crop / resize
    if force_accurate:
        im = srgbResize(im, (width, height), crop, Image.BICUBIC)
    else:
        if crop == 'cover':
            im = ImageOps.fit(im, (width, height), Image.BICUBIC)
        else:
            im.thumbnail((width, height), Image.BICUBIC)
    # Save to disk (keeping the bytes in memory if we need to return them)
    if return_type == 'bytes':
        img_byte_array = io.BytesIO()
        im.save(img_byte_array, format='JPEG', quality=quality)
        with open(output_path, 'wb') as f:
            f.write(img_byte_array.getvalue())
    else:
        im.save(output_path, format='JPEG', quality=quality)
    # Update PhotoFile DB model with version of thumbnailer
    if photo_file.thumbnailed_version != THUMBNAILER_VERSION:
        photo_file.thumbnailed_version = THUMBNAILER_VERSION
        photo_file.save()
    # Return accordingly
    if return_type == 'bytes':
        return img_byte_array.getvalue()
    elif return_type == 'url':
        return output_url
    return output_path