def genVideoThumb(videofile, imagefile):
    # Build a horizontal strip of THUMB_FRAME_COUNT frames sampled across the video.
    (width, height) = get_video_size(videofile)
    frame_step = int(100 / THUMB_FRAME_COUNT)
    ratio = float(width) / float(height)
    if width > LIBRARYFILE_THUMB_WIDTH:
        width = LIBRARYFILE_THUMB_WIDTH
        height = int(width / ratio)
    if height > LIBRARYFILE_THUMB_HEIGHT:
        height = LIBRARYFILE_THUMB_HEIGHT
        width = int(height * ratio)
    width_offset = int((LIBRARYFILE_THUMB_WIDTH - width) / 2)
    height_offset = int((LIBRARYFILE_THUMB_HEIGHT - height) / 2)
    new_image = pil.new("RGB", (LIBRARYFILE_THUMB_WIDTH * THUMB_FRAME_COUNT,
                                LIBRARYFILE_THUMB_HEIGHT))
    for i in range(THUMB_FRAME_COUNT):
        framefile = "%s%d.jpg" % (imagefile, i)
        # TODO: fix keyframing issue (perhaps flvtool2?)
        os.system(
            "ffmpegthumbnailer -i %s -o %s -t %d%% -q %d -s %d"
            % (videofile, framefile, i * frame_step, JPEG_QUALITY, width)
        )
        img = pil.open(framefile)
        new_image.paste(img, (LIBRARYFILE_THUMB_WIDTH * i + width_offset, height_offset))
        os.remove(framefile)
    new_image.save(imagefile)
    return True

def hide_image(public_img, secret_img):
    s = 4  # size of the low-order band we overwrite in the cover image
    # Cover image whose low-order bits get overwritten.
    data = Image.open(public_img)
    # Secret image (the bits we are going to write), stretched to full
    # contrast and resized to match the cover image.
    key = ImageOps.autocontrast(Image.open(secret_img).resize(data.size))
    for x in range(data.size[0]):
        for y in range(data.size[1]):
            p = data.getpixel((x, y))
            q = key.getpixel((x, y))
            # Quantise each cover channel to a multiple of s and store the
            # secret channel (scaled down to 0..s) in the freed low bits.
            red = p[0] - (p[0] % s) + (s * q[0] // 255)
            green = p[1] - (p[1] % s) + (s * q[1] // 255)
            blue = p[2] - (p[2] % s) + (s * q[2] // 255)
            if 200 < x < 206 and 200 < y < 206:
                print(red, green, blue)  # debug output for a small patch
            data.putpixel((x, y), (red, green, blue))
    return data

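# A minimal sketch of the inverse operation, assuming the same band size
# s=4 used by hide_image above; reveal_image is a hypothetical helper and
# not part of the original code. Recovery is approximate because the secret
# channel is stored with only s levels of precision.
def reveal_image(stego_img, s=4):
    data = Image.open(stego_img)
    out = Image.new("RGB", data.size)
    for x in range(data.size[0]):
        for y in range(data.size[1]):
            p = data.getpixel((x, y))
            # The secret channel was stored as (s * value // 255), so rescale
            # the low-order remainder of each channel back to 0..255.
            out.putpixel((x, y), tuple((c % s) * 255 // (s - 1) for c in p[:3]))
    return out
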
def hide(bytes, bitmap, ext='sae'):
    """
    Hides the given bytes in the supplied bitmap.

    * bytes    variable-length byte sequence
    * bitmap   bitmap file
    """
    image = Image.open(bitmap)
    out = Image.new(image.mode, image.size, None)
    pixels = to_pixels(image)
    bits = to_bits(bytes)
    finished = False
    for x, y, pixel in pixels:
        r, g, b = pixel
        # Each payload bit replaces the least significant bit of one channel;
        # once the payload is exhausted, move() yields None and the channel
        # is copied through unchanged.
        bit, finished = move(bits, finished)
        r = r if bit is None else (r & ~1) | bit
        bit, finished = move(bits, finished)
        g = g if bit is None else (g & ~1) | bit
        bit, finished = move(bits, finished)
        b = b if bit is None else (b & ~1) | bit
        out.putpixel((x, y), (r, g, b))
    name = bitmap.name + '.' + ext
    with open(name, "wb") as target:
        out.save(target, "BMP")

def TestD():
    i1 = np.array(Image.open("test_inputs/checker.jpg").convert('L'), dtype="float")
    i2 = np.array(Image.open("test_inputs/checker_grad.jpg").convert('L'), dtype="float")
    i3 = np.array(Image.open("test_inputs/checker_blur.jpg").convert('L'), dtype="float")
    i1Norm = norm(i1)
    i2Norm = norm(i2)
    i3Norm = norm(i3)

    # test1: identity
    d11 = d(i1, i1, i1Norm, i1Norm)
    d22 = d(i2, i2, i2Norm, i2Norm)
    print "\nd(i1,i1) =", d11
    print "d(i2,i2) =", d22

    # test2: symmetry
    d12 = d(i1, i2, i1Norm, i2Norm)
    d21 = d(i2, i1, i2Norm, i1Norm)
    print "\nd(i1,i2) =", d12
    print "d(i2,i1) =", d21

    # test3: triangle inequality
    d23 = d(i2, i3, i2Norm, i3Norm)
    d13 = d(i1, i3, i1Norm, i3Norm)
    print "\nd(i1,i2) =", d12
    print "d(i2,i3) =", d23
    print "d(i1,i3) =", d13
    print "d(i1,i3) <= d(i1,i2) + d(i2,i3)"
    print "d(i1,i3) >= abs(d(i1,i2) - d(i2,i3))"
    print abs(d12 - d23), "<=", d13, "<=", d12 + d23, "\n"

def write_image(img, fname, apply_gamma=False):
    """Save a float-3 numpy array image to a file.

    Supported formats: PNG, JPEG, and others; see PIL docs for more.

    Image can be 3-channel, which is interpreted as RGB, or can be 1-channel,
    which is greyscale.

    Can optionally specify that the image should be gamma-encoded prior to
    writing it out; this should be done if the image contains linear pixel
    values, to make the image look "normal".

    Args:
        img: Numpy image array data.
        fname: Path of file to save to; the extension specifies the format.
        apply_gamma: (Optional) apply gamma to the image prior to writing it.
    """
    if apply_gamma:
        img = apply_lut_to_image(img, DEFAULT_GAMMA_LUT)
    (h, w, chans) = img.shape
    if chans == 3:
        Image.fromarray((img * 255.0).astype(numpy.uint8), "RGB").save(fname)
    elif chans == 1:
        img3 = (img * 255.0).astype(numpy.uint8).repeat(3).reshape(h, w, 3)
        Image.fromarray(img3, "RGB").save(fname)
    else:
        raise its.error.Error('Unsupported image type')

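# A minimal usage sketch for write_image, assuming linear float pixel values
# in [0, 1]; the ramp array and file names are illustrative assumptions, not
# part of the original code.
ramp = numpy.tile(numpy.linspace(0.0, 1.0, 256), (64, 1)).reshape(64, 256, 1)
write_image(ramp, "ramp_grey.png")                   # 1-channel greyscale path
write_image(ramp.repeat(3, axis=2), "ramp_rgb.png")  # 3-channel RGB path
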
def compare_images(file1, file2):
    """
    Compare two images, pixel by pixel, summing up the differences in every
    component and every pixel.  Return the magnitude of the difference between
    the two images.

        file1: A path to the first image file on disk
        file2: A path to the second image file on disk
    """
    img1 = Image.open(file1)
    img2 = Image.open(file2)
    if img1.size[0] != img2.size[0] or img1.size[1] != img2.size[1]:
        raise ValueError("Images are of different sizes: img1 = (" +
                         str(img1.size[0]) + " x " + str(img1.size[1]) +
                         ") , img2 = (" + str(img2.size[0]) + " x " +
                         str(img2.size[1]) + ")")
    size = img1.size
    img1 = img1.load()
    img2 = img2.load()
    indices = itertools.product(range(size[0]), range(size[1]))
    diff = 0
    for i, j in indices:
        p1 = img1[i, j]
        p2 = img2[i, j]
        diff += abs(p1[0] - p2[0]) + abs(p1[1] - p2[1]) + abs(p1[2] - p2[2])
    return diff

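# A minimal usage sketch for compare_images, assuming two same-sized RGB
# files on disk; the file names and tolerance are illustrative assumptions.
difference = compare_images("render_expected.png", "render_actual.png")
if difference > 1000:  # tolerance picked arbitrarily for the example
    raise ValueError("images differ by %d" % difference)
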
def prepare(self, filename, bgcolor=background_default, chatty=chatty_default):
    """
    Prepare a large image for tiling.

    Load an image from a file.  Resize the image so that it is square, with
    dimensions that are a power of two in length (e.g. 512, 1024, 2048, ...).
    Then, return it.
    """
    src = Image.open(filename)
    self.orig_size = src.size
    self.new_size = (1, 1)
    while self.new_size[0] < src.size[0] or self.new_size[1] < src.size[1]:
        self.new_size = (self.new_size[0] * 2, self.new_size[1] * 2)
    img = Image.new('RGBA', self.new_size)
    img.paste("#" + bgcolor)
    src.thumbnail(self.new_size, scaling_filter)
    img.paste(src, (int((self.new_size[0] - src.size[0]) / 2),
                    int((self.new_size[1] - src.size[1]) / 2)))
    return img

def export_icon(icon, size, filename, font, color):
    image = Image.new("RGBA", (size, size), color=(0, 0, 0, 0))
    draw = ImageDraw.Draw(image)

    # Initialize font
    font = ImageFont.truetype(font, size)

    # Determine the dimensions of the icon
    width, height = draw.textsize(icons[icon], font=font)
    draw.text(((size - width) / 2, (size - height) / 2), icons[icon],
              font=font, fill=color)

    # Get bounding box
    bbox = image.getbbox()
    if bbox:
        image = image.crop(bbox)
    borderw = int((size - (bbox[2] - bbox[0])) / 2)
    borderh = int((size - (bbox[3] - bbox[1])) / 2)

    # Create background image
    bg = Image.new("RGBA", (size, size), (0, 0, 0, 0))
    bg.paste(image, (borderw, borderh))

    # Save file
    bg.save(filename)

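# A minimal usage sketch for export_icon. It assumes the module-level `icons`
# mapping (icon name -> glyph string) that export_icon reads from, plus a
# local TrueType font file; the sample entry and the font path below are
# illustrative assumptions, not part of the original code.
icons = {"heart": u"\uf004"}
export_icon("heart", 128, "heart-128.png", "fontawesome-webfont.ttf", "#000000")
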
def _createIcon(self, nom, extensio):
    '''
    Creates an icon for the shape.

    PARAMETERS
        nom      : Name of the file without extension
        extensio : Extension

    RETURNS
        Returns (sizex, sizey)
    '''
    # create a 22x22 icon WITHOUT TRANSPARENCY
    try:
        img = Image.open(nom + extensio)
    except:
        # Yes, I need to detect the error type ... maybe in the next version
        raise self.Image2DiaErrors(4)
    midax, miday = img.size
    # Create a white background to avoid a 'bad transparency' effect
    icona = Image.new("RGB", (22, 22), (255, 255, 255))
    # Aspect ratio
    novaMida = (30, 30 * miday / midax)
    img.thumbnail([22, 22], Image.ANTIALIAS)
    elformat = img.format
    center = ((22 - img.size[0]) / 2, (22 - img.size[1]) / 2)
    icona.paste(img, center, img)
    try:
        icona.save('{0:>s}-icon.png'.format(nom), elformat, quality=90, optimize=1)
    except:
        icona.save("{0:>s}-icon.png".format(nom), elformat, quality=90)
    # img.close
    return novaMida

def getPygImage(self, th=False, color=0):
    img = self.getImage()
    w = self.width
    h = self.height
    if False == th:
        m = self.mode
        d = img.tostring()
        p = self.pitch
    else:
        # Binarise each channel around th; the channel selected by `color`
        # uses the inverted test (below threshold) while the others use the
        # above-threshold test.
        th = 255.0 * th
        r, g, b = img.split()
        if 0 == color:
            r = Image.eval(r, lambda i: 255 if i < th else 0)
            g = Image.eval(g, lambda i: 255 if i > th else 0)
            b = Image.eval(b, lambda i: 255 if i > th else 0)
        elif 1 == color:
            r = Image.eval(r, lambda i: 255 if i > th else 0)
            g = Image.eval(g, lambda i: 255 if i < th else 0)
            b = Image.eval(b, lambda i: 255 if i > th else 0)
        else:
            r = Image.eval(r, lambda i: 255 if i > th else 0)
            g = Image.eval(g, lambda i: 255 if i > th else 0)
            b = Image.eval(b, lambda i: 255 if i < th else 0)
        m = img.mode
        i = Image.merge(m, (r, g, b))
        d = i.tostring()
        p = -1 * w * len(m)
    return pyglet.image.ImageData(w, h, m, d, p)

def main():
    pool = multiprocessing.Pool()  # For the parallel map()
    if sys.argv[1] == "decode":
        source = Image.open(sys.argv[1])
        print("Decoding the encoded...")
        secret = decode(sys.argv[1], 3, 2, 3)
        output = Image.new("L", source.size)
        output.putdata(secret)
        output.save(sys.argv[2])
    elif sys.argv[1] == "encode":
        im = Image.open(sys.argv[1])
        print("Chopping Bits...")
        secret = hidden(sys.argv[1])
        print("Cooking the Pot...")
        messenger = carrier(sys.argv[2])
        print("Potting the Bits...")
        final = zip(secret, messenger)
        # In the first versions the variables used a disproportionate amount
        # of RAM
        del secret
        del messenger
        final = list(pool.map(add, final))
        final = list(pool.map(tuple, final))
        output = Image.new("RGB", im.size)
        output.putdata(final)
        output.save(sys.argv[3])

def takeSnapshot(self, *args):
    """taco"""
    if canTakeSnapshots:
        rawimg = self.device.DevCcdRead(1)
        try:
            img = Image.frombuffer(
                "RGB", (self.getWidth(), self.getHeight()), rawimg
            )
            pixmap = img.tostring("raw", "BGR")
            img = Image.frombuffer("RGB", img.size, pixmap)
            # img.save(*args)
        except BaseException:
            logging.getLogger("HWR").exception(
                "%s: could not save snapshot", self.name()
            )
        else:
            if len(args):
                try:
                    img.save(*args)
                except BaseException:
                    logging.getLogger("HWR").exception(
                        "%s: could not save snapshot", self.name()
                    )
                else:
                    return True
            else:
                return img
    else:
        logging.getLogger("HWR").error(
            "%s: could not take snapshot: sorry PIL is not available :-(",
            self.name(),
        )
        return False

def decode(self, _, imgObj):
    """ Convert an image stored in a StringIO object (in any PIL-readable
        image file format) to a ROS compatible message (sensor_msgs.Image).
    """
    if not _checkIsStringIO(imgObj):
        raise TypeError('Given object is not a StringIO instance.')

    # Checking of image according to django.forms.fields.ImageField
    try:
        imgObj.seek(0)
        img = Image.open(imgObj)
        img.verify()
    except:
        raise ValueError('Content of given image could not be verified.')

    imgObj.seek(0)
    img = Image.open(imgObj)
    img.load()

    # Everything ok, convert PIL.Image to ROS and return it
    if img.mode == 'P':
        img = img.convert('RGB')

    rosimage = sensor_msgs.msg.Image()
    rosimage.encoding = ImageConverter._ENCODINGMAP_PY_TO_ROS[img.mode]
    (rosimage.width, rosimage.height) = img.size
    rosimage.step = (ImageConverter._PIL_MODE_CHANNELS[img.mode]
                     * rosimage.width)
    rosimage.data = img.tostring()
    return rosimage

def __call__(self, file_name):
    file_path = download_image_to_cache(file_name, self.cache)
    im = Image.open(file_path)
    if im.mode != self.mode:
        im = im.convert(self.mode)
    if np.all(im.size != self.resize_to):
        new_shape = (int(self.resize_to[0]), int(self.resize_to[1]))
        im = im.resize(new_shape, Image.ANTIALIAS)
    if self.mask is not None:
        mask = self.mask
        tmask = ImageOps.invert(mask.convert('RGBA').split()[-1])
        im = Image.composite(im, mask, tmask).convert(self.mode)
    if self._crop != (0, 0,) + self.resize_to:
        im = im.crop(self._crop)
    l, t, r, b = self._crop
    assert im.size == (r - l, b - t)
    imval = np.asarray(im, self.dtype)
    rval = imval
    if self.normalize:
        rval -= rval.mean()
        rval /= max(rval.std(), 1e-3)
    else:
        rval /= 255.0
    assert rval.shape[:2] == self.resize_to
    return rval

def main(argv):
    """ main loop """
    for arg in argv:
        print arg
    if argv[1] == '-f':
        try:
            image = Image.open(ABSFILEPATH + '/' + argv[2])
            blogsize(image, argv[2])
            image.show()
        except Exception:
            print 'cannot open'
    elif argv[1] == '-blog':
        for name in os.listdir(os.getcwd()):
            try:
                if name[-3:] == 'jpg' or name[-3:] == 'JPG':
                    image = Image.open(ABSFILEPATH + '/' + name)
                    blogsize(image, name)
            except Exception:
                pass
    elif argv[1] == '-flickr':
        # for root, dirs, files in os.walk(ABSFILEPATH.join(argv[1])):
        for name in os.listdir(os.getcwd()):
            try:
                if name[-3:] == 'jpg' or name[-3:] == 'JPG':
                    image = Image.open(ABSFILEPATH + '/' + name)
                    flickrsize(image, name)
            except Exception:
                pass
    else:
        print 'unknown parameter!'

def on_stitch(self, *args):
    self.stat("Generating stitched image...")
    x1, y1, x2, y2 = [self.adj_x1.value, self.adj_y1.value,
                      self.adj_x2.value, self.adj_y2.value]
    squish = self.adj_squish.value
    workwith = []
    for fname in os.listdir(self.folder):
        if ".jpg" in fname or ".bmp" in fname \
           or ".png" in fname or ".tif" in fname:
            if not "stitched" in fname and not self.fname in fname:
                workwith.append(fname)
                print "I SEE:", self.folder, fname
    workwith.sort()
    im = Image.new("RGB", ((x2 - x1) * len(workwith), y2 - y1))
    try:
        os.mkdir(self.folder + "/stitched/")
    except:
        pass  # folder is already there
    for i in range(len(workwith)):
        print "Loading", workwith[i]
        im2 = Image.open(self.folder + workwith[i])
        im2 = im2.crop((x1, y1, x2, y2))
        if self.togglebutton2.get_active() == True:
            self.stat("saving %03d_" % i + self.fname + "...")
            im2.save(self.folder + "/stitched/%03d_" % i + self.fname, quality=90)
        im.paste(im2, (i * (x2 - x1), 0))
    self.stat("saving large image...")
    im.save(self.folder + "/stitched/" + self.fname, quality=90)
    im = im.resize((im.size[0] / squish, im.size[1]), Image.ANTIALIAS)
    im.save(self.folder + "/stitched/squished_" + self.fname, quality=90)
    f = open("stitchlog.txt", 'w')
    f.write(str([x1, y1, x2, y2, self.imfilename]))
    f.close()
    print "DONE"
    self.stat("COMPLETE!!! Launching folder containing stitched images...")
    os.startfile(self.folder + "/stitched/")

def __init__(self, width, height=None):
    '''
    Takes either a single string for a filename, or the width and height
    of an empty picture.
    '''
    if height:
        self.image = Image.new('RGB', (width, height))
        self.title = 'Picture'
        self.width = width
        self.height = height
    else:
        self.image = Image.open(width)  # actually a filename
        self.title = width
        self.width, self.height = self.image.size

    # Default values for pen
    self.pen_color = (0, 0, 0)
    self.pen_position = (0, 0)
    self.pen_width = 1
    self.pen_rotation = 0

    # Pixel data of the image
    self.pixel = self.image.load()

    # Draw object of the image
    self.draw = ImageDraw.Draw(self.image)

    # The main window, and associated widgets.
    self.root = None
    self.canvas = None

def getHorizontalAngleForText(image):
    width, height = image.size
    longest_axis = int(math.ceil(math.sqrt(width ** 2 + height ** 2)))
    collage = Image.new('RGBA', (longest_axis, longest_axis), 'white')
    test_image = image.copy().convert('RGBA')
    y_to_paste = (longest_axis - test_image.size[1]) // 2
    x_to_paste = (longest_axis - test_image.size[0]) // 2
    original_paste = collage.copy()
    original_paste.paste(test_image, (x_to_paste, y_to_paste), test_image)
    previous_text_begin = getTextBeginHeight(original_paste)
    paste_image = original_paste.copy()
    angle = 0
    # Rotate in 5-degree steps until the detected text stops rising (or
    # disappears), then step back to the last usable angle.
    while angle > -360:
        current_text_begin = getTextBeginHeight(paste_image)
        if previous_text_begin > current_text_begin or current_text_begin == -1:
            break
        previous_text_begin = getTextBeginHeight(paste_image)
        angle -= 5
        paste_image = original_paste.rotate(angle)
        collage = Image.new('RGBA', paste_image.size, 'white')
        collage.paste(paste_image, (0, 0), paste_image)
        paste_image = collage
    if angle:
        return angle + 5
    return angle

def make_tile(fname, out, rot=0):
    image = prep_image(fname)
    image = image.rotate(rot, expand=1)

    # chop the extra pixels added by expanding
    d = quality * 4
    dx, dy = image.size
    image = image.crop((d, d, dx - d, dy - d)).copy()

    # Scale Y-axis
    # convert -resample is much better than PIL.
    # ideally we'd use GIMP ...
    image.save("in.png")
    os.system("convert in.png -resample %dx%d out.png" %
              (size[0] * quality, size[1] * quality))
    image = Image.open("out.png")

    # Apply tilemask
    w, h = image.size
    tmask, imask = make_tilemasks((w, h))
    tile = Image.new("RGBA", (w, h), (0, 0, 0, 0))
    tile.paste(image, tmask)

    # Again, convert -resize is better ...
    tile.save("in.png")
    os.system("convert in.png -resize %dx%d out.png" % (size))
    image = Image.open("out.png")
    image.save(out)

def test_r(self):
    """
    A line of circles (varying r) across the image, each of which produces
    a strong response.
    """
    im = Image.new("L", (1000, 200), (0))
    npoints = 18
    xs = [(100 + 50 * t) for t in range(npoints)]
    for t in range(npoints):
        r = (2.0 + 0.5 * t)
        circle(im, xs[t], 100, r, 248)

    # Add noise into the image. If the image does not contain noise,
    # then the non maximum suppression can - like Buridan's ass - be
    # presented with two adjacent responses that are equal, and reject
    # both because neither is a maximum. The chance of this happening
    # with real-world images is very remote indeed.
    noise = Image.fromstring("L", (1000, 200), "".join(
        [chr(random.randrange(0, 8)) for i in range(1000 * 200)]))
    im = ImageChops.add(im, noise)

    result = sorted([(x, y, s, response) for (x, y, s, response)
                     in simple(im, 7, 1.0, 999999.0, 999999.0)][-npoints:])

    # Must have npoints
    self.assertEqual(len(result), npoints)

    # X coordinates must be within 1 point of expected
    for i, (x, y, s, r) in enumerate(result):
        self.assert_(abs(x - xs[i]) <= 1)

    # Already ordered by x, so scale should be increasing
    ss = [s for (x, y, s, r) in result]
    self.assertEqual(ss, sorted(ss))

def onSave(self):
    file_opt = options = {}
    options['filetypes'] = [('Image Files', '*.tif *.jpg *.png')]
    options['initialfile'] = 'myImage.jpg'
    options['parent'] = self.parent
    fname = tkFileDialog.asksaveasfilename(**file_opt)
    Image.fromarray(np.uint8(self.Ilast)).save(fname)

def write(self, filename, debug=False):
    if Image is None:
        raise JasyError("Missing PIL to create sprite sheets")

    img = Image.new('RGBA', (self.width, self.height))
    draw = ImageDraw.Draw(img)

    # draw.rectangle((0, 0, self.width, self.height), fill=(255, 255, 0, 255))

    # Load images and pack them in
    for block in self.blocks:
        res = Image.open(block.image.src)

        x, y = block.fit.x, block.fit.y
        if block.rotated:
            debug('%s is rotated' % block.image.src)
            res = res.rotate(90)

        img.paste(res, (x, y))
        del res

        if debug:
            x, y, w, h = block.fit.x, block.fit.y, block.w, block.h
            draw.rectangle((x, y, x + w, y + h),
                           outline=(0, 0, 255, 255) if block.rotated else (255, 0, 0, 255))

    if debug:
        for i, block in enumerate(self.packer.getUnused()):
            x, y, w, h = block.x, block.y, block.w, block.h
            draw.rectangle((x, y, x + w, y + h), fill=(255, 255, 0, 255))

    img.save(filename)

def partC():
    mag_image = Image.new('L', (256, 256))
    pix_mag = mag_image.load()
    for y in xrange(256):
        for x in xrange(256):
            pix_mag[y, x] = (sin((2 * pi * 2 * x) / 256) * 127) + 127
    mag_image.show()
    mag_image = mag_image.rotate(45)
    mag_image.show()
    mag_image.save("partC.png")

    f = []
    for y in xrange(256):
        f.append([])
        for x in xrange(256):
            f[y].append(mag_image.getpixel((y, x)))
    sp = np.fft.fft2(f)

    mag_image = Image.new('L', (256, 256))
    pix_mag = mag_image.load()
    for y in xrange(256):
        for x in xrange(256):
            mag = sqrt(sp[y][x].real ** 2 + sp[y][x].imag ** 2)
            pix_mag[x, y] = (mag * 127) + 127
    mag_image.show()
    mag_image.save("F of partC.png")

def toimage(im):
    if hasattr(im, "toUtf8"):
        # FIXME - is this really the best way to do this?
        im = unicode(im.toUtf8(), "utf-8")
    if Image.isStringType(im):
        im = Image.open(im)
    return ImageQt(im)

def from_data_with_min_max(cls, slug, data, extent, min_value, max_value, cdict=None):
    """
    Create GeoImage from slug and data.
    """
    tmp_base = tempfile.mktemp()
    # print('tmp_base: %s' % tmp_base)
    # print('step 1')

    # Step 1: save png + pgw in RD
    if cdict is None:
        cdict = {
            'red': ((0.0, 51. / 256, 51. / 256),
                    (0.5, 237. / 256, 237. / 256),
                    (1.0, 83. / 256, 83. / 256)),
            'green': ((0.0, 114. / 256, 114. / 256),
                      (0.5, 245. / 256, 245. / 256),
                      (1.0, 83. / 256, 83. / 256)),
            'blue': ((0.0, 54. / 256, 54. / 256),
                     (0.5, 170. / 256, 170. / 256),
                     (1.0, 83. / 256, 83. / 256)),
        }
    colormap = mpl.colors.LinearSegmentedColormap('something', cdict, N=1024)
    normalize = mpl.colors.Normalize(vmin=min_value, vmax=max_value)
    rgba = colormap(normalize(data), bytes=True)
    # rgba[:,:,3] = np.where(rgba[:,:,0], 153, 0)
    if 'depth' in slug:
        # Make transparent where depth is zero or less
        rgba[:, :, 3] = np.where(np.greater(data, 0), 255, 0)
    Image.fromarray(rgba).save(tmp_base + '.png', 'PNG')
    write_pgw(tmp_base + '.pgw', extent)

    return cls.from_rd_png(tmp_base, slug, extent)

def _getBounds(size, glDispID, filename, scale, rotation, partRotation):

    # Clear the drawing buffer with white
    glClearColor(1.0, 1.0, 1.0, 1.0)
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)

    # Draw the piece in black
    glColor3f(0, 0, 0)
    adjustGLViewport(0, 0, size, size)
    rotateToView(rotation, scale)
    rotateView(*partRotation)
    glCallList(glDispID)

    # Use PIL to find the image's bounding box (sweet)
    pixels = glReadPixels(0, 0, size, size, GL_RGB, GL_UNSIGNED_BYTE)
    img = Image.fromstring("RGB", (size, size), pixels)

    bg = bgCache.setdefault(size, Image.new("RGB", img.size, (255, 255, 255)))
    box = ImageChops.difference(img, bg).getbbox()

    if box is None:
        return (0, 0, 0, 0, 0, 0)  # Rendered entirely out of frame

    # if filename:
    #     import os
    #     rawFilename = os.path.splitext(os.path.basename(filename))[0]
    #     img.save("C:\\lic\\tmp\\%s_%dx%d.png" % (rawFilename, box[2] - box[0], box[3] - box[1]))
    #     print filename + "box: " + str(box if box else "No box")

    # Find the bottom left corner inset, used for placing PLIItem quantity labels
    data = img.load()
    leftInset = _getLeftInset(data, size, box[1])
    bottomInset = _getBottomInset(data, size, box[0])
    return box + (leftInset - box[0], bottomInset - box[1])

def apply_watermark(im, mark, position, opacity=1):
    """Adds a watermark to an image."""
    if opacity < 1:
        mark = reduce_opacity(mark, opacity)
    if im.mode != 'RGBA':
        im = im.convert('RGBA')
    # create a transparent layer the size of the image and draw the
    # watermark in that layer.
    layer = Image.new('RGBA', im.size, (0, 0, 0, 0))
    if position == 'tile':
        for y in range(0, im.size[1], mark.size[1]):
            for x in range(0, im.size[0], mark.size[0]):
                layer.paste(mark, (x, y))
    elif position == 'scale':
        # scale, but preserve the aspect ratio
        ratio = min(float(im.size[0]) / mark.size[0],
                    float(im.size[1]) / mark.size[1])
        w = int(mark.size[0] * ratio)
        h = int(mark.size[1] * ratio)
        mark = mark.resize((w, h))
        layer.paste(mark, ((im.size[0] - w) // 2, (im.size[1] - h) // 2))
    elif position == 'bd':
        # bottom right corner
        x = im.size[0] - mark.size[0]
        y = im.size[1] - mark.size[1]
        layer.paste(mark, (x, y))
    else:
        layer.paste(mark, position)
    # composite the watermark with the layer
    return Image.composite(layer, im, layer)

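# A minimal usage sketch for apply_watermark; the file names are illustrative
# assumptions, not part of the original code.
photo = Image.open("photo.jpg")
logo = Image.open("logo.png")
apply_watermark(photo, logo, 'scale', opacity=0.5).save("photo_marked.png")
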
def getTemplate(img_test):
    im1 = Image.open('Template1/0130')
    im2 = Image.open('Template1/3214')
    im3 = Image.open('Template1/7564')
    im4 = Image.open('Template1/7849')
    bw_im1 = im1.convert('1')
    bw_im2 = im2.convert('1')
    bw_im3 = im3.convert('1')
    bw_im4 = im4.convert('1')
    # bw_im1 = numpy.ndarray(im1)

    # Crop one 10x10 glyph for each digit 0-9 out of the template images.
    dic_0 = np.asarray(bw_im1.crop((0, 0, 10, 10)))
    dic_1 = np.asarray(bw_im1.crop((10, 0, 20, 10)))
    dic_2 = np.array(bw_im2.crop((10, 0, 20, 10)))
    dic_3 = np.array(bw_im2.crop((0, 0, 10, 10)))
    dic_4 = np.array(bw_im2.crop((30, 0, 40, 10)))
    dic_5 = np.array(bw_im3.crop((10, 0, 20, 10)))
    dic_6 = np.array(bw_im3.crop((20, 0, 30, 10)))
    dic_7 = np.array(bw_im3.crop((0, 0, 10, 10)))
    dic_8 = np.array(bw_im4.crop((10, 0, 20, 10)))
    dic_9 = np.array(bw_im4.crop((30, 0, 40, 10)))
    Dict = [dic_0, dic_1, dic_2, dic_3, dic_4, dic_5, dic_6, dic_7, dic_8, dic_9]

    dic_0 = np.asarray(dic_0, dtype='bool')
    dic_1 = np.asarray(dic_1, dtype='bool')
    a = dic_0 ^ dic_1
    b = [dic_0, dic_1]
    c = b[0]
    dic_2 = np.asarray(dic_2, dtype='int32')

    img_test = img_test.convert('1')
    for i in range(4):
        subImg = img_test.crop((i * 10, 0, (i + 1) * 10, 10))

def load(n=None):
    try:
        if not n:
            n = nlist[randint(0, J)]
        im = Image.open(pjoin(basedir, "dinocomics%06i.png" % n))
        wxtest = piltowx(im)  # error: interlaced PNGs
        # print n, fromimage(im).shape
        # assert(fromimage(im).shape == (500,735,3)), "Not the right shape"
        # print im.size
        while im.size != (735, 500):
            # ignore wrong-sized images (guest comics)
            # print im.size
            # copyPanel(load(1), im, 2)
            n = nlist[randint(0, J)]
            im = Image.open(pjoin(basedir, "dinocomics%06i.png" % n))
            wxtest = piltowx(im)
        return im
    # except AssertionError
    except Exception, e:
        print "Load Error: %i" % n, e
        # import sys
        # sys.exit()
        # if n < J:
        n = n % nlist[-1]
        time.sleep(1)
        return load(n + 1)

def procesamiento_imagen():
    ## Convert to grayscale
    img = Image.open(rostro).convert('LA')
    img.save('greyscale.png')

    ## Resize
    foo = Image.open("greyscale.png")
    foo = foo.resize((256, 256), Image.ANTIALIAS)
    foo.save("greyscale.png", optimize=True, quality=95)

    ## Remove noise
    img = cv2.imread('greyscale.png')
    dst = cv2.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 21)

    ## Canny edge detector
    img = cv2.imread('greyscale.png', 0)
    edges = cv2.Canny(img, 256, 256)

    plt.subplot(121), plt.imshow(img, cmap='gray')
    plt.title('Original Image'), plt.xticks([]), plt.yticks([])
    plt.subplot(122), plt.imshow(edges, cmap='gray')
    plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
    plt.show()