def save_thumbnail(cls, filename, image, content_type, thumbnail_size=None, thumbnail_meta=None, square=False):
    """Create a thumbnail record from *image* and store its encoded bytes.

    filename/content_type/thumbnail_meta are forwarded to the ``cls``
    constructor; pixel data is written through ``thumbnail.wfile()``.
    If ``square`` is true the image is first centred on a square canvas
    (transparent when the source has a 'transparency' entry, else white);
    if ``thumbnail_size`` is given the result is downscaled to fit.
    Returns the new ``cls`` instance.
    """
    format = image.format
    # PIL reports size as (width, height).  The original bound these names
    # the other way round, which made the centring branches and comments
    # read backwards even though the arithmetic came out right.
    width, height = image.size
    if square and height != width:
        side = max(width, height)
        if 'transparency' in image.info:
            new_image = Image.new('RGBA', (side, side))
        else:
            new_image = Image.new('RGB', (side, side), 'white')
        # // keeps the paste coordinates integral on Python 3 as well.
        if width < height:
            # image is taller than wide, so center horizontally
            new_image.paste(image, ((side - width) // 2, 0))
        elif width > height:
            # image is wider than tall, so center vertically
            new_image.paste(image, (0, (side - height) // 2))
        image = new_image
    if thumbnail_size:
        image.thumbnail(thumbnail_size, Image.ANTIALIAS)
    thumbnail_meta = thumbnail_meta or {}
    thumbnail = cls(
        filename=filename,
        content_type=content_type,
        **thumbnail_meta)
    with thumbnail.wfile() as fp_w:
        if 'transparency' in image.info:
            # preserve palette/alpha transparency when re-encoding
            image.save(fp_w, format, transparency=image.info['transparency'])
        else:
            image.save(fp_w, format)
    return thumbnail
def partC():
    # Build a 256x256 greyscale image of a horizontal sine grating
    # (2 cycles across the width), rotate it 45 degrees, then render the
    # magnitude of its 2-D FFT.  Python 2 code (xrange; sin/pi/sqrt and
    # np are file-level imports).
    mag_image = Image.new('L', (256, 256))
    pix_mag = mag_image.load()
    for y in xrange(256):
        for x in xrange(256):
            # NOTE(review): pixel access is indexed [y, x] here but [x, y]
            # further down; on a square image this transposes the data
            # rather than failing — confirm intended.
            pix_mag[y, x] = (sin((2*pi*2*x)/256)*127)+127
    mag_image.show()
    mag_image = mag_image.rotate(45)
    mag_image.show()
    mag_image.save("partC.png")
    # Copy the rotated image into a nested list for numpy's FFT.
    f = []
    for y in xrange(256):
        f.append([])
        for x in xrange(256):
            f[y].append(mag_image.getpixel((y, x)))
    sp = np.fft.fft2(f)
    # Render the spectrum magnitudes back into an image.  The values can
    # greatly exceed 255; whatever PIL does with out-of-range assignments
    # (clamp vs wrap) determines the output — TODO confirm.
    mag_image = Image.new('L', (256, 256))
    pix_mag = mag_image.load()
    for y in xrange(256):
        for x in xrange(256):
            mag = sqrt(sp[y][x].real**2 + sp[y][x].imag**2)
            pix_mag[x, y] = (mag*127)+127
    mag_image.show()
    mag_image.save("F of partC.png")
def generateActivatedItemIcons():
    # Post-process every .png in icons_activated/ into three variants:
    #   "<name>-activated.png"      - 51% darkened
    #   "<name>-activated-done.png" - 29% darkened
    #   "<name>-activated-half.png" - left half darker than the right
    # Each variant also gets a 3px grey vertical line drawn from the image
    # centre up to the top edge.
    icons = [ f for f in listdir("icons_activated/") if isfile(join("icons_activated/",f)) and f.split(".")[-1] == "png" ]
    for i in icons:
        # base name without the .png extension
        filename = ".".join(i.split(".")[0:-1])
        im = Image.open("icons_activated/"+i).convert("RGBA")
        color_layer = Image.new('RGBA', im.size, (0, 0, 0))
        line_layer = Image.new('RGBA', im.size, (109, 109, 109))
        # mask for the vertical centre line (semi-opaque, 3px wide)
        alpha_mask = Image.new('L', im.size, 0)
        alpha_mask_draw = ImageDraw.Draw(alpha_mask)
        alpha_mask_draw.line([(im.size[0]//2, im.size[1]//2), (im.size[0]//2, 0)], fill=186, width = 3)
        # 51% blend toward black -> "-activated"
        newim = Image.blend(im, color_layer, 0.51)
        newim = Image.composite(line_layer, newim, alpha_mask)
        newim.save("icons_activated/"+filename+"-activated.png")
        # 29% blend toward black -> "-activated-done"
        newim = Image.blend(im, color_layer, 0.29)
        newim = Image.composite(line_layer, newim, alpha_mask)
        newim.save("icons_activated/"+filename+"-activated-done.png")
        # "-activated-half": darken the two halves by different amounts via
        # a mask with two opacity levels
        rect_layer = Image.new('RGBA', im.size, (0, 0, 0))
        rect_alpha_mask = Image.new("L", im.size, 0)
        rect_alpha_mask_draw = ImageDraw.Draw(rect_alpha_mask)
        rect_alpha_mask_draw.rectangle((0, 0, im.size[0]//2, im.size[1]), fill=131)
        rect_alpha_mask_draw.rectangle((im.size[0]//2+1, 0, im.size[0], im.size[1]), fill=75)
        newim = Image.composite(rect_layer, im, rect_alpha_mask)
        newim = Image.composite(line_layer, newim, alpha_mask)
        newim.save("icons_activated/"+filename+"-activated-half.png")
def test_surface_area():
    # Manual/visual test for imagescan.surface_area(): prints the measured
    # area for a series of constructed images so the values can be checked
    # by eye (Python 2 print statements; bw_img/imagescan are file-level).
    im = bw_img()
    print "black and white image has surface area %s" % imagescan.surface_area(im.load())
    # single white pixel, then a second one on the edge
    im2 = Image.new('L', (20, 20), 0)
    pix = im2.load()
    pix[5,5] = 255
    print "with 5,5 surface area %s" % imagescan.surface_area(pix)
    pix[0,5] = 255
    print "adding 0,5 surface area %s" % imagescan.surface_area(pix)
    # grow a small block pixel by pixel and watch the area evolve
    for y in range(5,7):
        for x in range(1,5):
            pix[x,y] = 255
            print "after (%s,%s) surface area %s" % (x, y, imagescan.surface_area(pix))
    # checkerboard: alternating pixels maximise the perimeter measure
    im3 = Image.new('L', (50, 50), 0)
    pix = im3.load()
    for y in range(50):
        for x in range(50):
            if (x + y) % 2:
                pix[x,y] = 255
    im3.save('/tmp/grid.png')
    # measure growing square crops of the checkerboard and print the
    # surface-to-size ratios
    for i in range(1,21):
        im4 = im3.crop((0,0,i,i))
        # load() realises the crop so the raw pixel_access object exists
        im4.load()
        #print im4, im4.load(), im4.load()
        #print dir(im4.im)
        a = imagescan.surface_area(im4.im.pixel_access())
        print "%s square surface %s, area %s, linear ratio %s square ration %s" % (i, a, i*i, float(a)/i, float(a)/(i*i) )
def export_icon(icon, size, filename, font, color):
    """Render the glyph ``icons[icon]`` centred on a size x size transparent
    RGBA canvas using the TrueType file ``font``, and save it to ``filename``.
    """
    image = Image.new("RGBA", (size, size), color=(0, 0, 0, 0))
    draw = ImageDraw.Draw(image)
    # Initialize font (the `font` parameter is a file path)
    font = ImageFont.truetype(font, size)
    # Determine the dimensions of the icon
    width, height = draw.textsize(icons[icon], font=font)
    draw.text(((size - width) / 2, (size - height) / 2), icons[icon], font=font, fill=color)
    # Crop to the glyph's ink and compute the border needed to re-centre it.
    # BUG fix: borderw/borderh were previously assigned only inside the
    # `if`, raising NameError for glyphs that render nothing
    # (image.getbbox() returns None on an empty image).
    borderw = borderh = 0
    bbox = image.getbbox()
    if bbox:
        image = image.crop(bbox)
        borderw = int((size - (bbox[2] - bbox[0])) / 2)
        borderh = int((size - (bbox[3] - bbox[1])) / 2)
    # Create background image
    bg = Image.new("RGBA", (size, size), (0, 0, 0, 0))
    bg.paste(image, (borderw, borderh))
    # Save file
    bg.save(filename)
def main():
    """CLI entry point: "encode" hides one image inside another, "decode"
    recovers the hidden image.

    BUG fix: the original opened sys.argv[1] -- the literal mode word
    ("decode"/"encode") -- as an image path, so both branches crashed on
    a guaranteed IOError.  The file arguments start at sys.argv[2]:
        prog decode <encoded-image> <output>
        prog encode <secret-image> <carrier-image> <output>
    (Argument layout inferred from the original save() indices -- verify
    against the project's usage/README.)
    """
    pool = multiprocessing.Pool()  # For the parallel map()
    if sys.argv[1] == "decode":
        source = Image.open(sys.argv[2])
        print ("Decoding the encoded...")
        secret = decode (sys.argv[2], 3, 2, 3)
        output = Image.new("L", source.size)
        output.putdata(secret)
        output.save(sys.argv[3])
    elif sys.argv[1] == "encode":
        im = Image.open(sys.argv[2])
        print ("Chopping Bits...")
        secret = hidden(sys.argv[2])
        print ("Cooking the Pot...")
        messenger = carrier(sys.argv[3])
        print ("Potting the Bits...")
        final = zip (secret, messenger)
        # In the first versions the variables used a disproportionate amount of
        # RAM
        del (secret)
        del (messenger)
        final = list (pool.map (add, final))
        final = list (pool.map (tuple, final))
        output = Image.new("RGB", im.size)
        output.putdata(final)
        output.save(sys.argv[4])
def getHorizontalAngleForText(image):
    """Rotate *image* in -5 degree steps (up to a full turn) until the text
    located by getTextBeginHeight() stops descending or disappears, and
    return the angle that horizontalises the text.
    """
    width, height = image.size
    # BUG fix: math.sqrt returns a float and PIL requires integer sizes;
    # round the diagonal up so any rotation of the image still fits.
    longest_axis = int(math.ceil(math.sqrt(width ** 2 + height ** 2)))
    collage = Image.new('RGBA', (longest_axis, longest_axis), 'white')
    test_image = image.copy().convert('RGBA')
    # centre the image on the square canvas (// keeps paste offsets ints)
    y_to_paste = (longest_axis - test_image.size[1]) // 2
    x_to_paste = (longest_axis - test_image.size[0]) // 2
    original_paste = collage.copy()
    original_paste.paste(test_image, (x_to_paste, y_to_paste), test_image)
    previous_text_begin = getTextBeginHeight(original_paste)
    paste_image = original_paste.copy()
    angle = 0
    while angle > -360:
        current_text_begin = getTextBeginHeight(paste_image)
        # stop once the text starts moving back up, or is no longer found
        if previous_text_begin > current_text_begin or current_text_begin == -1:
            break
        previous_text_begin = getTextBeginHeight(paste_image)
        angle -= 5
        paste_image = original_paste.rotate(angle)
        # re-flatten onto a white canvas so rotation corners stay opaque
        collage = Image.new('RGBA', paste_image.size, 'white')
        collage.paste(paste_image, (0, 0), paste_image)
        paste_image = collage
    if angle:
        # the loop overshoots by one 5-degree step before detecting the stop
        return angle + 5
    return angle
def icons(request, project_name):
    """Django view: return the project's icon mask recoloured to the
    requested ``color``, caching the rendered PNG on disk."""
    try:
        get = request.GET
        color = get['color'].split('/')[-1]
    except KeyError:  # covers Django's MultiValueDictKeyError too
        return HttpResponseNotFound()
    cache = "%s/cache/%s_icons.png" % (settings.DATA_DIR, color)
    if os.path.isfile(cache):
        response = HttpResponse(mimetype="image/png")
        # BUG fix: read the cached PNG in binary mode ('rb'); the original
        # used text mode 'r', which corrupts binary data on platforms with
        # newline translation.  `with` guarantees the handle is closed.
        with open(cache, 'rb') as cache_file:
            response.write(cache_file.read())
        return response
    try:
        im = Image.open("%s/icons_mask.png" % settings.ICONS_DIR)
    except IOError:  # narrowed from bare except: only a missing/bad image
        return HttpResponseNotFound()
    # composite a solid colour over transparency through the mask
    bg = Image.new('RGB', im.size, color)
    tr = Image.new('RGBA', im.size, (0, 0, 0, 0))
    result = Image.composite(bg, tr, im)
    response = HttpResponse(mimetype="image/png")
    result.save(cache)
    result.save(response, 'PNG')
    return response
def color_bg_fg(image, bg_color, fg_color):
    '''change transparent background to bg_color and change everything
    non-transparent to fg_color (the image itself acts as the mask)'''
    solid_fg = Image.new('RGBA', image.size, fg_color)
    solid_bg = Image.new('RGBA', image.size, bg_color)
    return Image.composite(solid_fg, solid_bg, image)
def on_selection(self, index_range):
    # Render the names selected in [begin, end) onto a tall transparent
    # image, one per vertical block, and show it in a rebuilt Tk canvas.
    # NOTE(review): the division-based layout assumes Python 2 integer
    # division; under Python 3 these become floats and break draw.text.
    begin, end = index_range
    selected_names = [self.ordered_names[i] for i in range(begin, end)]
    transparent = (0, 0, 0, 0)
    black = (0, 0, 0, 1)
    # create a dummy image and get some required dimensions
    dummy = Image.new('RGBA', (10, 10), transparent)
    draw = ImageDraw.Draw(dummy)
    width_height_pairs = [draw.textsize(name) for name in selected_names]
    max_width = max(width for width, height in width_height_pairs)
    max_height = max(height for width, height in width_height_pairs)
    del draw
    # draw the gene names on a new image
    image_width = max_width
    image_height = self.npixels
    im = Image.new('RGBA', (image_width, image_height), transparent)
    draw = ImageDraw.Draw(im)
    n = len(selected_names)
    # vertical space allotted to each name
    blocksize = self.npixels / n
    for i, name in enumerate(selected_names):
        w, h = draw.textsize(name)
        x = 0
        # centre each name vertically within its block
        dy = max((blocksize - h) / 2, 0)
        y = (i * self.npixels) / n + dy
        draw.text((x, y), name, fill=black)
    del draw
    # create the tkinter gene name image
    self.tkim = ImageTk.PhotoImage(im)
    # remake the canvas with the new image
    self.canvas.destroy()
    self.canvas = Tkinter.Canvas(self.parent, width=image_width, height=image_height)
    self.canvas.create_image(0, 0, image=self.tkim, anchor=Tkinter.NW)
    self.app.repack()
def texture(request, project_name):
    """Django view: return texture ``filename`` blended onto ``bg_color``
    at ``percent`` opacity, caching the rendered PNG on disk."""
    try:
        get = request.GET
        filename = get['filename'].split('/')[-1]
        bg_color = get['bg_color'].split('/')[-1]
        percent = int(get['percent'])/100.0
    except (KeyError, ValueError):  # missing parameter / non-numeric percent
        return HttpResponseNotFound()
    # check cache for previously computed image
    cache = "%s/cache/%s_%s_%s" % (settings.DATA_DIR, bg_color, percent, filename)
    if os.path.isfile(cache):
        response = HttpResponse(mimetype="image/png")
        # BUG fix: read the cached PNG in binary mode ('rb'); text mode 'r'
        # corrupts binary data on platforms with newline translation.
        # `with` guarantees the handle is closed.
        with open(cache, 'rb') as cache_file:
            response.write(cache_file.read())
        return response
    try:
        path = settings.TEXTURES_DIR + '/' + filename
        im = Image.open(path)
    except IOError:  # narrowed from bare except: missing/unreadable texture
        return HttpResponseNotFound()
    # fade the texture over full transparency, then flatten onto bg_color
    transparent = Image.new('RGBA', im.size, (0, 0, 0, 0))
    blended = Image.blend(transparent, im, percent)
    bg = Image.new('RGBA', im.size, bg_color)
    bg.paste(blended, None, blended)
    response = HttpResponse(mimetype="image/png")
    bg.save(cache)
    bg.save(response, 'PNG')
    return response
def export_frame_all_elements(self, filename):
    # Compose one PNG of the whole frame: the schematic (drawn into a wx
    # bitmap), the matplotlib plot (saved via savefig) and the VFData
    # widget (screen-captured), arranged plot+capture on top and the
    # schematic underneath.
    #draw schematic to buffer 1
    memorydc = wx.MemoryDC()
    size = self.SchematicPanel.GetSize()
    bitmap1 = wx.EmptyBitmap(size[0], size[1])
    memorydc.SelectObject(bitmap1)
    memorydc.SetBackground(wx.Brush('white'))
    memorydc.Clear()
    dc = wx.GCDC(memorydc)
    self.draw_schematic(dc)
    memorydc.SelectObject(wx.NullBitmap)
    memorydc.Destroy()
    #bitmap1.SaveFile(filename,wx.BITMAP_TYPE_PNG)
    import Image
    # convert the wx bitmap into a PIL image
    wximg = wx.ImageFromBitmap(bitmap1)
    img1 = Image.new('RGB', (wximg.GetWidth(), wximg.GetHeight()))
    img1.fromstring(wximg.GetData())
    #draw plots to buffer 2
    import StringIO
    imgdata = StringIO.StringIO()
    self.plot.figure.savefig(imgdata, dpi=75, format='png')
    imgdata.seek(0)
    img2 = Image.open(imgdata)
    #import VFData to buffer 3
    #TODO: do this in a more reliable way!
    import sys
    if(sys.platform == 'darwin'):
        #workaround for macosx
        # wx Blit capture is unreliable here, so grab the whole screen with
        # `screencapture` and crop the widget's on-screen rectangle out.
        import os
        os.system('screencapture -x __tmp1.png')
        tmpimg = Image.open('__tmp1.png')
        os.system('rm __tmp1.png')
        # pos = self.GetRect()
        x, y = self.ClientToScreen((0, 0))
        rect = self.VFData.GetRect()
        # img3 = tmpimg.crop((rect.x+pos.x,rect.y+pos.y+border.height,rect.x+pos.x+rect.width,rect.y+pos.y+rect.height+border.height))
        img3 = tmpimg.crop((rect.x+x, rect.y+y, rect.x+x+rect.width, rect.y+y+rect.height))
    else:
        # other platforms: Blit the widget's client area into a bitmap
        rect = self.VFData.GetRect()
        dc = wx.ClientDC(self.VFData)
        bmp = wx.EmptyBitmap(rect.width, rect.height)
        memDC = wx.MemoryDC()
        memDC.SelectObject(bmp)
        memDC.Blit( 0, 0, rect.width, rect.height, dc, 0, 0)
        memDC.SelectObject(wx.NullBitmap)
        import Image
        wximg = wx.ImageFromBitmap(bmp)
        img3 = Image.new('RGB', (wximg.GetWidth(), wximg.GetHeight()))
        img3.fromstring(wximg.GetData())
    # final canvas: wide enough for plot+capture side by side, tall enough
    # for that row plus the schematic
    width = img1.size[0]
    if(width < img2.size[0]+img3.size[0]):
        width = img2.size[0]+img3.size[0]
    height = img1.size[1]+img2.size[1]
    final_image = Image.new('RGB', (width, height), 'white')
    final_image.paste(img2, (0, 0))
    final_image.paste(img3, (img2.size[0], 0))
    final_image.paste(img1, (0, img2.size[1]))
    final_image.save(filename)
def __init__(self, im, alpha, start, bgcolours): """im: the image being parsed alpha: a alpha layer of the image start: point where the new blob has been found""" #it doesn't really need a copy yet, (because it doesn't crop) self.im = im mask = Image.new('L', im.size, 0) mpix = mask.load() pix = alpha.load() # blank out the image in alpha, draw it in mask, and return the edge coordinates. self.pos, self.colour, self.volume, self.obviousness_best, self.obviousness_sum, og = \ imagescan.get_element(pix, mpix, im.load(), start[0], start[1], bgcolours) self.obviousness_global = og ** 0.5 #print "obviousness is best:%s sum:%s avg: %s; global: %s, sqrt-> %s" %\ # (self.obviousness_best, self.obviousness_sum, # self.obviousness_sum / self.volume, og, self.obviousness_global) #only save the containing rectangle self.mask = mask.crop(self.pos) #NB. in PIL 1.1.6, load()ing a cropped image doesn't return the pixel_access object self.mask.load() self.mpix = self.mask.im.pixel_access() isize = (self.mask.size[0] + imagescan.INFLUENCE_DIAMETER, self.mask.size[1] + imagescan.INFLUENCE_DIAMETER) self.influence = Image.new('L', isize, 0) self.ipix = self.influence.load() # work out influence based on mask imagescan.influence_map(self.mpix, self.ipix) self.group = set([self])
def halftone(self, im, percentage, cmyk, sample, scale, angles):
    '''Returns list of half-tone images for cmyk image. sample (pixels),
    determines the sample box size from the original image. The maximum
    output dot diameter is given by sample * scale (which is also the
    number of possible dot sizes). So sample=1 will presevere the original
    image resolution, but scale must be >1 to allow variation in dot size.'''
    cmyk = cmyk.split()  # one greyscale band per C/M/Y/K channel
    dots = []
    for channel, angle in zip(cmyk, angles):
        # With a falsy `percentage`, the fourth (K) plate is emitted blank
        # instead of being screened.
        if not percentage and len(dots) == 3:
            size = channel.size[0]*scale, channel.size[1]*scale
            half_tone = Image.new('L', size)
            dots.append(half_tone)
            continue
        # rotate the channel to its screen angle and smooth before sampling
        channel = channel.rotate(angle, expand=1)
        channel = channel.filter(ImageFilter.MedianFilter(5))
        size = channel.size[0]*scale, channel.size[1]*scale
        half_tone = Image.new('L', size)
        draw = ImageDraw.Draw(half_tone)
        for x in xrange(0, channel.size[0], sample):
            for y in xrange(0, channel.size[1], sample):
                # diameter ~ sqrt(intensity) so dot AREA tracks the value
                diameter = (channel.getpixel((x, y)) / 255.0)**0.5
                edge = 0.5*(1-diameter)
                x_pos, y_pos = (x+edge)*scale, (y+edge)*scale
                box_edge = sample*diameter*scale
                draw.ellipse((x_pos, y_pos, x_pos + box_edge, y_pos + box_edge), fill=255)
        # rotate back and crop the expanded canvas to the scaled image size
        half_tone = half_tone.rotate(-angle, expand=1)
        width_half, height_half = half_tone.size
        xx = (width_half-im.size[0]*scale) / 2
        yy = (height_half-im.size[1]*scale) / 2
        half_tone = half_tone.crop((xx, yy, xx + im.size[0]*scale, yy + im.size[1]*scale))
        dots.append(half_tone)
    return dots
def singleHexagram(gua_no, mode=1, save=0):
    # Render hexagram number `gua_no` from yi.xml as a small greyscale
    # image built from 8x8 black (yin) and white (yang) tiles.
    #   mode 1: six lines stacked vertically on an 8x48 canvas
    #   mode 2: two trigrams side by side on a 24x16 canvas
    # Returns the image, or saves it under Visual/ when `save` is truthy.
    reload(sys)
    sys.setdefaultencoding('utf-8')  # Python 2-only hack for the CJK name
    yin = Image.new('L', (8, 8), 0)
    yang = Image.new('L', (8, 8), 255)
    imgs = [yin, yang]
    yi_xml_doc = minidom.parse('yi.xml')
    yi_content = yi_xml_doc.childNodes[0]
    # NOTE(review): the node indexing below bakes in the exact layout of
    # yi.xml (whitespace text nodes interleaved with elements) — verify
    # against the file if it changes.
    gua = yi_content.childNodes[gua_no*2-1]
    yaos = gua.childNodes
    hexagram_name = gua.attributes['name'].value
    if mode == 1:
        merge_img = Image.new('L', (8, 48), 0)
        # walk the six lines top-down; each 'p' attribute picks yin/yang
        for i in range(11, 0, -2):
            p = int(yaos[i].attributes['p'].value)
            merge_img.paste(imgs[p], (0, (11-i) * 4))
    elif mode == 2:
        merge_img = Image.new('L', (24, 16), 0)
        # lower trigram on the bottom row, upper trigram on the top row
        for i in range(1, 6, 2):
            p = int(yaos[i].attributes['p'].value)
            merge_img.paste(imgs[p], ((i/2 * 8, 8)))
        for i in range(7, 12, 2):
            p = int(yaos[i].attributes['p'].value)
            merge_img.paste(imgs[p], ((i-6)/2 * 8, 0))
    if save:
        merge_img.save('Visual/{}_{}_mode{}.png'.format(gua_no, hexagram_name, mode), quality=70)
    else:
        return merge_img
def load_ttf(self, name, ttfname, size, format):
    # Rasterise the printable ASCII glyphs (32..127) of a TrueType font
    # into fixed-size greyscale cells and register them via load_font().
    font = ImageFont.truetype(ttfname, size)
    sizes = [font.getsize(chr(c)) for c in range(32, 128)]
    fw = max([w for (w, _) in sizes])
    fh = max([h for (_, h) in sizes])
    # print fw, fh
    alle = {}
    # First pass: draw each glyph with an 8px left margin and record its
    # ink extents so the true maximum ink width can be found.
    for i in range(1, 96):
        im = Image.new("L", (fw+8, fh))
        dr = ImageDraw.Draw(im)
        dr.text((8, 0), chr(32 + i), font=font, fill=255)
        alle[i] = gd2.prep.extents(im)
    fw = max([(x1 - x0) for (x0, y0, x1, y1) in alle.values()])
    # Second pass: render each glyph into a cell of the final width.
    ims = ([None] * 32) + [Image.new("L", (fw, fh)) for i in range(32, 128)]
    for i in range(33, 127):
        dr = ImageDraw.Draw(ims[i])
        (x0, y0, x1, y1) = alle[i - 32]
        # shift glyphs whose ink started left of the 8px margin, and
        # correct their advance width accordingly
        x = max(0, 8 - x0)
        if x > 0:
            sizes[i - 32] = (sizes[i - 32][0] - x, sizes[i - 32][1])
        dr.text((x, 0), chr(i), font=font, fill=255)
    # imgtools.view(im)
    widths = ([0] * 32) + [w for (w, _) in sizes]
    self.load_font(name, ims, widths, format)
def difference(ima, ima2, UMBRAL=50):
    """Compare two equally-sized images pixel by pixel to spot movement.

    A pixel counts as movement when the red channel of `ima2` exceeds
    that of `ima` by more than UMBRAL.  Returns two new RGB images: a
    copy of `ima2` with moving pixels painted RED, and a RED-on-WHITE
    silhouette of the movement alone.
    """
    ancho, alto = ima.size
    pix = ima.load()
    pix2 = ima2.load()
    newImage = Image.new('RGB', (ancho, alto))
    newPix = newImage.load()
    soloIma = Image.new('RGB', (ancho, alto))
    soloPix = soloIma.load()
    for fila in range(alto):
        for col in range(ancho):
            moved = (pix2[col, fila][0] - pix[col, fila][0]) > UMBRAL
            if moved:
                # movement detected at this pixel
                newPix[col, fila] = RED
                soloPix[col, fila] = RED
            else:
                # unchanged: keep the new frame's pixel / blank silhouette
                newPix[col, fila] = pix2[col, fila]
                soloPix[col, fila] = WHITE
    return (newImage, soloIma)
def rectangle(self, tl, br, fill, gradient="tb"):
    """Draw a gradient-shaded rectangle from corner ``tl`` to ``br`` in
    colour ``fill`` onto self.img.

    gradient: fade direction - "tb" (top->bottom, default), "bt", "lr"
    or "rl".

    BUG fix: the original rebound the ``gradient`` parameter to the ramp
    image, so the direction comparisons below could never match, and it
    discarded the results of rotate() (PIL's rotate returns a new image,
    it is not in-place).  90/270 rotations use expand=1 so the 1x255
    ramp actually becomes horizontal.
    """
    width = br[0] - tl[0]
    height = br[1] - tl[1]
    # build a 1x255 vertical ramp, dark at the bottom
    ramp = Image.new('L', (1, 255))
    for y in range(255):
        ramp.putpixel((0, 254-y), y)
    if gradient == "bt":
        ramp = ramp.rotate(180)
    elif gradient == "lr":
        ramp = ramp.rotate(90, expand=1)
    elif gradient == "rl":
        ramp = ramp.rotate(270, expand=1)
    # first create a rectangle image with our skin color
    rect = Image.new("RGB", (width, height), fill)
    # resize the gradient to the size of im...
    alpha = ramp.resize(rect.size)
    # put alpha in the alpha band of im...
    rect.putalpha(alpha)
    self.open()
    # check if im has Alpha band...
    if self.img.mode != 'RGBA':
        self.img = self.img.convert('RGBA')
    self.img.paste(rect, (tl[0], tl[1], br[0], br[1]), rect)
    self.img = self.img.convert('RGB')
    self.save()
    self.close()
def blend(im1, im2, amount, color=None):
    """Blend two images with each other. If the images differ in size
    the color will be used for undefined pixels.

    :param im1: first image
    :type im1: pil.Image
    :param im2: second image
    :type im2: pil.Image
    :param amount: amount of blending
    :type amount: int
    :param color: color of undefined pixels
    :type color: tuple
    :returns: blended image
    :rtype: pil.Image
    """
    im2 = convert_safe_mode(im2)
    if im1.size == im2.size:
        im1 = convert(im1, im2.mode)
    else:
        # Sizes differ: centre im1 on a canvas of im2's size filled with
        # `color` (single-band modes take a scalar, hence color[0]).
        if color is None:
            expanded = Image.new(im2.mode, im2.size)
        elif im2.mode in ('1', 'L') and type(color) != int:
            expanded = Image.new(im2.mode, im2.size, color[0])
        else:
            expanded = Image.new(im2.mode, im2.size, color)
        im1 = im1.convert(expanded.mode)
        we, he = expanded.size
        wi, hi = im1.size
        # // keeps the paste offsets integral (identical on Python 2,
        # correct on Python 3 where / would yield floats).
        paste(expanded, im1, ((we - wi) // 2, (he - hi) // 2),
              im1.convert('RGBA'))
        im1 = expanded
    return Image.blend(im1, im2, amount)
def brightness(image, amount=50):
    """Adjust brightness from black to white
    - amount: -1(black) 0 (unchanged) 1(white)
    - repeat: how many times it should be repeated"""
    if amount == 0:
        return image
    image = imtools.convert_safe_mode(image)
    if amount < 0:
        # fade to black
        target = Image.new(image.mode, image.size, 0)
        opacity = -amount / 100.0
    else:
        # fade to white
        white = ImageColor.getcolor('white', image.mode)
        target = Image.new(image.mode, image.size, white)
        opacity = amount / 100.0
    im = imtools.blend(image, target, opacity)
    # fix image transparency mask
    if imtools.has_alpha(image):
        im.putalpha(imtools.get_alpha(image))
    return im
def __init__(self, fname=None, data=None, imobj=None, height=0, width=0):
    """
    An image can be created using any of the following keyword parameters.
    When image creation is complete the image will be an rgb image.
    fname: A filename containing an image. Can be jpg, gif, and others
    data: a list of lists representing the image. This might be something
          you construct by reading an asii format ppm file, or an ascii
          art file and translate into rgb yourself.
    imobj: Make a copy of another image.
    height:
    width: Create a blank image of a particular height and width.
    """
    # BUG fix: `data` previously defaulted to [] — a mutable default
    # argument shared between all calls.  None is the safe equivalent;
    # both are falsy, so the branch logic below is unchanged.
    if fname:
        self.im = Image.open(fname)
        self.imFileName = fname
        ni = self.im.convert("RGB")
        self.im = ni
    elif data:
        height = len(data)
        width = len(data[0])
        self.im = Image.new("RGB", (width, height))
        for row in range(height):
            for col in range(width):
                self.im.putpixel((col, row), data[row][col])
    elif height > 0 and width > 0:
        self.im = Image.new("RGB", (width, height))
    elif imobj:
        self.im = imobj.copy()
    self.width, self.height = self.im.size
    self.centerX = self.width/2+3   # +3 accounts for the ~3 pixel border in Tk windows
    self.centerY = self.height/2+3
    self.id = None
    self.pixels = self.im.load()
def captcha_image(request, key):
    # Django view: render the CAPTCHA challenge stored under hashkey `key`
    # as a PNG, character by character, with optional per-letter rotation
    # plus the configured noise and filter hooks.
    store = get_object_or_404(CaptchaStore, hashkey=key)
    text = store.challenge
    # TrueType or bitmap font depending on the configured path
    if settings.CAPTCHA_FONT_PATH.lower().strip().endswith('ttf'):
        font = ImageFont.truetype(settings.CAPTCHA_FONT_PATH, settings.CAPTCHA_FONT_SIZE)
    else:
        font = ImageFont.load(settings.CAPTCHA_FONT_PATH)
    # generous initial canvas; it is cropped to the text at the end
    size = font.getsize(text)
    size = (size[0] * 2, int(size[1] * 1.2))
    image = Image.new('RGB', size, settings.CAPTCHA_BACKGROUND_COLOR)
    # PIL version probe: rotate(expand=...) needs 1.1.6+ ("116")
    try:
        PIL_VERSION = int(NON_DIGITS_RX.sub('', Image.VERSION))
    except:
        PIL_VERSION = 116
    xpos = 2
    # group punctuation with the preceding character so they render (and
    # rotate) together
    charlist = []
    for char in text:
        if char in settings.CAPTCHA_PUNCTUATION and len(charlist) >= 1:
            charlist[-1] += char
        else:
            charlist.append(char)
    for char in charlist:
        fgimage = Image.new('RGB', size, settings.CAPTCHA_FOREGROUND_COLOR)
        # draw the character white-on-black; this greyscale image later
        # doubles as the composite mask
        charimage = Image.new('L', font.getsize(' %s ' % char), '#000000')
        chardraw = ImageDraw.Draw(charimage)
        chardraw.text((0, 0), ' %s ' % char, font=font, fill='#ffffff')
        if settings.CAPTCHA_LETTER_ROTATION:
            if PIL_VERSION >= 116:
                charimage = charimage.rotate(random.randrange(*settings.CAPTCHA_LETTER_ROTATION), expand=0, resample=Image.BICUBIC)
            else:
                charimage = charimage.rotate(random.randrange(*settings.CAPTCHA_LETTER_ROTATION), resample=Image.BICUBIC)
        charimage = charimage.crop(charimage.getbbox())
        maskimage = Image.new('L', size)
        maskimage.paste(charimage, (xpos, 4, xpos + charimage.size[0], 4 + charimage.size[1]))
        size = maskimage.size
        # stamp the foreground colour through the character mask
        image = Image.composite(fgimage, image, maskimage)
        xpos = xpos + 2 + charimage.size[0]
    # trim unused width, then run the configured post-processing hooks
    image = image.crop((0, 0, xpos + 1, size[1]))
    draw = ImageDraw.Draw(image)
    for f in settings.noise_functions():
        draw = f(draw, image)
    for f in settings.filter_functions():
        image = f(image)
    out = StringIO()
    image.save(out, "PNG")
    out.seek(0)
    response = HttpResponse()
    response['Content-Type'] = 'image/png'
    response.write(out.read())
    return response
def setUp(self):
    "Set up test environment"
    # load up all the providers and register the test-only provider
    oembed.autodiscover()
    # refresh the attribute-cached time the db providers were last updated
    oembed.site._db_updated = None
    # swap MEDIA_* and TEMPLATE_DIRS to test values (restored in tearDown)
    self.media_root, self.media_url = settings.MEDIA_ROOT, settings.MEDIA_URL
    settings.MEDIA_ROOT = MEDIA_ROOT
    settings.MEDIA_URL = MEDIA_URL
    self.template_dirs = settings.TEMPLATE_DIRS
    cur_dir = os.path.dirname(__file__)
    settings.TEMPLATE_DIRS = [os.path.join(os.path.dirname(cur_dir), 'templates')]
    # generate the two fixture JPEGs on first run
    babel_image_path = os.path.join(MEDIA_ROOT, 'images/breugel_babel2.jpg')
    kandinsky_image_path = os.path.join(MEDIA_ROOT, 'images/kandinsky.comp-8.jpg')
    if not all([os.path.exists(babel_image_path), os.path.exists(kandinsky_image_path)]):
        self.base_path = babel_image_path.rsplit('/', 1)[0]
        if not os.path.isdir(self.base_path):
            os.makedirs(self.base_path)
        # BUG fix: JPEG data must be written in binary mode ('wb'); the
        # original opened the files in text mode 'w', which corrupts the
        # bytes on platforms with newline translation.  The `with` blocks
        # also guarantee flush+close, replacing the old
        # map(lambda x: (os.fsync(x), x.close()), ...) dance (os.fsync
        # expects a file descriptor, not a file object).
        with open(babel_image_path, 'wb') as babel_image_file:
            babel_image = Image.new('CMYK', (800, 661), (255, 255, 255, 255))
            babel_image.save(babel_image_file, 'JPEG')
        with open(kandinsky_image_path, 'wb') as kandinsky_image_file:
            kandinsky_image = Image.new('CMYK', (10, 10), (255, 255, 255, 255))
            kandinsky_image.save(kandinsky_image_file, 'JPEG')
def generate_image(text, font_file, size, colour, image_destination):
    """
    Text transform based on:
    http://nedbatchelder.com/blog/200801/truly_transparent_text_with_pil.html
    http://phark.typepad.com/phark/2003/08/accessible_imag.html

    Renders `text` in `colour` with a smooth alpha channel and writes the
    result to `image_destination` as PNG; the image is also returned.
    """
    origin = (0, 0)
    # start from a 1x1 black canvas, then resize it to fit the text
    image = Image.new("RGB", (1, 1), (0, 0, 0))
    font = ImageFont.truetype(font_file, size)
    image = image.resize(font.getsize(text))
    alpha = Image.new("L", image.size, "black")
    # Make a grayscale image of the font, white on black.
    imtext = Image.new("L", image.size, 0)
    ImageDraw.Draw(imtext).text(origin, text, font=font, fill="white")
    # Add the white text to our collected alpha channel. Gray pixels around
    # the edge of the text will eventually become partially transparent
    # pixels in the alpha channel.
    alpha = ImageChops.lighter(alpha, imtext)
    # Make a solid colour, and add it to the colour layer on every pixel
    # that has even a little bit of alpha showing.
    solidcolour = Image.new("RGBA", image.size, colour)
    immask = Image.eval(imtext, lambda p: 255 * (int(p != 0)))
    image = Image.composite(solidcolour, image, immask)
    # Add the alpha channel to the image, and save it out.
    image.putalpha(alpha)
    image.save(image_destination, 'PNG')
    return image
def generate_photo(self, path):
    # Render this bracelet's knot pattern as a grid of circles (each knot
    # is a coloured disc with a dark ring) and save a 10x-downscaled,
    # antialiased PNG to `path`.  Python 2 integer division is relied on
    # for the canvas/resize arithmetic (len(...) / 2).
    if int(self.bracelet.type) == 1:
        im = Image.new(mode="RGB", size=(self.nofrows * 100 + 16, len(self.strings) / 2 * 160 + 16 + 36), color="#fff")
        draw = ImageDraw.Draw(im)
        for i in range(self.nofrows):
            # type 1: every other column is shifted down by half a knot,
            # which column depends on self.odd
            if i % 2 == 0 and self.odd or i % 2 == 1 and not self.odd:
                marginTop = 80
            else:
                marginTop = 0
            for j in range(len(self.knots_colors[i])):
                # knots are stored bottom-up; index from the end
                color = str(self.strings[self.knots_colors[i][len(self.knots_colors[i]) - 1 - j]].color)
                x = 8 + i * 100
                y = 8 + j * 160 + marginTop
                draw.ellipse((x - 8, y - 8, x + 116, y + 116), fill=0x666666)  # outer ring
                draw.ellipse((x - 4, y - 4, x + 108, y + 108), fill=color)
        im = im.resize((self.nofrows * 10 + 1, len(self.strings) / 2 * 16 + 4), Image.ANTIALIAS)
    if int(self.bracelet.type) == 2:
        # type 2: plain rectangular grid, no vertical offset
        im = Image.new(mode="RGB", size=(self.nofrows * 100 + 16, len(self.strings) * 100 + 16), color="#fff")
        draw = ImageDraw.Draw(im)
        for i in range(self.nofrows):
            for j in range(len(self.knots_colors[i])):
                color = str(self.strings[self.knots_colors[i][len(self.knots_colors[i]) - 1 - j]].color)
                x = 8 + i * 100
                y = 8 + j * 100
                draw.ellipse((x - 8, y - 8, x + 108, y + 108), fill=0x666666)  # outer ring
                draw.ellipse((x - 4, y - 4, x + 104, y + 104), fill=color)
        im = im.resize((self.nofrows * 10 + 1, len(self.strings) * 10 + 1), Image.ANTIALIAS)
    im.save(path)
def etch(message, img_format=None, font="/usr/share/fonts/truetype/freefont/FreeMonoBold.ttf", font_size=30, glasswidth = 500, charsperline=20, boxheight=20, boxwidth=20, spacewidth=13, randomoffset=True, randxoffset=4, randyoffset=5, xmargin=30, ymargin=30): """ Return Python Imaging Library Image with text 'etched' on it Defaults to bmp image """ # TODO decouple the font path, figure out how to pass it in as an object or # otherwise specify it # Create a provisional glass height. We will adjust the glass to fit the message later. glassheight = (len(message) // (glasswidth // boxwidth) + 5) * boxheight pane = Image.new('RGB', (glasswidth + 2*xmargin, glassheight + 2*ymargin), color=(255, 255, 255)) draw = ImageDraw.Draw(pane) fnt = ImageFont.truetype(font, font_size) # Configure the positioning of the letters in the message. left = xmargin top = ymargin box_list = [(left, top)] for i in range(len(message) - 1): if message[i + 1] == " ": left += spacewidth else: left += boxwidth if left + boxwidth > glasswidth: top += boxheight left = xmargin box_list.append((left, top)) letter = iter(message) for ulcorner in box_list: if randomoffset: # Possibly offset letters inside their boxes xoff = randint(0, randxoffset) yoff = randint(0, randyoffset) else: xoff = 0 yoff = 0 draw.text((ulcorner[0]+xoff, ulcorner[1]+yoff), letter.next(), font=fnt, fill=0) if top != ymargin: # Fill up the remainder of the space on the bottom. pane = fill_remaining(pane, message, boxwidth, boxheight, randxoffset, randyoffset, font_size, xmargin, ymargin, widthremaining=glasswidth-(box_list[-1][0]+boxwidth), x=box_list[-1][0]+boxwidth, y=box_list[-1][1]) glass = crop_to_text(pane) glasswidth = glass.getbbox()[2] glassheight = glass.getbbox()[3] pane = Image.new('RGB', (glasswidth + 2*xmargin, glassheight + 2*ymargin), color=(255, 255, 255)) pane.paste(glass, (xmargin, ymargin)) return pane
def create_photo_strips():
    '''using the original images we build a color and black and white photo strip and save it to photos/strips'''
    # The strip is composed sideways: the canvas swaps width/height and each
    # element is rotated 270 degrees before pasting.
    strip = Image.new('RGB', (PHOTO_HEIGHT + (BORDER_WIDTH * 2) + FOOTER_HEIGHT, (PHOTO_WIDTH * PHOTO_COUNT) + (BORDER_WIDTH * 2)), BG_COLOR)
    for i in range(PHOTO_COUNT):
        # photos are numbered 1.<ext>, 2.<ext>, ...
        photo = Image.open(PHOTO_FOLDER + str(i+1) + '.' + PHOTO_FILE_EXTENSION)
        w, h = map(lambda x: x/2, photo.size)  # NOTE(review): w, h are unused
        photo = ImageOps.fit(photo, (PHOTO_WIDTH, PHOTO_HEIGHT), centering=(0.5, 0.5))
        photo = photo.rotate(270)
        photo = ImageOps.autocontrast(photo, cutoff=0)
        strip.paste(photo, (FOOTER_HEIGHT, (i * PHOTO_WIDTH) + (i * BORDER_WIDTH)))
    #append footer
    font = ImageFont.truetype('font_1.ttf', 40)
    footer_img = Image.new("RGB", ((PHOTO_COUNT * PHOTO_WIDTH) + (PHOTO_COUNT * BORDER_WIDTH), FOOTER_HEIGHT), BG_COLOR)
    draw = ImageDraw.Draw(footer_img)
    draw.text((220, 40), "ashley & david's wedding, july 28, 2012", font=font, fill=(100,100,0))
    strip.paste(footer_img.rotate(270), (0,0))
    # save a colour and a greyscale copy, then compose the print-ready page
    strip.save(COLOR_FOLDER + current_timestamp() + '.png', PHOTO_FORMAT)
    ImageOps.grayscale(strip).save(GREYSCALE_FOLDER + current_timestamp() + '.png', PHOTO_FORMAT)
    strip_to_print = Image.new('RGB', (PAGE_WIDTH, PAGE_HEIGHT), BG_COLOR)
    strip_to_print.paste(ImageOps.grayscale(strip), (-BORDER_WIDTH, -BORDER_WIDTH))
    strip_to_print.save('to_print.png', PHOTO_FORMAT)
    return 'to_print.png'
def waterMark(img_src, user_name, page_url): logo = Image.open(LOGO_SRC) #原始图片 im = Image.open(img_src) #原始图片 (im_width, im_height) = im.size if im.format != 'GIF' and im_width > 320: (logo_width, logo_height) = logo.size if im.mode != 'RGBA': im = im.convert('RGBA') if logo.mode != 'RGBA': logo = logo.convert('RGBA') font = ImageFont.truetype(FONT, FONT_SIZE) #设置字体及文字尺寸 en_font = ImageFont.truetype(FONT, FONT_SIZE) #设置字体及文字尺寸 #链接水印 (url_width, url_height) = en_font.getsize(page_url) #获得文字水印的尺寸 url_mark = Image.new("RGBA", (url_width, url_height)) #创建水印透明背景图 draw = ImageDraw.ImageDraw(url_mark, "RGBA") #绘制水印透明背景图 draw.text((0,0), page_url, font=en_font, fill=FONT_COLOR) mark_pos = (im_width-url_width-MARGIN-logo_width, im_height-url_height-PADDING) im.paste(url_mark, mark_pos, url_mark) #用户名水印 (name_width, name_height) = font.getsize(user_name) #获得文字水印的尺寸 name_mark = Image.new("RGBA", (name_width, name_height)) #创建水印透明背景图 draw = ImageDraw.ImageDraw(name_mark, "RGBA") #绘制水印透明背景图 draw.text((0,0), user_name, font=font, fill=FONT_COLOR) name_pos = (im_width-name_width-MARGIN-logo_width, im_height-url_height-name_height-PADDING) im.paste(name_mark, name_pos, name_mark) #logo水印 logo_pos = (im_width-logo_width-PADDING+2, im_height-logo_height-PADDING) im.paste(logo, logo_pos, logo) im.save(img_src)
def execute(self):
    """Recombine self.target's RGB channels with a chosen alpha channel
    and overwrite the file in place.

    self.level selects the alpha source: 'r'/'g'/'b' reuse that channel,
    'x' uses the per-pixel RGB average, and any other value is passed to
    Image.new as a constant fill.  Optional attributes: ``brightness``
    (point-multiplier applied first) and ``invert_alpha``.

    BUG fix: the original ended with a dangling ``\"\"\"`` after the final
    save -- an unterminated string literal (syntax error) -- which has
    been removed.
    """
    import Image
    import ImageOps
    i = Image.open(self.target)
    i = i.convert("RGB")
    if getattr(self, 'brightness', None):
        i = i.point(lambda p: p * self.brightness)
    r, g, b = i.split()
    a = {
        'r': r,
        'g': g,
        'b': b,
        'x': Image.new("L", r.size, color=0),
    }.get(self.level)
    if not a:
        # unknown level: treat it as a constant alpha fill value
        a = Image.new("L", r.size, color=self.level)
    if self.level == 'x':
        # alpha = per-pixel mean of the RGB channels
        pxi = i.load()
        pxa = a.load()
        for y in xrange(a.size[1]):
            for x in xrange(a.size[0]):
                p = pxi[x, y]
                pxa[x, y] = int((p[0] + p[1] + p[2]) / 3.0)
    if getattr(self, 'invert_alpha', None):
        a = ImageOps.invert(a)
    i2 = Image.merge("RGBA", (r, g, b, a))
    i2.save(self.target)
def printHDMWalletSeed(headerText, seed, xpub):
    # Print a paper backup of an HDM wallet on an Adafruit thermal printer:
    # header text, the seed mnemonic and xpub as text, plus both values as
    # side-by-side QR codes, framed by divider lines.
    qrSize = (170, 170)
    qrPad = 10
    # 384px is the printable width of the thermal paper
    finalImg = Image.new("RGB", (384, qrSize[1]), "white")
    finalImg.paste(getQR(seed, qrSize), (qrPad, 0))
    finalImg.paste(getQR(xpub, qrSize), (qrSize[0]+qrPad*2+14, 0))
    printer = Adafruit_Thermal("/dev/ttyAMA0", 19200, timeout=5)
    # 6px black bar with a 4px white bar pasted on top -> thin divider line
    dividerLine = Image.new("RGB", (384, 6), "black")
    dividerLine.paste(Image.new("RGB", (384, 4), "white"), (0, 0))
    printer.printImage(dividerLine, True)
    printer.println(headerText)
    printer.println("Seed Mnemonic: "+seed+'\n')
    printer.println("xpub: "+xpub+'\n')
    printer.printImage(finalImg, True)
    printer.feed(1)
    printer.printImage(dividerLine, True)
    printer.feed(3)
    printer.setDefault() # Restore printer to defaults
import Image

# Split the photo into two complementary checkerboard images: pixels whose
# x and y coordinates share parity land in img1, the rest in img2.
img = Image.open("assets/cave.jpg")
width, height = img.size
img1 = Image.new(img.mode, (width, height))
img2 = Image.new(img.mode, (width, height))
for x in range(width):
    for y in range(height):
        dest = img1 if x % 2 == y % 2 else img2
        dest.putpixel((x, y), img.getpixel((x, y)))
img1.save('img1.jpg')
img2.save('img2.jpg')
#!/usr/bin/python import sys import Image buf = sys.stdin.read() # rearrange buf into parallel columns cols = 1 bitw = 1024 imgw = cols * bitw imgh = len(buf)/(cols*bitw/8) print imgw, imgh, len(buf) img = Image.new("1",(imgw,imgh)) for c in range(cols): colimg = Image.fromstring("1",(bitw,imgh),buf[c*imgh*(bitw/8):(c+1)*imgh*(bitw/8)]) img.paste(colimg,(bitw*c,0)) img.save("eprom-font.png") # Create BDF file f = open("eprom-font.bdf","w") f.write("""STARTFONT 2.1 FONT -osbourne-charrom-medium-r-normal--16-160-75-75-c-80-iso10646-1 SIZE 16 75 75 FONTBOUNDINGBOX 8 10 0 -2 STARTPROPERTIES 2
def main(shapefile_path, raster_path): # Load the source data as a gdalnumeric array srcArray = gdalnumeric.LoadFile(raster_path) # Also load as a gdal image to get geotransform # (world file) info srcImage = gdal.Open(raster_path) geoTrans = srcImage.GetGeoTransform() # Create an OGR layer from a boundary shapefile shapef = ogr.Open(shapefile_path) lyr = shapef.GetLayer( os.path.split(os.path.splitext(shapefile_path)[0])[1]) poly = lyr.GetNextFeature() # Convert the layer extent to image pixel coordinates minX, maxX, minY, maxY = lyr.GetExtent() ulX, ulY = world2Pixel(geoTrans, minX, maxY) lrX, lrY = world2Pixel(geoTrans, maxX, minY) # Calculate the pixel size of the new image pxWidth = int(lrX - ulX) pxHeight = int(lrY - ulY) clip = srcArray[ulY:lrY, ulX:lrX] # # EDIT: create pixel offset to pass to new image Projection info # xoffset = ulX yoffset = ulY print "Xoffset, Yoffset = ( %f, %f )" % (xoffset, yoffset) # Create a new geomatrix for the image geoTrans = list(geoTrans) geoTrans[0] = minX geoTrans[3] = maxY # Map points to pixels for drawing the # boundary on a blank 8-bit, # black and white, mask image. 
points = [] pixels = [] geom = poly.GetGeometryRef() pts = geom.GetGeometryRef(0) for p in range(pts.GetPointCount()): points.append((pts.GetX(p), pts.GetY(p))) for p in points: pixels.append(world2Pixel(geoTrans, p[0], p[1])) rasterPoly = Image.new("L", (pxWidth, pxHeight), 1) rasterize = ImageDraw.Draw(rasterPoly) rasterize.polygon(pixels, 0) mask = imageToArray(rasterPoly) # Clip the image using the mask clip = gdalnumeric.choose(mask, \ (clip, 0)).astype('float64') # This image has 3 bands so we stretch each one to make them # visually brighter #for i in range(1): #clip[:,:] = stretch(clip[:,:]) # Save new tiff # # EDIT: instead of SaveArray, let's break all the # SaveArray steps out more explicity so # we can overwrite the offset of the destination # raster # ### the old way using SaveArray # # gdalnumeric.SaveArray(clip, "OUTPUT.tif", format="GTiff", prototype=raster_path) # ### # gtiffDriver = gdal.GetDriverByName('GTiff') if gtiffDriver is None: raise ValueError("Can't find GeoTiff Driver") gtiffDriver.CreateCopy( "OUTPUT.tif", OpenArray(clip, prototype_ds=raster_path, xoff=xoffset, yoff=yoffset)) # Save as an 8-bit jpeg for an easy, quick preview clip = clip.astype('float64')
#! /usr/bin/env python2 # walk_around.py -- created by Ing. Josef KlotzneIr import Image def get_image(s): return Image.open(s, "r") strip = get_image('wire.png') spiral = Image.new(strip.mode, (100, 100), 0) dirs = [(1, 0), (0, 1), (-1, 0), (0, -1)] x, y, z = 0, 0, 0 for i in range(100): for j in range(100): spiral.putpixel((x, y), strip.getpixel((z, 0))) x += 1 z += 1 y += 1 x = 0 """ for i in range(200): d = dirs[i % 4] for j in range(100 - (i + 1) / 2): x += d[0] y += d[1] spiral.putpixel((x,y), strip.getpixel((z,0))) z += 1 """ spiral.show() #spiral.save('spiral.png')
"file": "images/num_%d_%s.png" }""" meta_data_entries = [] font = ImageFont.truetype(FONT_FILE_PATH, FONT_SIZE) if __name__ == "__main__": # Generate the image tile file for each digit. for clr in ['d', 'b']: for digit in range(0, 12): if clr == 'd': fill = (0, 0, 0, 255) else: fill = (0, 0, 170, 255) # Draw the digit on a large canvas so PIL doesn't crop it. scratch_canvas_image = Image.new( "RGB", size=LARGE_SCRATCH_CANVAS_DIMENSIONS) scratch_canvas_image2 = Image.new( "RGB", size=LARGE_SCRATCH_CANVAS_DIMENSIONS, color=fill) draw = ImageDraw.Draw(scratch_canvas_image) draw2 = ImageDraw.Draw(scratch_canvas_image2) if digit == 10: x = 'A' elif digit == 11: x = 'B' else: x = str(digit) draw.text((0, 0), x, font=font) draw2.text((0, 0), x, font=font) # Discard all the padding cropped_digit_image = scratch_canvas_image2.crop( scratch_canvas_image.getbbox())
print sch if not (sch): continue if not (sch.group(1) in index): index.append(sch.group(1)) index2[sch.group(1)] = [] index2[sch.group(1)].append(sch.groups()) print index2 for img in index2: lst = index2[img] width = 0 height = 0 for subimg in lst: if subimg[1] == '0': height += int(subimg[4]) if subimg[2] == '0': width += int(subimg[3]) image = Image.new('RGBA', (width, height), (255, 255, 255, 255)) for subimg in lst: simg = Image.open( '%s#%s1@%s_%s_%sx%s.PNG' % (subimg[0], subimg[0], subimg[1], subimg[2], subimg[3], subimg[4])) for x in range(0, int(subimg[3])): for y in range(0, int(subimg[4])): try: color = simg.getpixel((x, y)) if color[3] == 0: color = (255, 255, 255, 255) else: color = (color[2], color[1], color[0], 255) if subimg[1] == '0' and subimg[2] == '0': image.putpixel((int(subimg[1]) - simg.size[0] + (int(subimg[3])) + x, int(subimg[2]) -
for char in string.ascii_uppercase: #Iterate through A..Z dData.write( char ) tTxt = str(bin(targ))[2:] for i in range(26): if i == 26-targ: dData.write(" 1") else: dData.write(" 0" ) dData.write("\n") for count in range(1, 21): #For each letter go through 1 to 20 iName = char + str(count) + ".jpg" #Filename A1.jpg ... Z20.jpg print iName im = Image.open("pics/" + iName) #Open the org 30x30 grayscale image imOrg = im.load() #LOad image to manipulate pixels imNew = Image.new("L", (30,30)) #Create new grayscale 10x10 image imNewP = imNew.load() #Load image to manipulate pixels imgArr= [] #Array to hold the 100 avg pixel values for y in range(0,10): #Go through all rows in 10x10 image for x in range(0, 10): #Go through all columns in 10x10 image arr = None #Reset value array arr = [ #Create array from pixel values of the 3x3 subset imOrg[x*3 , y*3], imOrg[x*3+1 , y*3], imOrg[x*3+2 , y*3], imOrg[x*3 , y*3+1], imOrg[x*3+1 , y*3+1], imOrg[x*3+2 , y*3+1], imOrg[x*3 , y*3+2], imOrg[x*3+1 , y*3+2], imOrg[x*3+2 , y*3+2] ] imNewP[x*3, y*3] = sum(arr)/len(arr) #Insert avg pixel val to image imNewP[x*3+1, y*3] = sum(arr)/len(arr) #Insert avg pixel val to image imNewP[x*3+2, y*3] = sum(arr)/len(arr) #Insert avg pixel val to image
def handle_cmd(self, cmd): cmd_arr = cmd.strip().split(' ') cmd = cmd_arr[0] if cmd == '': return else: self.response_print('') if cmd == 'play': if len(cmd_arr) == 1: if self.state == 'prerecord': self.state = 'record' else: self.state = 'play' elif len(cmd_arr) == 2: try: self.file = [] for row in csv.reader( open(config.save_path + cmd_arr[1], 'r')): self.file.append(row) self.t = float(self.file[0][0]) self.playback_index = 0 self.robot.to_pos(self.file[0][1:]) self.response_print('openened ' + cmd_arr[1] + ' for playback') self.state = 'playback' except: self.response_print('there was an error opening ' + cmd_arr[1] + ' for playback') self.robot.trace = [] elif cmd == 'stop': if self.state == 'record': self.response_print('stopping the recording') try: self.file_pointer.close() except: pass self.state = 'halted' elif cmd == 'server': c2 = cmd_arr[1] if c2 == 'start': self.state = 'server' port = cmd_arr[2] self.response_print('starting server on port ' + port) self.port = int(port) elif c2 == 'stop': self.port = None self.state = 'halted' self.response_print('stopped server') else: self.response_print('unknown server command') elif cmd == 'status': if self.state == '': self.response_print( 'the simulator is currently doing nothing') else: self.response_print( 'the simulator is currently in this state: ' + self.state) elif cmd == 'record': file = cmd_arr[1] + '.csv' try: self.file_pointer = open(config.save_path + file, 'a') self.state = 'prerecord' self.response_print('file ' + file + ' opened for recording') except: self.response_print( 'problem opening that file for recording') self.response_print('') elif cmd == 'load': try: robot_file = '../robots/' + cmd_arr[1] + '.json' self.robot = create_robot(robot_file) self.robot.timestep() except: self.response_print('Error with this robot file: ' + traceback.format_exc()) print traceback.format_exc() elif cmd == 'list': for r in os.listdir('../robots'): if r.split('.')[-1] == 'json': self.response_print(" " + 
r.split('.')[0]) elif cmd == 'axis': if len(cmd_arr) == 1: config.enable_axis = not config.enable_axis elif cmd_arr[1] == 'on': config.enable_axis = True elif cmd_arr[1] == 'off': config.enable_axis = False self.draw() elif cmd == 'trace': if len(cmd_arr) == 1: config.enable_trace = not config.enable_trace elif cmd_arr[1] == 'on': config.enable_trace = True elif cmd_arr[1] == 'off': config.enable_trace = False elif cmd_arr[1] == 'clear': self.robot.trace = [] elif cmd_arr[1] == 'limit': config.max_trace = int(cmd_arr[2]) elif cmd == 'ghost': if len(cmd_arr) == 1: config.enable_ghost = not config.enable_ghost elif cmd_arr[1] == 'on': config.enable_ghost = True elif cmd_arr[1] == 'off': config.enable_ghost = False elif cmd_arr[1] == 'interval': config.ghost_interval = int(cmd_arr[2]) elif cmd == 'axis': if len(cmd_arr) == 1: config.enable_axis = not config.enable_axis elif cmd_arr[1] == 'on': config.enable_axis = True elif cmd_arr[1] == 'off': config.enable_axis = False elif cmd == 'floor': if len(cmd_arr) == 1: config.floor_on = not config.floor_on elif cmd_arr[1] == 'on': config.floor_on = True elif cmd_arr[1] == 'off': config.floor_on = False elif cmd == 'eval': try: self.response_print(str(eval('self.' + cmd_arr[1]))) except: self.response_print("error evaluating command") elif cmd == 'set': var = cmd_arr[1] val = ' '.join(cmd_arr[2:]) try: if var in self.robot.syms: exec 'self.robot.syms[\'' + var + '\'] = \'' + val + '\'' self.response_print( str(eval('self.robot.syms[\'' + var + '\']'))) self.robot.trace = [] else: exec 'self.' + var + ' = ' + val self.response_print(str(eval('self.' + var))) self.robot.trace = [] except: self.response_print('error setting expression') elif cmd == 'quit' or cmd == 'exit': sys.exit(0) elif cmd == 'hide': self.response_print('hiding this command window') self.response_print('press \'t\' to get it back') self.hide_cli = True self.aux_msg = 'terminal is hidden. press \'t\' to get it back.' 
self.aux_msg_enabled = True elif cmd == 'skew': self.response_print('entering skew mode:') self.response_print(' arrow keys to to translate') self.response_print(' \'j\' and \'k\' to zoom in and out') self.response_print( ' \'f\' and \'d\' speed and slow simulation') self.response_print(' \'t\' to quit skew mode') self.skew_mode = True elif cmd == 'help': if len(cmd_arr) == 1: self.response_print("available commands:") for k, _ in help.d.iteritems(): self.response_print(" " + k) self.response_print("") self.response_print( "type \'help <command>\' to get help on an individual command" ) else: try: helpcmd = help.d[cmd_arr[1]] ref = helpcmd['reference'] desc = helpcmd['description'] self.response_print("syntax:") self.response_print(" " + ref) self.response_print("") self.response_print("description:") self.response_print(" " + desc) except: self.response_print("couldn't find command") elif cmd == 'screendump': cs1, cs2 = self.aux_msg_enabled, self.hide_cli self.aux_msg_enabled, self.hide_cli = False, True self.draw() glutPostRedisplay() try: s = glReadPixels(0, 0, self.width, self.height, GL_RGB, GL_UNSIGNED_BYTE) img = Image.new('RGB', (self.width, self.height)) img.fromstring(s) img2 = img.transpose(Image.FLIP_TOP_BOTTOM) strtime = str(time.time()).split('.')[0] filename = config.save_path + "screendump" + strtime + ".png" self.response_print('check out ' + filename + ' in the working directory') img2.save(filename) self.aux_msg_enabled, self.hide_cli = cs1, cs2 except: self.aux_msg_enabled, self.hide_cli = cs1, cs2 self.response_print( 'error taking screenshot, do you have the python imaging library installed?' ) else: self.response_print('are you sure that\'s a command?') glutPostRedisplay() self.response_print('')
midTimeColor = (255, 255, 0) # Medium arrival time = yellow shortTimeColor = (255, 0, 0) # Short arrival time = red minsColor = (110, 110, 110) # Commans and 'minutes' labels noTimesColor = (0, 0, 255) # No predictions = blue # TrueType fonts are a bit too much for the Pi to handle -- slow updates and # it's hard to get them looking good at small sizes. A small bitmap version # of Helvetica Regular taken from X11R6 standard distribution works well: font = ImageFont.load( os.path.dirname(os.path.realpath(__file__)) + '/helvR08.pil') fontYoffset = -2 # Scoot up a couple lines so descenders aren't cropped # Main application ----------------------------------------------------------- # Drawing takes place in offscreen buffer to prevent flicker image = Image.new('RGB', (width, height)) draw = ImageDraw.Draw(image) currentTime = 0.0 prevTime = 0.0 # Clear matrix on exit. Otherwise it's annoying if you need to break and # fiddle with some code while LEDs are blinding you. def clearOnExit(): matrix.Clear() atexit.register(clearOnExit) # Populate a list of predict objects (from predict.py) from stops[]. # While at it, also determine the widest tile width -- the labels
def canny(im, sigma, thresHigh=50, thresLow=10):
    '''
    Canny edge detector.

    Takes an input image in the range [0, 1] and generates a gradient
    image with edges marked by 1 pixels.  Also writes 'Angle map.jpg'
    as a debug visualisation of the gradient angles.

    Fixes: the 135-degree colour branch of the angle map tested
    ``== 45`` twice (green was unreachable); the padding offsets used
    float division, which is invalid as numpy slice indices.
    '''
    imin = im.copy() * 255.0
    # Gaussian kernel for blurring; wsize should be an odd number
    wsize = 5
    gausskernel = gaussFilter(sigma, window=wsize)
    # fx is the filter for vertical gradient, fy for horizontal.
    # Note the vertical direction is positive X.
    fx = createFilter([0, 1, 0,
                       0, 0, 0,
                       0, -1, 0])
    fy = createFilter([0, 0, 0,
                       -1, 0, 1,
                       0, 0, 0])
    imout = conv(imin, gausskernel, 'valid')
    gradxx = conv(imout, fx, 'valid')
    gradyy = conv(imout, fy, 'valid')
    gradx = np.zeros(im.shape)
    grady = np.zeros(im.shape)
    # Re-embed the 'valid' convolution results centred in full-size
    # arrays.  Integer division: slice indices must be ints (was / 2.0).
    padx = (imin.shape[0] - gradxx.shape[0]) // 2
    pady = (imin.shape[1] - gradxx.shape[1]) // 2
    gradx[padx:-padx, pady:-pady] = gradxx
    grady[padx:-padx, pady:-pady] = gradyy
    # Net gradient magnitude and angle (mapped into [0, 360))
    grad = hypot(gradx, grady)
    theta = arctan2(grady, gradx)
    theta = 180 + (180 / pi) * theta
    # Only significant magnitudes are considered. All others are removed.
    xx, yy = where(grad < 10)
    theta[xx, yy] = 0
    grad[xx, yy] = 0
    # Quantize the angles to the 4 approach directions (first step of
    # non-maximum suppression).
    x0, y0 = where(((theta < 22.5) + (theta > 157.5) * (theta < 202.5) +
                    (theta > 337.5)) == True)
    x45, y45 = where(((theta > 22.5) * (theta < 67.5) +
                      (theta > 202.5) * (theta < 247.5)) == True)
    x90, y90 = where(((theta > 67.5) * (theta < 112.5) +
                      (theta > 247.5) * (theta < 292.5)) == True)
    x135, y135 = where(((theta > 112.5) * (theta < 157.5) +
                        (theta > 292.5) * (theta < 337.5)) == True)
    Image.fromarray(theta).convert('L').save('Angle map.jpg')
    theta[x0, y0] = 0
    theta[x45, y45] = 45
    theta[x90, y90] = 90
    theta[x135, y135] = 135
    # Debug visualisation: colour-code the quantized angle map
    x, y = theta.shape
    temp = Image.new('RGB', (y, x), (255, 255, 255))
    for i in range(x):
        for j in range(y):
            if theta[i, j] == 0:
                temp.putpixel((j, i), (0, 0, 255))
            elif theta[i, j] == 45:
                temp.putpixel((j, i), (255, 0, 0))
            elif theta[i, j] == 90:
                temp.putpixel((j, i), (255, 255, 0))
            elif theta[i, j] == 135:  # was '== 45' again: green unreachable
                temp.putpixel((j, i), (0, 255, 0))
    # Non-maximum suppression along the quantized gradient direction
    retgrad = grad.copy()
    x, y = retgrad.shape
    for i in range(x):
        for j in range(y):
            if theta[i, j] == 0:
                test = nms_check(grad, i, j, 1, 0, -1, 0)
                if not test:
                    retgrad[i, j] = 0
            elif theta[i, j] == 45:
                test = nms_check(grad, i, j, 1, -1, -1, 1)
                if not test:
                    retgrad[i, j] = 0
            elif theta[i, j] == 90:
                test = nms_check(grad, i, j, 0, 1, 0, -1)
                if not test:
                    retgrad[i, j] = 0
            elif theta[i, j] == 135:
                test = nms_check(grad, i, j, 1, 1, -1, -1)
                if not test:
                    retgrad[i, j] = 0
    # Hysteresis tracking.  Edges are continuous contours: thresHigh
    # locates the start of an edge, thresLow follows it to the end.
    # Visited pixels are marked -1.
    init_point = stop(retgrad, thresHigh)
    while (init_point != -1):
        # print 'next segment at', init_point
        retgrad[init_point[0], init_point[1]] = -1
        p2 = init_point
        p1 = init_point
        p0 = init_point
        p0 = nextNbd(retgrad, p0, p1, p2, thresLow)
        while (p0 != -1):
            p2 = p1
            p1 = p0
            retgrad[p0[0], p0[1]] = -1
            p0 = nextNbd(retgrad, p0, p1, p2, thresLow)
        init_point = stop(retgrad, thresHigh)
    # Finally, convert the image into a binary image
    x, y = where(retgrad == -1)
    retgrad[:, :] = 0
    retgrad[x, y] = 1.0
    return retgrad
f = open("egatiles.dd2", "rb") tiles = f.read() f.close() # open the wanted level f = open(sys.argv[1], "rb") level = unrle(f.read()) f.close() print "Level unpacked to %d bytes" % len(level) width = ord(level[4]) | (ord(level[5]) << 8) height = ord(level[6]) | (ord(level[7]) << 8) height = height - 1 im = Image.new("P", (width * 16, height * 16)) im.im.putpalette("RGB", ega_palette.palette) for y in range(height): print "\rRendering Y=%d" % y, for x in range(width): offset = 0x24 + (y * width * 2) + (x * 2) tile_index = (ord(level[offset])) | (ord(level[offset + 1]) << 8) tile_offset = tile_index * 128 if True: offset = offset + (width * height * 2) tag = (ord(level[offset])) | (ord(level[offset + 1]) << 8) if tag > 0:
windDir = data['currently']['windBearing'] windUnits = "mph" # print(temperature) # print(humidity) # print(windSpeed) # print(windDir) # print(windUnits) # Although the Python Imaging Library does have nice font support, # I opted here to use a raster bitmap for all of the glyphs instead. # This allowed lots of control over kerning and such, and I didn't # want to spend a lot of time hunting down a suitable font with a # permissive license. symbols = Image.open("gfx/timetemp.png") # Bitmap w/all chars & symbols img = Image.new("1", [330, 117], "white") # Working 'background' image draw = ImageDraw.Draw(img) # These are the widths of certain glyphs within the 'symbols' bitmap TimeDigitWidth = [38, 29, 38, 36, 40, 35, 37, 37, 38, 37, 13] TempDigitWidth = [33, 25, 32, 31, 35, 30, 32, 32, 33, 32, 17, 14] DateDigitWidth = [16, 13, 16, 15, 17, 15, 16, 16, 16, 16] HumiDigitWidth = [14, 10, 14, 13, 15, 12, 13, 13, 13, 13, 18] DayWidth = [104, 109, 62, 110, 88, 110, 95] MonthWidth = [53, 52, 60, 67, 59, 63, 59, 56, 51, 48, 54, 53] DirWidth = [23, 35, 12, 27, 15, 33, 19, 41, 23] DirAngle = [23, 68, 113, 157, 203, 247, 293, 336] # Generate a list of sub-image glyphs cropped from the symbols image def croplist(widths, x, y, height):
#!/usr/bin/python import Image, ImageDraw, math, colorsys dimensions = (800, 800) scale = 1.0 / (dimensions[0] / 3) center = (2.2, 1.5) # Use this for Mandelbrot set #center = (1.5, 1.5) # Use this for Julia set iterate_max = 100 colors_max = 50 img = Image.new("RGB", dimensions) d = ImageDraw.Draw(img) # Calculate a tolerable palette palette = [0] * colors_max for i in xrange(colors_max): f = 1 - abs((float(i) / colors_max - 1)**15) r, g, b = colorsys.hsv_to_rgb(.66 + f / 3, 1 - f / 2, f) palette[i] = (int(r * 255), int(g * 255), int(b * 255)) # Calculate the mandelbrot sequence for the point c with start value z def iterate_mandelbrot(c, z=0): for n in xrange(iterate_max + 1): z = z * z + c if abs(z) > 2: return n return None # Draw our image
def hook(epoch, t, ll):
    """Training-loop callback: every 10th epoch, validate, checkpoint,
    log, and (when gfx is set) render diagnostic images.

    Uses names from the enclosing scope: model, x_valid, L_valid,
    n_batch, byteToFloat, ll_valid_stats, logdir, gfx, paramgraphics,
    f_dec, dim_input, colorImg, n_y, n_z, y_sample, z_sample,
    mosaic_w, mosaic_h.
    """
    if epoch % 10 != 0:
        return
    ll_valid, _ = model.est_loglik(x_valid,
                                   n_samples=L_valid,
                                   n_batch=n_batch,
                                   byteToFloat=byteToFloat)
    # NaN guard: roll the model back to the last saved parameters
    if math.isnan(ll_valid):
        print("NaN detected. Reverting to saved best parameters")
        ndict.set_value(model.v, ndict.loadz(logdir + 'v.ndict.tar.gz'))
        ndict.set_value(model.w, ndict.loadz(logdir + 'w.ndict.tar.gz'))
        return
    # ll_valid_stats = [best validation loglik, epochs since improvement]
    if ll_valid > ll_valid_stats[0]:
        ll_valid_stats[0] = ll_valid
        ll_valid_stats[1] = 0
        ndict.savez(ndict.get_value(model.v), logdir + 'v_best')
        ndict.savez(ndict.get_value(model.w), logdir + 'w_best')
    else:
        ll_valid_stats[1] += 1
        # Stop when not improving validation set performance in 100 iterations
        # (deliberately disabled via `if False`)
        if False and ll_valid_stats[1] > 1000:
            print("Finished")
            with open(logdir + 'hook.txt', 'a') as f:
                print("Finished", file=f)
            exit()
    # Log current parameters and losses
    ndict.savez(ndict.get_value(model.v), logdir + 'v')
    ndict.savez(ndict.get_value(model.w), logdir + 'w')
    print(epoch, t, ll, ll_valid)
    with open(logdir + 'hook.txt', 'a') as f:
        print(t, ll, ll_valid, file=f)
    if gfx:
        # Graphics: visualise weights and generated samples
        v = {i: model.v[i].get_value() for i in model.v}
        w = {i: model.w[i].get_value() for i in model.w}
        tail = '-' + str(epoch) + '.png'
        image = paramgraphics.mat_to_img(f_dec(v['w0x'][:].T),
                                         dim_input,
                                         True,
                                         colorImg=colorImg)
        image.save(logdir + 'q_w0x' + tail, 'PNG')
        image = paramgraphics.mat_to_img(f_dec(w['out_w'][:]),
                                         dim_input,
                                         True,
                                         colorImg=colorImg)
        image.save(logdir + 'out_w' + tail, 'PNG')
        # samples conditioned on random one-hot class labels
        _x = {'y': np.random.multinomial(1, [1. / n_y] * n_y, size=144).T}
        _, _, _z_confab = model.gen_xz(_x, {}, n_batch=144)
        image = paramgraphics.mat_to_img(f_dec(_z_confab['x']),
                                         dim_input,
                                         colorImg=colorImg)
        image.save(logdir + 'samples' + tail, 'PNG')
        # samples from a fixed (y, z) pair for epoch-to-epoch comparison
        _, _, _z_confab = model.gen_xz(y_sample, z_sample, n_batch=144)
        image = paramgraphics.mat_to_img(f_dec(_z_confab['x']),
                                         dim_input,
                                         colorImg=colorImg)
        image.save(logdir + 'samples_fixed' + tail, 'PNG')
        if n_z == 2:
            # 2-D latent space: sweep z over a Gaussian-quantile grid and
            # build one sub-mosaic per class label
            import ImageFont
            import ImageDraw
            n_width = 10
            submosaic_offset = 15
            submosaic_width = (dim_input[1] * n_width)
            submosaic_height = (dim_input[0] * n_width)
            mosaic = Image.new(
                "RGB", (submosaic_width * mosaic_w,
                        submosaic_offset + submosaic_height * mosaic_h))
            for digit in range(0, n_y):
                if digit >= mosaic_h * mosaic_w:
                    continue
                _x = {}
                n_batch_plot = n_width * n_width
                _x['y'] = np.zeros((n_y, n_batch_plot))
                _x['y'][digit, :] = 1
                _z = {'z': np.zeros((2, n_width**2))}
                for i in range(0, n_width):
                    for j in range(0, n_width):
                        _z['z'][0, n_width * i + j] = scipy.stats.norm.ppf(
                            float(i) / n_width + 0.5 / n_width)
                        _z['z'][1, n_width * i + j] = scipy.stats.norm.ppf(
                            float(j) / n_width + 0.5 / n_width)
                _x, _, _z_confab = model.gen_xz(_x, _z,
                                                n_batch=n_batch_plot)
                x_samples = _z_confab['x']
                image = paramgraphics.mat_to_img(f_dec(x_samples),
                                                 dim_input,
                                                 colorImg=colorImg,
                                                 tile_spacing=(0, 0))
                #image.save(logdir+'samples_digit_'+str(digit)+'_'+tail, 'PNG')
                mosaic_x = (digit % mosaic_w) * submosaic_width
                mosaic_y = submosaic_offset + int(
                    digit / mosaic_w) * submosaic_height
                mosaic.paste(image, (mosaic_x, mosaic_y))
            draw = ImageDraw.Draw(mosaic)
            draw.text((1, 1),
                      "Epoch #" + str(epoch) + " Loss=" + str(int(ll)))
            #plt.savefig(logdir+'mosaic'+tail, format='PNG')
            mosaic.save(logdir + 'mosaic' + tail, 'PNG')
#!/usr/bin/python
import zlib
import binascii
import Image

# Hex-encoded, zlib-compressed payload; decompressing and hex-decoding it
# yields a string of '1'/'0' characters describing a 27x27 bitmap.
IDAT = "789c9d940b0e80300843af04f1fe7733313196f6cd5fc874930e3ae8dc6afb673dac8e717acea7631475216abc13a3b12eafee6bc4f017cffbb44bd93b738fac5ee7cfe7ca1ae6f01adef7a41628cd32f934fa1a7666169d3943d247430caf8acf9517afbd7e8a59ad93a9abd63badb3b779e85c69a49d8ce7f57cab8954a2736436f3acb312d4ad44937ee8eeccbb9c78ba9515fe953af91fa11ee7c1fd48544734d73b292cb37cb21d0a2d1b33".decode(
    'hex')
result = zlib.decompress(IDAT)
bits = result.decode('hex')  # renamed from `str`, which shadowed the builtin
MAX = 27
pic = Image.new("RGB", (MAX, MAX))
# paint '1' bits black and '0' bits white, row by row
i = 0
for y in range(0, MAX):
    for x in range(0, MAX):
        if (bits[i] == '1'):
            pic.putpixel([x, y], (0, 0, 0))
        else:
            pic.putpixel([x, y], (255, 255, 255))
        i = i + 1
pic.show()
pic.save("flag.png")
def set_pixels(self, color=(0, 0, 0)):
    """Fill the entire image with ``color`` (black by default)."""
    filled = _Image.new("RGB", self._surf.size, color)
    self._reset(filled)
def makeImageRGB(rows, cols):
    """Create and return a black RGB image of the given dimensions.

    PIL sizes are (width, height), i.e. (cols, rows).  The previous
    version also called image.load() into an unused local; removed.
    """
    image = Image.new("RGB", (cols, rows), (0, 0, 0))  # cols=>width,rows=>height
    return image
def test_crop_to_fit_lower():
    """crop_to_fit should reduce a tall 500x750 image to exactly 320x240."""
    img = Image.new('RGBA', (500, 750))
    ret = image.crop_to_fit(img, (320, 240))
    # message previously read "Got expected size 320x240, got %rx%r." — garbled
    assert ret.size == (
        320, 240), 'Expected size 320x240, got %rx%r.' % ret.size
def makeTag(serialNum, assetTag, outputfile):
    """
    create a single image with two barcodes in it
    sized for individual labels, 62mmx28mm

    serialNum and assetTag are each optional (but at least one is
    required); each present value is rendered as a Code 39 barcode and
    the results are stacked, centred, on a white label canvas saved as
    PNG to outputfile.  Relies on module-level px2mm() and fake().
    """
    if (serialNum is None) and (assetTag is None):
        raise Exception("must provide at least one value for label")
    code39 = barcode.get_barcode_class('code39')
    twoBarcodes = (serialNum is not None) and (assetTag is not None)
    wrt = ImageWriter()
    label = (696, 271)  #pixels for a 62mmx28mm label
    margin = 3  #mm
    width = px2mm(label[0]) - 2 * margin  #showable width in mm
    # halve the bar height when both barcodes must share the label
    if twoBarcodes:
        modHeight = 7  # barcode height
    else:
        modHeight = 14
    #code39 5 bars, 4 spaces per symbol. 3 wide, 6 narrow, 3:1 ratio
    #settings for the Serial number
    #resize the width of a line to make them fit in the printable width
    #16 modules per symbol
    if serialNum is not None:
        a = code39(serialNum, add_checksum=False)
        wrt.set_options({
            'text': 'SN: ' + a.code,
            'text_distance': 0.5,
            'quiet_zone': 0,
            'module_height': modHeight,
            'module_width': width / ((2 + len(a.get_fullcode())) * 16)
        })
        apil = wrt.render(a.build())
    else:
        # placeholder with zero size so the layout math still works
        apil = fake()
        apil.size = (0, 0)
    if assetTag is not None:
        b = code39(assetTag, add_checksum=False)
        #settings for the Asset Tag
        wrt.set_options({
            'text': 'TAG: ' + b.code,
            'text_distance': 0.5,
            'quiet_zone': 0,
            'module_height': modHeight,
            'module_width': width / ((2 + len(b.get_fullcode())) * 16)
        })
        bpil = wrt.render(b.build())
    else:
        bpil = fake()
        bpil.size = (0, 0)
    #print (apil.size)
    #print (bpil.size)
    if (apil.size[1] + bpil.size[1]) > label[1]:
        raise Exception("images dont fit")
    #create a custom canvas of the correct size
    #paste both barcodes into it, approximately centered
    im = Image.new('RGB', label, 'white')
    top = int((label[1] - (apil.size[1] + bpil.size[1])) / 2)
    left = int((label[0] - apil.size[0]) / 2)
    if serialNum is not None:
        im.paste(apil, (0 + left, top, apil.size[0] + left, top + apil.size[1]))
    left = int((label[0] - bpil.size[0]) / 2)
    if assetTag is not None:
        im.paste(bpil, (0 + left, top + apil.size[1], bpil.size[0] + left, top +
                        apil.size[1] + bpil.size[1]))
    im.save(outputfile, 'PNG')
81, 162, 90, 165, 97, 167, 99, 171, 109, 171, 107, 161, 111, 156, 113, 170, 115, 185, 118, 208, 117, 223, 121, 239, 128, 251, 133, 259, 136, 266, 139, 276, 143, 290, 148, 310, 151, 332, 155, 348, 156, 353, 153, 366, 149, 379, 147, 394, 146, 399 ] second = [ 156, 141, 165, 135, 169, 131, 176, 130, 187, 134, 191, 140, 191, 146, 186, 150, 179, 155, 175, 157, 168, 157, 163, 157, 159, 157, 158, 164, 159, 175, 159, 181, 157, 191, 154, 197, 153, 205, 153, 210, 152, 212, 147, 215, 146, 218, 143, 220, 132, 220, 125, 217, 119, 209, 116, 196, 115, 185, 114, 172, 114, 167, 112, 161, 109, 165, 107, 170, 99, 171, 97, 167, 89, 164, 81, 162, 77, 155, 81, 148, 87, 140, 96, 138, 105, 141, 110, 136, 111, 126, 113, 129, 118, 117, 128, 114, 137, 115, 146, 114, 155, 115, 158, 121, 157, 128, 156, 134, 157, 136, 156, 136 ] im = Image.open('good.jpg', 'r') image = Image.new(im.mode, im.size) color = (255, 255, 255) points = [(first[i], first[i + 1]) for i in xrange(0, len(first), 2)] points.extend([(second[i], second[i + 1]) for i in xrange(0, len(second), 2)]) image.putpixel2 = partial(image.putpixel, value=color) for point in points: image.putpixel2(point) image.show()
print "<c> Arjo Chakravarty- GNU GPL" print "" print "THIS IS DISTRIBUTED WITHOUT ANY WARRANTY" print "Getting information (this will take time)..." (x, y) = im.size x = x - 2 #fix x and y for co-ordinate systems y = y - 2 i = 1 n = 1 print "\r" + Green + " [Done]" print "" print System + "processing..." im2 = Image.new("RGB", (x + 1, y + 1)) # now find out which pixels are bad and enhance the image while (i < x): k = 0 while (n < y): pixel = im.getpixel((i, n)) pixel1 = im.getpixel((i + 1, n)) pixel2 = im.getpixel((i - 1, n)) pixel3 = im.getpixel((i, n + 1)) pixel4 = im.getpixel((i + 1, n + 1)) pixel5 = im.getpixel((i - 1, n + 1)) pixel6 = im.getpixel((i + 1, n - 1)) pixel7 = im.getpixel((i - 1, n - 1)) pixel8 = im.getpixel((i, n - 1)) intensity = (pixel[0] + pixel[1] + pixel[2]) / 3 intensity1 = (pixel1[0] + pixel1[1] + pixel1[2]) / 3
def star_region():
    """Return a list of pixel coordinates forming one star-like blob.

    Starts at a random screen position with a random 'power' budget and
    keeps appending random disks — drifting roughly toward the screen
    centre with jitter — until the budget is spent.  Relies on
    module-level screen_size, disk_region(), length(), color_all() and
    color_list.
    """
    position = (random.randint(0, screen_size[0]),
                random.randint(0, screen_size[1]))
    power = random.randint(200, 400)
    region = []
    while (power > 0):
        # NOTE(review): int(.0 * power) is always 0, so the lower randint
        # bound is constant — possibly a typo for e.g. .04 * power; confirm.
        size = random.randint(int(.0 * power), int(
            0.07 * power)) + random.randint(12, 34)
        region = region + disk_region(position[0], position[1],
                                      int(size * .7389))
        power -= size
        # drift the next disk ~10px toward the screen centre, with jitter
        to_center = map(lambda x, y: x / 2 - y, screen_size, position)
        length_to_center = length(to_center)
        to_center = map(lambda x: int(10 * x / length_to_center), to_center)
        position = map(lambda x, y: x - y + random.randint(-45, 45),
                       position, to_center)
    return region


# main
img = Image.new('RGB', screen_size, (240, 235, 193))  # cream background image
pixels = img.load()  # create the pixel map
for _ in range(1500):
    color_all(random.choice(color_list), star_region())
img.save('myim.bmp')
def draw(request, code):
    """Django view: render a CAPTCHA image for challenge key `code`.

    Picks a random font, generates the challenge text and caches it under
    `code` for later verification, finds the largest font size that fits
    the WIDTH x HEIGHT canvas (size cached per font), draws the text
    (optionally with per-character vertical 'jump' and colours), applies a
    sine warp plus the configured filter chain, and returns the encoded
    image as the HTTP response.
    """
    font_name, fontfile = choice(settings.AVAIL_FONTS)
    cache_name = '%s-%s-size' % (PREFIX, font_name)
    text = generate_text()
    # remember the expected answer for this captcha code (10 min)
    cache.set('%s-%s' % (PREFIX, code), text, 600)

    def fits(font_size):
        # True if the text rendered at font_size fits inside the canvas
        font = ImageFont.truetype(fontfile, font_size)
        size = font.getsize(text)
        return size[0] < WIDTH and size[1] < HEIGHT

    # start from the cached size and grow/shrink to the largest fitting size
    font_size = cache.get(cache_name, 10)
    if fits(font_size):
        while True:
            font_size += 1
            if not fits(font_size):
                font_size -= 1
                break
    else:
        while True:
            font_size -= 1
            if fits(font_size):
                break
    cache.set(cache_name, font_size, 600)
    font = ImageFont.truetype(fontfile, font_size)
    text_size = font.getsize(text)
    # RGBA only when the background colour carries an alpha component
    icolor = 'RGB'
    if len(BG_COLOR) == 4:
        icolor = 'RGBA'
    im = Image.new(icolor, (WIDTH, HEIGHT), BG_COLOR)
    d = ImageDraw.Draw(im)
    if JUMP:
        # draw each character separately at a random vertical offset
        if COLORIZE:
            get_color = lambda: choice(FG_COLORS)
        else:
            color = choice(FG_COLORS)
            get_color = lambda: color
        position = [(WIDTH - text_size[0] + 8) / 2, 0]
        shift_max = HEIGHT - text_size[1]
        shift_min = shift_max / 4
        shift_max = shift_max * 3 / 4
        for char in text:
            l_size = font.getsize(char)
            try:
                position[1] = choice(range(shift_min, shift_max + 1))
            except IndexError:
                # empty range: no room to jump, pin to the minimum shift
                position[1] = shift_min
            d.text(position, char, font=font, fill=get_color())
            position[0] += l_size[0] - 8  # overlap neighbouring chars by 8px
    else:
        # single centred string
        position = [(WIDTH - text_size[0]) / 2, (HEIGHT - text_size[1]) / 2]
        d.text(position, text, font=font, fill=choice(FG_COLORS))
    response = HttpResponse(mimetype=MIME_TYPE)
    response[
        'cache-control'] = 'no-store, no-cache, must-revalidate, proxy-revalidate'
    # distort, then run the configured PIL filter chain
    im = SineWarp().render(im)
    for f in settings.FILTER_CHAIN:
        im = im.filter(getattr(ImageFilter, f))
    im.save(response, ENC_TYPE)
    return response
format = 'RGBA' #get a list of PNG files in the current directory names = glob.glob("*.png") if args.outfile in names: names.remove(args.outfile) # don't include any pre-existing output #create a list of PIL Image objects, sorted by size print "Create a list of PIL Image objects, sorted by size" images = sorted([(i.size[0] * i.size[1], name, i) for name, i in ((x, Image.open(x).convert(format)) for x in names)], reverse=args.largest_first) print "Create tree" tree = PackNode(args.size) image = Image.new(format, args.size) #insert each image into the PackNode area for i, (area, name, img) in enumerate(images): print name, img.size uv = tree.insert(img.size) if uv is None: raise ValueError('Pack size ' + str(args.size) + ' too small, cannot insert ' + str(img.size) + ' image.') image.paste(img, uv.area) if args.tempfiles: image.save("temp" + str(i).zfill(4) + ".png") image.save(args.outfile) image.show()
def _buildAccumulator(self, slice_name, span=1):
    """Return a caching AccumulatorDatasource over *slice_name* ('year',
    'month' or 'day') with the standard aggregate formulas registered
    under the 'data' group: min/max temperature, max wind gust and
    cumulative rain fall."""
    accu = AccumulatorDatasource()
    accu.slice = slice_name
    accu.span = span
    accu.caching = True
    accu.storage = self.storage
    accu.formulas = {
        'data': {
            'max_temp': MaxFormula('temp'),
            'min_temp': MinFormula('temp'),
            'max_gust': MaxFormula('wind_gust'),
            'rain_fall': SumFormula('rain')
        }
    }
    return accu

def render(self, data=None, context=None):
    """Render the weather-station "sticker" PNG and return its bytes.

    Draws a rounded-corner badge with the station logo, the current
    readings and today/month/year aggregates, saves it to
    ``self.filename`` and returns ``['image/png', <bytes>]``.
    Returns None (after logging a warning) on any error.

    *data* and *context* are unused; they are kept for interface
    compatibility with other renderers. (Fixed: they were previously
    mutable `{}` defaults.)
    """
    try:
        # Old-style PIL imports, deferred so the module loads without PIL.
        import Image
        import ImageDraw
        import ImageColor
        assert self.storage is not None, "'sticker.storage' must be set"

        # Build the accumulators lazily on first render.
        if self.accuD is None:
            self.logger.info("Initializing accumulators")
            self.accuY = self._buildAccumulator('year')
            self.accuM = self._buildAccumulator('month')
            self.accuD = self._buildAccumulator('day')
            # The daily accumulator additionally tracks the latest readings.
            self.accuD.formulas['current'] = {
                'temp': LastFormula('temp'),
                'hum': LastFormula('hum'),
                'pressure': LastFormula('pressure'),
                'gust': LastFormula('wind_gust'),
                'wind_deg': LastFormula('wind_dir'),
                'time': LastFormula('localtime')
            }

        # Calculate data
        self.logger.info("Calculating ...")
        current = self._calculateCurrentData(self.accuD)

        # Sticker palette and geometry
        green = ImageColor.getrgb("#007000")
        wheat = ImageColor.getrgb("#F5DEB3")
        dark_wheat = ImageColor.getrgb("#8D7641")
        black = ImageColor.getrgb("#000000")
        width = 260
        height = 100
        corner = 10
        im = Image.new('RGBA', (width, height), wheat)
        draw = ImageDraw.Draw(im)

        # 1) Transparency: alpha mask shaped like a rounded rectangle
        #    (two overlapping rectangles plus a filled disc per corner).
        mask = Image.new('L', im.size, color=0)
        mdraw = ImageDraw.Draw(mask)
        mdraw.rectangle((corner, 0, width - corner, height), fill=255)
        mdraw.rectangle((0, corner, width, height - corner), fill=255)
        mdraw.chord((0, 0, corner * 2, corner * 2), 0, 360, fill=255)
        mdraw.chord((0, height - corner * 2 - 1, corner * 2, height - 1), 0, 360, fill=255)
        mdraw.chord((width - corner * 2 - 1, 0, width - 1, corner * 2), 0, 360, fill=255)
        mdraw.chord((width - corner * 2 - 1, height - corner * 2 - 1, width - 1, height - 1), 0, 360, fill=255)
        im.putalpha(mask)

        # 2) Borders: one arc per rounded corner, one line per edge.
        draw.arc((0, 0, corner * 2, corner * 2), 180, 270, fill=dark_wheat)
        draw.arc((0, height - corner * 2 - 1, corner * 2, height - 1), 90, 180, fill=dark_wheat)
        draw.arc((width - corner * 2 - 1, 0, width, corner * 2), 270, 360, fill=dark_wheat)
        draw.arc((width - corner * 2 - 1, height - corner * 2 - 1, width - 1, height - 1), 0, 90, fill=dark_wheat)
        draw.line((corner, 0, width - corner - 1, 0), fill=dark_wheat)
        draw.line((corner, height - 1, width - corner - 1, height - 1), fill=dark_wheat)
        draw.line((0, corner, 0, height - corner - 1), fill=dark_wheat)
        draw.line((width - 1, corner, width - 1, height - corner - 1), fill=dark_wheat)

        # 3) Logo
        logo = Image.open(self.logo_file)
        im.paste(logo, (4, 3), logo)  # using the same image with transparencies as mask

        # 4) Current data and aggregates
        draw.text((65, 5), self.station_name, fill=green)
        draw.text((65, 25), "%0.1fC %d%% %0.1fKm/h %dmb" % current[0], fill=black)
        draw.text((65, 38), current[1], fill=dark_wheat)
        draw.text((6, 60), " Today: %4.1fC-%4.1fC %4.1fKm/h %5.1fl." % self._calculateAggregData(self.accuD), fill=dark_wheat)
        draw.text((6, 72), " Monthly: %4.1fC-%4.1fC %4.1fKm/h %5.1fl." % self._calculateAggregData(self.accuM), fill=dark_wheat)
        draw.text((6, 84), " Yearly: %4.1fC-%4.1fC %4.1fKm/h %5.1fl." % self._calculateAggregData(self.accuY), fill=dark_wheat)

        # Save sticker, then read it back to hand the bytes to the caller.
        im.save(self.filename)
        self.logger.info("Sticker generated")
        with open(self.filename, "rb") as f:
            d = f.read()
        return ['image/png', d]
    except Exception as e:
        # Best-effort renderer: log and return None rather than propagate.
        self.logger.warning("Error rendering sticker: %s" % str(e))
        return None
def create_paper():
    """Return a fresh, blank CMYK image sized to the PAPER constant."""
    blank_sheet = Image.new("CMYK", PAPER)
    return blank_sheet
def showtext(text, fill, font=None, size=None, cursor=None, portrait=False, flipx=False, flipy=False, oldimage=None, spacing=0):
    """Draw a string on the screen.

    Renders *text* into a fresh 1-bit image, optionally draws a cursor
    underline at *cursor* = (col, row, char_under_cursor), orients/flips
    the image, and pushes only the changed region to the display when
    *oldimage* is given. Returns the rendered image so the caller can
    pass it back as *oldimage* next time.
    """
    # set order of h, w according to orientation
    image = Image.new('1', (_PAPER_WIDTH, _PAPER_HEIGHT) if portrait else (_PAPER_HEIGHT, _PAPER_WIDTH), _WHITE)
    # create the Draw object and draw the text
    draw = ImageDraw.Draw(image)
    draw.text((0, 0), text, font=font, fill=fill, spacing=spacing)
    # check if font is a TrueType font
    truetype = isinstance(font, ImageFont.FreeTypeFont)
    # if we want a cursor, draw it - the most convoluted part
    if cursor:
        cur_x, cur_y = cursor[0], cursor[1]
        # get the width of the character under cursor
        # (in case we didn't use a fixed width font...)
        fw = font.getsize(cursor[2])[0]
        # dirty trick to get "maximum height": 'hg' spans ascender+descender
        fh = font.getsize('hg')[1]
        # desired cursor width
        cur_width = fw - 1
        # get descent value (bitmap fonts report no metrics)
        descent = font.getmetrics()[1] if truetype else 0
        # the reported font size
        size = font.size if truetype else fh
        # Why descent/2? No idea, but it works "well enough" with
        # big and small sizes.
        # NOTE(review): Python 2 integer division here — a py3 port
        # would need `//` to keep HEIGHT integral.
        HEIGHT = size - (descent / 2) + spacing
        # starting X is the font width times current column
        start_x = cur_x * fw
        # add 1 because rows start at 0 and we want the cursor at the bottom
        start_y = (cur_y + 1) * HEIGHT - 1 - spacing
        # draw the cursor line
        draw.line((start_x, start_y, start_x + cur_width, start_y), fill=_BLACK)
    # rotate image if using landscape
    if not portrait:
        image = image.rotate(90, expand=True)
    # apply flips if desired
    if flipx:
        image = image.transpose(Image.FLIP_LEFT_RIGHT)
    if flipy:
        image = image.transpose(Image.FLIP_TOP_BOTTOM)
    # find out which part changed and draw only that on the display
    if oldimage:
        # create a bounding box of the altered region and
        # make the X coordinates divisible by 8
        diff_bbox = band(img_diff(image, oldimage))
        # crop the altered region and draw it on the display
        if diff_bbox:
            replace_area(diff_bbox[0], diff_bbox[1], image.crop(diff_bbox))
    else:
        # if no previous image, draw the entire display
        replace_area(0, 0, image)
    return image
#!/usr/bin/env python # First will crop the image and will then make background transparent import os import Image, ImageChops from glob import glob os.chdir("/home2/data/Projects/CWAS/development+motion/viz") fpaths = glob("*.jpg") for fpath in fpaths: print fpath im = Image.open(fpath) im = im.convert("RGBA") bg = Image.new("RGBA", im.size, (255, 255, 255, 255)) diff = ImageChops.difference(im, bg) bbox = diff.getbbox() im2 = im.crop(bbox) im2.save(fpath)
os.makedirs(path) except OSError as exc: if exc.errno == errno.EEXIST and os.path.isdir(path): pass else: raise image_paths = [] with open('dataset.csv','rb') as fin: rank = 0 reader = UnicodeReader(fin) for line in reader: if rank == 0: pass else: image = Image.new('RGBA', (120,120),(0,0,0)) draw = ImageDraw.Draw(image) font = ImageFont.truetype("simsun.ttc", 120) draw.text((0,0), unicode(line[0].encode("utf-8"),"UTF-8"), font=font) del draw filename = "../machine_teaching/teacher/static/" back = "teacher/images/chinese/" + str(rank) + "/" + line[2] + ".JPG" filename += back image_paths.append(back) dir = os.path.dirname(filename) try: os.stat(dir) except: mkdir_p(dir) image.save(filename, "JPEG") rank += 1
def recv_pixel(ser):
    """Return the next non-NUL byte read from *ser* as an int (0-255).

    NUL ('\0') bytes are treated as framing/padding and skipped.
    NOTE(review): the comparison assumes `ser.read` returns a py2 str;
    under py3 pyserial (bytes) this test would need b'\0'.
    """
    while True:
        byte = ser.read(1)
        if byte == '\0':
            continue
        return ord(byte)


def main():
    """Receive a <width> x <height> grayscale frame over the serial port
    and save it as an RGB image at <dst file>."""
    if len(sys.argv) != 4:
        # Fixed: previously execution fell through after the usage
        # message and crashed on sys.argv[1] with an IndexError.
        print("Usage: %s <width> <height> <dst file>" % sys.argv[0])
        sys.exit(1)
    size = (int(sys.argv[1]), int(sys.argv[2]))
    dst_file = sys.argv[3]
    ser = serial.Serial("/dev/ttyUSB0", 19200, parity='N', rtscts=False, xonxoff=False)
    try:
        img = Image.new('RGB', size)
        pixels = img.load()
        # One byte per pixel, replicated across R, G and B.
        for y in range(img.size[1]):
            for x in range(img.size[0]):
                px = recv_pixel(ser)
                pixels[x, y] = (px, px, px)
        img.save(dst_file)
    finally:
        # Fixed: the port is now closed even if reading/saving fails.
        ser.close()


if __name__ == "__main__":
    main()