def mk_bar():
    # Build the bar in two parts: composite() accepts at most 16 inputs.
    part = images.composite(
        [(_pix, 0, y, 1., images.TOP_LEFT) for y in xrange(10)],
        1, 10, 0x00000000, images.PNG)
    return images.composite(
        [(part, 0, y * 10, 1., images.TOP_LEFT) for y in xrange(3)],
        1, 30, 0x00000000, images.PNG), 30
def _char_img(bits, scale=None):
    if scale and not 0 < scale <= 10:
        raise ValueError, "Scale %d out of limits" % scale
    if scale > 1:
        _bar = images.composite(
            [(bar, x, 0, 1., images.TOP_LEFT) for x in xrange(scale)],
            scale, bar_height, 0x00000000, images.PNG)
    else:
        _bar = bar
    return images.composite(
        [(_bar, x * scale, 0, 1., images.TOP_LEFT)
         for x, c in enumerate(bits) if c == '1'],
        len(bits) * scale, bar_height, 0xffffffff, images.PNG)
def post(self):
    try:
        # The json lib doesn't accept a trailing comma, so strip it first.
        request = json.loads(re.sub(',[ \t\r\n]*\]', ']', self.request.body))
        inputs, heights, widths = [], [], []
        for element in request:
            # Open the file; a failure here means a bad file name.
            image = images.Image(
                image_data=open("img/" + element['filename'], "rb").read())
            # Offsets must be non-negative.
            x = int(element['x'])
            if x < 0:
                raise ValueError('X Offset is invalid: ' + str(x))
            y = int(element['y'])
            if y < 0:
                raise ValueError('Y Offset is invalid: ' + str(y))
            widths.append(image.width + x)
            heights.append(image.height + y)
            inputs.append((image, x, y, 1.0, images.TOP_LEFT))
        response = images.composite(inputs, max(widths), max(heights))
    except IOError as err:
        self.response.headers['Content-Type'] = 'text/html'
        self.response.out.write("Image error: {0}".format(err))
    except ValueError as err:
        self.response.headers['Content-Type'] = 'text/html'
        self.response.out.write(
            "Request error: {0} : {1}".format(err, self.request.body))
    else:
        self.response.headers['Content-Type'] = "image/png"
        self.response.out.write(response)
def composite_tile(tilespec):
    tile = {p: int(request.args.get(p)) for p in ("z", "x", "y")}
    tile_layers = parse_tilespec(tilespec)
    logging.info(
        "tile_layers: %r",
        [(l.__class__.__name__, {t: getattr(l, t) for t in l.trait_names()})
         for l in tile_layers])
    tile_images = [
        get_tile(tile_layer, tile, return_pil=app.debug)
        for tile_layer in tile_layers
    ]
    tile_images = [i.get_result() for i in tile_images]
    logging.info("compositing")
    if not app.debug:
        composite = images.composite(
            [(i, 0, 0, l.opacity, images.TOP_LEFT)
             for i, l in zip(tile_images, tile_layers)],
            width=256, height=256, output_encoding=images.PNG)
    else:
        b = io.BytesIO()
        overlay_image(*[
            clip_image_alpha(i, int(255) * l.opacity)
            for i, l in zip(tile_images, tile_layers)
        ]).save(b, "png")
        composite = b.getvalue()
    return Response(composite, content_type='image/png')
def get(self):
    # Diagram: height 421px, width 400px; output width will be about 800px.
    # Output a resized PNG that matches the Twitter card format.
    #
    # Basic concept:
    #   1. Prepare a white background PNG, 800px wide and 421px high.
    #   2. Put the diagram image in the center of that background.
    img_list = []
    # Make the white background image (800x421 px).
    with open('img/whitebase.png', 'rb') as f:
        img = f.read()
    img = images.resize(img, self.WIDTH, self.HEIGHT, allow_stretch=True)
    img_list += [(img, 0, 0, 1.0, images.TOP_LEFT)]
    url = ('http://' + app_identity.get_default_version_hostname() +
           '/sfen?' + self.request.query_string)
    diagram_img = urllib2.urlopen(url).read()
    diagram_img_obj = Image(diagram_img)
    # Center the diagram horizontally.
    x = (self.WIDTH - diagram_img_obj.width) // 2
    img_list += [(diagram_img, x, 0, 1.0, images.TOP_LEFT)]
    img = images.composite(img_list, self.WIDTH, self.HEIGHT, color=0xFFFFFFFF)
    self.response.headers['Content-Type'] = 'image/png'
    self.response.out.write(img)
def get(self, display_type, rparameters, pic_key):
    """Dynamically serves a PNG image from the datastore.

    Args:
        display_type: a string describing the type of image to serve
            ('image' or 'thumbnail')
        pic_key: the key for a Picture model that holds the image
    """
    try:
        image = db.get(pic_key)
        data = image.data
        th_data = image.thumbnail_data
    except:
        from bindata import PictureErr
        image = PictureErr()
        data = image.data
        th_data = image.thumbnail_data
    if display_type == 'image':
        self.response.headers['Content-Type'] = 'image/png'
        from bindata import LogoButton
        imagelogo = LogoButton()
        data2 = imagelogo.data
        xpng = images.Image(data)
        ypng = images.Image(data2)
        org_width, org_height = xpng.width, xpng.height
        composite = images.composite(
            [(xpng, 0, 0, 1.0, images.TOP_LEFT),
             (ypng, 0, 0, 1.0, images.BOTTOM_RIGHT)],
            org_width, org_height, 0, images.PNG)
        self.response.out.write(composite)
    elif display_type == 'thumbnail':
        self.response.headers['Content-Type'] = 'image/png'
        self.response.out.write(th_data)
    else:
        self.error(500)
        self.response.out.write(
            "Couldn't determine what type of image to serve.")
def layer(ims, marks, position):
    """Overlays the watermark image onto the base image at the given position."""
    side_offset = 20
    # Convert from raw image data to Image objects.
    im = images.Image(ims)
    mark = images.Image(marks)
    if position == 'left':
        x = side_offset
        y = (im.height - mark.height) / 2
    elif position == 'right':
        x = im.width - mark.width - side_offset
        y = (im.height - mark.height) / 2
    else:
        # 'center' (also used as the fallback for unknown positions,
        # so x and y are always defined).
        x = (im.width - mark.width) / 2
        y = (im.height - mark.height) / 2
    result = images.composite(
        [(im, 0, 0, 1.0, images.TOP_LEFT),
         (mark, x, y, 1.0, images.TOP_LEFT)],
        im.width, im.height, 0, images.PNG)
    return result
def get(self):
    path_file = open('normalized_2.txt')
    paths = path_file.readlines()
    path_file.close()
    image_data = []
    offsets = []
    alpha = 1.0 / len(paths)
    for parts in paths:
        file_name, x, y = parts.strip().split(' ')
        f = open(file_name)
        data = f.read()  # images.resize(f.read(), width=800)
        w = images.Image(data).width
        data = images.resize(data, width=w / 4)
        f.close()
        image_data.append(data)
        offsets.append((-int(x) / 4, -int(y) / 4))
    base_x, base_y = min(x[0] for x in offsets), min(x[1] for x in offsets)
    comp_tuples = [(data, x - base_x, y - base_y, alpha, images.TOP_LEFT)
                   for (data, (x, y)) in zip(image_data, offsets)]
    comp_data = images.composite(comp_tuples, 640, 480)
    # composite() defaults to PNG output, so serve the matching content type.
    self.response.headers['Content-Type'] = 'image/png'
    self.response.out.write(comp_data)
def get(self):
    url = "http://www.hindified.com/lib/captcha/"
    #url = "http://127.0.0.1/captcha/"
    userhash = sha.new(str(datetime.datetime.now().microsecond)).hexdigest()[5:9]
    randlist = list(userhash)
    userid = md5.new(str(datetime.datetime.now().microsecond)).hexdigest()[5:11]
    memcache.add(key=userid, value=userhash, time=3600)
    cookie = Cookie.SimpleCookie()
    cookie["userid"] = userid
    cookie["userid"]["expires"] = (
        datetime.datetime.now() + datetime.timedelta(minutes=30)
    ).strftime("%a, %d-%b-%Y %H:%M:%S GMT")
    print cookie.output()
    print 'Pragma: no-cache'
    print 'Cache-Control: no-cache'
    print 'Expires: -1'
    inputimage = urlfetch.fetch(url + 'main.png')
    inputimage1 = urlfetch.fetch(url + randlist[0] + '.png')
    inputimage2 = urlfetch.fetch(url + randlist[1] + '.png')
    inputimage3 = urlfetch.fetch(url + randlist[2] + '.png')
    inputimage4 = urlfetch.fetch(url + randlist[3] + '.png')
    img0 = inputimage.content
    img1 = inputimage1.content
    img2 = inputimage2.content
    img3 = inputimage3.content
    img4 = inputimage4.content
    if inputimage.status_code == 200:
        img5 = images.composite(
            [(img0, 0, 0, 1.0, images.TOP_LEFT),
             (img1, 105, 12, 1.0, images.TOP_LEFT),
             (img2, 125, 12, 1.0, images.TOP_LEFT),
             (img3, 145, 12, 1.0, images.TOP_LEFT),
             (img4, 165, 12, 1.0, images.TOP_LEFT)],
            187, 45, 1, output_encoding=images.PNG)
        self.response.headers['Content-Type'] = "image/png"
        self.response.out.write(img5)
def update_map_image(user, zoom, width, height, northlat, westlng):
    input_tuples = []
    for offset_x_px in range(0, width, 256):
        for offset_y_px in range(0, height, 256):
            new_tile = tile.CustomTile(user, int(zoom), northlat, westlng,
                                       offset_x_px, offset_y_px)
            input_tuples.append((new_tile.image_out(), offset_x_px,
                                 offset_y_px, 1.0, images.TOP_LEFT))
    # http://code.google.com/appengine/docs/python/images/functions.html
    img = images.composite(inputs=input_tuples, width=width, height=height,
                           color=0, output_encoding=images.PNG)
    return img
def get(self, value):
    patterns = self.patterns(value.decode('utf-8'))
    scale = int(self.request.get('s', '1'))
    imgs = [(len(p) * scale, _char_img(p, scale)) for p in patterns]
    (width, final), imgs = imgs[0], imgs[1:]
    for w, img in imgs:
        final = images.composite(
            [(final, 0, 0, 1., images.TOP_LEFT),
             (img, width, 0, 1., images.TOP_LEFT)],
            width + w, bar_height, 0x00000000, images.PNG)
        width += w
    if scale > 1:
        final = images.composite(
            [(final, 0, y * bar_height, 1., images.TOP_LEFT)
             for y in xrange(scale)],
            width, bar_height * scale, 0x00000000, images.PNG)
    self.response.headers['Content-Type'] = mail.EXTENSION_MIME_MAP['png']
    self.response.headers['Cache-Control'] = "public, max-age=%d" % (24 * 60 * 60)
    self.response.out.write(final)
def post(self):
    to_composite = []
    for img in json.loads(self.request.body):
        img_path = os.path.join(ROOT_DIR, 'img', img['filename'])
        # composite() expects raw image bytes, not a file path.
        img_data = open(img_path, 'rb').read()
        to_composite.append((img_data, img['x'], img['y'], 1.0,
                             images.TOP_LEFT))
    composite = images.composite(to_composite, 300, 300)
    self.response.headers['Content-Type'] = 'image/png'
    self.response.out.write(composite)
def create_thumbail(w, h, image, format):
    if image is None:
        return None
    try:
        img = images.Image(image)
    except:
        return None
    src_w = img.width
    src_h = img.height
    # For the Windows 8 tile, crop to a square (integral aspect ratio).
    if format == "tile":
        if src_w < src_h:
            img.crop(0.0, 0.0, 1.0, 1.0 * src_w / src_h)
        else:
            img.crop(0.0, 0.0, 1.0 * src_h / src_w, 1.0)
    # Shrinking an odd-sized image samples outside the frame and produces a
    # vertical line artifact, so resize one pixel larger and trim the edges.
    if h == 0:
        h = w * src_h / src_w
    margin = 1
    try:
        img.resize(width=w + margin * 2, height=h + margin * 2)
        margin_w = 1.0 * margin / (w + margin * 2)
        margin_h = 1.0 * margin / (h + margin * 2)
        img.crop(margin_w, margin_h, 1.0 - margin_w, 1.0 - margin_h)
    except:
        return None  # size error
    code = None
    if format == "jpeg":
        try:
            img.execute_transforms()  # execute the resize
            # Workaround for transparent PNG backgrounds turning black:
            # composite onto a white canvas before encoding as JPEG.
            code = images.composite([(img, 0, 0, 1.0, images.TOP_LEFT)],
                                    img.width, img.height, 0xffffffff,
                                    images.JPEG, 90)
        except:
            return None
        content_type = 'image/jpeg'
    elif format == "png" or format == "tile":
        try:
            code = img.execute_transforms(output_encoding=images.PNG)
        except:
            return None
        content_type = 'image/png'
    else:
        return None
    if code is None:
        return None
    return {"code": code, "width": src_w, "height": src_h,
            "content_type": content_type}
def create_map(self, shengs, w, h):
    shengs = ['base'] + shengs
    img = images.composite(
        [(self.load(sheng), 0, 0, 1.0, images.TOP_LEFT) for sheng in shengs],
        self.WIDTH, self.HEIGHT)
    if w != self.WIDTH or h != self.HEIGHT:
        img = images.resize(img, w, h)
    return img
def get(self):
    self.response.headers['Content-Type'] = 'image/jpeg'
    from google.appengine.api import images
    xpng = open('2.jpg').read()
    ypng = open('3.png').read()
    ypng = images.resize(ypng, width=15, height=15)
    composite = images.composite(
        [(xpng, 0, 0, 1.0, images.TOP_LEFT),
         (ypng, -1, -1, 1.0, images.BOTTOM_RIGHT)],
        80, 80, output_encoding=images.JPEG)
    self.response.out.write(composite)
    return
def get_image(self):
    """Returns the completed mosaic.

    If it hasn't been rendered yet, render it; otherwise return the
    already rendered image.
    """
    if self._output_image is None:
        comp_blobs = [(blob, x, y, 1.0, images.TOP_LEFT)
                      for (blob, x, y) in self._output_blobs]
        # Alas, composite only supports 16 images at a time, so fold the
        # blobs in 15 at a time on top of the running result.
        #logging.debug(comp_blobs)
        self._output_image = images.composite(
            comp_blobs[0:15], self._output_width, self._output_height)
        for blobindex in range(15, len(comp_blobs), 15):
            fifteen_blobs = comp_blobs[blobindex:blobindex + 15]
            # The running result must be drawn at full opacity; otherwise
            # every chunk before the last one would be lost.
            self._output_image = images.composite(
                [(self._output_image, 0, 0, 1.0, images.TOP_LEFT)] +
                fifteen_blobs,
                self._output_width, self._output_height)
    return self._output_image
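# A minimal, generic sketch of the same chunking pattern used above (an
# assumed helper, not part of any snippet in this file): images.composite()
# accepts at most 16 inputs per call, so fold the running result back in as
# the first layer of each subsequent chunk. Input tuples follow the
# (data, x, y, opacity, anchor) form used throughout this file.
def composite_in_chunks(inputs, width, height, chunk_size=15):
    from google.appengine.api import images
    # First call can take a full 16 inputs since there is no prior result.
    result = images.composite(inputs[:chunk_size + 1], width, height)
    for i in range(chunk_size + 1, len(inputs), chunk_size):
        # Previous result + up to 15 new layers = at most 16 inputs.
        layers = [(result, 0, 0, 1.0, images.TOP_LEFT)] + inputs[i:i + chunk_size]
        result = images.composite(layers, width, height)
    return result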
def compose_and_save(key, tile, x, y):
    # This has to be done in a transaction - otherwise the different threads
    # will overwrite each other's progress on the shared mapimage.
    mapimage = db.get(key)
    input_tuples = [(tile.image_out(), x, y, 1.0, images.TOP_LEFT)]
    if mapimage.img:
        input_tuples.append((mapimage.img, 0, 0, 1.0, images.TOP_LEFT))
    # Redraw the main image every time to show progress.
    img = images.composite(inputs=input_tuples, width=mapimage.width,
                           height=mapimage.height, color=0,
                           output_encoding=images.PNG)
    mapimage.img = db.Blob(img)
    mapimage.tiles_remaining -= 1
    mapimage.last_updated = datetime.now()
    mapimage.put()
def watermark(image):
    img = images.Image(images.resize(str(image), 800, 600))
    with open(settings.WATERMARK_PATH, 'rb') as f:
        watermark = images.Image(images.resize(f.read(), img.width, img.height))
    x = (img.width - watermark.width) / 2
    y = (img.height - watermark.height) / 2
    return images.composite(
        [(img, 0, 0, 1.0, images.TOP_LEFT),
         (watermark, x, y, 0.2, images.TOP_LEFT)],
        img.width, img.height, 0, images.JPEG)
def composite(self, img_list):
    if len(img_list) == 1:
        return (img_list[0][0], img_list)
    img = images.composite(img_list, self.IMAGE_WIDTH,
                           self.IMAGE_HEIGHT + self.max_title_height,
                           color=0xFFFFFFFF)
    img_list = [(img, 0, 0, 1.0, images.TOP_LEFT)]
    logging.debug("composite success:")
    return (img, img_list)
def generate_img_from_setting(self, setting):
    oad = Image(setting['file'])
    otup = (oad, 0, 0, 1.0, images.TOP_LEFT)
    cplist = [otup]
    for adconfig in setting['current_ad_list']:
        ad_img = adconfig['data']
        (coord, size) = self.transform_coordinate(adconfig['location'],
                                                  setting, adconfig)
        ad_img = images.resize(ad_img, size[0], size[1])
        itup = (ad_img, coord[0] + 1, coord[1] + 2, 1.0, images.TOP_LEFT)
        cplist.append(itup)
    return images.composite(cplist, oad.width, oad.height, 0, images.JPEG)
def update_map_image(user, google_data, width, height, northlat, westlng):
    result = urlfetch.fetch(
        url="http://maps.google.com/maps/api/staticmap?" +
            urllib.urlencode(google_data),
        method=urlfetch.GET)
    input_tuples = []
    input_tuples.append((result.content, 0, 0, 1.0, images.TOP_LEFT))
    for offset_x_px in range(0, width, 256):
        for offset_y_px in range(0, height, 256):
            new_tile = tile.CustomTile(user, int(google_data['zoom']),
                                       northlat, westlng, offset_x_px,
                                       offset_y_px)
            input_tuples.append((new_tile.image_out(), offset_x_px,
                                 offset_y_px, 1.0, images.TOP_LEFT))
    # http://code.google.com/appengine/docs/python/images/functions.html
    img = images.composite(inputs=input_tuples, width=width, height=height,
                           color=0, output_encoding=images.PNG)
    return img
def create_watermark(binary, watermark, position=None, opacity=0.4):
    from google.appengine.api import images
    if position is None:
        position = images.BOTTOM_RIGHT
    img = images.Image(binary)
    width = img.width
    height = img.height
    img = images.composite(
        [(img._image_data, 0, 0, 1.0, images.TOP_LEFT),
         (watermark, 0, 0, opacity, position)],
        width, height, 0, images.PNG)
    return img
def get(self):
    image_list = [(self.random_data(), 0, 0, 0.5 + (random.random() / 2.0),
                   images.TOP_LEFT)]
    a = 0
    r = int(random.random() * 255)
    g = int(random.random() * 255)
    b = int(random.random() * 255)
    # Pack the ARGB channels into a 32-bit background color; each channel
    # occupies 8 bits, so shift by multiples of 8 (not multiply by 255).
    color = (a << 24) | (r << 16) | (g << 8) | b
    image = images.composite(image_list, 100, 100, color)
    self.response.headers['Content-Type'] = 'image/png'
    self.response.out.write(image)
def add_trial_service_overlay(image):
    image_width = images.Image(image).width
    scale = image_width / 50.0
    overlay = _get_trial_service_overlay()
    if scale != 1:
        overlay_img = images.Image(overlay)
        new_size = int(scale * overlay_img.width)
        overlay_img.resize(new_size, new_size)
        overlay = overlay_img.execute_transforms(overlay_img.format, 100)
    return composite(
        [(image, 0, 0, 1.0, TOP_LEFT),
         (overlay, int(5 * scale), int(-5 * scale), 1.0, BOTTOM_LEFT)],
        image_width, image_width)
def addResult(self, result, type, X, Y):
    logging.info('type: %s ' % type)
    if not (X in self.tiles[type]):
        self.tiles[type][X] = {}
    self.tiles[type][X][Y] = result
    self.tilesDone = self.tilesDone + 1
    if type == 'base':
        if self.image is None:
            self.image = result
        logging.info('compositing base %i %i' % (X, Y))
        self.image = images.composite(
            [(self.image, 0, 0, 1.0, images.TOP_LEFT),
             (result, (X * 256), (Y * 256), 1.0, images.TOP_LEFT)],
            self.pixWidth, self.pixHeight)
        if X in self.tiles['map']:
            if Y in self.tiles['map'][X]:
                logging.info('compositing map %i %i' % (X, Y))
                self.image = images.composite(
                    [(self.image, 0, 0, 1.0, images.TOP_LEFT),
                     (self.tiles['map'][X][Y], (X * 256), (Y * 256), 0.55,
                      images.TOP_LEFT)],
                    self.pixWidth, self.pixHeight)
    if type == 'map':
        if X in self.tiles['base']:
            if Y in self.tiles['base'][X]:
                logging.info('compositing map %i %i' % (X, Y))
                self.image = images.composite(
                    [(self.image, 0, 0, 1.0, images.TOP_LEFT),
                     (result, (X * 256), (Y * 256), 0.55, images.TOP_LEFT)],
                    self.pixWidth, self.pixHeight)
    logging.info('%i tiles done, %i tiles requested' %
                 (self.tilesDone, self.tilesRequested))
def post(self):
    if self.request.get("img1"):
        PresentTCard = TblCard.all()
        PresentTCard.order('CardNum').fetch(limit=1)
        NextId = 0
        for r in PresentTCard:
            if PresentTCard:
                NextId = r.CardNum + 1
            else:
                NextId = 0
        TCard = TblCard()
        TCard.CardNum = NextId
        if self.request.get("name1"):
            TCard.CardName = self.request.get("name1")
        else:
            TCard.CardName = u"aainc member"
        if self.request.get("desc1"):
            TCard.CardDesc = self.request.get("desc1")
        else:
            TCard.CardDesc = u"thanks"
        Img = images.resize(self.request.get("img1").decode('base64'), 600, 600)
        framePath = ('http://' + os.environ['HTTP_HOST'] +
                     self.request.get("framePath"))
        try:
            ImgFrame = images.resize(urlfetch.Fetch(framePath).content, 600, 600)
        except:
            time.sleep(3)
            ImgFrame = images.resize(urlfetch.Fetch(framePath).content, 600, 600)
        try:
            ImgFrame = images.resize(urlfetch.Fetch(framePath).content, 600, 600)
        except:
            time.sleep(3)
            ImgFrame = images.resize(urlfetch.Fetch(framePath).content, 600, 600)
        ImgComp = images.composite([(Img, 0, 0, 1.0, 0),
                                    (ImgFrame, 0, 0, 1.0, 0)], 600, 600)
        TCard.CardImage = db.Blob(ImgComp)
        TCard.CardDate = datetime.datetime.today() + datetime.timedelta(hours=9)
        TCard.CardIPAddress = os.environ['REMOTE_ADDR']
        db.put(TCard)
        cUrl = "/Detail?cid=" + str(TCard.CardNum)
        self.response.out.write(cUrl)
    else:
        self.response.out.write("/Err")
def get(self):
    try:
        # Parse the path.
        name = self.request.path.split('/')[2]
        (key, type) = name.split('.')
        key = db.Key(key)
        output_encoding = [images.PNG, images.JPEG][['png', 'jpg'].index(type)]
        # Fetch the counter.
        counter = Counter.get(key)
        if not counter:
            responses.display_error(self, 404)
            return
        # Collect access-history information.
        record = {
            'referer': self.request.headers.get('Referer'),
            'user_agent': self.request.headers.get('User-Agent'),
            'remote_addr': self.request.remote_addr,
        }
        # Increment the count.
        result = db.run_in_transaction(self.increment_count, counter.key(),
                                       record)
        count = Counter.get(result).count
        logging.debug(count)
        # Split the new count into its digits.
        digits = []
        while count / 10 != 0:
            digits.append(count % 10)
            count /= 10
        digits.append(count)
        # Load the digit images to use.
        image_data = {}
        for number_image in NumberImage.all().ancestor(key).filter(
                'number in', digits):
            image_data[number_image.number] = number_image.data
        # Build the list of layers to composite.
        image_list = []
        offset = 0
        for i in reversed(digits):
            image_list.append((image_data[i], offset, 0, 1.0, images.TOP_LEFT))
            offset += 64
        # Composite and output.
        image = images.composite(image_list, offset, 128,
                                 output_encoding=output_encoding)
        if output_encoding == images.PNG:
            self.response.headers['Content-Type'] = 'image/png'
        elif output_encoding == images.JPEG:
            self.response.headers['Content-Type'] = 'image/jpeg'
        self.response.out.write(image)
    except ValueError, error:
        logging.error('invalid path')
        responses.display_error(self, 404)
def insert(self, image_data):
    """Stable on appspot, not local. Hm, unstable everywhere."""
    image = images.Image(image_data)
    result = urlfetch.Fetch(self.__watermark_addr)
    watermark = images.Image(result.content)
    if watermark.width > image.width:
        watermark.resize(image.width)
        watermark.execute_transforms(images.JPEG)
    watermarked_image = images.composite(
        [(image_data, 0, 0, 1.0, images.TOP_LEFT),
         (watermark._image_data, 0, 0, 0.5, images.BOTTOM_CENTER)],
        image.width, image.height, 0, images.JPEG)
    return watermarked_image
def get(self, full_count, unknown_count, empty_count):
    fc = int(full_count)
    uc = int(unknown_count)
    ec = int(empty_count)
    total = fc + uc + ec
    multiplier = 1.0 * total_width / total
    full_pos = int(round(fc * multiplier))
    unknown_pos = int(round(uc * multiplier))
    empty_pos = int(round(ec * multiplier))
    if full_pos < height:
        full_width = height
    else:
        full_width = full_pos
    if unknown_pos < height:
        unknown_width = height
    else:
        unknown_width = unknown_pos
    if empty_pos < height:
        empty_width = height
    else:
        empty_width = empty_pos
    full = images.resize(red_png, full_width, full_width,
                         output_encoding=images.PNG)
    unknown = images.resize(yellow_png, unknown_width, unknown_width,
                            output_encoding=images.PNG)
    empty = images.resize(green_png, empty_width, empty_width,
                          output_encoding=images.PNG)
    full_in = (full, 0, 0, 1.0, images.TOP_LEFT)
    unknown_in = (unknown, full_pos, 0, 1.0, images.TOP_LEFT)
    empty_in = (empty, full_pos + unknown_pos, 0, 1.0, images.TOP_LEFT)
    inputs = (full_in, unknown_in, empty_in)
    self.response.headers['Content-Type'] = 'image/png'
    self.response.out.write(images.composite(inputs, total_width, height))
def get(self):
    path_file = open('offsets_1.txt')
    paths = path_file.readlines()
    path_file.close()
    image_data = []
    offsets = self.__BuildOffsets(paths)
    alpha = 1.0 / len(paths)
    start = int(self.request.get('start', ''))
    end = self.request.get('end')
    start_date = self.__ToDate(paths[start])
    if end:
        end = int(end)
        end_date = self.__ToDate(paths[end])
    else:
        end = start
        end_date = None
    key_name = StoredImage.KeyForImage('2', resized=True,
                                       start_date=start_date,
                                       end_date=end_date)
    maybe_stored = StoredImage.get_by_key_name(key_name)
    if not maybe_stored:
        maybe_stored = StoredImage(key_name=key_name)
        alpha = 1.0 / (1 + end - start)
        for parts in paths[start:1 + end]:
            file_name, x, y = parts.strip().split(' ')
            f = open(file_name)
            data = f.read()  # images.resize(f.read(), width=800)
            # w = images.Image(data).width
            # data = images.resize(data, width=w / 4)
            f.close()
            image_data.append(data)
        base_x, base_y = min(offsets)
        comp_tuples = [(data, x - base_x, y - base_y, alpha, images.TOP_LEFT)
                       for (data, (x, y)) in zip(image_data,
                                                 offsets[start:1 + end])]
        comp_data = images.composite(comp_tuples, 640, 480)
        maybe_stored.processed_image = comp_data
        maybe_stored.put()
    # composite() defaults to PNG output, so serve the matching content type.
    self.response.headers['Content-Type'] = 'image/png'
    self.response.out.write(maybe_stored.processed_image)
def create_post_image(choosie_post):
    img1_blob_reader = blobstore.BlobReader(choosie_post.photo1_blob_key)
    img2_blob_reader = blobstore.BlobReader(choosie_post.photo2_blob_key)
    img1 = images.Image(image_data=img1_blob_reader.read())
    img2 = images.Image(image_data=img2_blob_reader.read())
    icon_1_path = os.path.join(os.path.split(__file__)[0], '1.png')
    icon_2_path = os.path.join(os.path.split(__file__)[0], '2.png')
    icon1 = open(icon_1_path).read()
    icon2 = open(icon_2_path).read()
    img_icon_1 = images.Image(image_data=icon1)
    img_icon_2 = images.Image(image_data=icon2)
    composite = images.composite(
        [(img1, 0, 0, 1.0, images.TOP_LEFT),
         (img2, img1.width, 0, 1.0, images.TOP_LEFT),
         (img_icon_1, 0, 0, 0.3, images.TOP_LEFT),
         (img_icon_2, img1.width, 0, 0.3, images.TOP_LEFT)],
        img1.width + img2.width, img1.height)
    logging.info('created image')
    return composite
def smart_composite(layers, w, h, output_encoding):
    good_layers = []
    for layer in layers:
        try:
            # This transform is meant to be thrown away; it is only used to
            # find out whether the GAE images API will accept the image data.
            img = images.Image(layer[0])
            img.resize(width=500, height=500)
            img = img.execute_transforms()
            good_layers.append(layer)
        except:
            # Bad layer; skip it.
            pass
    if len(good_layers) > 0:
        return images.composite(good_layers, w, h,
                                output_encoding=output_encoding)
    else:
        return None
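# Hedged usage sketch for smart_composite() above. The file names and canvas
# size here are illustrative placeholders, not taken from the original code;
# layer tuples use the same (data, x, y, opacity, anchor) form that
# images.composite() expects.
def example_smart_composite():
    from google.appengine.api import images
    layers = [
        (open('base.png', 'rb').read(), 0, 0, 1.0, images.TOP_LEFT),
        (open('overlay.png', 'rb').read(), 0, 0, 0.5, images.TOP_LEFT),
    ]
    # Returns PNG bytes, or None if every layer was rejected.
    return smart_composite(layers, 256, 256, images.PNG)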
def compose_two_images(img1, img2):
    img_icon_1 = Utils.load_image('1.png')
    img_icon_2 = Utils.load_image('2.png')
    corner_tr = Utils.load_image('corner-tr.png')
    corner_bl = Utils.load_image('corner-bl.png')
    corner_br = Utils.load_image('corner-br.png')
    margin = 6
    #      img_icon_1              img_icon_2
    #     |         corner_tr     |
    #     | margin                | margin
    #      / ------- \             / -------- \   <-- corner_tr
    #     |           |           |            |
    #     |   img1    |           |    img2    |
    # margin          |margin     |            |margin
    #     |           |           |            |
    #      \ ------- /             \ -------- /
    #     ^  margin  ^            ^  margin   ^
    # corner_bl   corner_br   corner_bl   corner_br
    composite = images.composite([
        (img1, margin, margin, 1.0, images.TOP_LEFT),
        (img2, img1.width + 2 * margin, margin, 1.0, images.TOP_LEFT),
        (corner_tr, margin + img1.width - corner_tr.width, margin, 1.0,
         images.TOP_LEFT),
        (corner_tr, 2 * margin + img1.width + img2.width - corner_tr.width,
         margin, 1.0, images.TOP_LEFT),
        (corner_br, margin + img1.width - corner_br.width,
         margin + img1.height - corner_br.height, 1.0, images.TOP_LEFT),
        (corner_br, 2 * margin + img1.width + img2.width - corner_br.width,
         margin + img1.height - corner_br.height, 1.0, images.TOP_LEFT),
        (corner_bl, margin, margin + img1.height - corner_br.height, 1.0,
         images.TOP_LEFT),
        (corner_bl, 2 * margin + img1.width,
         margin + img1.height - corner_br.height, 1.0, images.TOP_LEFT),
        (img_icon_1, margin, margin, 1.0, images.TOP_LEFT),
        (img_icon_2, img1.width + 2 * margin, margin, 1.0, images.TOP_LEFT)
    ], img1.width + img2.width + 3 * margin, img1.height + 2 * margin)
    logging.info('created image')
    return composite
def generate_thumbnail(image_data, min_source_height, max_source_height,
                       min_source_width, max_source_width, content_type,
                       width, height, overlay_path, valign,
                       top_crop_pct=None, bottom_crop_pct=None,
                       left_crop_pct=None, right_crop_pct=None,
                       crop_x=None, crop_y=None,
                       post_crop_uniform_scale_pct=None):
    """Generate a thumbnail and return the image data as a binary string.

    If unable to create the thumbnail, will return None.

    :min_source_height: If specified, a thumbnail will only be generated if
        the incoming image is at least this high.
    :min_source_width: If specified, a thumbnail will only be generated if
        the incoming image is at least this wide.
    :max_source_height: If specified, a thumbnail will only be generated if
        the incoming image is less than this many pixels high.
    :max_source_width: If specified, a thumbnail will only be generated if
        the incoming image is less than this many pixels wide.
    :image_data: Image data, as a bytestring.
    :content_type: The MIME content type of the image.
    :width: Width of the thumbnail.
    :height: Height of the thumbnail.
    :overlay_path: Full path to an image file to overlay on top of the image
        data, or None to not use an overlay.
    :valign: A string, one of "top", "bottom", or "middle", describing how
        the image should be aligned along the Y-axis when cropping.
    :top_crop_pct:
    :bottom_crop_pct: Optional. Floats indicating how much from the top and
        bottom of the original image to crop in before rescaling. Numbers
        between 0 and 1.0 inclusive.
    :crop_x:
    :crop_y: Optional. If specified with width and height, will simply cut
        out a rectangle of the incoming image which is width x height and
        has its upper-left corner pegged to (crop_x, crop_y). NOTE: For
        crop_x and crop_y to work, the following other options must be None:
        valign, top_crop_pct, bottom_crop_pct.
    :post_crop_uniform_scale_pct: If not None, will scale the image after
        cropping by the indicated percent. Should be None or a float between
        0.0 and 1.0.
    """
    # Figure out the width/height of the image from the datastore.
    # img = images.Image(image_data=image_data)
    # img.crop(left_x=0.25, top_y=0.25, right_x=0.25, bottom_y=0.25)
    # img.resize(width=width, height=height)
    # logging.info('(b) w=%i, h=%i' % (img.width, img.height))
    # output = img.execute_transforms(output_encoding=img.format)
    image = images.Image(image_data)
    if min_source_height is not None and image.height < min_source_height:
        return None
    if max_source_height is not None and image.height > max_source_height:
        return None
    if min_source_width is not None and image.width < min_source_width:
        return None
    if max_source_width is not None and image.width > max_source_width:
        return None
    if content_type == 'image/png':
        output_encoding = images.PNG
    else:
        output_encoding = images.JPEG
    if (crop_x is not None and crop_y is not None and valign is None and
            top_crop_pct is None and bottom_crop_pct is None and
            image.width >= crop_x + width and image.height >= crop_y + height):
        fw = float(image.width)
        fh = float(image.height)
        try:
            output = images.crop(image_data, float(crop_x) / fw,
                                 float(crop_y) / fh,
                                 float(crop_x + width) / fw,
                                 float(crop_y + height) / fh,
                                 output_encoding=output_encoding)
        except:
            output = image_data
    else:
        if width > image.width and height > image.height:
            output = image_data
            # # this would result in scaling the image UP, that's no good
            # if image.width > image.height:
            #     width = image.width
            # else:
            #     height = image.height
            # output = images.resize(image_data, width, height, output_encoding)
        else:
            output = rescale(image, width, height, halign='middle',
                             valign=valign, top_crop_pct=top_crop_pct,
                             bottom_crop_pct=bottom_crop_pct,
                             left_crop_pct=left_crop_pct,
                             right_crop_pct=right_crop_pct)
    if post_crop_uniform_scale_pct is not None:
        output = images.resize(
            output, width=int(width * post_crop_uniform_scale_pct),
            output_encoding=output_encoding)
    if overlay_path is not None:
        # Read the overlay into memory.
        overlay_data = open(overlay_path, 'r').read()
        # Composite the overlay onto the rescaled output.
        if content_type == 'image/png':
            output_encoding = images.PNG
        else:
            output_encoding = images.JPEG
        output = images.composite(
            inputs=[(output, 0, 0, 1.0, images.CENTER_CENTER),
                    (overlay_data, 0, 0, 1.0, images.CENTER_CENTER)],
            width=width, height=height, output_encoding=output_encoding)
    return output
def get(self):
    self.width = 400
    self.height = 300
    self.response.headers['Content-Type'] = 'text/plain'
    file_name = os.path.join(os.path.dirname(__file__), 'original_msg.txt')
    message = mail.InboundEmailMessage(open(file_name, 'r').read())
    if False:
        for content_type, body in message.bodies():
            self.response.out.write("body %r\n" % [content_type, body.decode()])
    for attachment in message.attachments:
        self.handle_attachment(attachment)
    reverse_geocode_result = auth.googlemaps.reverse_geocode(
        (40.714224, -73.961452))
    # self.response.out.write(json.dumps(reverse_geocode_result,
    #     sort_keys=True, indent=4, separators=(',', ': ')))
    address = None
    if (len(reverse_geocode_result) > 0 and
            "formatted_address" in reverse_geocode_result[0]):
        address = reverse_geocode_result[0]["formatted_address"]
    # self.response.out.write("\n\nAddress: %r\n\n" % address)
    email = users.get_current_user().email()
    msg = mail.EmailMessage(
        sender=email,
        to=email,
        subject="new hello",
    )
    mime = MIMEMultipart('related')
    # alternative = MIMEMultipart('alternative')
    # mime.attach(alternative)
    # alternative.attach(MIMEText("""some img""", 'plain', 'utf-8'))
    # alternative.attach(MIMEText("""<img src="cid:foo">Hello World""", 'html', 'utf-8'))
    mime.attach(MIMEText("""<img src="cid:foo">Hello World""", 'html', 'utf-8'))
    image_data = attachment.payload.decode()
    img = images.Image(image_data=image_data)
    img.rotate(0)
    img.execute_transforms(parse_source_metadata=True)
    meta = img.get_original_metadata()
    orientaion = meta.get("Orientation", 1)
    if orientaion == 3:
        img.rotate(180)
        img.execute_transforms()
    text_layer = self.get_pie(135)
    # TODO(ark) handle orientation == 3 (rotate 180 degrees)
    # http://www.impulseadventure.com/photo/exif-orientation.html
    merged = images.composite(
        [(img, 0, 0, 1.0, images.TOP_LEFT),
         (text_layer, 0, 0, 1.0, images.TOP_LEFT)],
        640, 480)
    img_part = MIMEImage(merged, name="whut.png")
    img_part.add_header("Content-ID", "<foo>")
    mime.attach(img_part)
    msg.update_from_mime_message(mime)
    # msg.send()
    # self.response.out.write(mime.as_string())
    # Get the image from the Google Maps API; without this call the
    # response below would reference an undefined name.
    map_image = self.get_map(36.9869276, -122.0321512)
    self.response.headers['Content-Type'] = 'image/png'
    self.response.write(map_image)
    return
    # self.response.headers['Content-Type'] = 'image/png'
    # self.response.write(merged)
    self.response.headers['Content-Type'] = 'text/plain'
    self.response.write(api_key)
def get(self):
    # Special pre-defined sizes to use in lieu of numeric dimensions,
    # taken from dummyimage.com.
    sizes = {
        # Ad Sizes
        'mediumrectangle': '300x250',
        'squarepopup': '250x250',
        'verticalrectangle': '240x400',
        'largerectangle': '336x280',
        'rectangle': '180x150',
        'popunder': '720x300',
        'fullbanner': '468x60',
        'halfbanner': '234x60',
        'microbar': '88x31',
        'button1': '120x90',
        'button2': '120x60',
        'verticalbanner': '120x240',
        'squarebutton': '125x125',
        'leaderboard': '728x90',
        'wideskyscraper': '160x600',
        'skyscraper': '120x600',
        'halfpage': '300x600',
        # Screen Resolutions
        'cga': '320x200',
        'qvga': '320x240',
        'vga': '640x480',
        'wvga': '800x480',
        'svga': '800x600',
        'wsvga': '1024x600',
        'xga': '1024x768',
        'wxga': '1280x800',
        'wsxga': '1440x900',
        'wuxga': '1920x1200',
        'wqxga': '2560x1600',
        # Video Resolutions
        'ntsc': '720x480',
        'pal': '768x576',
        'hd720': '1280x720',
        'hd1080': '1920x1080',
    }
    size = None
    path = re.match(
        r"/(?P<size>[a-zA-Z0-9x]+)(\.(?P<ext>jpg|jpeg|png))?"
        r"(,(?P<bgcolor>[0-9a-fA-F]{6}|[0-9a-fA-F]{3}|[0-9a-fA-F]{2}|[0-9a-fA-F]{1}))?$",
        self.request.path_info)
    if path:
        path_size = self.request.get('d', path.group('size'))
        if path_size in sizes:
            path_size = sizes[path_size]
        size = re.match(r"(?P<width>\d+)(x(?P<height>\d+))?", path_size)
    if size:
        ## determine extension/file type
        ext = self.request.get('t', path.group('ext') or 'png')
        if ext == 'png':
            mimetype = 'image/png'
            encoding = images.PNG
        else:
            mimetype = 'image/jpeg'
            encoding = images.JPEG
        ## determine colors
        bgcolor = self.request.get('b', path.group('bgcolor') or 'aaaaaa')
        fgcolor = self.request.get('f', '000000')
        # handle shortcut colors
        if len(bgcolor) == 1:
            bgcolor = bgcolor[0] * 6
        elif len(bgcolor) == 2:
            bgcolor = (bgcolor[0] + bgcolor[1]) * 3
        elif len(bgcolor) == 3:
            bgcolor = bgcolor[0] * 2 + bgcolor[1] * 2 + bgcolor[2] * 2
        bgmatch = re.match(r"^[0-9a-fA-F]{6}$", bgcolor)
        if not bgmatch:
            bgcolor = 'aaaaaa'
        if len(fgcolor) == 1:
            fgcolor = fgcolor[0] * 6
        elif len(fgcolor) == 2:
            fgcolor = (fgcolor[0] + fgcolor[1]) * 3
        elif len(fgcolor) == 3:
            fgcolor = fgcolor[0] * 2 + fgcolor[1] * 2 + fgcolor[2] * 2
        fgmatch = re.match(r"^[0-9a-fA-F]{6}$", fgcolor)
        if not fgmatch:
            # fall back to the default foreground color
            fgcolor = '000000'
        ## determine size
        width = int(size.group('width'))
        # if only one dimension is defined, the image is square
        try:
            height = int(size.group('height'))
        except TypeError:
            height = width
        ## dimensions
        dimensions = str(width) + 'x' + str(height)
        ## label is what will be shown
        default_label = str(width) + u'×' + str(height)
        if path.group('size') in sizes:
            default_label = path.group('size') + '|(' + default_label + ')'
        label = self.request.get('l', default_label)
        ## the title font size needs to vary depending on width
        title_font_size = (float(width) / max(len(label), 1)) * \
            (1.4 if width > 16 else 1.9)
        title_font_size = min(title_font_size, 100)
        # make sure it is reasonably sized based on height too
        title_font_size = min(title_font_size, height * 0.6)
        # allow font size to be overridden
        try:
            title_font_size = int(self.request.get('s'))
        except ValueError:
            pass
        if width <= 4000 and height <= 4000:
            # the label images must be 1000 pixels or less in each dimension,
            # and less than 300000 total pixels
            label_height = min(
                height, 1000,
                int(title_font_size + 8) * (1 + label.count('|')) +
                10 * label.count('|'))
            label_width = min(width, 1000, int(300000 / label_height))
            url = ('http://chart.apis.google.com/chart?chs=' +
                   str(label_width) + 'x' + str(label_height) +
                   '&cht=p3&chtt=' + urllib.quote_plus(label.encode('utf-8')) +
                   '&chts=' + fgcolor + ',' + str(title_font_size) +
                   '&chf=bg,s,' + bgcolor)
            # see if we've already generated this image and have it cached
            cache_key = (os.environ['CURRENT_VERSION_ID'] + "|" + url + "|" +
                         str(width) + "|" + str(height) + "|" + str(bgcolor) +
                         "|" + str(encoding))
            full_img = memcache.get(cache_key)
            if full_img is None:
                # not found in memcache, so try creating it
                try:
                    label_img = fetch(url=url, deadline=10)
                    full_img = images.composite(
                        [(label_img.content, 0, 0, 1.0, images.CENTER_CENTER)],
                        width, height, int('ff' + bgcolor, 16), encoding)
                    memcache.add(cache_key, full_img)
                except images.BadImageError:
                    # self.response.set_status(400)
                    # self.response.out.write("Error from Google Chart: '" + label_img.content + "'.")
                    logging.error("Error from Google Chart: '" +
                                  label_img.content + "'.")
                    full_img = images.composite(
                        [empty_png_def], width, height,
                        int('ff' + bgcolor, 16), encoding)
                    # since this is the result of an error, we're returning an
                    # image without the intended label, but we want the client
                    # to try again next time
                    self.response.headers['Cache-Control'] = 'no-cache'
                except Exception, ex:
                    logging.warning(ex)
                    full_img = images.composite(
                        [empty_png_def], width, height,
                        int('ff' + bgcolor, 16), encoding)
                    # error result: return an unlabeled image but ask the
                    # client to try again next time
                    self.response.headers['Cache-Control'] = 'no-cache'
            self.response.headers['Content-Type'] = mimetype
            self.response.out.write(full_img)
        ## image requested is too damn big
        else:
            self.response.set_status(400)
            self.response.out.write("Dimensions requested (" + dimensions +
                                    ") are bigger than supported.")
def post(self):
    id = self.request.get('id')
    color_name = ColorName.get_by_id(int(id))
    if color_name is None:
        logging.error('color name not found.')
        return self.error(200)
    path = os.path.join(os.path.dirname(__file__), 'psd/layer02.png')
    pr = png.Reader(file=open(path, 'rb'))
    x, y, pixdata, meta = pr.asRGBA8()
    # Recolor the pure-red marker pixels of layer02 with the requested color.
    new_pixdata = []
    for pixcel in pixdata:
        i = 0
        new_p = []
        new_p_set = []
        for p in pixcel:
            new_p_set.append(p)
            i += 1
            if i >= 4:
                if new_p_set == [255, 0, 0, 255]:
                    new_p_set = [color_name.red, color_name.green,
                                 color_name.blue, 255]
                new_p.extend(new_p_set)
                i = 0
                new_p_set = []
        new_pixdata.append(new_p)
    pw = png.Writer(x, y, interlace=False, bitdepth=meta['bitdepth'],
                    planes=meta['planes'], alpha=True)
    o = StringIO()
    pw.write(o, new_pixdata)
    new_image_binary_data = o.getvalue()
    layer02_image = images.Image(new_image_binary_data)
    # Render the hex color code as a text layer with PIL.
    path = os.path.join(os.path.dirname(__file__), 'psd/YasashisaBold.ttf')
    im = Image.new('RGBA', (562, 168), (0, 0, 0, 0))
    draw = ImageDraw.Draw(im)
    font = ImageFont.truetype(path, 45)
    hex_text = rgb_to_hex((color_name.red, color_name.green, color_name.blue))
    (w, h) = draw.textsize(hex_text, font=font)
    draw.text(((562 - w - 60) / 2, (168 - h - 13) / 2), hex_text, font=font,
              fill='rgb(0,0,0)')
    fio = StringIO()
    im.save(fio, "PNG")
    data = fio.getvalue()
    fio.close()
    layer03_image = images.Image(data)
    path = os.path.join(os.path.dirname(__file__), 'psd/layer01.png')
    layer01_image = images.Image(file(path, 'rb').read())
    all_images = []
    all_images.append((layer02_image, 0, 0, 1.0, images.TOP_LEFT))
    all_images.append((layer01_image, 0, 0, 1.0, images.TOP_LEFT))
    all_images.append((layer03_image, 0, 0, 1.0, images.TOP_LEFT))
    image_c = images.composite(all_images, 562, 168, 0, images.PNG, 100)
    img = images.Image(image_c)
    img.resize(width=320)
    result = img.execute_transforms(output_encoding=images.PNG)
    crayon_data = CrayonData.all().filter('color_name =', color_name).get()
    if crayon_data is None:
        logging.info('new data.')
        crayon_data = CrayonData()
    else:
        logging.info('saved data.')
    crayon_data.color_name = color_name
    crayon_data.image = db.Blob(result)
    crayon_data.put()
class ImageCache(db.Model):
    url = db.StringProperty()
    image = db.BlobProperty()

    # Fetches images from the internet, manipulates them, and then caches
    # them in the datastore.
    # TODO - once you can programatically write to the blobstore, we should
    # do that, because GAE will let you serve binaries directly from the
    # blobstore without having to activate a handler.
    @classmethod
    def generate_image(cls, url):
        image_data = urlfetch.Fetch(url).content
        image = cls.weave(image_data)
        if not image:
            return None
        blob = db.Blob(image)
        cache = cls(url=url, image=blob)
        cache.save()
        return cache

    @classmethod
    def weave(cls, image_data):
        # where in the image to crop the box. numbers are relative to image.
        # make sure max width + crop width can never be > 1 !! Likewise height.
        left = 0.0 + random.uniform(0, 0.4)
        top = 0.4 + random.uniform(0, 0.2)
        # width of the box, as a proportion of the image width
        crop_width = 0.1 + random.uniform(0, 0.3)
        # target tile width and height. Image will be twice as wide/high.
        target_width = 500 / 2
        target_height = 320 / 2
        # the box wants to be in the aspect ratio of the output file.
        try:
            image = images.Image(image_data)
            ratio = float(image.width) / image.height
        except images.NotImageError, e:
            logging.error("Can't parse image: %s" % e)
            return None  # not a lot we can do here.
        logging.info("width is %s height is %s Ratio is %s" %
                     (image.width, image.height, ratio))
        crop_height = (crop_width * ratio) * (float(target_height) / target_width)
        # don't fall outside of the bounding box. Really shouldn't happen, and
        # it messes with the nice distribution of the boxes, but it's better
        # than dying.
        if crop_height + top > 1:
            top = 1 - crop_height
        if crop_width + left > 1:
            left = 1 - crop_width
        image.crop(left, top, left + crop_width, top + crop_height)
        tile = image.execute_transforms(images.JPEG)
        tile_image = images.Image(tile)  # need to do this to get width/height. Sigh.
        logging.info("image is %s by %s" % (tile_image.width, tile_image.height))
        # issue here - the crop function might not produce an image of the
        # exact required aspect ratio, so when we resize to the desired tile
        # size here, we don't get an image of the exact right size. To avoid
        # hairline cracks, make sure we composite before we resize.
        w = tile_image.width - 1
        h = tile_image.height - 1
        one = images.composite([
            (tile, 0, 0, 1.0, images.TOP_LEFT),
            (images.horizontal_flip(tile), w, 0, 1.0, images.TOP_LEFT),
            (images.vertical_flip(tile), 0, h, 1.0, images.TOP_LEFT),
            (images.horizontal_flip(images.vertical_flip(tile)), w, h, 1.0,
             images.TOP_LEFT),
        ], w * 2, h * 2, output_encoding=images.JPEG)
        return images.resize(one, target_width * 2, target_height * 2,
                             output_encoding=images.JPEG)
def create_figure_image(profile, figure):
    """Builds the figure image out of cubes.

    Returns a list of [image, image width, image height].
    """
    cube_points = []
    top = left = right = bottom = -1
    grid_aspect = float(profile.grid_width) / float(profile.grid_height)
    cube_images = {}
    for cube_str in figure.cubes:
        p = parse_cube_point(cube_str)
        nna = p.na * profile.grid_width
        nnb = p.nb * profile.grid_width
        p.x = int(round((nna + nnb) / 2))
        p.y = int(round((p.x - nna) / grid_aspect))
        cube_points.append(p)
        if left < 0 or p.x < left:
            left = p.x
        if top < 0 or p.y < top:
            top = p.y
        if right < 0 or p.x > right:
            right = p.x
        if bottom < 0 or p.y > bottom:
            bottom = p.y
        if p.binary not in cube_images:
            cube_images[p.binary] = get_profile_cube(profile, p.binary)
    top -= 10
    left -= 10
    right += profile.cube_width + 10
    bottom += profile.cube_height + 10
    figure_image_width = right - left
    figure_image_height = bottom - top
    image_inputs = []
    max_composites = images.MAX_COMPOSITES_PER_REQUEST
    comp_image = None
    for cube_point in cube_points:
        image_inputs.append((cube_images[cube_point.binary],
                             cube_point.x - left, cube_point.y - top, 1.0,
                             images.TOP_LEFT))
        # composite() is limited per request, so flush full batches and carry
        # the intermediate result forward as the first layer.
        if len(image_inputs) == max_composites:
            comp_image = images.composite(image_inputs, figure_image_width,
                                          figure_image_height, 0, images.PNG)
            image_inputs = [(comp_image, 0, 0, 1.0, images.TOP_LEFT)]
    if (comp_image and len(image_inputs) > 1) or \
            (not comp_image and len(image_inputs) > 0):
        comp_image = images.composite(image_inputs, figure_image_width,
                                      figure_image_height, 0, images.PNG)
    return [comp_image, figure_image_width, figure_image_height]
def compose_yes_no_image(img1):
    composite = images.composite(
        [(img1, 0, 0, 1.0, images.TOP_LEFT),
         (img1, 0, 0, 1.0, images.TOP_LEFT)],
        img1.width, img1.height)
    logging.info('created image')
    return composite
def get(self):
    image_key = self.request.get('id')
    if image_key == '':
        return self.error(404)
    style = self.request.get('style')
    if style in ('icon48', 'icon120', 'size240', 'size300'):
        cache_key = 'cached_image_%s_%s' % (image_key, style)
    else:
        style = None
        cache_key = 'cached_image_%s' % image_key
    logging.info('memcache_key: %s' % cache_key)
    thumbnail = memcache.get(cache_key)
    if thumbnail is None:
        archive_list_query = ArchiveList().all()
        archive_list_query.filter('image_key =', image_key)
        archive_list_query.filter('delete_flg =', False)
        archive_list = archive_list_query.get()
        if archive_list is None:
            memcache.add(cache_key, 404, 3600)
            return self.error(404)
        mask_image_key = hashlib.md5(
            '%s-%s' % (SECRET_MASK_KEY, image_key)).hexdigest()
        mask_image_query = MaskImage().all()
        mask_image_query.filter('access_key =', mask_image_key)
        mask_image = mask_image_query.get()
        original_image_key = hashlib.md5(
            '%s-%s' % (SECRET_IMAGE_KEY, image_key)).hexdigest()
        original_image_query = OriginalImage().all()
        original_image_query.filter('access_key =', original_image_key)
        original_image = original_image_query.get()
        if mask_image is None or original_image is None:
            return self.error(404)
        mask_image = images.Image(mask_image.image)
        mask_image.resize(width=640)
        #mask_image.im_feeling_lucky()
        new_mask_image = mask_image.execute_transforms(
            output_encoding=images.PNG)
        new_mask_image = convert_square(new_mask_image, 640, 640)
        original_image = images.Image(original_image.image)
        original_image.resize(width=640)
        #original_image.im_feeling_lucky()
        new_original_image = original_image.execute_transforms(
            output_encoding=images.PNG)
        new_original_image = convert_square(new_original_image, 640, 640)
        all_images = []
        all_images.append((new_original_image, 0, 0, 1.0, images.TOP_LEFT))
        all_images.append((new_mask_image, 0, 0, 1.0, images.TOP_LEFT))
        image_c = images.composite(all_images, 640, 640, 0, images.PNG, 100)
        img = images.Image(image_c)
        width = 320
        if style is not None:
            if style == 'size240':
                width = 240
            elif style == 'size300':
                width = 300
        img.resize(width=width)
        #img.im_feeling_lucky()
        thumbnail = img.execute_transforms(output_encoding=images.JPEG)
        if style is not None:
            if style == 'icon48':
                thumbnail = convert_square(thumbnail, 48, 48)
            elif style == 'icon120':
                thumbnail = convert_square(thumbnail, 120, 120)
        memcache.add(cache_key, thumbnail, 3600)
        logging.info('Image from DB. image_key: %s' % image_key)
    else:
        logging.info('Image from memcache. image_key: %s' % image_key)
    if thumbnail == 404:
        return self.error(404)
    self.response.headers['Content-Type'] = 'image/jpeg'
    self.response.out.write(thumbnail)