def merge_overlay(self, overlay):
    """Composite *overlay*'s logo onto this icon's background image.

    Both icons are opened first; the overlay is trimmed of transparent
    borders, zoomed to fit inside a border margin and composited centered
    over the background. Returns a new PIcon wrapping the result.
    """
    self.open()
    overlay.open()
    background_image = Image(self.image)
    # remove any transparent areas around the logo: composite it centered
    # onto a slightly larger transparent canvas, then trim
    overlay_image = Image(
        Geometry(overlay.image.size().width() + 2,
                 overlay.image.size().height() + 2), 'transparent')
    overlay_image.composite(overlay.image, GravityType.CenterGravity,
                            CompositeOperator.OverCompositeOp)
    overlay_image.trim()
    # margin around the logo, proportional to the background width
    border_width = int(
        math.ceil(background_image.size().width() *
                  PIconBackground.BORDER_RATIO))
    # zoom preserves aspect ratio, so only one dimension fills exactly
    overlay_image.zoom(
        Geometry(background_image.size().width() - (border_width * 2),
                 background_image.size().height() - (border_width * 2)))
    # we need to calculate exact position since we get 1-pixel error
    # if the height of the logo is odd and using GravityType.CenterGravity
    if overlay_image.size().width() + (
            2 * border_width) == background_image.size().width():
        # zoom filled the width: center vertically
        x = border_width
        y = int(
            math.ceil((background_image.size().height() / 2.0) -
                      (overlay_image.size().height() / 2.0)))
    else:
        # zoom filled the height: center horizontally
        x = int(
            math.ceil((background_image.size().width() / 2.0) -
                      (overlay_image.size().width() / 2.0)))
        y = border_width
    background_image.composite(overlay_image, x, y,
                               CompositeOperator.OverCompositeOp)
    return PIcon(background_image)
def test_image_resize(self):
    """resize() accepts 1-, 2- and 3-argument call forms."""
    canvas = Image(Geometry(300, 200), Color('transparent'))
    target = Geometry(150, 100)
    blackman = FilterTypes.BlackmanFilter
    blur = 0.5
    # exercise every overload: (geometry, filter, blur),
    # (geometry, filter) and (geometry) alone
    canvas.resize(target, blackman, blur)
    canvas.resize(target, blackman)
    canvas.resize(target)
def get_image(self):
    """Download the image at ``self.path``, crop it to this part's region
    and return it as a pgmagick ``Image``.

    Also records ``self.estimated_size``, an estimate (in bytes) of the
    compressed size of the cropped region.

    Raises:
        ImageRetrievalError: if the HTTP request fails.
    """
    # Open the image
    try:
        img_file = urlopen(self.path)
        img_data = img_file.read()
        bytes_read = len(img_data)
    except HTTPError as e:
        raise ImageRetrievalError(self.path, "Error code: %s" % e.code)
    except URLError as e:
        raise ImageRetrievalError(self.path, e.reason)
    blob = Blob(img_data)
    image = Image(blob)
    # Check if the whole image should be used and cropped if necessary.
    src_width = image.size().width()
    src_height = image.size().height()
    if self.width != src_width or self.height != src_height:
        box = Geometry(self.width, self.height, self.x_min_src,
                       self.y_min_src)
        image.crop(box)
    # Estimates the size in Bytes of this image part by scaling the number
    # of Bytes read with the ratio between the needed part of the image and
    # its actual size.
    self.estimated_size = bytes_read * abs(
        float(self.width * self.height) / float(src_width * src_height))
    return image
def crop(self, left, top, right, bottom):
    """Crop the wrapped image to the box spanned by (left, top) and
    (right, bottom); coordinates are truncated to ints."""
    box = Geometry(int(right - left), int(bottom - top),
                   int(left), int(top))
    self.image.crop(box)
def get_image(self):
    """Fetch the image at ``self.path`` over HTTP, crop it to this part's
    region and return it as a pgmagick ``Image``.

    Also records ``self.estimated_size``, an estimate (in bytes) of the
    compressed size of the cropped region.

    Raises:
        ImageRetrievalError: on any requests-level failure.
        ValueError: on a falsy response or a non-200 status code (raised
            inside the try block but not a RequestException, so it
            propagates to the caller).
    """
    # Open the image
    try:
        r = requests.get(self.path, allow_redirects=True, verify=verify_ssl)
        if not r:
            raise ValueError("Could not get " + self.path)
        if r.status_code != 200:
            raise ValueError("Unexpected status code ({}) for {}".format(
                r.status_code, self.path))
        img_data = r.content
        bytes_read = len(img_data)
    except requests.exceptions.RequestException as e:
        raise ImageRetrievalError(self.path, str(e))
    blob = Blob(img_data)
    image = Image(blob)
    # Check if the whole image should be used and cropped if necessary.
    src_width = image.size().width()
    src_height = image.size().height()
    if self.width != src_width or self.height != src_height:
        box = Geometry(self.width, self.height, self.x_min_src,
                       self.y_min_src)
        image.crop(box)
    # Estimates the size in Bytes of this image part by scaling the number
    # of Bytes read with the ratio between the needed part of the image and
    # its actual size.
    self.estimated_size = bytes_read * abs(
        float(self.width * self.height) / float(src_width * src_height))
    return image
def _watermark(self, image, watermark_path, opacity, size, position_str):
    """Composite the watermark file at *watermark_path* onto *image*.

    ``opacity`` < 1 fades the watermark; ``size`` optionally rescales it;
    ``position_str`` places it ('tile' repeats it across the whole image).
    Returns the modified *image*.
    """
    with open(watermark_path, 'rb') as watermark_file:
        watermark = self.get_image(watermark_file)
    image_size = self.get_image_size(image)
    # transparent working layer, same size as the target image
    layer = Image(Geometry(image_size[0], image_size[1]), 'transparent')
    if opacity < 1:
        self._reduce_opacity(watermark, opacity)
    if not size:
        mark_size = self.get_image_size(watermark)
    else:
        mark_size = tuple(
            self._get_new_watermark_size(size,
                                         self.get_image_size(watermark)))
    # allow upscaling only when the requested size exceeds the watermark's
    # own size (element-wise lexicographic tuple comparison)
    options = {
        'crop': 'center',
        'upscale': mark_size > self.get_image_size(watermark)
    }
    watermark = self.scale(watermark, mark_size, options)
    watermark = self.crop(watermark, mark_size, options)
    if position_str == 'tile':
        # repeat the watermark across the full image area
        for x_pos in range(0, image_size[0], mark_size[0]):
            for y_pos in range(0, image_size[1], mark_size[1]):
                layer.composite(watermark, x_pos, y_pos,
                                CoOp.OverCompositeOp)
    else:
        position = self._define_watermark_position(position_str, image_size,
                                                   mark_size)
        layer.composite(watermark, position[0], position[1],
                        CoOp.OverCompositeOp)
    image.composite(layer, 0, 0, CoOp.OverCompositeOp)
    return image
def crop(self, left, top, right, bottom):
    """Crop the wrapped image to the rectangle spanned by the corners
    (left, top) and (right, bottom)."""
    geometry = Geometry(right - left, bottom - top, left, top)
    self.image.crop(geometry)
def test_scale_jpeg(self):
    """Write a 400x400 JPEG, reload it through a Blob and scale it."""
    img = api.Image((400, 400), 'blue')
    img.write(self.tmp_filename_jpg)
    # BUG FIX: the file was opened in text mode and never closed. Text mode
    # corrupts binary JPEG data on Python 3 / Windows; the context manager
    # closes the handle deterministically (matches the sibling test that
    # already uses 'rb').
    with open(self.tmp_filename_jpg, 'rb') as fp:
        img2 = Image(Blob(fp.read()), Geometry(200, 200))
    img2.scale('200x200')
    img2.write(self.tmp_filename_jpg)
def resize7(srcFile="", destFile="", w=200, h=200): img = Image(srcFile) #白色背景图 backImg = None #sw源图宽度 sw = img.columns() #sh源图高度 sh = img.rows() #若目标图的宽或高都比源图大则不处理 if (sw <= w and sh <= h): backImg = Image(Geometry(w, h), 'white') backImg.composite(img, Geometry(sw, sh, 0, 0), co.OverCompositeOp) backImg.profile("*", Blob()) backImg.write(destFile) return "True" #目标的宽或高都比源图的小则进行裁剪 elif (sw > w and sh > h): #源图的宽高比 sratio = float(sw) / float(sh) rratio = float(w) / float(h) #若源图宽高比大于目标图的宽高比的话,则就高缩放,从0,0位置裁前源图宽 #print sratio,rratio if (sratio > rratio): hscale = float(h) / float(sh) rw = int(sw * hscale) rh = int(sh * hscale) else: wscale = float(w) / float(sw) rw = int(sw * wscale) rh = int(sh * wscale) img.scale("%dx%d" % (rw, rh)) img.crop(Geometry(w, h, 0, 0)) img.profile("*", Blob()) img.write(destFile) return "True" elif (sw > w): backImg = Image(Geometry(w, h), 'white') img.crop(Geometry(w, sh)) backImg.composite(img, Geometry(w, h, 0, 0), co.OverCompositeOp) backImg.profile("*", Blob()) backImg.write(destFile) return "True" elif (sh > h): backImg = Image(Geometry(w, h), 'white') img.crop(Geometry(sw, h)) backImg.composite(img, Geometry(w, h, 0, 0), co.OverCompositeOp) backImg.profile("*", Blob()) backImg.write(destFile) return "True" return "True"
def resize5(srcFile="", destFile="", w=200, h=200):
    """Resize an animated GIF ``srcFile`` to ``w`` x ``h`` frame by frame
    and write the result to ``destFile``. Always returns "True".
    """
    imgs = ImageList()
    outImgs = ImageList()
    imgs.readImages(srcFile)
    gifFrameLen = len(imgs)
    # take frame 0 of the gif to measure the source dimensions
    img = imgs.__getitem__(0)
    # sw: source image width
    sw = img.columns()
    sh = img.rows()
    # target width
    rw = w
    # target height
    rh = h
    # aspect ratio of the source image
    sratio = float(sw) / float(sh)
    # aspect ratio of the target image
    rratio = float(rw) / float(rh)
    if (sw > w):
        imgs.scaleImages("%dx" % w)
    # ?? wide-short images: e.g. 1600x94 resized to 160x298 scales by width
    # ?? wide-short images: e.g. 1600x94 resized to 522x294
    # If the source ratio exceeds the target ratio, scale by height first
    # and then crop the width.
    else:
        if (sratio > rratio):
            hscale = float(rh) / float(sh)
            w = int(sw * hscale)
            h = int(sh * hscale)
            #print (sw,sh,w,h,rw,rh,hscale)
            # scale by height
            imgs.scaleImages("%dx" % (w))
        # If the source ratio is below the target ratio, scale by width
        # first and then crop the height.
        else:
            wscale = float(rw) / float(sw)
            w = int(sw * wscale)
            h = int(sh * wscale)
            #print (sw,sh,w,h,rw,rh,wscale)
            # scale by width
            imgs.scaleImages("%dx%d" % (w, h))
    # After scaling, crop every frame to the target box.
    for i in range(gifFrameLen):
        tmpImg = imgs.__getitem__(i)
        tmpImg.crop(Geometry(rw, rh, 0, 0))
        tmpImg.profile("*", Blob())
        outImgs.append(tmpImg)
    #(102, 900, 160, 1411, 160, 298)
    #print( sw,sh,w,h,rw,rh)
    if (len(outImgs) > 0):
        outImgs.writeImages(destFile)
    else:
        imgs.writeImages(destFile)
    return "True"
def bobross(self,imgStr):
    """Scale the image file *imgStr* to 200x343, watercolor it and paste
    it onto the Bob Ross canvas template; returns the composited Image."""
    print "bobross()"
    bob=Image('bob-transparent-canvas.png')
    bob.matte(True)
    #print "1"
    img=Image(imgStr)
    #print "2"
    # aspect(True) forces the exact 200x343 size, ignoring aspect ratio
    newsize=Geometry(200,343)
    newsize.aspect(True)
    img.scale(newsize)
    #print "3"
    img=self.watercolor(img)
    #print "4"
    # paste the painting into the canvas area, then overlay the Bob Ross
    # foreground (transparent where the canvas shows through)
    result=Image(bob.size(),'white')
    result.composite(img,392,22,CompositeOperator.OverCompositeOp)
    result.composite(bob,0,0,CompositeOperator.OverCompositeOp)
    return result
def deferred_pgmagick():
    """Resize the benchmark image to 1024x768 and encode it as a quality-85
    JPEG into an in-memory Blob."""
    picture = PGImage(imagename)
    picture.filterType(FilterTypes.CatromFilter)
    picture.zoom(Geometry(1024, 768))
    picture.quality(85)
    picture.magick('jpeg')
    picture.write(Blob())
def test_image_setpixels(self):
    """Pixels obtained via setPixels can be mutated and synced back."""
    canvas = Image(Geometry(300, 200), Color('transparent'))
    region = canvas.setPixels(40, 50, 5, 5)
    for px in region:
        px.red = 50
    canvas.syncPixels()
    # re-read the same region and verify the writes stuck
    for px in canvas.getPixels(40, 50, 5, 5):
        self.assertEqual(50, px.red)
def resize1(srcFile="", destFile="", w=200, h=200): img = Image(srcFile) #sw源图宽度 sw = img.columns() sh = img.rows() #要缩略的宽度 rw = w #要缩略的高度 rh = h #源图的宽高比 sratio = float(sw) / float(sh) #目标图的宽高比 rratio = float(rw) / float(rh) #若源图的宽高比大于目标图的宽高比时,则按照高进行缩放后再裁剪宽度 if (sratio > rratio): hscale = float(rh) / float(sh) w = sw * hscale h = sh * hscale #print (sw,sh,w,h,rw,rh,hscale) #就高缩放 img.scale("%dx%d" % (w, h)) #计算裁剪宽的部分的横坐标,超出的宽的部分进行裁剪 tmpRowsPos = int((sw * hscale - rw) / 2) img.crop(Geometry(rw, rh, tmpRowsPos, 0)) #若源图的宽高比小于目标图的宽高比时,则按照宽进行缩放后再裁剪高度 else: wscale = float(rw) / float(sw) w = sw * wscale h = sh * wscale #print (sw,sh,w,h,rw,rh,wscale) #就宽缩放 img.scale("%dx%d" % (w, h)) tmpColsPos = int((sh * wscale - rh) / 2) img.crop(Geometry(rw, rh, 0, tmpColsPos)) #只有宽大于目标宽度的时候才进行缩略 #elif ( sw > w ): # pass #unicodestring.encode("utf-8") img.profile("*", Blob()) img.write(destFile) return "True"
def dummy_image(self, width, height):
    """Render a placeholder image of the given size: a colored canvas with
    lines and an outlined rectangle, as described by
    _get_dummy_image_data()."""
    spec = self._get_dummy_image_data(width, height)
    canvas = Image(Geometry(width, height), Color(*spec['canvas_color']))
    canvas.strokeColor(Color(*spec['line_color']))
    canvas.strokeWidth(1)
    for coords in spec['lines']:
        canvas.draw(DrawableLine(*coords))
    # draw the rectangle as an outline only (empty fill color)
    canvas.fillColor(Color())
    canvas.draw(DrawableRectangle(*spec['rectangle']))
    return canvas
def resize5(srcFile="", destFile="", w=200, h=200):
    """Scale ``srcFile`` toward ``w`` x ``h``, crop any excess height, and
    write the result to ``destFile``. Always returns "True".
    """
    #CONVERT_RESIZE_CROP = "%s -resize %d" + "x" + " -crop %d" + "x" + "%d" + "+0+0 +repage %s"
    img = Image(srcFile)
    sw = img.columns()
    sh = img.rows()
    # aspect ratio of the source image
    sratio = float(sw) / float(sh)
    # aspect ratio of the target image
    tratio = float(w) / float(h)
    # Already exactly the requested size: just strip profiles and save.
    if (sratio == tratio and (w == sw) and (h == sh)):
        # BUG FIX: was `imb.profile(...)`, a NameError — the image variable
        # is `img`.
        img.profile("*", Blob())
        img.write(destFile)
        return "True"
    elif (sratio > tratio):
        # relatively wider source: scale so the width matches, then crop
        # any leftover height
        hscale = float(w) / float(sw)
        tw = sw * hscale
        th = sh * hscale
        img.scale("%dx" % (tw))
        if (th > h):
            img.crop(Geometry(w, h))
        img.profile("*", Blob())
        img.write(destFile)
        return "True"
    elif (sratio < tratio):
        # relatively taller source: scale so the width matches, then crop
        # any leftover height
        wscale = float(w) / float(sw)
        tw = int(sw * wscale)
        th = int(sh * wscale)
        #260 132 670 502 0.388059701493 260 194
        img.scale("%dx%d" % (tw, th))
        if (th > h):
            img.crop(Geometry(w, h))
        img.profile("*", Blob())
        img.write(destFile)
        return "True"
    return "True"
def resize2(srcFile="", destFile="", w=200, h=200):
    """Force-scale ``srcFile`` to exactly ``w`` x ``h`` (or width-only when
    ``h == -1``) and write the result to ``destFile``.
    Always returns "True".
    """
    # BUG FIX: read the source in binary mode via a context manager — the
    # old `open(srcFile).read()` leaked the handle and used text mode,
    # which corrupts image bytes on Python 3.
    with open(srcFile, 'rb') as fp:
        blobData = Blob(fp.read())
    if (h != -1):
        # the trailing "!" forces the exact size, ignoring aspect ratio
        img = Image(blobData, Geometry(w, h))
        img.scale("%dx%d!" % (w, h))
    else:
        img = Image(blobData)
        img.scale("%dx!" % w)
    # strip embedded profiles (EXIF/ICC) to shrink the output
    img.profile("*", Blob())
    img.write(destFile)
    return "True"
def resize8(srcFile="", destFile="", w=200, h=200): img = Image(srcFile) #.def("extent", (void (Magick::Image::*)(const Magick::Geometry&, const Magick::Color&, const Magick::GravityType))&Magick::Image::extent) #白色背景图 backImg = None #sw源图宽度 sw = img.columns() #sh源图高度 sh = img.rows() #若目标图的宽或高都比源图大则不处理 if (sw <= w and sh <= h): backImg = Image(Geometry(w, h), 'white') backImg.composite(img, GravityType.CenterGravity, co.OverCompositeOp) backImg.profile("*", Blob()) backImg.write(destFile) return "True" #目标的宽或高都比源图的小则进行裁剪 elif (sw > w and sh > h): #源图的宽高比 sratio = float(sw) / float(sh) rratio = float(w) / float(h) #若源图宽高比大于目标图的宽高比的话,则就高缩放,从0,0位置裁前源图宽 #print sratio,rratio if (sratio > rratio): hscale = float(h) / float(sh) rw = int(sw * hscale) rh = int(sh * hscale) else: wscale = float(w) / float(sw) rw = int(sw * wscale) rh = int(sh * wscale) linePos = int((rw - w) / 2) colPos = int((rh - h) / 2) img.scale("%dx%d" % (rw, rh)) img.crop(Geometry(w, h, linePos, colPos)) img.profile("*", Blob()) img.write(destFile) return "True" elif (sw > w): backImg = Image(Geometry(w, h), 'white') img.crop(Geometry(w, sh, int((sw - w) / 2))) backImg.composite(img, GravityType.CenterGravity, co.OverCompositeOp) backImg.profile("*", Blob()) backImg.write(destFile) return "True" elif (sh > h): backImg = Image(Geometry(w, h), 'white') img.crop(Geometry(sw, h, 0, int((sh - h) / 2))) backImg.composite(img, GravityType.CenterGravity, co.OverCompositeOp) backImg.profile("*", Blob()) backImg.write(destFile) return "True" return "True"
def getpixels_test_template(self, use_const):
    """Shared body for getPixels/getConstPixels tests: fetch a 10x10 region
    from a transparent canvas, check its contents and return it."""
    img = Image(Geometry(300, 200), Color('transparent'))
    if use_const:
        fetch = img.getConstPixels
    else:
        fetch = img.getPixels
    pixels = fetch(40, 50, 10, 10)
    self.assertEqual(10 * 10, len(pixels))
    # out-of-range indexing must raise rather than read garbage
    with self.assertRaises(IndexError):
        pixels[2000]
    colorMax = Color.scaleDoubleToQuantum(1.0)
    # a transparent pixel is black with maximum opacity
    self.assertEqual(0, pixels[0].red)
    self.assertEqual(0, pixels[0].blue)
    self.assertEqual(0, pixels[0].green)
    self.assertEqual(colorMax, pixels[0].opacity)
    return pixels
def test_color_histogram(self):
    """A solid-color image yields a single-entry histogram."""
    red = Color('red')
    solid = Image(Geometry(30, 20), red)
    histogram = solid.colorHistogram()
    self.assertEqual(1, len(histogram))
    # membership and subscript access (in / __getitem__)
    self.assertIn(red, histogram)
    self.assertEqual(30 * 20, histogram[red])
    # iteration yields key/data packets
    for packet in histogram:
        self.assertEqual(red, packet.key())
        self.assertEqual(30 * 20, packet.data())
def resize9(srcFile="", destFile="", w=200, h=200, color="", crop=False, align="center"): img = Image(srcFile) #白色背景图 backImg = None #sw源图宽度 sw = img.columns() #sh源图高度 sh = img.rows() #目标图与源图的宽比例 wScale = float(w) / float(sw) #目标图与源图的高比例 hScale = float(h) / float(sh) if (w > sw or h > sh): if (wScale == hScale): tw = w th = h elif (wScale < hScale): th = h tw = sw * wScale else: tw = w th = sh * hScale elif (w < sw or h < sh): if (wScale == hScale): tw = w th = h elif (wScale < hScale): th = h tw = sw * wScale else: tw = w th = sh * hScale else: tw = sw th = sh img.scale("%dx%d" % (tw, th)) backImg = Image(Geometry(w, h), 'white') backImg.composite(img, GravityType.CenterGravity, co.OverCompositeOp) backImg.profile("*", Blob()) backImg.write(destFile) return "True"
def bobross(self,imgStr):
    """Watercolor-paint *imgStr*, scale it to 210x380 and composite it onto
    the Bob Ross canvas template; returns the combined Image."""
    bob=Image('bob-transparent-canvas.png')
    bob.matte(True)
    img=self.watercolor(imgStr)
    #img.matte(True)
    # aspect(True) forces the exact 210x380 size, ignoring aspect ratio
    newsize=Geometry(210,380)
    newsize.aspect(True)
    img.scale(newsize)
    #img.oilPaint(3)
    #img.enhance()
    #img.sharpen()
    #img.blur(2,2)
    #img.shear(-25,-15)
    # place the painting inside the canvas area, then overlay the Bob Ross
    # foreground (transparent where the canvas shows through)
    result=Image(bob.size(),'white')
    result.composite(img,390,20,CompositeOperator.OverCompositeOp)
    result.composite(bob,0,0,CompositeOperator.OverCompositeOp)
    #img.debug(True)
    #bob.composite(img,390,20,CompositeOperator.OverCompositeOp)
    return result
def test_scale_jpeg(self):
    """Round-trip a 400x400 JPEG through a Blob and scale it."""
    img = api.Image((400, 400), 'blue')
    img.write(self.tmp_filename_jpg)
    with open(self.tmp_filename_jpg, 'rb') as fp:
        # BUG FIX: was Blob(str(fp.read())). On Python 3, str() of bytes
        # yields the "b'...'" repr and corrupts the JPEG data; passing the
        # raw bytes through is identical on Python 2 (str(str) is a no-op).
        b = Blob(fp.read())
    img2 = Image(b, Geometry(200, 200))
    if sys.platform.lower() == 'darwin':
        # NOTE: error occur when use '200x200' param
        # -----------------------------------------------------
        # RuntimeError: Magick: Application transferred too few
        # scanlines (x.jpg) reported by coders/jpeg.c:344 (JPEGErrorHandler)
        img2.scale('199x199')
    else:
        img2.scale('200x200')
    img2.write(self.tmp_filename_jpg)
def create_test_data(self):
    """Load the color-circle test image, convert it to ``self.mode`` and
    zoom it to ``self.size``; returns a one-element list of images.

    Raises:
        ValueError: for an unsupported ``self.mode``.
    """
    im = Image(root('resources', 'color_circle.png').encode('utf-8'))
    if self.mode == 'RGB':
        im.type(ImageType.TrueColorType)
    elif self.mode == 'RGBA':
        im.type(ImageType.TrueColorMatteType)
    elif self.mode == 'L':
        im.type(ImageType.GrayscaleType)
    elif self.mode in 'LA':
        # NOTE(review): `in 'LA'` is a substring test, so mode 'A' (and '')
        # also matches — confirm `== 'LA'` was not intended.
        im.type(ImageType.GrayscaleMatteType)
    else:
        raise ValueError('Unknown mode: {}'.format(self.mode))
    im.filterType(FilterTypes.CatromFilter)
    # BUG FIX: b"{}x{}!".format(...) raises AttributeError on Python 3
    # (bytes has no .format). Format as str first, then encode, keeping the
    # bytes argument this file passes elsewhere.
    geometry = '{}x{}!'.format(self.size[0], self.size[1]).encode('utf-8')
    im.zoom(Geometry(geometry))
    return [im]
def get_image(self):
    """Download the image at ``self.path``, crop it to this part's region
    and return it as a pgmagick ``Image``.

    NOTE(review): the Python 2 ``urllib`` module has no ``HTTPError`` /
    ``URLError`` attributes (those live in ``urllib2``), so these except
    clauses would themselves raise AttributeError if ``urlopen`` failed —
    confirm which module is imported at the top of the file.

    Raises:
        ImageRetrievalError: if the HTTP request fails.
    """
    # Open the image
    try:
        img_file = urllib.urlopen(self.path)
    except urllib.HTTPError as e:
        raise ImageRetrievalError(self.path, "Error code: %s" % e.code)
    except urllib.URLError as e:
        raise ImageRetrievalError(self.path, e.reason)
    blob = Blob(img_file.read())
    image = Image(blob)
    # Check if the whole image should be used and cropped if necessary.
    src_width = image.size().width()
    src_height = image.size().height()
    if self.width != src_width or self.height != src_height:
        box = Geometry(self.width, self.height, self.x_min_src,
                       self.y_min_src)
        image.crop(box)
    return image
def enhanceImage(localpath):
    """Crop to 50%, and resize to original size
       sharpen, save out to same image name
       Returns True if image processing succeeds"""
    try:
        img = Image(localpath)
        size = img.size()
        # central 50% window: half the width/height, offset a quarter in
        geo = Geometry(round(size.width() * 0.5), round(size.height() * 0.5),
                       round(size.width() * 0.25),
                       round(size.height() * 0.25))
        img.crop(geo)
        img.scale(size)
        img.quality(80)
        img.sharpen(1)
        img.write(localpath)
        print("Image written to %s" % localpath)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt; catching Exception lets those propagate while
        # keeping the best-effort False return for processing errors.
        return False
    return True
def _watermark(self, image, watermark_path, opacity, size, position_str):
    """Composite the watermark file at *watermark_path* onto *image* at the
    requested opacity, size and position; returns the modified *image*."""
    # BUG FIX: open the watermark in binary mode via a context manager —
    # `open(watermark_path)` leaked the handle and used text mode, which
    # corrupts image bytes on Python 3. Matches the sibling _watermark
    # implementation in this file.
    with open(watermark_path, 'rb') as watermark_file:
        watermark = self.get_image(watermark_file)
    image_size = self.get_image_size(image)
    # transparent working layer, same size as the target image
    layer = Image(Geometry(image_size[0], image_size[1]), 'transparent')
    if opacity < 1:
        self._reduce_opacity(watermark, opacity)
    if not size:
        mark_size = self.get_image_size(watermark)
    else:
        mark_size = self._get_new_watermark_size(
            size, self.get_image_size(watermark))
    options = {'crop': 'center', 'upscale': False}
    watermark = self.scale(watermark, mark_size, options)
    watermark = self.crop(watermark, mark_size, options)
    position = self._define_watermark_position(position_str, image_size,
                                               mark_size)
    layer.composite(watermark, position[0], position[1],
                    CoOp.OverCompositeOp)
    image.composite(layer, 0, 0, CoOp.OverCompositeOp)
    return image
def _crop(self, image, width, height, x_offset, y_offset):
    """Crop *image* in place to the given box and return it."""
    image.crop(Geometry(width, height, x_offset, y_offset))
    return image
def _scale(self, image, width, height):
    """Scale *image* in place to width x height and return it."""
    image.scale(Geometry(width, height))
    return image
def _cropbox(self, image, x, y, x2, y2):
    """Crop *image* to the box with corners (x, y) and (x2, y2)."""
    box = Geometry(x2 - x, y2 - y, x, y)
    image.crop(box)
    return image
def extract_substack(job):
    """ Extracts a sub-stack as specified in the passed job while respecting
    rotation requests. A list of pgmagick images is returned -- one for each
    slice, starting on top.
    """
    # Make sure tile source getters have been initialized on the job
    if job.needs_initialization:
        job.initialize()

    # Treat rotation requests special: multiples of 90 degrees are handled
    # by a simple per-slice rotate; anything else needs an enlarged crop,
    # a rotate and a second crop.
    if abs(job.rotation_cw) < 0.00001:
        # No rotation, create the sub-stack
        cropped_stack = extract_substack_no_rotation(job)
    elif abs(job.rotation_cw - 90.0) < 0.00001:
        # 90 degree rotation, create the sub-stack and do a simple rotation
        cropped_stack = extract_substack_no_rotation(job)
        for img in cropped_stack:
            img.rotate(270.0)
    elif abs(job.rotation_cw - 180.0) < 0.00001:
        # 180 degree rotation, create the sub-stack and do a simple rotation
        cropped_stack = extract_substack_no_rotation(job)
        for img in cropped_stack:
            img.rotate(180.0)
    elif abs(job.rotation_cw - 270.0) < 0.00001:
        # 270 degree rotation, create the sub-stack and do a simple rotation
        cropped_stack = extract_substack_no_rotation(job)
        for img in cropped_stack:
            img.rotate(90.0)
    else:
        # Some methods do counter-clockwise rotation
        rotation_ccw = 360.0 - job.rotation_cw
        # There is rotation requested. First, backup the cropping
        # coordinates and manipulate the job to create a cropped
        # stack of the bounding box of the rotated box.
        real_x_min = job.x_min
        real_x_max = job.x_max
        real_y_min = job.y_min
        real_y_max = job.y_max
        # Rotate bounding box counter-clockwise around center.
        center = [0.5 * (job.x_max + job.x_min),
                  0.5 * (job.y_max + job.y_min)]
        rot_p1 = rotate2d(rotation_ccw, [real_x_min, real_y_min], center)
        rot_p2 = rotate2d(rotation_ccw, [real_x_min, real_y_max], center)
        rot_p3 = rotate2d(rotation_ccw, [real_x_max, real_y_max], center)
        rot_p4 = rotate2d(rotation_ccw, [real_x_max, real_y_min], center)
        # Find new (larger) bounding box of rotated ROI and write
        # them into the job
        job.x_min = min([rot_p1[0], rot_p2[0], rot_p3[0], rot_p4[0]])
        job.y_min = min([rot_p1[1], rot_p2[1], rot_p3[1], rot_p4[1]])
        job.x_max = max([rot_p1[0], rot_p2[0], rot_p3[0], rot_p4[0]])
        job.y_max = max([rot_p1[1], rot_p2[1], rot_p3[1], rot_p4[1]])
        # Create the enlarged sub-stack
        cropped_stack = extract_substack_no_rotation(job)
        # Next, rotate the whole result stack counterclockwise to have the
        # actual ROI axis aligned.
        for img in cropped_stack:
            img.rotate(rotation_ccw)
        # Last, do a second crop to remove the not needed parts. The region
        # to crop is defined by the relative original crop-box coordinates to
        # to the rotated bounding box.
        rot_bb_p1 = rotate2d(rotation_ccw, [job.x_min, job.y_min], center)
        rot_bb_p2 = rotate2d(rotation_ccw, [job.x_min, job.y_max], center)
        rot_bb_p3 = rotate2d(rotation_ccw, [job.x_max, job.y_max], center)
        rot_bb_p4 = rotate2d(rotation_ccw, [job.x_max, job.y_min], center)
        # Get bounding box minimum coordinates in world space
        bb_x_min = min(
            [rot_bb_p1[0], rot_bb_p2[0], rot_bb_p3[0], rot_bb_p4[0]])
        bb_y_min = min(
            [rot_bb_p1[1], rot_bb_p2[1], rot_bb_p3[1], rot_bb_p4[1]])
        # Create relative final crop coordinates
        crop_p1 = [abs(real_x_min - bb_x_min), abs(real_y_min - bb_y_min)]
        crop_p2 = [abs(real_x_min - bb_x_min), abs(real_y_max - bb_y_min)]
        crop_p3 = [abs(real_x_max - bb_x_min), abs(real_y_min - bb_y_min)]
        crop_p4 = [abs(real_x_max - bb_x_min), abs(real_y_max - bb_y_min)]
        crop_x_min = min([crop_p1[0], crop_p2[0], crop_p3[0], crop_p4[0]])
        crop_y_min = min([crop_p1[1], crop_p2[1], crop_p3[1], crop_p4[1]])
        crop_x_max = max([crop_p1[0], crop_p2[0], crop_p3[0], crop_p4[0]])
        crop_y_max = max([crop_p1[1], crop_p2[1], crop_p3[1], crop_p4[1]])
        # Convert world-space crop coordinates to pixel indices (unbounded)
        crop_x_min_px = to_x_index(crop_x_min, job, False)
        crop_y_min_px = to_y_index(crop_y_min, job, False)
        crop_x_max_px = to_x_index(crop_x_max, job, False)
        crop_y_max_px = to_y_index(crop_y_max, job, False)
        crop_width_px = crop_x_max_px - crop_x_min_px
        crop_height_px = crop_y_max_px - crop_y_min_px
        # Crop all images (Geometry: width, height, xOffset, yOffset)
        crop_geometry = Geometry(crop_width_px, crop_height_px,
                                 crop_x_min_px, crop_y_min_px)
        for img in cropped_stack:
            img.crop(crop_geometry)
        # Reset the original job parameters
        job.x_min = real_x_min
        job.x_max = real_x_max
        job.y_min = real_y_min
        job.y_max = real_y_max

    return cropped_stack
def extract_substack_no_rotation(job) -> List:
    """ Extracts a sub-stack as specified in the passed job without respecting
    rotation requests. A list of pgmagick images is returned -- one for each
    slice, starting on top.
    """
    # The actual bounding boxes used for creating the images of each stack
    # depend not only on the request, but also on the translation of the stack
    # wrt. the project. Therefore, a dictionary with bounding box information for
    # each stack is created.
    s_to_bb = {}
    for stack_mirror in job.stack_mirrors:
        stack = stack_mirror.stack
        # Retrieve translation relative to current project
        translation = ProjectStack.objects.get(project_id=job.project_id,
                                               stack_id=stack.id).translation
        # Job coordinates are in project space; shift into stack space.
        x_min_t = job.x_min - translation.x
        x_max_t = job.x_max - translation.x
        y_min_t = job.y_min - translation.y
        y_max_t = job.y_max - translation.y
        z_min_t = job.z_min - translation.z
        z_max_t = job.z_max - translation.z
        # Calculate the slice numbers and pixel positions
        # bound to the stack data.
        px_x_min = to_x_index(x_min_t, stack, job.zoom_level)
        px_x_max = to_x_index(x_max_t, stack, job.zoom_level)
        px_y_min = to_y_index(y_min_t, stack, job.zoom_level)
        px_y_max = to_y_index(y_max_t, stack, job.zoom_level)
        px_z_min = to_z_index(z_min_t, stack, job.zoom_level)
        px_z_max = to_z_index(z_max_t, stack, job.zoom_level)
        # Because it might be that the cropping goes over the
        # stack bounds, we need to calculate the unbounded height,
        # with and an offset.
        px_x_min_nobound = to_x_index(x_min_t, stack, job.zoom_level, False)
        px_x_max_nobound = to_x_index(x_max_t, stack, job.zoom_level, False)
        px_y_min_nobound = to_y_index(y_min_t, stack, job.zoom_level, False)
        px_y_max_nobound = to_y_index(y_max_t, stack, job.zoom_level, False)
        width = px_x_max_nobound - px_x_min_nobound
        height = px_y_max_nobound - px_y_min_nobound
        # A negative unbounded minimum means the request starts left/above
        # the stack; shift the drawing destination by that amount.
        px_x_offset = abs(px_x_min_nobound) if px_x_min_nobound < 0 else 0
        px_y_offset = abs(px_y_min_nobound) if px_y_min_nobound < 0 else 0
        # Create a dictionary entry with a simple object
        bb = BB()
        bb.px_x_min = px_x_min
        bb.px_x_max = px_x_max
        bb.px_y_min = px_y_min
        bb.px_y_max = px_y_max
        bb.px_z_min = px_z_min
        bb.px_z_max = px_z_max
        bb.px_x_offset = px_x_offset
        bb.px_y_offset = px_y_offset
        bb.width = width
        bb.height = height
        s_to_bb[stack.id] = bb

    # Get number of wanted slices
    px_z_min = to_z_index(job.z_min, job.ref_stack, job.zoom_level)
    px_z_max = to_z_index(job.z_max, job.ref_stack, job.zoom_level)
    n_slices = px_z_max + 1 - px_z_min

    # The images are generated per slice, so most of the following
    # calculations refer to 2d images.

    # Each stack to export is treated as a separate channel. The order
    # of the exported dimensions is XYCZ. This means all the channels of
    # one slice are exported, then the next slice follows, etc.
    cropped_stack = []
    # Accumulator for estimated result size
    estimated_total_size = 0
    # Iterate over all slices
    for nz in range(n_slices):
        for mirror in job.stack_mirrors:
            stack = mirror.stack
            bb = s_to_bb[stack.id]
            # Shortcut for tile width and height
            tile_width = mirror.tile_width
            tile_height = mirror.tile_height
            # Get indices for bounding tiles (0 indexed)
            tile_x_min = int(bb.px_x_min / tile_width)
            tile_x_max = int(bb.px_x_max / tile_width)
            tile_y_min = int(bb.px_y_min / tile_height)
            tile_y_max = int(bb.px_y_max / tile_height)
            # Get the number of needed tiles for each direction
            num_x_tiles = tile_x_max - tile_x_min + 1
            num_y_tiles = tile_y_max - tile_y_min + 1
            # Associate image parts with all tiles
            image_parts = []
            x_dst = bb.px_x_offset
            for nx, x in enumerate(range(tile_x_min, tile_x_max + 1)):
                # The min x,y for the image part in the current tile are 0
                # for all tiles except the first one.
                cur_px_x_min = 0 if nx > 0 else bb.px_x_min - x * tile_width
                # The max x,y for the image part of current tile are the tile
                # size minus one except for the last one.
                if nx < (num_x_tiles - 1):
                    cur_px_x_max = tile_width - 1
                else:
                    cur_px_x_max = bb.px_x_max - x * tile_width
                # Reset y destination component
                y_dst = bb.px_y_offset
                for ny, y in enumerate(range(tile_y_min, tile_y_max + 1)):
                    cur_px_y_min = 0 if ny > 0 else bb.px_y_min - y * tile_height
                    if ny < (num_y_tiles - 1):
                        cur_px_y_max = tile_height - 1
                    else:
                        cur_px_y_max = bb.px_y_max - y * tile_height
                    # Create an image part definition
                    z = bb.px_z_min + nz
                    path = job.get_tile_path(stack, mirror, (x, y, z))
                    try:
                        part = ImagePart(path, cur_px_x_min, cur_px_x_max,
                                         cur_px_y_min, cur_px_y_max,
                                         x_dst, y_dst)
                        image_parts.append(part)
                    except:
                        # ignore failed slices
                        # NOTE(review): bare except silently drops any
                        # failure here, including programming errors —
                        # consider narrowing to the expected exception.
                        pass
                    # Update y component of destination position
                    y_dst += cur_px_y_max - cur_px_y_min
                # Update x component of destination position
                x_dst += cur_px_x_max - cur_px_x_min

            # Write out the image parts and make sure the maximum allowed file
            # size isn't exceeded.
            cropped_slice = Image(Geometry(bb.width, bb.height),
                                  ColorRGB(0, 0, 0))
            for ip in image_parts:
                # Get (correctly cropped) image
                image = ip.get_image()
                # Estimate total file size and abort if this exceeds the
                # maximum allowed file size.
                estimated_total_size = estimated_total_size + ip.estimated_size
                if estimated_total_size > settings.GENERATED_FILES_MAXIMUM_SIZE:
                    raise ValueError("The estimated size of the requested image "
                                     "region is larger than the maximum allowed "
                                     "file size: %0.2f > %s Bytes" % \
                                     (estimated_total_size,
                                      settings.GENERATED_FILES_MAXIMUM_SIZE))
                # Draw the image onto result image
                cropped_slice.composite(image, ip.x_dst, ip.y_dst,
                                        co.OverCompositeOp)
                # Delete tile image - it's not needed anymore
                del image
            if cropped_slice:
                # Optionally, use only a single channel
                if job.single_channel:
                    cropped_slice.channel(ChannelType.RedChannel)
                # Add the image to the cropped stack
                cropped_stack.append(cropped_slice)

    return cropped_stack