def get_image_data(self, z=0, fmt='RGBA', gl_format=gl.GL_RGBA):
    """Get the image data of this texture.

    Changes to the returned instance will not be reflected in this
    texture.

    :Parameters:
        `z` : int
            For 3D textures, the image slice to retrieve.

    :rtype: :py:class:`~pyglet.image.ImageData`
    """
    gl.glBindTexture(self.target, self.id)

    # Always extract complete RGBA data.  Could check internalformat
    # to only extract used channels. XXX
    # fmt = 'RGBA'
    # gl_format = gl.GL_RGBA

    gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1)
    buffer = (gl.GLubyte * (self.width * self.height * self.images * len(fmt)))()
    gl.glGetTexImage(self.target, self.level, gl_format, gl.GL_UNSIGNED_BYTE, buffer)

    data = pyglet.image.ImageData(self.width, self.height, fmt, buffer)
    if self.images > 1:
        data = data.get_region(0, z * self.height, self.width, self.height)
    return data
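A minimal usage sketch for the method above, assuming `texture` is a pyglet `Texture` instance and NumPy is available; the `rgba` name and pitch argument are only illustrative:

import numpy as np

# Hypothetical usage: fetch the texture contents and view them as an array.
img = texture.get_image_data()
rgba = np.frombuffer(img.get_data('RGBA', img.width * 4), dtype=np.uint8)
rgba = rgba.reshape((img.height, img.width, 4))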
def cleanUP(self):
    # Read the full 3D texture (resolution^3 single-channel floats) back from the GPU.
    a = (gl.GLfloat * (args["resolution"] ** 3))()
    gl.glBindTexture(gl.GL_TEXTURE_3D, self.A0_tex)
    gl.glGetTexImage(gl.GL_TEXTURE_3D, 0, gl.GL_RED, gl.GL_FLOAT, a)
    # self.flip()  # This updates the screen, which is important when drawing on-screen.
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)

    bufA = np.frombuffer(a, dtype=np.float32)
    bufA = bufA.reshape(
        (args["resolution"], args["resolution"], args["resolution"]))
    # Consider casting to float64.
    args["Aout"] = bufA
def read(self, level: int = 0, alignment: int = 1) -> bytearray:
    """
    Read the contents of the texture.

    :param int level: The texture level to read
    :param int alignment: Alignment of the start of each row in memory
                          in number of bytes. Possible values: 1, 2, 4
    :rtype: bytearray
    """
    gl.glBindTexture(self._target, self._glo)
    gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, alignment)

    buffer = (gl.GLubyte * (self.width * self.height * self._component_size * self._components))()
    gl.glGetTexImage(gl.GL_TEXTURE_2D, level, self._format, self._type, buffer)
    return bytearray(buffer)
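A possible way to consume the returned bytearray, assuming the texture object `tex` exposes `width`, `height` and `components` attributes and stores 32-bit floats (so `_component_size` is 4):

import numpy as np

# Hypothetical usage: view the raw bytes as a float32 image without copying.
raw = tex.read(level=0, alignment=1)
pixels = np.frombuffer(raw, dtype=np.float32).reshape(
    (tex.height, tex.width, tex.components))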
def cleanUP(self):
    a = (gl.GLfloat * (self.dimx * self.dimy * self.dimz))()
    # Need a new way to read out pixels:
    # gl.glReadPixels(0, 0, self.dimx, self.dimy, gl.GL_RGBA, gl.GL_FLOAT, b)
    # gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.framebufferA1)
    # gl.glReadPixels(0, 0, self.dimx, self.dimy, gl.GL_RGBA, gl.GL_FLOAT, a)
    gl.glBindTexture(gl.GL_TEXTURE_3D, self.A1_tex)
    gl.glGetTexImage(gl.GL_TEXTURE_3D, 0, gl.GL_RED, gl.GL_FLOAT, a)
    # self.flip()  # This updates the screen, which is important when drawing on-screen.
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)

    bufA = np.frombuffer(a, dtype=np.float32)
    bufA = bufA.reshape(args["A"].shape)
    # Consider casting to float64.
    args["Aout"] = bufA
def get_image_data(self): """Return a pyglet image with the contents of the FBO.""" # props to pyprocessing! self.data = (ctypes.c_ubyte * (self.width * self.height * 4))() gl.glBindTexture( gl.GL_TEXTURE_2D, # target self.texture_id, # texture id ) gl.glGetTexImage( gl.GL_TEXTURE_2D, # target 0, # mipmap level gl.GL_RGBA, # format gl.GL_UNSIGNED_BYTE, # type, self.data, # image data ) return pyglet.image.ImageData(self.width, self.height, 'RGBA', self.data)
def test_float_raw(self):
    width, height = 8, 4
    gpu_format = gl.GL_R32F
    input_format = gl.GL_RED
    input_type = gl.GL_FLOAT

    in_array = np.linspace(0, width * height - 1, num=width * height, dtype="float32")
    ptr = np.ctypeslib.as_ctypes(in_array)

    tex = Texture2D()
    tex.create()
    tex.bind()
    gl.glTexImage2D(tex.target, 0, gpu_format, width, height, 0, input_format, input_type, ptr)

    out_array = np.zeros([height * width], dtype="float32")
    ptr = np.ctypeslib.as_ctypes(out_array)
    gl.glGetTexImage(tex.target, 0, gl.GL_RED, gl.GL_FLOAT, ptr)

    print(in_array)
    print(out_array)
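The test above only prints both arrays; one way to turn it into an actual assertion (a sketch, on the assumption that the GL_R32F upload/readback round trip preserves the float32 values exactly):

# Possible check to append to the test body instead of the prints.
np.testing.assert_array_equal(in_array, out_array)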
def main():
    t = time.time()

    parser = argparse.ArgumentParser()
    parser.add_argument('--tex', help='input tiff file (*.tiff for sequence)', default='')
    parser.add_argument('--in_box', help='--insize x y z w h d', type=int, nargs=6,
                        default=[0, 0, 0, 0, 0, 0])
    parser.add_argument('--code', help='--code "return texture(tex,p).r;"',
                        default='return texture(tex,p).r;')
    parser.add_argument('--size', help='--size w h d', nargs=3, type=int, default=[0, 0, 0])
    parser.add_argument('--seam', help='--seam dd', nargs=1, type=int, default=10)
    parser.add_argument('--compress', help='compression level', type=int, default=2)
    parser.add_argument('out_tiff', help='output tiff file', type=str)
    args = parser.parse_args()
    seam = args.seam

    if '*' in args.tex:
        import os
        dr = os.path.split(args.tex)[0] + '/'
        print(dr, os.path.isdir(dr))
        seq = tifffile.TiffSequence(args.tex)
        in_data = seq.asarray()
        print(in_data.shape, in_data.dtype)
        print(in_data[0][0][0])
        # in_data = in_data.astype(np.uint32)
        # print(in_data[0][0][0])
    elif os.path.isfile(args.tex):
        in_data = tifffile.imread(args.tex)
    else:
        in_data = np.zeros((1, 1, 1), dtype=np.uint32)

    if len(in_data.shape) != 3:
        print('Not a 3D grayscale tiff:', in_data.shape)
    # if len(in_data.shape) == 4:
    #     print('Using red channel')

    in_d, in_h, in_w = in_data.shape
    in_size = in_w, in_h, in_d

    bx, by, bz, bw, bh, bd = args.in_box
    bw = in_w - bx if bw == 0 or bx + bw > in_w else bw
    bh = in_h - by if bh == 0 or by + bh > in_h else bh
    bd = in_d - bz if bd == 0 or bz + bd > in_d else bd

    time_loaded = time.time()
    print('Loading input {} took {:.2f} seconds'.format(in_size, time_loaded - t))

    if (bw, bh, bd) != in_size:
        print('Clipping to [{},{})x[{},{})x[{},{})'.format(
            bx, bx + bw, by, by + bh, bz, bz + bd))
        in_size = (bw, bh, bd)
        in_w, in_h, in_d = in_size
        in_data = in_data[bz:bz + bd, by:by + bh, bx:bx + bw]

    out_w, out_h, out_d = args.size
    out_w = in_w if out_w <= 0 else out_w
    out_h = in_h if out_h <= 0 else out_h
    out_d = in_d if out_d <= 0 else out_d
    out_size = (out_w, out_h, out_d)

    pixPerZ = in_w * in_h
    zPerBatch = SAFE_TEX_SIZE // pixPerZ - 2 * seam
    batches = [(i, i + zPerBatch) for i in range(0, in_d, zPerBatch)]

    setup_framebuffer()
    setup_render_program(args.code)
    setup_render_vertexbuffer()

    time_setup = time.time()
    print('Setup took {:.2f} seconds'.format(time_setup - time_loaded))

    time_buffered = time_setup
    warned = False
    out_batches = []
    for z0, z1 in batches:
        z0 = max(0, z0)
        z1 = min(in_d, z1)
        z0s = max(0, z0 - seam)
        z1s = min(in_d, z1 + seam)

        batch_out_d = out_d * (z1 - z0) / in_d
        if not warned and batch_out_d != int(batch_out_d) and len(batches) > 1:
            warned = True
            print('Warning: input/output Z-dimension mismatch while using multiple batches')
        batch_out_d = int(batch_out_d)

        batch_in_size = (in_w, in_h, z1s - z0s)
        batch_out_size = (out_w, out_h, batch_out_d)

        attach_input(in_data[z0s:z1s], batch_in_size)
        attach_output(batch_out_size)

        time_transfer = time.time()
        print('Texture transfer {} took {:.2f} seconds'.format(
            (z0s, z0, z1, z1s), time_transfer - time_buffered))

        render_to_texture(batch_in_size, batch_out_size, (z0 - z0s, z1 - z0s))

        time_rendered = time.time()
        print('Rendering took {:.2f} seconds'.format(time_rendered - time_transfer))

        buf = (gl.GLubyte * (out_w * out_h * batch_out_d))()
        gl.glBindTexture(gl.GL_TEXTURE_3D, rendered_texture)
        gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1)
        gl.glGetTexImage(gl.GL_TEXTURE_3D, 0, gl.GL_RED, gl.GL_UNSIGNED_BYTE, buf)
        out_batches.append(
            np.frombuffer(buf, dtype=np.uint8).reshape((batch_out_d, out_h, out_w)))

        time_buffered = time.time()
        print('Buffering took {:.2f} seconds'.format(time_buffered - time_rendered))

    out_data = np.concatenate(out_batches)
    time_cat = time.time()
    print('Concatenating took {:.2f} seconds'.format(time_cat - time_buffered))

    # export
    t = time.time()
    tifffile.imwrite(args.out_tiff, out_data, bigtiff=True,
                     compress=args.compress, photometric='minisblack',
                     metadata={
                         'title': str(out_data.shape),
                         'code': str(args.code)
                     })
    time_exported = time.time()
    print('Exporting took {:.2f} seconds; size {:.2f} MB'.format(
        time_exported - t, os.path.getsize(args.out_tiff) / 1024**2))