def saveImage(self, data, series, frame):
    """Save image data to disk as a CBF file.

    data:   image array to write
    series: series number, used in the file name
    frame:  zero-based frame index (stored one-based in the file name)
    Returns the full path of the written file.
    """
    path = os.path.join(
        self.path,
        self.basename + "_%05d_%05d%s" % (series, frame + 1, self.ftype))
    if albula:
        # prefer the albula writer when the library is available
        albula.DImageWriter.write(albula.DImage().fromData(data), path)
    else:
        cbf.write(path, data, header=self._createHeader(self.metadata))
    # print() call for consistency with the rest of the file (L2/L3/L5 style);
    # the single-argument form behaves identically on Python 2 and 3
    print("[OK] wrote %s" % path)
    return path
def test_write(self):
    """Round-trip test: write random int32 data to CBF and read it back.

    Repeats many times because compression failures were observed to be
    data-dependent (some value ranges could not be compressed at all).
    """
    for i in range(1000):
        logging.info('start')
        test_file = 'test.cbf'
        # Ensure that file does not exist
        try:
            os.remove(test_file)
        except OSError:
            pass
        min_number = 0
        # 2147483647 is the max int32 value; 2147483648 would overflow int32
        max_number = 2147483647
        numpy_array = numpy.random.randint(
            min_number, max_number, (500, 400)).astype('int32')
        # force the extreme value into the data to exercise the boundary
        numpy_array[0][0] = max_number
        print(numpy_array[0][0])
        cbf.write(test_file, numpy_array)
        content = cbf.read(test_file)
        # dump diagnostics before the assertion so a failure is debuggable
        if not (numpy_array == content.data).all():
            print('NOT SAME')
            print((numpy_array == content.data).sum())
            print(content.data)
            print(content.metadata)
            print(i)
        self.assertTrue((numpy_array == content.data).all())
        # Remove test file
        os.remove(test_file)
def test_write(self):
    """Round-trip test: write random int32 data to CBF and read it back.

    Repeats many times because compression failures were observed to be
    data-dependent (some value ranges could not be compressed at all).
    NOTE(review): near-duplicate of the other test_write in this file —
    consider consolidating.
    """
    for i in range(1000):
        logging.info('start')
        test_file = 'test.cbf'
        # Ensure that file does not exist
        try:
            os.remove(test_file)
        except OSError:
            pass
        min_number = 0
        # 2147483647 is the max int32 value; 2147483648 would overflow int32
        max_number = 2147483647
        numpy_array = numpy.random.randint(
            min_number, max_number, (500, 400)).astype('int32')
        # force the extreme value into the data to exercise the boundary
        numpy_array[0][0] = max_number
        print(numpy_array[0][0])
        cbf.write(test_file, numpy_array)
        content = cbf.read(test_file)
        # dump diagnostics before the assertion so a failure is debuggable;
        # bug fix: this branch runs when the arrays do NOT match, but it
        # previously printed 'SAME'
        if not (numpy_array == content.data).all():
            print('NOT SAME')
            print((numpy_array == content.data).sum())
            print(content.data)
            print(content.metadata)
            print(i)
        self.assertTrue((numpy_array == content.data).all())
def saveTable(self, frames, name="", ftype=".dat"):
    """Save pixel mask, flatfield or LUT received as a two-part message.

    frames: sequence where frames[0].bytes is a JSON header containing
            "type" (numpy dtype string) and "shape", and frames[1].bytes
            is the raw array payload — presumably zmq frames (TODO confirm)
    name:   tag inserted into the output file name
    ftype:  output format, ".dat" (np.savetxt) or ".cbf" (cbf.write)
    Returns the full path of the written file.
    Raises IOError for any other ftype.
    """
    path = os.path.join(
        self.path,
        self.basename + "_%05d_%s%s" % (self.series, name, ftype))
    header = json.loads(frames[0].bytes)
    dtype = np.dtype(header["type"])
    # frombuffer replaces the deprecated np.fromstring for binary payloads;
    # shape is reversed because the header stores it in (x, y) order
    data = np.reshape(np.frombuffer(frames[1].bytes, dtype=dtype),
                      header["shape"][::-1])
    if ftype == ".dat":
        np.savetxt(path, data)
    elif ftype == ".cbf":
        cbf.write(path, data)
    else:
        raise IOError("file type %s not known. Allowed are .cbf|.dat" % ftype)
    print("[OK] wrote %s" % path)
    # return the path for consistency with saveImage
    return path
def savecbf(filename, data):
    """Write *data* to *filename* as a CBF file, skipping existing files.

    If the target already exists, nothing is written and a message is
    printed instead. Always returns None.
    """
    if isfile(filename):
        # refuse to overwrite an existing file
        print("file ", filename, " does already exist!")
        return None
    cbf.write(filename, data)
def readBSLZ4(datafile, headerfile):
    """Unpack a bitshuffle-lz4 compressed frame and return a numpy array.

    datafile:   path to the raw blob (12-byte prefix + compressed payload)
    headerfile: path to a JSON file with "shape" and "type" of the image
    Returns the decompressed image data as a numpy array.
    """
    # "rb" is required: the payload is binary, and reading it in text mode
    # corrupts the data (and yields str instead of bytes on Python 3)
    with open(datafile, "rb") as f:
        data = f.read()
    with open(headerfile, "r") as f:
        header = json.loads(f.read())
    shape = header["shape"]
    dtype = np.dtype(header["type"])
    # frombuffer replaces the deprecated np.fromstring; payload starts at byte 12
    blob = np.frombuffer(data[12:], dtype=np.uint8)
    # blocksize is big endian uint32 starting at byte 8, divided by element size
    blocksize = np.ndarray(shape=(), dtype=">u4", buffer=data[8:12]) / dtype.itemsize
    print(blocksize, dtype.itemsize)
    # shape is reversed because the header stores it in (x, y) order
    imgData = bitshuffle.decompress_lz4(blob, shape[::-1], dtype, blocksize)
    print("[OK] unpacked {0} bytes of bs-lz4 data".format(len(imgData)))
    return imgData


if __name__ == "__main__":
    data = readBSLZ4("/Users/sascha.grimm/Downloads/lcp_100hz/000078_002.raw",
                     "/Users/sascha.grimm/Downloads/lcp_100hz/000078_001.raw")
    tifffile.imsave("bslz4_16bit.tiff", data)
    tifffile.imsave("bslz4_32bit.tiff", data.astype("uint32"))
    cbf.write("bslz4_32bit.cbf", data.astype("uint32"))