def _loadAnalyzeFile(filename, name, imgObj, task):
    with f:
        filename = Future.get(filename)
        name = name or self.mgr.getUniqueObjName(splitPathExt(filename)[1])
        img = imgObj or nibabel.load(filename)
        dat = np.asanyarray(img.dataobj)
        hdr = dict(img.header)  # get_header() is deprecated in recent nibabel
        hdr['filename'] = filename
        pixdim = hdr['pixdim']
        interval = float(pixdim[4])

        if interval == 0.0 and len(img.shape) == 4 and img.shape[-1] > 1:
            interval = 1.0

        spacing = vec3(pixdim[1], pixdim[2], pixdim[3])
        dat = eidolon.transposeRowsColsNP(dat)  # transpose from row-column to column-row

        obj = self.createObjectFromArray(name, dat, interval, 0, vec3(), rotator(), spacing, task=task)
        obj.source = hdr
        f.setObject(obj)
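# Hedged usage sketch; the plugin lookup name 'Analyze' and the `mgr` scene
# manager handle are assumptions for illustration:
#
#   imgobj = Future.get(mgr.getPlugin('Analyze').loadObject('scan.hdr'))
#   # imgobj.source holds the Analyze header dict, including 'filename'
#
# Note the interval convention: pixdim[4] is the per-volume timestep; a zero
# interval on a 4D image is coerced to 1.0 so timesteps remain distinct.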
def loadSequence(self, filenames, name=None):
    fileobjs = [self.loadObject(f) for f in filenames]
    f = Future()

    @taskroutine('Loading VTK File Sequence')
    def _loadSeq(filenames, fileobjs, name, task):
        with f:
            fileobjs = list(map(Future.get, fileobjs))
            name = name or fileobjs[0].getName()
            obj = MeshSceneObject(name, [o.datasets[0] for o in fileobjs], self, filenames=filenames)

            for i, o in enumerate(fileobjs):
                descdata = o.kwargs['descdata']
                if not isinstance(descdata, str) and 'timestep' in descdata:
                    obj.timestepList[i] = int(descdata['timestep'])

            f.setObject(obj)

    return self.mgr.runTasks([_loadSeq(filenames, fileobjs, name)], f)
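# Hypothetical usage, assuming this plugin is registered with the scene manager
# `mgr` under the name 'VTK':
#
#   filenames = ['mesh_%.4i.vtk' % i for i in range(10)]
#   obj = Future.get(mgr.getPlugin('VTK').loadSequence(filenames))
#   # obj.timestepList is filled from each file's 'timestep' description entry, if present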
def saveXMLFile(self, filenameprefix, obj, filetype='vtu', setObjArgs=False):
    def writeArray(xo, mat, **kwargs):
        with xmltag(xo, 'DataArray', **kwargs) as xo1:
            o = xo1[1]
            o.write(' ' * xo1[0])
            for n in range(mat.n()):
                for r in mat.getRow(n):
                    o.write(' ' + str(r))
            o.write('\n')

    def writeNodes(xo, nodes):
        with xmltag(xo, 'Points') as xo1:
            with xmltag(xo1, 'DataArray', type="Float32", NumberOfComponents="3", Format="ascii") as xo2:
                indents = ' ' * xo2[0]
                o = xo2[1]
                for n in range(nodes.n()):
                    nn = nodes.getAt(n)
                    o.write('%s%s %s %s\n' % (indents, nn.x(), nn.y(), nn.z()))

    def writeFields(xo, nodefields, cellfields):
        if nodefields:
            with xmltag(xo, 'PointData') as xo1:
                for df in nodefields:
                    writeArray(xo1, df, type="Float32", Name=df.getName(), NumberOfComponents=df.m(), Format="ascii")

        if cellfields:
            with xmltag(xo, 'CellData') as xo1:
                for df in cellfields:
                    writeArray(xo1, df, type="Float32", Name=df.getName(), NumberOfComponents=df.m(), Format="ascii")

    f = Future()

    @taskroutine('Saving VTK XML File')
    def _saveFile(obj, filenameprefix, filetype, setObjArgs, task):
        with f:
            assert filetype in ('vtu',)  # TODO: other file types? No real data format need for anything else, though some may want vtp

            dds = obj.datasets
            filenameprefix = os.path.splitext(filenameprefix)[0]

            if os.path.isdir(filenameprefix):
                filenameprefix = os.path.join(filenameprefix, obj.getName())

            knowncelltypes = {c[2]: c[1] for c in CellTypes}
            cellorders = {c[2]: c[3] for c in CellTypes}

            if len(dds) == 1:
                filenames = [filenameprefix + '.' + filetype]
            else:
                filenames = ['%s_%.4i.%s' % (filenameprefix, i, filetype) for i in range(len(dds))]

            for fn, ds in zip(filenames, dds):
                nodes = ds.getNodes()
                inds = [i for i in ds.enumIndexSets() if i.getType() in knowncelltypes]
                numcells = sum(i.n() for i in inds)
                numindices = sum(i.n() * i.m() for i in inds)
                cellfields = [df for df in ds.enumDataFields() if df.n() == numcells]
                nodefields = [df for df in ds.enumDataFields() if df.n() == nodes.n()]

                with open(fn, 'w') as o:
                    o.write('<?xml version="1.0"?>\n')

                    if filetype == 'vtu':
                        with xmltag(o, 'VTKFile', type="UnstructuredGrid", version="0.1", byte_order="BigEndian") as xo:
                            with xmltag(xo, 'UnstructuredGrid') as xo1:
                                with xmltag(xo1, 'Piece', NumberOfPoints=nodes.n(), NumberOfCells=numcells) as xo2:
                                    writeNodes(xo2, nodes)

                                    # calculate a new indices matrix by combining all those in inds and
                                    # reordering the elements to match VTK ordering
                                    with xmltag(xo2, 'Cells') as xo3:
                                        indices = IndexMatrix('indices', numindices)
                                        offsets = IndexMatrix('offsets', numcells)
                                        types = IndexMatrix('types', numcells)
                                        count = 0
                                        pos = 0
                                        ipos = 0

                                        for ind in inds:
                                            typenum = knowncelltypes[ind.getType()]
                                            order = cellorders[ind.getType()]

                                            for n in range(ind.n()):
                                                count += ind.m()
                                                offsets.setAt(count, pos)  # add element offset
                                                types.setAt(typenum, pos)  # add element type
                                                pos += 1

                                                # reorder the index values of this element to VTK ordering
                                                row = ind.getRow(n)
                                                for nn in order:
                                                    indices.setAt(row[nn], ipos)
                                                    ipos += 1

                                        writeArray(xo3, indices, type="Int32", Name="connectivity", Format="ascii")
                                        writeArray(xo3, offsets, type="Int32", Name="offsets", Format="ascii")
                                        writeArray(xo3, types, type="Int32", Name="types", Format="ascii")

                                    writeFields(xo2, nodefields, cellfields)

            if setObjArgs:
                if len(dds) == 1:
                    args = {'filename': filenames[0]}
                else:
                    args = {'filenames': filenames}

                args['descdata'] = ''
                args['isXML'] = True
                obj.plugin = self
                obj.kwargs = args

            f.setObject(filenames)

    return self.mgr.runTasks([_saveFile(obj, filenameprefix, filetype, setObjArgs)], f)
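# For reference, the writer above emits an ASCII .vtu with this element skeleton
# (a sketch, not verbatim output):
#
#   <?xml version="1.0"?>
#   <VTKFile type="UnstructuredGrid" version="0.1" byte_order="BigEndian">
#     <UnstructuredGrid>
#       <Piece NumberOfPoints="..." NumberOfCells="...">
#         <Points><DataArray type="Float32" NumberOfComponents="3" Format="ascii">...</DataArray></Points>
#         <Cells>
#           <DataArray type="Int32" Name="connectivity" Format="ascii">...</DataArray>
#           <DataArray type="Int32" Name="offsets" Format="ascii">...</DataArray>
#           <DataArray type="Int32" Name="types" Format="ascii">...</DataArray>
#         </Cells>
#         <PointData>/<CellData> field DataArrays
#       </Piece>
#     </UnstructuredGrid>
#   </VTKFile>
#
# Multi-timestep objects produce one numbered file per dataset:
# prefix_0000.vtu, prefix_0001.vtu, ...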
def loadXMLFile(self, filename, name=None):
    def _get(elem, name):
        return elem.get(name) or elem.get(name.lower())

    def readArray(node, byteorder, compressor):
        dtype = np.dtype(_get(node, 'type')).newbyteorder(byteorder)

        if _get(node, 'format') and _get(node, 'format').lower() == 'binary':
            # decodestring() was removed in Python 3.9; decodebytes() is its replacement
            text = base64.decodebytes(node.text.encode())[8:]  # skip 8 byte header?
            if compressor:
                raise NotImplementedError("Haven't figured out compression yet")
                #text=zlib.decompress(text[:24]) # TODO: skip 24 byte header? this refuses to work

            return np.frombuffer(text, dtype=dtype)
        else:
            return np.loadtxt(StringIO(node.text.replace('\n', ' ')), dtype).flatten()

    def readNodes(nodearray, byteorder, compressor):
        assert _get(nodearray, 'NumberOfComponents') == '3'
        arr = readArray(nodearray, byteorder, compressor)
        nodes = eidolon.Vec3Matrix('nodes', arr.shape[0] // 3)  # integer division for a valid matrix size
        np.asarray(nodes).flat[:] = arr
        del arr
        return nodes

    def readFields(celldata, pointdata, byteorder, compressor):
        fields = []
        celldata = list(celldata)
        pointdata = list(pointdata)

        for array in (celldata + pointdata):
            fname = _get(array, 'Name')
            width = int(_get(array, 'NumberOfComponents') or 1)
            arr = readArray(array, byteorder, compressor)
            mat = eidolon.RealMatrix(fname, arr.shape[0] // width, width)
            np.asarray(mat).flat[:] = arr
            del arr
            fields.append(mat)

            if array in celldata:
                mat.meta(StdProps._elemdata, 'True')

        return fields

    def yieldConnectedOffsets(conoffsetpair, byteorder, compressor):
        connect = first(c for c in conoffsetpair if _get(c, 'Name') == 'connectivity')
        offsets = first(c for c in conoffsetpair if _get(c, 'Name') == 'offsets')
        start = 0

        if connect is not None and len(connect.text.strip()) > 0:
            connect = readArray(connect, byteorder, compressor).tolist()
            offsets = readArray(offsets, byteorder, compressor).tolist()

            for off in offsets:
                yield connect[start:off]
                start = off

    f = Future()

    @taskroutine('Loading VTK XML File')
    @eidolon.timing
    def _loadFile(filename, name, task):
        basename = name or os.path.basename(filename).split('.')[0]
        name = uniqueStr(basename, [o.getName() for o in self.mgr.enumSceneObjects()])
        ds = None
        tree = ET.parse(filename)
        root = tree.getroot()
        unstruc = root.find('UnstructuredGrid')
        poly = root.find('PolyData')
        appended = root.find('AppendedData')
        compressor = _get(root, 'compressor')
        byteorder = '<' if root.get('byte_order') == 'LittleEndian' else '>'

        #if appended and _get(appended,'encoding').lower()=='base64':
        #    appended=base64.decodestring(root.find('AppendedData').text)

        if unstruc is not None:
            pieces = list(unstruc)
            points = pieces[0].find('Points')
            cells = pieces[0].find('Cells')
            celldata = pieces[0].find('CellData')
            pointdata = pieces[0].find('PointData')
            nodearray = points.find('DataArray')

            if celldata is None:
                celldata = []
            if pointdata is None:
                pointdata = []

            nodes = readNodes(nodearray, byteorder, compressor)

            connectivity = first(i for i in cells if i.get('Name').lower() == 'connectivity')
            types = first(i for i in cells if i.get('Name').lower() == 'types')
            offsets = first(i for i in cells if i.get('Name').lower() == 'offsets')

            indlist = readArray(connectivity, byteorder, compressor).tolist()  # faster as Python list?
            fields = readFields(celldata, pointdata, byteorder, compressor)
            celltypes = readArray(types, byteorder, compressor)
            offlist = readArray(offsets, byteorder, compressor)
            cellofflist = np.vstack((celltypes, offlist)).T.tolist()  # pair each cell type entry with its offset entry

            assert len(celltypes) == len(offlist)

            # map cell type IDs to IndexMatrix objects for containing the indices of that type and node ordering list
            indmats = {i: (IndexMatrix(n + 'Inds', e, 0, len(s)), s) for n, i, e, s in CellTypes}

            for celltype, off in cellofflist:
                indmat, _ = indmats.get(celltype, (None, []))

                if indmat is not None:  # only found for those cell types we understand (ie. not polygon)
                    indmat.append(*indlist[off - indmat.m():off])

            inds = []
            for ind, order in indmats.values():  # collect and reorder all non-empty index matrices
                if ind.n() > 0:
                    ind[:, :] = np.asarray(ind)[:, order]  # reorder columns to match CHeart node ordering
                    inds.append(ind)

            ds = PyDataSet('vtk', nodes, inds, fields)

        elif poly is not None:
            pieces = list(poly)
            #numPoints=int(_get(pieces[0],'NumberOfPoints'))
            points = pieces[0].find('Points')
            celldata = pieces[0].find('CellData')
            pointdata = pieces[0].find('PointData')
            nodearray = points.find('DataArray')
            nodes = readNodes(nodearray, byteorder, compressor)
            inds = []

            lines = IndexMatrix('lines', ElemType._Line1NL, 0, 2)
            tris = IndexMatrix('tris', ElemType._Tri1NL, 0, 3)
            quads = IndexMatrix('quads', ElemType._Quad1NL, 0, 4)

            for a, b in yieldConnectedOffsets(pieces[0].find('Lines'), byteorder, compressor):
                lines.append(a, b)

            for strip in yieldConnectedOffsets(pieces[0].find('Strips'), byteorder, compressor):
                for a, b, c in eidolon.successive(strip, 3):
                    tris.append(a, b, c)

            for poly in yieldConnectedOffsets(pieces[0].find('Polys'), byteorder, compressor):
                if len(poly) == 2:
                    lines.append(*poly)
                elif len(poly) == 3:
                    tris.append(*poly)
                elif len(poly) == 4:
                    quads.append(*poly)
                # TODO: read in arbitrary polygon and triangulate?

            if len(lines) > 0:
                inds.append(lines)
            if len(tris) > 0:
                inds.append(tris)
            if len(quads) > 0:
                quads[:, :] = np.asarray(quads)[:, CellTypes.Quad[-1]]
                inds.append(quads)

            fields = readFields(celldata, pointdata, byteorder, compressor)

            ds = PyDataSet('vtk', nodes, inds, fields)
        else:
            raise NotImplementedError('Dataset not understood yet')

        f.setObject(MeshSceneObject(name, ds, self, filename=filename, isXML=True, descdata=''))

    return self.mgr.runTasks([_loadFile(filename, name)], f)
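# How the connectivity/offsets pair decodes into cells, as used by both
# yieldConnectedOffsets and the unstructured-grid branch above (values illustrative):
#
#   connectivity = [0, 1, 2,   2, 1, 3]   # flat node indices for all cells
#   offsets      = [3, 6]                 # each entry marks the END of a cell
#   # -> cells [0, 1, 2] and [2, 1, 3]; for fixed-width cell types the same
#   # slice is taken with indlist[off - indmat.m():off]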
def saveLegacyFile(self, filename, obj, **kwargs):
    dsindex = kwargs.get('dsindex', 0)
    ds = obj.datasets[dsindex] if isinstance(obj, MeshSceneObject) else obj
    datasettype = kwargs.get('datasettype', ds.meta(VTKProps.datasettype)) or DatasetTypes._UNSTRUCTURED_GRID
    desc = kwargs.get('descStr', ds.meta(VTKProps._desc)).strip()
    writeFields = kwargs.get('writeFields', True)
    vecfunc = kwargs.get('vecfunc', tuple)
    version = 3.0

    assert datasettype in DatasetTypes, 'Unsupported dataset type: %s' % datasettype

    if not desc:
        if isinstance(obj, MeshSceneObject):
            desc = repr({'desc': 'Eidolon Output For ' + obj.getName(), 'timestep': obj.timestepList[dsindex]})
        else:
            desc = 'Eidolon Output For ' + obj.getName()

    f = Future()

    @taskroutine('Saving VTK Legacy File')
    def _saveFile(filename, ds, datasettype, desc, version, task):
        with f:
            nodes = ds.getNodes()

            with open(filename, 'w') as o:
                o.write('# vtk DataFile Version %.1f\n%s\nASCII\nDATASET %s' % (version, desc, datasettype))

                if datasettype == DatasetTypes._STRUCTURED_GRID:
                    griddims = eval(ds.meta(VTKProps._griddims))
                    o.write(' %i %i %i' % griddims)

                o.write('\n')

                # write out points
                o.write('POINTS %i double\n' % nodes.n())
                for n in range(nodes.n()):
                    o.write('%f %f %f\n' % vecfunc(nodes.getAt(n)))

                # write out the extra components for unstructured grids
                if datasettype == DatasetTypes._UNSTRUCTURED_GRID:
                    cells = []
                    celltypes = []

                    for inds in ds.indices.values():
                        tname, cid, sortinds = first((n, i, s) for n, i, e, s in CellTypes if e == inds.getType()) or (None, None, None)

                        if tname == CellTypes._Poly:
                            polyinds = ds.getIndexSet(inds.meta(VTKProps._polyinds))
                            celltypes += [cid] * polyinds.n()

                            for p in range(polyinds.n()):
                                start, end = polyinds.getRow(p)
                                poly = tuple(inds.getAt(i) for i in range(start, end))
                                cells.append(poly)
                        elif tname is not None:
                            #unsortinds=list(reversed(indexList(sortinds,list(reversed(range(len(sortinds)))))))
                            unsortinds = eidolon.indexList(sortinds, list(range(len(sortinds))))
                            celltypes += [cid] * inds.n()

                            for ind in range(inds.n()):
                                cells.append(eidolon.indexList(unsortinds, inds.getRow(ind)))

                    if len(cells) > 0:
                        o.write('CELLS %i %i\n' % (len(cells), sum(len(c) + 1 for c in cells)))
                        for c in cells:
                            o.write(' '.join(map(str, [len(c)] + list(c))) + '\n')

                        o.write('CELL_TYPES %i\n' % len(celltypes))
                        o.write('\n'.join(map(str, celltypes)) + '\n')

                # write out fields as POINT_DATA, CELL_DATA is not supported
                fields = list(ds.fields.values()) if writeFields else []

                if len(fields) > 0:
                    o.write('POINT_DATA %i\n' % nodes.n())

                    for dat in fields:
                        atype = dat.meta(VTKProps._attrtype) or AttrTypes._SCALARS
                        name = dat.getName()

                        if atype in (AttrTypes._SCALARS, AttrTypes._VECTORS, AttrTypes._NORMALS, AttrTypes._TENSORS):
                            o.write('%s %s float\n' % (atype, name))  # scalars doesn't preserve any lookup table components
                        elif atype in (AttrTypes._TEXTURE_COORDINATES, AttrTypes._COLOR_SCALARS):
                            dtype = ' float' if atype == AttrTypes._TEXTURE_COORDINATES else ''
                            o.write('%s %s %s\n' % (atype, dat.m(), dtype))
                        elif atype == AttrTypes._LOOKUP_TABLE:
                            o.write('%s %i\n' % (atype, dat.n()))
                        else:
                            continue  # skips field matrices if these get stored

                        for n in range(dat.n()):
                            o.write(' '.join(map(str, dat.getRow(n))) + '\n')

            f.setObject(filename)

    return self.mgr.runTasks([_saveFile(filename, ds, datasettype, desc, version)], f)
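# Sketch of the legacy-format preamble _saveFile writes (unstructured grid case):
#
#   # vtk DataFile Version 3.0
#   {'desc': 'Eidolon Output For mesh', 'timestep': 0}
#   ASCII
#   DATASET UNSTRUCTURED_GRID
#   POINTS <n> double
#   ...
#   CELLS <numcells> <numentries>
#   ...
#   CELL_TYPES <numcells>
#
# The description line is the repr() of a dict so that loadLegacyFile below can
# eval() it back into metadata such as the timestep.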
def loadLegacyFile(self, filename, name=None, strdata=None):
    f = Future()

    @taskroutine('Loading VTK Legacy File')
    def _loadFile(filename, name, strdata, task):
        result = self.parseString(strdata or open(filename).read())
        basename = name or os.path.basename(filename).split('.')[0]
        name = uniqueStr(basename, [o.getName() for o in self.mgr.enumSceneObjects()])

        version, desc, data = result[:3]
        pointattrs = [a for a in result[3:] if a[0] == 'POINT_DATA']
        cellattrs = [a for a in result[3:] if a[0] == 'CELL_DATA']

        ds = None
        indmats = []
        metamap = {
            VTKProps.desc: desc,
            VTKProps.version: str(version),
            VTKProps.datasettype: data[0]
        }

        # interpret dataset blocks
        if data[0] == DatasetTypes._UNSTRUCTURED_GRID:
            nodes, cells, celltypes = data[1:]

            # map cell types to the indices of members of `cells' of that type
            typeindices = {}
            for i in range(celltypes.n()):
                typeindices.setdefault(celltypes.getAt(i), []).append(i)

            for ctype, inds in typeindices.items():
                tname, elemtypename, sortinds = first((n, e, s) for n, i, e, s in CellTypes if i == ctype) or (None, None, None)
                matname = '' if tname is None else uniqueStr(tname, [i.getName() for i in indmats], '')

                if tname == CellTypes._Poly:
                    mat = IndexMatrix(matname, elemtypename, 0)
                    polyinds = IndexMatrix(matname + 'Inds', VTKProps._polyinds, 0, 2)
                    mat.meta(VTKProps._polyinds, polyinds.getName())
                    indmats.append(mat)
                    indmats.append(polyinds)

                    for ind in inds:
                        row = cells.getRow(ind)
                        length = row[0]
                        polyinds.append(mat.n(), mat.n() + length)

                        for r in row[1:length + 1]:
                            mat.append(r)
                elif tname is not None:
                    elemtype = ElemType[elemtypename]
                    mat = IndexMatrix(matname, elemtypename, 0, elemtype.numNodes())
                    indmats.append(mat)

                    for ind in inds:
                        sortedinds = eidolon.indexList(sortinds, cells.getRow(ind)[1:])
                        mat.append(*sortedinds)

        elif data[0] == DatasetTypes._STRUCTURED_GRID:
            dims, nodes = data[1:]
            dimx, dimy, dimz = map(int, dims)

            assert dimx > 1
            assert dimy > 1
            assert dimz > 1

            _, inds = eidolon.generateHexBox(dimx - 2, dimy - 2, dimz - 2)
            inds = eidolon.listToMatrix(inds, 'hexes')
            inds.setType(ElemType._Hex1NL)
            indmats = [inds]
            metamap[VTKProps._griddims] = repr((dimx, dimy, dimz))

        elif data[0] == DatasetTypes._POLYDATA:
            nodes = data[1]
            polyelems = data[2:]

            lines = IndexMatrix('lines', ElemType._Line1NL, 0, 2)
            tris = IndexMatrix('tris', ElemType._Tri1NL, 0, 3)
            quads = IndexMatrix('quads', ElemType._Quad1NL, 0, 4)

            for pname, numelems, numvals, ind in polyelems:
                n = 0

                if pname == 'POLYGONS':
                    while n < ind.n():
                        polylen = ind.getAt(n)

                        if polylen == 2:
                            lines.append(ind.getAt(n + 1), ind.getAt(n + 2))
                        elif polylen == 3:
                            tris.append(ind.getAt(n + 1), ind.getAt(n + 2), ind.getAt(n + 3))
                        elif polylen == 4:
                            quads.append(ind.getAt(n + 1), ind.getAt(n + 2), ind.getAt(n + 4), ind.getAt(n + 3))

                        n += polylen + 1

            if len(tris) > 0:
                indmats.append(tris)
            if len(quads) > 0:
                indmats.append(quads)
            if len(lines) > 0:
                indmats.append(lines)
        else:
            raise NotImplementedError('Dataset type %s not understood yet' % str(data[0]))

        ds = PyDataSet('vtk', nodes, indmats)
        for k, v in metamap.items():
            ds.meta(k, v)

        # read attributes into fields
        for attr in list(pointattrs) + list(cellattrs):
            for attrtype in attr[2:]:
                atype = str(attrtype[0])
                spatialname = first(ds.indices.keys())  # TODO: choose a better topology

                if atype == AttrTypes._FIELD:
                    for fname, width, length, dtype, dat in attrtype[3:]:
                        assert (width * length) == dat.n()
                        assert length == nodes.n() or length == ds.indices[spatialname].n()
                        dat.setName(fname)
                        dat.setM(width)
                        dat.meta(StdProps._topology, spatialname)
                        dat.meta(StdProps._spatial, spatialname)
                        dat.meta(VTKProps._attrtype, atype)
                        ds.setDataField(dat)
                else:
                    dat = attrtype[-1]
                    dat.setName(str(attrtype[1]))
                    dat.meta(StdProps._topology, spatialname)
                    dat.meta(StdProps._spatial, spatialname)
                    dat.meta(VTKProps._attrtype, atype)
                    ds.setDataField(dat)

                    if atype in (AttrTypes._NORMALS, AttrTypes._VECTORS):
                        dat.setM(3)
                    elif atype == AttrTypes._LOOKUP_TABLE:
                        dat.setM(4)
                    elif atype == AttrTypes._TENSORS:
                        dat.setM(9)
                    elif atype in (AttrTypes._TEXTURE_COORDINATES, AttrTypes._COLOR_SCALARS):
                        dat.setM(attrtype[2])
                    elif atype == AttrTypes._SCALARS:
                        if isinstance(attrtype[3], int):
                            dat.setM(attrtype[3])

                        if attrtype[3] == AttrTypes._LOOKUP_TABLE:
                            dat.meta(AttrTypes._LOOKUP_TABLE, str(attrtype[4]))
                        elif attrtype[4] == AttrTypes._LOOKUP_TABLE:
                            dat.meta(AttrTypes._LOOKUP_TABLE, str(attrtype[5]))

        try:
            # if desc is a Python object (eg. timestep number) attempt to evaluate it
            descdata = eval(desc)
        except Exception:
            descdata = desc  # just a normal string

        f.setObject(MeshSceneObject(name, ds, self, filename=filename, descdata=descdata, result=result))

    return self.mgr.runTasks([_loadFile(filename, name, strdata)], f)
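# Hedged round-trip sketch; the plugin lookup name 'VTK' is an assumption:
#
#   vtk = mgr.getPlugin('VTK')
#   obj = Future.get(vtk.loadLegacyFile('input.vtk'))
#   # ... modify obj.datasets[0] ...
#   vtk.saveLegacyFile('output.vtk', obj, datasettype=DatasetTypes._UNSTRUCTURED_GRID)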
def saveObject(self, obj, path, overwrite=False, setFilenames=False, **kwargs):
    f = Future()

    @taskroutine('Saving Nifti File')
    def _saveFile(path, obj, kwargs, task):
        with f:
            assert isinstance(obj, ImageSceneObject)

            if os.path.isdir(path):
                path = os.path.join(path, obj.getName())

            if not overwrite and os.path.exists(path):
                raise IOError('File already exists: %r' % path)

            if not eidolon.hasExtension(path, 'nii', 'nii.gz'):
                path += '.nii'

            if 'datatype' in kwargs:
                datatype = kwargs.pop('datatype')
            elif isinstance(obj.source, dict) and 'datatype' in obj.source:
                datatype = data_type_codes.dtype[int(obj.source['datatype'])]
            else:
                datatype = np.float32

            mat = self.getImageObjectArray(obj, datatype)
            dat = mat['array']
            pos = mat['pos']
            spacex, spacey, spacez = mat['spacing']
            rot = rotator(vec3(0, 0, 1), math.pi) * mat['rot'] * rotator(vec3(0, 0, 1), -halfpi)
            toffset = mat['toffset']
            interval = mat['interval']

            affine = np.array(rot.toMatrix())
            affine[:, 3] = -pos.x(), -pos.y(), pos.z(), 1.0

            dat = eidolon.transposeRowsColsNP(dat)  # transpose from row-column to column-row

            imgobj = nibabel.nifti1.Nifti1Image(dat, affine)

            # header info: http://nifti.nimh.nih.gov/pub/dist/src/niftilib/nifti1.h
            hdr = {
                'pixdim': np.array([1.0, spacex, spacey, spacez if spacez != 0.0 else 1.0, interval, 1.0, 1.0, 1.0], np.float32),
                'toffset': toffset,
                'slice_duration': interval,
                'xyzt_units': unit_codes['mm'] | unit_codes['msec'],
                'qform_code': xform_codes['aligned'],
                'sform_code': xform_codes['scanner'],
                'datatype': data_type_codes.code[datatype]
            }

            hdr.update(kwargs)

            for k, v in hdr.items():
                if k in imgobj.header:
                    imgobj.header[k] = v

            nibabel.save(imgobj, path)

            if setFilenames:
                obj.plugin.removeObject(obj)
                obj.plugin = self
                obj.source = dict(nibabel.load(path).header)  # get_header() is deprecated in recent nibabel
                obj.source['filename'] = path
            elif isinstance(obj.source, dict) and 'filename' in obj.source:
                obj.source['filename'] = path

            f.setObject(imgobj)

    return self.mgr.runTasks([_saveFile(path, obj, kwargs)], f)
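# Layout of the affine written above: the rotation part comes from rot.toMatrix()
# and the last column is (-pos.x(), -pos.y(), pos.z(), 1), mirroring the sign
# flips applied when loading (see _loadNiftiFile below):
#
#   affine = | R00 R01 R02 -px |
#            | R10 R11 R12 -py |
#            | R20 R21 R22  pz |
#            |  0   0   0   1  |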
def _loadNiftiFile(filename, name, imgObj, task):
    with f:
        filename = Future.get(filename)
        name = name or self.mgr.getUniqueObjName(splitPathExt(filename)[1])
        img = imgObj or nibabel.load(filename)
        hdr = dict(img.header)
        hdr['filename'] = filename

        pixdim = hdr['pixdim']
        xyzt_units = hdr['xyzt_units']
        x = float(hdr['qoffset_x'])
        y = float(hdr['qoffset_y'])
        z = float(hdr['qoffset_z'])
        b = float(hdr['quatern_b'])
        c = float(hdr['quatern_c'])
        d = float(hdr['quatern_d'])
        toffset = float(hdr['toffset'])
        interval = float(pixdim[4])

        if interval == 0.0 and len(img.shape) == 4 and img.shape[-1] > 1:
            interval = 1.0

        qfac = float(pixdim[0]) or 1.0
        spacing = vec3(pixdim[1], pixdim[2], qfac * pixdim[3])

        if int(hdr['qform_code']) > 0:
            position = vec3(-x, -y, z)
            rot = rotator(-c, b, math.sqrt(max(0, 1.0 - (b * b + c * c + d * d))), -d) * rotator(vec3.Z(), halfpi)
        else:
            affine = img.affine  # get_affine() is deprecated in recent nibabel
            position = vec3(-affine[0, 3], -affine[1, 3], affine[2, 3])
            rmat = np.asarray([
                affine[0, :3] / -spacing.x(),
                affine[1, :3] / -spacing.y(),
                affine[2, :3] / spacing.z()
            ])
            rot = rotator(*rmat.flatten().tolist()) * rotator(vec3.Z(), halfpi)

        xyzunit = xyzt_units & 0x07  # isolate space units with a bitmask of 7
        tunit = xyzt_units & 0x38  # isolate time units with a bitmask of 56

        if tunit == 0:  # if no tunit provided, try to guess
            if interval < 1.0:
                tunit = unit_codes['sec']
            elif interval > 1000.0:
                tunit = unit_codes['usec']

        # convert to millimeters
        if xyzunit == unit_codes['meter']:
            position *= 1000.0
            spacing *= 1000.0
        elif xyzunit == unit_codes['micron']:
            position /= 1000.0
            spacing /= 1000.0

        # convert to milliseconds
        if tunit == unit_codes['sec']:
            toffset *= 1000.0
            interval *= 1000.0
        elif tunit == unit_codes['usec']:
            toffset /= 1000.0
            interval /= 1000.0

        dobj = img.dataobj
        datshape = tuple(d or 1 for d in dobj.shape)  # dimensions are sometimes given as 0 for some reason?

        # reading file data directly is expected to be faster than using nibabel, specifically by using memmap
        if filename.endswith('.gz'):
            dat = np.asanyarray(dobj)  # get_data() is deprecated/removed in recent nibabel; this is equivalent
            # with gzip.open(filename) as o: # TODO: not sure if this is any faster than the above
            #     o.seek(dobj.offset) # seek beyond the header
            #     dat=np.frombuffer(o.read(),dobj.dtype).reshape(datshape,order=dobj.order)
        else:
            # mmap the image data below the header in the file
            dat = np.memmap(dobj.file_like, dobj.dtype, 'r', dobj.offset, datshape, dobj.order)

        dat = eidolon.transposeRowsColsNP(dat)  # transpose from row-column to column-row

        obj = self.createObjectFromArray(name, dat, interval, toffset, position, rot, spacing, task=task)
        obj.source = hdr

        # apply slope since this isn't done automatically when using memmap/gzip
        if not filename.endswith('.gz'):
            eidolon.applySlopeIntercept(obj, *img.header.get_slope_inter())

        f.setObject(obj)
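# Two conventions used above, for reference. NIfTI-1 stores only the b, c, d
# quaternion components; the scalar part is recovered as sqrt(1-(b*b+c*c+d*d)),
# clamped at 0 against rounding error, which is the max(0, ...) guard above.
# The xyzt_units byte packs spatial units in bits 0-2 and time units in bits
# 3-5; a sketch with literal NIfTI-1 codes:
#
#   xyzt_units = 0x0A            # mm (2) | sec (8)
#   xyzunit = xyzt_units & 0x07  # -> 2 (millimetres)
#   tunit   = xyzt_units & 0x38  # -> 8 (seconds)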
def _loadFile(filename, name, position=None, rot=None, toffset=None, interval=None, task=None):
    with f:
        filename = Future.get(filename)
        name = name or self.mgr.getUniqueObjName(splitPathExt(filename)[1])
        recfile = os.path.splitext(filename)[0]

        if os.path.exists(recfile + '.rec'):
            recfile = recfile + '.rec'
        elif os.path.exists(recfile + '.REC'):
            recfile = recfile + '.REC'
        else:
            raise IOError("Cannot find rec file '%s.rec'" % recfile)

        geninfo, imginfo = parseParFile(filename)  # read par file
        rec = np.fromfile(recfile, np.uint8)  # read rec file

        # numorients=geninfo[genInfoFields.maxgrad[2]][0]
        # numslices=geninfo[genInfoFields.maxloc[2]][0]
        # numsteps=geninfo[genInfoFields.maxphase[2]][0]
        # slicenum=imgInfoFields.slicenum[-1]
        # trigger=imgInfoFields.trigger[-1]
        # numslices=len(set(i[slicenum] for i in imginfo))
        #
        # # count the number of times the slice number decreases one slice to the next, this indicates how many times the slice index loops back
        # numorients=1+sum(1 if imginfo[i][slicenum]>imginfo[i+1][slicenum] else 0 for i in range(len(imginfo)-1))
        #
        # # count the number of times the trigger time decreases one slice to the next, this indicates when the images transition between volumes
        # numvols=1+sum(1 if imginfo[i][trigger]>imginfo[i+1][trigger] else 0 for i in range(len(imginfo)-1))/(numorients*numslices)
        #
        # if len(imginfo)!=(numvols*numorients*numslices*numsteps):
        #     raise IOError('Mismatch between stated orient, slice, and step numbers and number of images (%r != %r*%r*%r*%r)'%(len(imginfo),numorients,numslices,numsteps,numvols))
        #
        # orientsize=len(imginfo)/numorients

        datasize = 0
        objs = []
        rpos = 0
        typemap = {}  # maps type ID to dict mapping dynamic ID to SharedImage lists

        # sum up the sizes of each image to compare against the actual size of the rec file
        for imgi in imginfo:
            w, h = imgi[imgInfoFields.reconres[-1]]
            pixelsize = imgi[imgInfoFields.imgpix[-1]] // 8  # convert from bits to bytes
            datasize += w * h * pixelsize

        if rec.shape[0] != datasize:
            raise IOError('Rec file incorrect size, should be %i but is %i' % (datasize, rec.shape[0]))

        for imgi in imginfo:
            dynamic = imgi[imgInfoFields.dynnum[-1]]
            itype = imgi[imgInfoFields.imgtypemr[-1]]
            dims = imgi[imgInfoFields.reconres[-1]]
            trigger = imgi[imgInfoFields.trigger[-1]]
            orientation = imgi[imgInfoFields.sliceori[-1]]
            spacing = imgi[imgInfoFields.pixspace[-1]]
            offcenter = imgi[imgInfoFields.imgoff[-1]]
            angulation = imgi[imgInfoFields.imgang[-1]]
            pixelsize = imgi[imgInfoFields.imgpix[-1]]
            reslope = imgi[imgInfoFields.rescalesl[-1]]
            intercept = imgi[imgInfoFields.rescalein[-1]]

            if itype not in typemap:
                typemap[itype] = dict()

            if dynamic not in typemap[itype]:
                typemap[itype][dynamic] = []

            images = typemap[itype][dynamic]

            dtype = np.dtype('uint' + str(pixelsize))
            pos, rot = getTransformFromInfo(offcenter, angulation, orientation, vec3(*spacing), vec3(*dims))
            imgsize = dims[0] * dims[1] * dtype.itemsize

            arr = rec[rpos:rpos + imgsize].view(dtype).reshape(dims)
            rpos += imgsize

            if scalemethod in ('dv', 'DV'):
                arr = (arr.astype(float) * reslope) + intercept  # DV scaling method

            simg = SharedImage(recfile, pos, rot, dims, spacing, trigger)
            simg.allocateImg('%s_t%i_d%i_img%i' % (name, itype, dynamic, len(images)))
            #simg.setArrayImg(arr)
            simg.setMinMaxValues(arr.min(), arr.max())
            np.asarray(simg.img)[:, :] = arr
            images.append(simg)

        for itype in typemap:
            for dynamic, images in typemap[itype].items():
                vname = '%s_t%i_d%i' % (name, itype, dynamic)
                source = {
                    'geninfo': geninfo,
                    'imginfo': imginfo,
                    'filename': filename,
                    'scalemethod': scalemethod,
                    'loadorder': len(objs)
                }
                obj = ImageSceneObject(vname, source, images, self)
                objs.append(obj)

        # for numo in range(numorients):
        #     orientimgs=imginfo[numo*orientsize:(numo+1)*orientsize]
        #
        #     for numv in range(numvols):
        #         volsimgs=[img for i,img in enumerate(orientimgs) if i%numvols==numv]
        #         images=[]
        #         for imgi in volsimgs:
        #             vname='%s_o%i_v%i'%(name,numo,numv)
        #             dims=imgi[imgInfoFields.reconres[-1]]
        #             trigger=imgi[imgInfoFields.trigger[-1]]
        #             orientation=imgi[imgInfoFields.sliceori[-1]]
        #             spacing=imgi[imgInfoFields.pixspace[-1]]
        #             offcenter=imgi[imgInfoFields.imgoff[-1]]
        #             angulation=imgi[imgInfoFields.imgang[-1]]
        #             pixelsize=imgi[imgInfoFields.imgpix[-1]]
        #
        #             reslope=imgi[imgInfoFields.rescalesl[-1]]
        #             intercept=imgi[imgInfoFields.rescalein[-1]]
        #
        #             dtype=np.dtype('uint'+str(pixelsize))
        #
        #             pos,rot=self._getTransformFromInfo(offcenter,angulation,orientation,vec3(*spacing),vec3(*dims))
        #
        #             imgsize=dims[0]*dims[1]*dtype.itemsize
        #             arr=rec[rpos:rpos+imgsize].view(dtype).reshape(dims)
        #             rpos+=imgsize
        #
        #             if scalemethod in ('dv','DV'):
        #                 arr=(arr.astype(float)*reslope)+intercept # DV scaling method
        #
        #             simg=SharedImage(recfile,pos,rot,dims,spacing,trigger)
        #             simg.allocateImg('%s_img%i'%(vname,len(images)))
        #             simg.setArrayImg(arr)
        #             images.append(simg)
        #
        #         obj=ImageSceneObject(vname,{'geninfo':geninfo,'imginfo':imginfo,'filename':filename},images,self)
        #         objs.append(obj)

        assert rpos == rec.shape[0], '%i != %i' % (rpos, rec.shape[0])
        f.setObject(objs)
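# Hedged usage sketch: the loader produces one ImageSceneObject per (image type,
# dynamic) pair found in the .par file, named with the '%s_t%i_d%i' pattern above:
#
#   objs = Future.get(f)  # list of ImageSceneObjects once the task completes
#   for o in objs:
#       mgr.addSceneObject(o)  # `mgr` handle assumed for illustration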