def loadFromFile(self, filename):
    """
    Load the given file through an InterfileDataSource and wrap the
    result in a single DataUnit.

    Parameters:
    filename    path of the file to be loaded
    @return List containing the single created DataUnit
    """
    source = InterfileDataSource(filename)
    unit = DataUnit()
    unit.setDataSource(source)
    return [unit]
def loadFromFile(self, filename):
    """
    Load an image from an MRC file and create its own data source.

    @param filename Path to the MRC file
    @return List of (name, DataUnit) tuples for the datasets in the
            file (always exactly one)
    """
    self.filename = filename
    displayName = self.getShortName()
    source = MRCDataSource(filename)
    unit = DataUnit()
    unit.setDataSource(source)
    return [(displayName, unit)]
def loadFromFile(self, filename): """ Loads a directory of slices """ self.filename = filename filebase, ext = os.path.splitext(filename) directoryname = os.path.abspath(os.path.dirname(filename)) ext = ext[1:] files = glob.glob(os.path.join(directoryname, "*.%s" % ext)) print "Loading files", files datasource = FileListDataSource.FileListDataSource() datasource.setSlicesPerTimepoint(len(files)) datasource.setFilenames(files) dataunit = DataUnit() dataunit.setDataSource(datasource) settings = dataunit.getSettings() dims = datasource.getDimensions() settings.set("Dimensions", dims) settings.set("Name", os.path.basename(os.path.dirname(filename))) return [dataunit]
class OlympusDataSource(DataSource): """ Olympus OIF files datasource """ def __init__(self, filename = "", channel = -1, \ name = ""): """ Constructor """ DataSource.__init__(self) self.channel = channel if not name: name = "Ch%d" % channel self.bitdepth = 12 self.name = name self.timepoint = 0 self.timepoints = 0 self.filename = filename self.parser = ConfigParser.RawConfigParser() if filename: filepointer = codecs.open(self.convertFileName(filename), "r", "utf-16") self.parser.readfp(filepointer) dataName = self.parser.get("File Info","DataName") if dataName[0] == '"': dataName = dataName[1:] if dataName[-1] == '"': dataName = dataName[:-1] directoryName, self.lutFileName = self.getLUTPath(self.channel) newLut = os.path.join(directoryName, self.lutFileName) newLut = os.path.join(os.path.dirname(self.filename), newLut) if not os.path.exists(newLut): self.path = "%s.files"%self.filename print self.lutFileName else: self.path = os.path.join(os.path.dirname(filename), directoryName) # when lutFileName is e.g. 
bro28_par3_07-04-18_LUT1.lut, we take the # bro28_par3_07-04-18 and ignore the LUT1.lut, and use that as the basis of the filenames # for the tiff files self.fileNameBase = "_".join(self.lutFileName.split("_")[:-1]) #self.fileNameBase = dataName self.reader = None self.originalScalarRange = (0, 4095) self.scalarRange = 0, 2 ** self.bitdepth - 1 self.dimensions = (0,0,0) self.voxelsize = (1,1,1) self.spacing = None self.emission = 0 self.excitation = 0 self.color = None self.shift = None self.noZ = 0 self.reverseSlices = True if channel >= 0: self.ctf = self.readLUT() self.setPath(filename) # nm = nanometer, um = micrometer, mm = millimeter self.unit_coeffs = {"nm":1e-9, "um":1e-6, "mm":0.001,"ms":0.001} self.shortname = None def setBitDepth(self, bitdepth): """ Set the bit depth of images in this dataunit """ self.bitdepth = bitdepth def setReverseSlices(self, reverseFlag): """ Set a flag indicating whether the slices should be returned in reverse order """ self.reverseSlices = reverseFlag def setEmissionWavelength(self, emission): """ Set the emission wavelength """ self.emission = emission def setExcitationWavelength(self, excitation): """ Set the emission wavelength """ self.excitation = excitation def setTimepoints(self, timepoints): """ Set the number of timepoints in the dataset """ self.timepoints = timepoints def setDimensions(self, dimensions): """ Set the dimensions of the data read by this reader """ self.dimensions = dimensions def setVoxelSize(self, voxelSize): """ Set the voxel size of a single voxel in this dataset """ self.voxelsize = voxelSize def getDataSetCount(self): """ Returns the number of individual DataSets (=time points) managed by this DataSource """ if not self.timepoints: return 1 return self.timepoints def getEmissionWavelength(self): """ Returns the emission wavelength used to image this channel managed by this DataSource """ return self.emission def getExcitationWavelength(self): """ Returns the excitation wavelength used to image 
the channel managed by this DataSource """ return self.excitation def getFileName(self): """ Return the file name """ return self.filename def getDataSet(self, i, raw = 0): """ Returns the image data for timepoint i """ self.setCurrentTimepoint(i) data = self.getTimepoint(i) if raw: return data if not self.originalScalarRange: self.originalScalarRange = 0, (2 ** self.getBitDepth()) - 1 data = self.getResampledData(data, i) data = self.getIntensityScaledData(data) return data def getTimepoint(self, timepointIndex): """ Return the timepointIndexth timepoint """ self.timepoint = timepointIndex path = self.path[:] if not self.reader: self.reader = vtkbxd.vtkExtTIFFReader() self.reader.AddObserver("ProgressEvent", lib.messenger.send) lib.messenger.connect(self.reader, 'ProgressEvent', self.updateProgress) xDimension, yDimension, zDimension = self.dimensions self.reader.SetDataExtent(0, xDimension - 1, 0, yDimension - 1, 0, zDimension - 1) spacing = self.getSpacing() if spacing[2] == 0: spacing[2] = 1.0 self.reader.SetDataSpacing(*spacing) zpat = "" tpat = "" cpat = os.path.sep + "%s_C%.3d" % (self.fileNameBase, self.channel) path += cpat if self.dimensions[2] > 1: zpat = "Z%.3d" if self.timepoints > 1: tpat = "T%.3d"%(timepointIndex+1) pat = path + zpat + tpat + ".tif" self.reader.SetFilePattern(self.convertFileName(pat)) if self.reverseSlices and 0: #print "offset=",self.dimensions[2] self.reader.SetFileNameSliceOffset(self.dimensions[2]) self.reader.SetFileNameSliceSpacing(-1) else: self.reader.SetFileNameSliceOffset(1) self.reader.UpdateInformation() return self.reader.GetOutput() def internalGetDimensions(self): """ get the dimensions for this dataset, used by the getDimensions() """ return self.dimensions def readLUT(self): """ Read the LUT for this dataset """ lutFile = os.path.join(self.path, self.lutFileName) fileh = codecs.open(self.convertFileName(lutFile), "r", "utf-16") while 1: line = fileh.readline() if "ColorLUTData" in line: break fileh.close() fileh = 
open(self.convertFileName(lutFile), "rb") fileh.seek(-4 * 65536, 2) data = fileh.read() format = "i" * 65536 values = struct.unpack(format, data) ctf = vtk.vtkColorTransferFunction() vals = [( ((x >> 16) & 0xff), ((x >> 8) & 0xff), (x & 0xff)) for x in values] i = 0 coeff = 16.0 if self.explicitScale == 1: minval, maxval = self.originalScalarRange if self.intensityShift: maxval += self.intensityShift scale = self.intensityScale if not scale: scale = 255.0 / maxval maxval *= scale self.scalarRange = (0, maxval) self.bitdepth = int(math.log(maxval + 1, 2)) self.explicitScale = 2 else: minval, maxval = self.scalarRange coeff = 65535.0 / maxval red0, green0, blue0 = -1, -1, -1 for i in range(0, maxval + 1): red, green, blue = vals[int(i * coeff)] if i == maxval or red != red0 or green != green0 or blue != blue0: ctf.AddRGBPoint(i, red / 255.0, green / 255.0, blue / 255.0) red0, green0, blue0 = red, green, blue return ctf def getSpacing(self): """ Returns the spacing of the datasets this dataunit contains """ if not self.spacing: aDimension, bDimension, cDimension = self.getVoxelSize() if cDimension == 0: cDimension = aDimension self.spacing = [1, bDimension / aDimension, cDimension / aDimension] return self.spacing def getVoxelSize(self): """ Returns the voxel size of the datasets this dataunit contains """ if not self.voxelsize: xDimension, yDimension, zDimension, timepoint, channel, \ voxelXDimension, voxelYDimension, voxelZDimension = self.getAllDimensions(self.parser) self.voxelsize = (voxelXDimension, voxelYDimension, voxelZDimension) #print "Got voxel size=",self.voxelsize return self.voxelsize def getAllDimensions(self, parser): """ Read the number of timepoints, channels and XYZ from the OIF file """ timepoints = 0 channels = 0 xDimension = 0 yDimension = 0 zDimension = 1 timeStep = 1 for i in range(0, 7): sect = "Axis %d Parameters Common" % i key = "AxisCode" data = parser.get(sect, key) # If Axis i is the time axis n = int(parser.get(sect, "MaxSize")) unit = 
parser.get(sect, "UnitName") unit = unit.replace('"', "") startPosition = parser.get(sect, "StartPosition") endPosition = parser.get(sect, "EndPosition") startPosition = startPosition.replace('"', '') endPosition = endPosition.replace('"', '') startpos = float(startPosition) endpos = float(endPosition) if endpos < startpos: self.reverseSlices = 1 diff = abs(endpos - startpos) if unit in self.unit_coeffs: coeff = self.unit_coeffs[unit] diff *= coeff if data == '"T"': if n == 0: timepoints = 1 timeStep = 0.0 else: timepoints = n timeStep = diff/n elif data == '"C"': channels = n elif data == '"X"': xDimension = n voxelXDimension = diff elif data == '"Y"': yDimension = n voxelYDimension = diff elif data == '"Z"': zDimension = n voxelZDimension = diff if zDimension == 0: zDimension = 1 self.noZ = 1 voxelXDimension /= float(xDimension) voxelYDimension /= float(yDimension) if zDimension > 1: voxelZDimension /= float(zDimension - 1) self.originalDimensions = (xDimension, yDimension, zDimension) return xDimension, yDimension, zDimension, timepoints, channels, \ voxelXDimension, voxelYDimension, voxelZDimension, timeStep def getLUTPath(self, channel): """ Read the path and filename for the LUT file of given channel which can also be used for the paths of the TIFF files """ path = self.parser.get("ProfileSaveInfo", "LutFileName%d"%(channel-1)) lutPath, lutFileName = path.split("\\") if lutPath[0]=='"': lutPath = lutPath[1:] if lutFileName[0]=='"': lutFileName = lutFileName[1:] if lutFileName[-1]=='"': lutFileName = lutFileName[:-1] return lutPath, lutFileName def getDyes(self, parser, numberOfChannels): """ Read the dye names for numberOfChannels channels """ names = [] exs = [] ems = [] for i in range(1, numberOfChannels + 1): sect = "Channel %d Parameters" % i data = parser.get(sect, "DyeName") data = data.replace('"', "") emission = int(parser.get(sect, "EmissionWavelength")) excitation = int(parser.get(sect, "ExcitationWavelength")) names.append(data) 
exs.append(excitation) ems.append(emission) return names, (exs, ems) def loadFromFile(self, filename): """ Loads the specified .oif-file and imports data from it. Parameters: filename The .oif-file to be loaded """ self.filename = filename self.path = os.path.dirname(filename) try: fileh = open(self.convertFileName(filename)) fileh.close() except IOError, ex: Logging.error("Failed to open Olympus OIF File", "Failed to open file %s for reading: %s" % (filename, str(ex))) filepointer = codecs.open(self.convertFileName(filename), "r", "utf-16") self.parser.readfp(filepointer) xDimension, yDimension, zDimension, timepoints, \ channels, voxelXDimension, voxelYDimension, voxelZDimension, timeStep = self.getAllDimensions(self.parser) voxsiz = (voxelXDimension, voxelYDimension, voxelZDimension) names, (excitations, emissions) = self.getDyes(self.parser, channels) self.bitdepth = int(self.parser.get("Reference Image Parameter", "ValidBitCounts")) dataunits = [] for channel in range(1, channels + 1): name = names[channel - 1] excitation = excitations[channel - 1] emission = emissions[channel - 1] datasource = OlympusDataSource(filename, channel, name = name) datasource.setDimensions((xDimension, yDimension, zDimension)) datasource.setTimepoints(timepoints) stamps = [] for i in range(0, timepoints): stamps.append(i*timeStep) datasource.setTimeStamps(stamps) datasource.setAbsoluteTimeStamps(stamps) datasource.setVoxelSize(voxsiz) datasource.setReverseSlices(self.reverseSlices) datasource.setEmissionWavelength(emission) datasource.setExcitationWavelength(excitation) datasource.setBitDepth(self.bitdepth) datasource.originalDimensions = (xDimension, yDimension, zDimension) dataunit = DataUnit() dataunit.setDataSource(datasource) dataunits.append(dataunit) return dataunits
def loadFromFile(self, filename):
    """
    Loads the specified .bxc-file and imports data from it.

    Returns a list of DataUnits of the type stored in the loaded
    .bxc-file, or None if something goes wrong.  The dataunit is
    returned in a list with one item for interoperability with the
    LSM data source.  If the data has three scalar components, one
    extra DataUnit per RGB component is appended to the list.
    """
    if not self.baseFilename:
        self.baseFilename = filename
    self.shortname = os.path.basename(filename)
    dataUnitFormat = self.loadBxdFile(filename)
    Logging.info("format of unit = ", dataUnitFormat, kw = "datasource")
    if (not dataUnitFormat) or (not self.parser):
        Logging.info("No dataUnitFormat or parser: %s and %s"%(dataUnitFormat, self.parser), kw = "datasource")
        return None
    # Then, the number of datasets/timepoints that belong to this dataset
    # series (the option key's case differs between file versions)
    try:
        count = self.parser.get("ImageData", "numberOfFiles")
    except ConfigParser.NoOptionError:
        count = self.parser.get("ImageData", "numberoffiles")
    Logging.info("format = ", dataUnitFormat, "count = ", count, kw = "datasource")
    # Then read the .vti-filenames and store them in the dataSets-list:
    filedir = os.path.dirname(filename)
    hasPolydata = self.parser.has_section("PolyData")
    for i in range(int(count)):
        currentFile = "file_%d"%i
        # NOTE: rebinds the parameter name to the per-timepoint file name
        filename = self.parser.get("ImageData", currentFile)
        if hasPolydata:
            print "GOT polydata"
            polyFileName = self.parser.get("PolyData", currentFile)
            self.polyDataFiles.append(polyFileName)
        reader = vtk.vtkXMLImageDataReader()
        filepath = os.path.join(filedir, filename)
        # Validate each referenced file before accepting the dataset
        if not reader.CanReadFile(filepath):
            Logging.error("Cannot read file",
                "Cannot read source XML Image Data File %s"%filename)
            return
        self.dataSets.append(filename)
    # If everything went well, we create a new DataUnit-instance of the
    # correct subclass, so that the DataUnit-instance can take over and
    # resume data processing. First, we return the DataUnit to the caller,
    # so it can set a reference to it:
    dataunit = DataUnit()
    settings = DataUnitSettings()
    settings.setType("")
    settings = settings.readFrom(self.parser)
    # NOTE(review): eval() of a stored settings string -- only safe for
    # trusted .bxc files; verify the input is never untrusted.
    self.originalDimensions = eval(settings.get("Dimensions"))
    self.settings = settings
    dataunit.setDataSource(self)
    dataunit.setSettings(settings)
    data = dataunit.getTimepoint(0)
    dataunits = [dataunit]
    # For RGB data, expose each color component as its own dataunit
    if data.GetNumberOfScalarComponents() == 3:
        for i in range(0, 3) :
            dataSource = RGBComponentDataSource(self, i)
            dataunit = DataUnit()
            dataunit.setDataSource(dataSource)
            settings = DataUnitSettings()
            settings = settings.readFrom(self.parser)
            dataunit.setSettings(settings)
            dataunits.append(dataunit)
    return dataunits
class LeicaDataSource(DataSource):
    """
    Leica format datasource.

    Wraps a LeicaExperiment reader; one instance represents a single
    channel of a single experiment inside a Leica .lei/.txt dataset.
    """
    def __init__(self, filename = "", experiment = "", channel = -1):
        """
        Constructor.

        @param filename   Path to the Leica file
        @param experiment Experiment name inside the file (empty for a
                          bare instance used only to call loadFromFile())
        @param channel    Channel index, or -1 when not yet known
        """
        DataSource.__init__(self)
        self.filename = filename
        self.reader = LeicaExperiment(filename, progressCallback = self.updateProgress)
        self.shortname = os.path.basename(filename)
        self.experiment = experiment
        if experiment:
            self.originalDimensions = self.reader.GetDimensions(self.experiment)
            duration = self.reader.GetDuration(self.experiment)
            numT = self.reader.GetNumberOfTimepoints(self.experiment)
            print "Number of Timepoints = ",numT
            print "Duration=",duration
            # NOTE(review): raises ZeroDivisionError if numT == 0 --
            # presumably the reader always reports at least one timepoint;
            # confirm.
            step = duration/float(numT)
            # Evenly spaced time stamps over the experiment duration
            stamps = []
            for t in range(0, numT):
                stamps.append(step*t)
            self.setTimeStamps(stamps)
            self.setAbsoluteTimeStamps(stamps)
        self.channel = channel
        self.dimensions = None
        self.voxelsize = None
        self.spacing = None
        self.color = None
        self.ctf = None
        self.setPath(filename)
        # Cached by getDataSetCount()
        self.datasetCount = None

    def getDataSetCount(self):
        """
        Returns the number of individual DataSets (=time points)
        managed by this DataSource
        """
        if not self.datasetCount:
            self.datasetCount = self.reader.GetNumberOfTimepoints(self.experiment)
        return self.datasetCount

    def getFileName(self):
        """
        Return the file name
        """
        return self.filename

    def getDataSet(self, i, raw = 0):
        """
        Returns the DataSet at the specified index.

        Parameters:
        i   The index (timepoint)
        """
        self.setCurrentTimepoint(i)
        self.timepoint = i
        data = self.reader.GetTimepoint(self.experiment, self.channel, i)
        return self.getResampledData(data, i)

    def internalGetDimensions(self):
        """
        Returns the (x,y,z) dimensions of the datasets this
        dataunit contains
        """
        return self.reader.GetDimensions(self.experiment)

    def getSpacing(self):
        """
        Returns the spacing of the datasets this dataunit contains,
        normalized so that the x spacing is 1
        """
        if not self.spacing:
            a, b, c = self.getVoxelSize()
            self.spacing = [1, b / a, c / a]
        return self.spacing

    def getVoxelSize(self):
        """
        Returns the voxel size of the datasets this dataunit contains
        """
        if not self.voxelsize:
            self.voxelsize = self.reader.GetVoxelSize(self.experiment)
        return self.voxelsize

    def getScalarRange(self):
        """
        Return the scalar range of the data, derived from the bit depth
        """
        self.getBitDepth()
        self.scalarRange = (0, 2 ** self.bitdepth - 1)
        return self.scalarRange

    def loadFromFile(self, filename):
        """
        Loads the specified .txt-file and imports data from it.

        Parameters:
        filename    The .txt-file to be loaded (a .lei file is mapped
                    to its sibling .txt/.TXT/.Txt file)
        @return List of (experiment, DataUnit) tuples, one per channel
                of each existing experiment
        """
        exts = filename.split(".")
        if exts[-1].lower()=="lei":
            # The readable metadata lives in a sibling text file whose
            # extension case varies
            filename = ".".join(exts[:-1])
            if os.path.exists(filename+".txt"):
                filename+=".txt"
            elif os.path.exists(filename+".TXT"):
                filename+=".TXT"
            elif os.path.exists(filename+".Txt"):
                filename+=".Txt"
        self.filename = filename
        self.path = os.path.dirname(filename)
        self.reader.setFileName(filename)
        # Probe the file for readability first so we can report a clear error
        try:
            f = open(filename)
            f.close()
        except IOError, ex:
            Logging.error("Failed to open Leica File",
                "Failed to open file %s for reading: %s" % (filename, str(ex)))
        self.reader.Read()
        dataunits = []
        experiments = self.reader.GetExperiments()
        for experiment in experiments:
            # Skip experiments the reader flagged as missing
            if experiment in self.reader.nonExistent:
                continue
            channelNum = self.reader.GetNumberOfChannels(experiment)
            #print "There are %d channels in %s"%(channelNum,filename)
            for i in range(channelNum):
                # We create a datasource with specific channel number that
                # we can associate with the dataunit
                datasource = LeicaDataSource(filename, experiment, i)
                dataunit = DataUnit()
                dataunit.setDataSource(datasource)
                dataunits.append((experiment, dataunit))
        return dataunits
self.reader.SetFileName(self.convertFileName(filename)) #self.reader.Update() self.reader.UpdateInformation() dataunits = [] channelNum = self.reader.GetNumberOfChannels() self.timepointAmnt = channelNum Logging.info("There are %d channels" % channelNum, kw="lsmreader") for i in range(channelNum): # We create a datasource with specific channel number that # we can associate with the dataunit datasource = LsmDataSource(filename, i) datasource.setPath(filename) dataunit = DataUnit() dataunit.setDataSource(datasource) dataunits.append(dataunit) return dataunits def getColorTransferFunction(self): """ Returns the ctf of the dataset series which this datasource operates on """ if not self.ctf: Logging.info("Using ctf based on LSM Color", kw="lsmreader") ctf = vtk.vtkColorTransferFunction() r = self.reader.GetChannelColorComponent(self.channelNum, 0) g = self.reader.GetChannelColorComponent(self.channelNum, 1)