def _verifyEdf(self, subdir, multipage, outdata, outlabels):
    """Verify EDF output files against the expected data.

    :param str subdir: directory containing the written EDF file(s)
    :param bool multipage: True when all images live in one multi-image
        EDF file; False when each image has its own file
    :param dict outdata: {group: {label: expected ndarray}}
    :param dict outlabels: maps group/label to the EDF 'Title' header
        ('title') and to the per-image file suffix ('filename')
    """
    from PyMca5.PyMcaIO import EdfFile
    ext = '.edf'
    if multipage:
        # single file: read every image once, indexed by its Title header
        filename = os.path.join(subdir, 'sample_dataset' + ext)
        f = EdfFile.EdfFile(filename)
        edfdata = {
            f.GetHeader(i)['Title']: f.GetData(i)
            for i in range(f.GetNumImages())
        }
    for group, datadict in outdata.items():
        if group not in outlabels['title']:
            # group was not saved in EDF format
            continue
        if not multipage:
            # one file per label: rebuild the Title -> data map per group
            edfdata = {}
            for label, data in datadict.items():
                suffix = outlabels['filename'][group].get(label, None)
                if not suffix:
                    continue
                filename = os.path.join(
                    subdir, 'sample_dataset_{}{}'.format(suffix, ext))
                f = EdfFile.EdfFile(filename)
                edfdata[f.GetHeader(0)['Title']] = f.GetData(0)
        # compare every labeled array with what was read back
        for label, data in datadict.items():
            edflabel = outlabels['title'][group].get(label, None)
            if edflabel:
                numpy.testing.assert_array_equal(
                    data, edfdata[edflabel],
                    '{}: {}'.format(group, label))
def getPixmap():
    """
    Open an image file and return the filename and the data.
    Return ``None, None`` in case of failure.
    """
    typeList = [
        'Picture Files (*jpg *jpeg *tif *tiff *png)',
        'EDF Files (*edf)',
        'EDF Files (*ccd)',
        'ADSC Files (*img)',
        'EDF Files (*)'
    ]
    selection, usedFilter = PyMcaFileDialogs.getFileList(
        parent=None,
        filetypelist=typeList,
        message="Please select one object data file",
        mode="OPEN",
        getfilter=True)
    if not selection:
        return None, None
    path = selection[0]
    family = usedFilter.split()[0]
    if family == "Picture":
        image = qt.QImage(path)
        if not image.isNull():
            return os.path.basename(path), convertQImageToArray(image)
        # unreadable picture: warn the user and bail out
        box = qt.QMessageBox()
        box.setIcon(qt.QMessageBox.Critical)
        box.setText("Cannot read file %s as an image" % path)
        box.exec()
        return None, None
    if family in ("EDF", "ADSC"):
        reader = EdfFile.EdfFile(path)
        return os.path.basename(path), reader.GetData(0)
    return None, None
def getMesh():
    """
    Read an image data file (EDF, ADSC), return the data and image name.
    This is then used to display the image as a height map.
    Returns *None, None* if the file dialog is cancelled or loading fails.

    :return: legend, data
    """
    fileTypeList = [
        'EDF Files (*edf)',
        'EDF Files (*ccd)',
        'ADSC Files (*img)',
        'All Files (*)'
    ]
    # save the native-dialog flag (* 1 forces a copy, not an alias) and
    # temporarily force the Qt dialog
    old = PyMcaFileDialogs.PyMcaDirs.nativeFileDialogs * 1
    PyMcaFileDialogs.PyMcaDirs.nativeFileDialogs = False
    fileList, filterUsed = PyMcaFileDialogs.getFileList(
        parent=None,
        filetypelist=fileTypeList,
        message="Please select one object data file",
        mode="OPEN",
        getfilter=True)
    PyMcaFileDialogs.PyMcaDirs.nativeFileDialogs = old
    if not fileList:
        return None, None
    filename = fileList[0]
    # first image of the EDF file, converted to float32
    edf = EdfFile.EdfFile(filename, access='rb')
    data = edf.GetData(0).astype(numpy.float32)
    return os.path.basename(filename), data
def save2DArrayListAsEDF(datalist, filename, labels=None, dtype=None):
    """Save a list of 2D arrays to a (multi-image) EDF file.

    :param datalist: a 2D array, or a list of 2D arrays
    :param filename: output EDF file name; an existing file is removed first
    :param labels: optional image titles, one per array; defaults to
        "Array_0", "Array_1", ...
    :param dtype: optional numpy dtype to cast each array to before writing
    :raises ValueError: if the number of labels does not match the number
        of arrays
    """
    if not isinstance(datalist, list):
        datalist = [datalist]
    ndata = len(datalist)
    # remove a pre-existing file (best effort): EdfFile appends otherwise
    if os.path.exists(filename):
        try:
            os.remove(filename)
        except OSError:
            pass
    if labels is None:
        labels = ["Array_%d" % i for i in range(ndata)]
    if len(labels) != ndata:
        raise ValueError("Incorrect number of labels")
    edfout = EdfFile.EdfFile(filename, access="ab")
    for label, data in zip(labels, datalist):
        if dtype is not None:
            data = data.astype(dtype)
        edfout.WriteImage({'Title': label}, data, Append=1)
    del edfout  # force file close
def processList(self):
    """Convert the MCA spectra of ``self._filelist`` to EDF images.

    Accumulates ``self.filestep`` spectra per image; each completed image
    is written to ``self.outputdir`` with calibration info in its header.
    Spectra whose length differs from the first one are skipped.
    """
    self.__ncols = None
    self.__nrows = self.filestep
    counter = 0
    ffile = SpecFileLayer.SpecFileLayer()
    for fitfile in self._filelist:
        self.onNewFile(fitfile, self._filelist)
        ffile.SetSource(fitfile)
        fileinfo = ffile.GetSourceInfo()
        for scankey in fileinfo['KeyList']:
            scan, order = scankey.split(".")
            info, data = ffile.LoadSource(scankey)
            scan_obj = ffile.Source.select(scankey)
            if info['NbMca'] > 0:
                for i in range(info['NbMca']):
                    # point/detector indices are 1-based
                    point = i // info['NbMcaDet'] + 1
                    mca = (i % info['NbMcaDet']) + 1
                    key = "%s.%s.%05d.%d" % (scan, order, point, mca)
                    if i == 0:
                        # keep the header info of the first MCA of the scan
                        mcainfo, mcadata = ffile.LoadSource(key)
                    mcadata = scan_obj.mca(i + 1)
                    # numpy.float was removed in numpy >= 1.24; float64 is
                    # the equivalent dtype
                    y0 = numpy.array(mcadata, numpy.float64)
                    if counter == 0:
                        key0 = "%s key %s" % (os.path.basename(fitfile), key)
                        self.__ncols = len(y0)
                        image = numpy.zeros((self.__nrows, self.__ncols),
                                            numpy.float64)
                    if self.__ncols != len(y0):
                        print("spectrum has different number of columns")
                        print("skipping it")
                    else:
                        image[counter, :] = y0[:]
                        if (counter + 1) == self.filestep:
                            # image is complete: build the title and header
                            if self.filestep > 1:
                                key1 = "%s key %s" % (
                                    os.path.basename(fitfile), key)
                                title = "%s to %s" % (key0, key1)
                            else:
                                title = key0
                            ddict = {}
                            if 'Channel0' in mcainfo:
                                ddict['MCA start ch'] = \
                                    int(mcainfo['Channel0'])
                            if 'McaCalib' in mcainfo:
                                ddict['MCA a'] = mcainfo['McaCalib'][0]
                                ddict['MCA b'] = mcainfo['McaCalib'][1]
                                ddict['MCA c'] = mcainfo['McaCalib'][2]
                            ddict['Title'] = title
                            edfname = os.path.join(
                                self.outputdir,
                                title.replace(" ", "_") + ".edf")
                            edfout = EdfFile.EdfFile(edfname)
                            edfout.WriteImage(ddict, image, Append=0)
                            counter = 0
                        else:
                            counter += 1
    self.onEnd()
def SetSource(self, source_name=None, source_obj=None):
    """
    Sets a new source for data retrieving, an edf file.
    If the file exists, self.Source will be the EdfFile object
    associated to this file.

    Parameters:
    source_name: name of the edf file (a string, or a list of file names)
    source_obj: optional already-opened source object used as-is

    Returns 1 on success (or when the source is unchanged), 0 otherwise.
    """
    if source_name == self.SourceName:
        # same source as before: nothing to do
        return 1
    if (type(source_name) != type([])):
        source_name = [source_name]
    # NOTE(review): source_name was wrapped in a list just above, so this
    # test can no longer be False even when None was passed in; a None
    # argument ends up as [None] and falls into the bare-except below.
    if (source_name is not None):
        if source_obj is not None:
            self.Source = source_obj
        else:
            if (type(source_name) == type([])):
                if DEBUG:
                    print("List of files")
                # open every file; unreadable entries become None
                # placeholders so indices still line up
                self.Source = []
                for name in source_name:
                    try:
                        self.Source.append(
                            EdfFile.EdfFile(name, fastedf=self.fastedf))
                    except:
                        #print("EdfFileLayer.SetSource: Error trying to read EDF file %s" % name)
                        self.Source.append(None)
            else:
                try:
                    self.Source = EdfFile.EdfFile(source_name,
                                                  fastedf=self.fastedf)
                except:
                    #print("EdfFileLayer.SetSource: Error trying to read EDF file")
                    self.Source = None
    else:
        self.Source = None
    self.SourceInfo = None
    if self.Source is None:
        self.SourceName = None
        return 0
    else:
        # store all names joined with '|' as the canonical source name
        self.SourceName = ""
        for name in source_name:
            if self.SourceName != "":
                self.SourceName += "|"
            self.SourceName += name
        return 1
def load_frame(self, image_foldername, scan_no, frame_no, gz_compressed=True,
               normalize=False, monitor_name=None, monitor_names=None,
               remove_rows=None, remove_cols=None):
    """Load one detector frame of a scan from its EDF file.

    The file location is derived from the scan's ``#UCCD`` header entry.

    :param image_foldername: root folder containing the image sub-folders
    :param scan_no: scan number in the spec file
    :param frame_no: frame index within the scan
    :param gz_compressed: when True, look for ``.edf.gz`` instead of ``.edf``
    :param normalize: divide the image by the monitor counter value(s)
    :param monitor_name: single monitor counter name (used if *normalize*)
    :param monitor_names: iterable of monitor counter names (used if
        *normalize*; applied in addition to *monitor_name*)
    :param remove_rows: row indices to delete from the image, or None
    :param remove_cols: column indices to delete from the image, or None
    :return: the frame as a float ndarray
    :raises IndexError: if *frame_no* is out of range for the scan
    """
    # header lines are keyed by their first word (e.g. '#UCCD')
    header = dict()
    for line in self.spec_file[scan_no].header.split('\n'):
        key = line.split(' ')[0]
        header[key] = line[len(key):]
    frames = self.get_no_frames(scan_no)
    # '#UCCD' holds a template path such as .../prefix_#n_#p_#r.edf
    uccd = header['#UCCD']
    first_frame = int(uccd.split('#r')[-1].split('.')[0])
    img_folder = uccd.split('/')[-2] + '/'
    filename_template = uccd.split('/')[-1].split('#r')[0] + '#r.' + \
        uccd.split('/')[-1].split('.')[-1]
    if gz_compressed:
        filename_template = filename_template.replace('.edf', '.edf.gz')
    if frame_no >= frames:
        raise IndexError("Frame number does not exist.")
    img_filename = os.path.join(
        image_foldername, img_folder,
        filename_template.replace('#n', str(scan_no).zfill(3))
                         .replace('#p', str(frame_no).zfill(3))
                         .replace('#r', str(first_frame + frame_no).zfill(3)))
    edf = EdfFile.EdfFile(img_filename, 'r')
    the_img = np.array(edf.GetData(0), dtype='float')
    if remove_rows is not None:
        the_img = np.delete(the_img, remove_rows, axis=0)
    if remove_cols is not None:
        the_img = np.delete(the_img, remove_cols, axis=1)
    if normalize:
        if monitor_name:
            mon_count = float(
                getattr(self.spec_file[scan_no], monitor_name)[frame_no])
            the_img /= mon_count
        if monitor_names:
            for mon_name in monitor_names:
                mon_count = float(
                    getattr(self.spec_file[scan_no], mon_name)[frame_no])
                the_img /= mon_count
    # the original ended with two identical returns; the second was dead code
    return the_img
def openEdf(filename, read=0, write=0, force=0):
    """Open *filename* as an EdfFile, after optional sanity checks.

    ``read``/``write`` trigger the corresponding pre-open check;
    ``force`` is forwarded to the write check.
    Raises XiaEdfError when the file cannot be opened.
    """
    if read:
        checkEdfForRead(filename)
    if write:
        checkEdfForWrite(filename, force)
    try:
        return EdfFile.EdfFile(filename)
    except:
        raise XiaEdfError("Cannot open EDF file <%s>" % filename)
def __init__(self, filename):
    """Read motors, counters and the image from an EDF file.

    :param filename: path to the EDF file; the first image header is
        expected to contain 'motor_mne'/'motor_pos' and
        'counter_mne'/'counter_pos' entries
    """
    edf = EdfFile.EdfFile(filename, 'r')
    self.header = edf.GetHeader(0)
    # pair each motor mnemonic with its position
    # (zip replaces the original Python-2-only xrange index loop)
    self.motors = dict()
    for mne, pos in zip(self.header['motor_mne'].split(),
                        self.header['motor_pos'].split()):
        self.motors[mne] = float(pos)
    # same for the counters
    self.counters = dict()
    for mne, pos in zip(self.header['counter_mne'].split(),
                        self.header['counter_pos'].split()):
        self.counters[mne] = float(pos)
    self.img = np.array(edf.GetData(0))
def _mergeEdf(self, parts, outfilename):
    """Merge several partial EDF files into one multi-image EDF file.

    :param parts: file names of the partial EDF files; all are expected to
        contain the same set of image titles
    :param outfilename: output file name; an existing file is removed first
    """
    for i, edfname in enumerate(parts):
        edf = EdfFile.EdfFile(edfname, access='rb', fastedf=0)
        nImages = edf.GetNumImages()
        if i == 0:
            # first part: initialize one buffer and one header per image
            images = [edf.GetData(j).copy() for j in range(nImages)]
            headers = [{
                'Title': edf.GetHeader(j)['Title']
            } for j in range(nImages)]
        else:
            # later parts: match images by Title (order may differ),
            # then merge each one into the accumulated buffer
            headersi = [{
                'Title': edf.GetHeader(j)['Title']
            } for j in range(nImages)]
            for header, img in zip(headers, images):
                k = headersi.index(header)
                self._fillPartial(img, edf.GetData(k))
        del edf
    if os.path.exists(outfilename):
        _logger.debug("Output file already exists, trying to delete it")
        os.remove(outfilename)
    # write all merged images, appending after the first one
    edfout = EdfFile.EdfFile(outfilename, access="ab")
    for i, (img, header) in enumerate(zip(images, headers)):
        edfout.WriteImage(header, img, Append=i > 0)
    del edfout
def _parseEdfResults(self, filenames):
    """
    :param list filenames:
    :returns tuple: list(nparams), ndarray(nparams, nrows, ncolumns)
    """
    from PyMca5.PyMcaIO import EdfFile
    labels = []
    data = []
    for filename in filenames:
        # REMARK: each file can contain multiple images (roifit)
        edf = EdfFile.EdfFile(filename)
        for idx in range(edf.GetNumImages()):
            data.append(edf.GetData(idx))
            labels.append(edf.GetHeader(idx)['Title'])
    return labels, numpy.asarray(data)
def __init__(self, filename):
    """Read motors, counters and the (rotated) image from an EDF file.

    :param filename: path to the EDF file; the first image header is
        expected to contain 'motor_mne'/'motor_pos' and
        'counter_mne'/'counter_pos' entries
    """
    edf = EdfFile.EdfFile(filename, 'r')
    self.header = edf.GetHeader(0)
    # pair each motor mnemonic with its position
    # (zip replaces the original Python-2-only xrange index loop)
    self.motors = dict()
    for mne, pos in zip(self.header['motor_mne'].split(),
                        self.header['motor_pos'].split()):
        self.motors[mne] = float(pos)
    # same for the counters
    self.counters = dict()
    for mne, pos in zip(self.header['counter_mne'].split(),
                        self.header['counter_pos'].split()):
        self.counters[mne] = float(pos)
    self.img = np.array(edf.GetData(0))
    # detector was mounted rotated by 90deg to the right
    self.img = np.rot90(self.img, 3)
def getObject3DInstance(config=None):
    """Prompt for an image (picture or EDF/ADSC) file and wrap it in an
    Object3DPixmap.

    :param config: unused, kept for interface compatibility
    :return: an Object3DPixmap instance, or None when the dialog is
        cancelled or the file cannot be read
    """
    fileTypeList = [
        'Picture Files (*jpg *jpeg *tif *tiff *png)',
        'EDF Files (*edf)',
        'EDF Files (*ccd)',
        'ADSC Files (*img)',
        'EDF Files (*)'
    ]
    fileList, filterUsed = Object3DFileDialogs.getFileList(
        None,
        filetypelist=fileTypeList,
        message="Please select one object data file",
        mode="OPEN",
        getfilter=True)
    if not len(fileList):
        return
    fname = fileList[0]
    if filterUsed.split()[0] == "Picture":
        qimage = qt.QImage(fname)
        if qimage.isNull():
            # BUG FIX: this is a module-level function, so there is no
            # ``self`` to use as the message-box parent (was a NameError)
            msg = qt.QMessageBox()
            msg.setIcon(qt.QMessageBox.Critical)
            msg.setText("Cannot read file %s as an image" % fname)
            msg.exec_()
            return
        object3D = Object3DPixmap(os.path.basename(fname))
        object3D.setQImage(qimage)
        return object3D
    if filterUsed.split()[0] in ["EDF", "ADSC"]:
        edf = EdfFile.EdfFile(fname)
        data = edf.GetData(0)
        # the spslut-based pixmap path was dead code (guarded by ``if True``)
        # and has been removed
        object3D = Object3DPixmap(os.path.basename(fname))
        object3D.setImageData(data)
        return object3D
    return None
def getObject3DInstance(config=None):
    """Prompt for an EDF/ADSC data file and return an Object3DMesh built
    from its first image (the data is used both as value and as height).

    :param config: unused, kept for interface compatibility
    :return: an Object3DMesh instance, or None when the dialog is cancelled
    """
    #for the time being a former configuration
    #for serializing purposes is not implemented
    #I do the import here for the case PyMca is not installed
    #because the modules could be instanstiated without using
    #this method
    try:
        from PyMca5.PyMcaIO import EdfFile
    except ImportError:
        import EdfFile
    fileTypeList = ['EDF Files (*edf)',
                    'EDF Files (*ccd)',
                    'ADSC Files (*img)',
                    'All Files (*)']
    # save the native-dialog flag (* 1 forces a copy) and temporarily
    # force the Qt dialog
    old = PyMcaFileDialogs.PyMcaDirs.nativeFileDialogs * 1
    PyMcaFileDialogs.PyMcaDirs.nativeFileDialogs = False
    fileList, filterUsed = PyMcaFileDialogs.getFileList(
        parent=None,
        filetypelist=fileTypeList,
        message="Please select one object data file",
        mode="OPEN",
        getfilter=True)
    PyMcaFileDialogs.PyMcaDirs.nativeFileDialogs = old
    if not len(fileList):
        return None
    # NOTE(review): fileindex is computed but never used below
    if filterUsed == fileTypeList[0]:
        fileindex = 2
    else:
        fileindex = 1
    #file index is irrelevant in case of an actual 3D stack.
    filename = fileList[0]
    # NOTE(review): legend is also unused; basename is recomputed below
    legend = os.path.basename(filename)
    edf = EdfFile.EdfFile(filename, access='rb')
    data = edf.GetData(0).astype(numpy.float32)
    object3D = Object3DMesh(os.path.basename(filename))
    object3D.setData(data, z=data[:])
    return object3D
l3.setText(" " + ticks[i]) qApp = qt.QApplication.instance() qApp.processEvents() time.sleep(2) msg.close() result = sthread._result del sthread self.raise_() return result if __name__ == "__main__": import os from PyMca5.PyMcaIO import EdfFile app = qt.QApplication([]) app.lastWindowClosed.connect(app.quit) d = NNMADialog() imageList = [] for t in ["mix1.edf", "mix2.edf", "mix3.edf"]: fname = os.path.join(os.path.dirname(__file__), "tests", t) if not os.path.exists(fname): break edf = EdfFile.EdfFile(fname) data = edf.GetData(0) edf = None imageList.append(data) if len(imageList): d.setData(imageList) d.show() app.exec_()
def loadFileList(self, filelist, fileindex=0):
    """Load a list of EDF files into ``self.data`` as a 3D stack.

    :param filelist: EDF file name or list of EDF file names
    :param fileindex: axis along which the files are stacked
        (2, or ``self.__imageStack`` set, selects the image-stack layout)

    The method handles several layouts (already-3D data, multi-image
    files, one-image-per-file) and falls back to HDF5 conversion or to
    spatial subsampling when memory is insufficient.  ``self.info`` is
    filled with the stack metadata; ID24-specific handling normalizes the
    data with I0 images and extracts positioner scales.
    """
    if type(filelist) == type(''):
        filelist = [filelist]
    self.__keyList = []
    self.sourceName = filelist
    self.__indexedStack = True
    self.sourceType = SOURCE_TYPE
    self.info = {}
    self.nbFiles = len(filelist)
    #read first edf file
    #get information
    tempEdf = EdfFileDataSource.EdfFileDataSource(filelist[0])
    keylist = tempEdf.getSourceInfo()['KeyList']
    nImages = len(keylist)
    dataObject = tempEdf.getDataObject(keylist[0])
    self.info.update(dataObject.info)
    if len(dataObject.data.shape) == 3:
        #this is already a stack
        self.data = dataObject.data
        self.__nFiles = 1
        self.__nImagesPerFile = nImages
        shape = self.data.shape
        for i in range(len(shape)):
            key = 'Dim_%d' % (i + 1, )
            self.info[key] = shape[i]
        self.info["SourceType"] = SOURCE_TYPE
        self.info["SourceName"] = filelist[0]
        self.info["Size"] = 1
        self.info["NumberOfFiles"] = 1
        self.info["FileIndex"] = fileindex
        return
    arrRet = dataObject.data
    if self.__dtype is None:
        self.__dtype = arrRet.dtype
    self.onBegin(self.nbFiles)
    singleImageShape = arrRet.shape
    actualImageStack = False
    if (fileindex == 2) or (self.__imageStack):
        self.__imageStack = True
        if len(singleImageShape) == 1:
            #single line
            #be ready for specfile stack?
            self.onEnd()
            raise IOError("Not implemented yet")
            # NOTE(review): everything below this raise is unreachable
            self.data = numpy.zeros(
                (arrRet.shape[0], nImages, self.nbFiles), self.__dtype)
            self.incrProgressBar = 0
            for tempEdfFileName in filelist:
                tempEdf = EdfFile.EdfFile(tempEdfFileName, 'rb')
                for i in range(nImages):
                    pieceOfStack = tempEdf.GetData(i)
                    self.data[:, i, self.incrProgressBar] = pieceOfStack[:]
                self.incrProgressBar += 1
                self.onProgress(self.incrProgressBar)
            self.onEnd()
        else:
            if nImages > 1:
                #this is not the common case
                #should I try to convert it to a standard one
                #using a 3D matrix or keep as 4D matrix?
                if self.nbFiles > 1:
                    # NOTE(review): error message seems to be missing a
                    # "not" ("... not implemented yet")
                    raise IOError(\
                        "Multiple files with multiple images implemented yet")
                self.data = numpy.zeros((arrRet.shape[0],
                                         arrRet.shape[1],
                                         nImages * self.nbFiles),
                                        self.__dtype)
                self.incrProgressBar = 0
                for tempEdfFileName in filelist:
                    tempEdf = EdfFile.EdfFile(tempEdfFileName, 'rb')
                    for i in range(nImages):
                        pieceOfStack = tempEdf.GetData(i)
                        self.data[:,:, nImages*self.incrProgressBar+i] = \
                                        pieceOfStack[:,:]
                    self.incrProgressBar += 1
            else:
                #this is the common case
                try:
                    # calculate needed megabytes
                    # NOTE(review): numpy.float was removed in numpy >= 1.24
                    if self.__dtype == numpy.float:
                        bytefactor = 8
                    else:
                        bytefactor = 4
                    needed_ = self.nbFiles * \
                               arrRet.shape[0] *\
                               arrRet.shape[1] * bytefactor
                    physicalMemory = PhysicalMemory.getPhysicalMemoryOrNone()
                    if physicalMemory is not None:
                        # spare 5% or memory
                        if physicalMemory < (1.05 * needed_):
                            raise MemoryError(
                                "Not enough physical memory available")
                    if self.__imageStack:
                        self.data = numpy.zeros(
                            (self.nbFiles, arrRet.shape[0], arrRet.shape[1]),
                            self.__dtype)
                        self.incrProgressBar = 0
                        for tempEdfFileName in filelist:
                            tempEdf = EdfFile.EdfFile(tempEdfFileName, 'rb')
                            pieceOfStack = tempEdf.GetData(0)
                            self.data[self.incrProgressBar] = pieceOfStack
                            self.incrProgressBar += 1
                            self.onProgress(self.incrProgressBar)
                        actualImageStack = True
                    else:
                        self.data = numpy.zeros(
                            (arrRet.shape[0], arrRet.shape[1], self.nbFiles),
                            self.__dtype)
                        self.incrProgressBar = 0
                        for tempEdfFileName in filelist:
                            tempEdf = EdfFile.EdfFile(tempEdfFileName, 'rb')
                            pieceOfStack = tempEdf.GetData(0)
                            self.data[:, :, self.incrProgressBar] = \
                                pieceOfStack
                            self.incrProgressBar += 1
                            self.onProgress(self.incrProgressBar)
                except (MemoryError, ValueError):
                    # allocation failed: offer HDF5 conversion (GUI only),
                    # otherwise fall back to spatial subsampling
                    hdf5done = False
                    if HDF5 and (('PyMcaQt' in sys.modules) or\
                                 ('PyMca.PyMcaQt' in sys.modules)):
                        from PyMca5 import PyMcaQt as qt
                        from PyMca5 import ArraySave
                        msg = qt.QMessageBox.information(
                            None, "Memory error\n",
                            "Do you want to convert your data to HDF5?\n",
                            qt.QMessageBox.Yes, qt.QMessageBox.No)
                        if msg != qt.QMessageBox.No:
                            hdf5file = qt.QFileDialog.getSaveFileName(
                                None,
                                "Please select output file name",
                                os.path.dirname(filelist[0]),
                                "HDF5 files *.h5")
                            if not len(hdf5file):
                                raise IOError("Invalid output file")
                            hdf5file = qt.safe_str(hdf5file)
                            if not hdf5file.endswith(".h5"):
                                hdf5file += ".h5"
                            hdf, self.data = \
                                ArraySave.getHDF5FileInstanceAndBuffer(
                                    hdf5file,
                                    (self.nbFiles,
                                     arrRet.shape[0],
                                     arrRet.shape[1]))
                            self.incrProgressBar = 0
                            for tempEdfFileName in filelist:
                                tempEdf = EdfFile.EdfFile(tempEdfFileName,
                                                          'rb')
                                pieceOfStack = tempEdf.GetData(0)
                                self.data[self.incrProgressBar, :, :] = \
                                    pieceOfStack[:, :]
                                hdf.flush()
                                self.incrProgressBar += 1
                                self.onProgress(self.incrProgressBar)
                            hdf5done = True
                    if not hdf5done:
                        # ring the bell, then shrink the image until the
                        # subsampled stack fits in memory
                        for i in range(3):
                            print("\7")
                        samplingStep = None
                        i = 2
                        while samplingStep is None:
                            print("**************************************************")
                            # NOTE(review): the "% (i, i)" is applied to the
                            # *return value* of print() (None) — a latent
                            # TypeError under Python 3
                            print(" Memory error!, attempting %dx%d sampling reduction ") % (i, i)
                            print("**************************************************")
                            s1, s2 = arrRet[::i, ::i].shape
                            try:
                                self.data = numpy.zeros(
                                    (s1, s2, self.nbFiles), self.__dtype)
                                samplingStep = i
                            except:
                                i += 1
                        self.incrProgressBar = 0
                        for tempEdfFileName in filelist:
                            tempEdf = EdfFile.EdfFile(tempEdfFileName, 'rb')
                            pieceOfStack = tempEdf.GetData(0)
                            self.data[:, :, self.incrProgressBar] = \
                                pieceOfStack[::samplingStep, ::samplingStep]
                            self.incrProgressBar += 1
                            self.onProgress(self.incrProgressBar)
        self.onEnd()
    else:
        self.__imageStack = False
        if len(singleImageShape) == 1:
            #single line
            #be ready for specfile stack?
            raise IOError("Not implemented yet")
            # NOTE(review): everything below this raise is unreachable
            self.data = numpy.zeros(
                (self.nbFiles, arrRet.shape[0], nImages), self.__dtype)
            self.incrProgressBar = 0
            for tempEdfFileName in filelist:
                tempEdf = EdfFile.EdfFile(tempEdfFileName, 'rb')
                for i in range(nImages):
                    pieceOfStack = tempEdf.GetData(i)
                    self.data[self.incrProgressBar, :, i] = pieceOfStack[:]
                self.incrProgressBar += 1
                self.onProgress(self.incrProgressBar)
            self.onEnd()
        else:
            if nImages > 1:
                #this is not the common case
                #should I try to convert it to a standard one
                #using a 3D matrix or kepp as 4D matrix?
                if self.nbFiles > 1:
                    if (arrRet.shape[0] > 1) and\
                       (arrRet.shape[1] > 1):
                        raise IOError(\
                            "Multiple files with multiple images not implemented yet")
                    elif arrRet.shape[0] == 1:
                        self.data = numpy.zeros(
                            (self.nbFiles, arrRet.shape[0] * nImages,
                             arrRet.shape[1]), self.__dtype)
                        self.incrProgressBar = 0
                        for tempEdfFileName in filelist:
                            tempEdf = EdfFile.EdfFile(tempEdfFileName, 'rb')
                            for i in range(nImages):
                                pieceOfStack = tempEdf.GetData(i)
                                self.data[self.incrProgressBar, i,:] = \
                                    pieceOfStack[:,:]
                            self.incrProgressBar += 1
                            self.onProgress(self.incrProgressBar)
                    elif arrRet.shape[1] == 1:
                        self.data = numpy.zeros(
                            (self.nbFiles, arrRet.shape[1] * nImages,
                             arrRet.shape[0]), self.__dtype)
                        self.incrProgressBar = 0
                        for tempEdfFileName in filelist:
                            tempEdf = EdfFile.EdfFile(tempEdfFileName, 'rb')
                            for i in range(nImages):
                                pieceOfStack = tempEdf.GetData(i)
                                self.data[self.incrProgressBar, i,:] = \
                                    pieceOfStack[:,:]
                            self.incrProgressBar += 1
                            self.onProgress(self.incrProgressBar)
                else:
                    # single file, multiple images: stack them sequentially
                    self.data = numpy.zeros(
                        (nImages * self.nbFiles, arrRet.shape[0],
                         arrRet.shape[1]), self.__dtype)
                    self.incrProgressBar = 0
                    for tempEdfFileName in filelist:
                        tempEdf = EdfFile.EdfFile(tempEdfFileName, 'rb')
                        for i in range(nImages):
                            pieceOfStack = tempEdf.GetData(i)
                            self.data[nImages * self.incrProgressBar + i,
                                      :, :] = pieceOfStack[:, :]
                        self.incrProgressBar += 1
                        self.onProgress(self.incrProgressBar)
                    self.onEnd()
            else:
                # one image per file: allocate the 3D buffer first
                if fileindex == 1:
                    # progressively degrade the dtype until allocation works
                    try:
                        self.data = numpy.zeros(
                            (arrRet.shape[0], self.nbFiles, arrRet.shape[1]),
                            self.__dtype)
                    except:
                        try:
                            self.data = numpy.zeros(
                                (arrRet.shape[0], self.nbFiles,
                                 arrRet.shape[1]), numpy.float32)
                        except:
                            self.data = numpy.zeros(
                                (arrRet.shape[0], self.nbFiles,
                                 arrRet.shape[1]), numpy.int16)
                else:
                    try:
                        # calculate needed megabytes
                        # NOTE(review): numpy.float was removed in
                        # numpy >= 1.24; also bytefactor is computed but
                        # the estimate below hard-codes * 4
                        if self.__dtype == numpy.float:
                            bytefactor = 8
                        else:
                            bytefactor = 4
                        needed_ = self.nbFiles * \
                                   arrRet.shape[0] *\
                                   arrRet.shape[1] * 4
                        physicalMemory = \
                            PhysicalMemory.getPhysicalMemoryOrNone()
                        if physicalMemory is not None:
                            # spare 5% of memory
                            if physicalMemory < (1.05 * needed_):
                                raise MemoryError(
                                    "Not enough physical memory available")
                        self.data = numpy.zeros(
                            (self.nbFiles, arrRet.shape[0], arrRet.shape[1]),
                            self.__dtype)
                    except:
                        try:
                            # retry with float32
                            needed_ = self.nbFiles * \
                                       arrRet.shape[0] *\
                                       arrRet.shape[1] * 4
                            physicalMemory = \
                                PhysicalMemory.getPhysicalMemoryOrNone()
                            if physicalMemory is not None:
                                # spare 5 % of memory
                                if physicalMemory < (1.05 * needed_):
                                    raise MemoryError(
                                        "Not enough physical memory available")
                            self.data = numpy.zeros(
                                (self.nbFiles, arrRet.shape[0],
                                 arrRet.shape[1]), numpy.float32)
                        except (MemoryError, ValueError):
                            text = "Memory Error: Attempt subsampling or convert to HDF5"
                            if HDF5 and (('PyMcaQt' in sys.modules) or\
                                         ('PyMca.PyMcaQt' in sys.modules)):
                                from PyMca5 import PyMcaQt as qt
                                from PyMca5 import ArraySave
                                msg = qt.QMessageBox.information(
                                    None, "Memory error\n",
                                    "Do you want to convert your data to HDF5?\n",
                                    qt.QMessageBox.Yes, qt.QMessageBox.No)
                                if msg == qt.QMessageBox.No:
                                    raise MemoryError(text)
                                hdf5file = qt.QFileDialog.getSaveFileName(
                                    None,
                                    "Please select output file name",
                                    os.path.dirname(filelist[0]),
                                    "HDF5 files *.h5")
                                if not len(hdf5file):
                                    raise IOError("Invalid output file")
                                hdf5file = qt.safe_str(hdf5file)
                                if not hdf5file.endswith(".h5"):
                                    hdf5file += ".h5"
                                hdf, self.data = \
                                    ArraySave.getHDF5FileInstanceAndBuffer(
                                        hdf5file,
                                        (self.nbFiles,
                                         arrRet.shape[0],
                                         arrRet.shape[1]))
                            else:
                                raise MemoryError("Memory Error")
                self.incrProgressBar = 0
                if fileindex == 1:
                    for tempEdfFileName in filelist:
                        tempEdf = EdfFile.EdfFile(tempEdfFileName, 'rb')
                        pieceOfStack = tempEdf.GetData(0)
                        self.data[:, self.incrProgressBar, :] = \
                            pieceOfStack[:, :]
                        self.incrProgressBar += 1
                        self.onProgress(self.incrProgressBar)
                else:
                    # test for ID24 map
                    ID24 = False
                    if "_sample_" in filelist[0]:
                        bckFile = filelist[0].replace("_sample_",
                                                      "_samplebk_")
                        if os.path.exists(bckFile):
                            bckData = EdfFile.EdfFile(bckFile).GetData(0)
                        else:
                            bckData = 0
                        i0StartFile = filelist[0].replace("_sample_",
                                                          "_I0start_")
                        if os.path.exists(i0StartFile):
                            ID24 = True
                            id24idx = 0
                            # NOTE(review): numpy.float removed in
                            # numpy >= 1.24
                            i0Start = EdfFile.EdfFile(
                                i0StartFile,
                                'rb').GetData(0).astype(numpy.float)
                            i0Start -= bckData
                            i0EndFile = filelist[0].replace("_sample_",
                                                            "_I0end_")
                            i0Slope = 0.0
                            if os.path.exists(i0EndFile):
                                i0End = EdfFile.EdfFile(
                                    i0EndFile, 'rb').GetData(0) - bckData
                                i0Slope = (i0End - i0Start) / len(filelist)
                        positionersFile = filelist[0].replace(
                            "_sample_", "_positioners_")
                        if os.path.exists(positionersFile):
                            positionersEdf = EdfFile.EdfFile(
                                positionersFile, 'rb')
                            self.info["positioners"] = {}
                            for i in range(positionersEdf.GetNumImages()):
                                motorName = positionersEdf.GetHeader(
                                    i).get("Title", "Motor_%02d" % i)
                                motorValue = positionersEdf.GetData(i)
                                self.info["positioners"][motorName] = \
                                    motorValue
                    for tempEdfFileName in filelist:
                        tempEdf = EdfFile.EdfFile(tempEdfFileName, 'rb')
                        if ID24:
                            # absorbance from transmitted intensity with a
                            # linearly interpolated I0
                            pieceOfStack = -numpy.log(
                                (tempEdf.GetData(0) - bckData) /
                                (i0Start[0, :] + id24idx * i0Slope))
                            pieceOfStack[
                                numpy.isfinite(pieceOfStack) == False] = 1
                            id24idx += 1
                        else:
                            pieceOfStack = tempEdf.GetData(0)
                        try:
                            self.data[self.incrProgressBar, :, :] = \
                                pieceOfStack[:, :]
                        except:
                            # shape mismatch: assume missing data at the end
                            if pieceOfStack.shape[1] != arrRet.shape[1]:
                                print(" ERROR on file %s" % tempEdfFileName)
                                print(" DIM 1 error Assuming missing data were at the end!!!")
                            if pieceOfStack.shape[0] != arrRet.shape[0]:
                                print(" ERROR on file %s" % tempEdfFileName)
                                print(" DIM 0 error Assuming missing data were at the end!!!")
                            self.data[self.incrProgressBar,\
                                      :pieceOfStack.shape[0],\
                                      :pieceOfStack.shape[1]] = \
                                      pieceOfStack[:,:]
                        self.incrProgressBar += 1
                        self.onProgress(self.incrProgressBar)
                self.onEnd()
    self.__nFiles = self.incrProgressBar
    self.__nImagesPerFile = nImages
    shape = self.data.shape
    for i in range(len(shape)):
        key = 'Dim_%d' % (i + 1, )
        self.info[key] = shape[i]
    if not isinstance(self.data, numpy.ndarray):
        # the data went to an HDF5 buffer instead of a numpy array
        hdf.flush()
        self.info["SourceType"] = "HDF5Stack1D"
        if self.__imageStack:
            self.info["McaIndex"] = 0
            self.info["FileIndex"] = 1
        else:
            self.info["McaIndex"] = 2
            self.info["FileIndex"] = 0
        self.info["SourceName"] = [hdf5file]
        self.info["NumberOfFiles"] = 1
        self.info["Size"] = 1
    elif actualImageStack:
        self.info["SourceType"] = SOURCE_TYPE
        self.info["McaIndex"] = 0
        self.info["FileIndex"] = 1
        self.info["SourceName"] = self.sourceName
        self.info["NumberOfFiles"] = self.__nFiles * 1
        self.info["Size"] = self.__nFiles * self.__nImagesPerFile
    else:
        self.info["SourceType"] = SOURCE_TYPE
        self.info["FileIndex"] = fileindex
        self.info["SourceName"] = self.sourceName
        self.info["NumberOfFiles"] = self.__nFiles * 1
        self.info["Size"] = self.__nFiles * self.__nImagesPerFile
    # try to use positioners to compute the scales (ID24 specific)
    xPositionerName = None
    yPositionerName = None
    if "positioners" in self.info and len(self.info["positioners"]) == 2:
        for k, v in self.info["positioners"].items():
            if isinstance(v, numpy.ndarray) and v.ndim == 2:
                deltaDim1 = v[:, 1:] - v[:, :-1]
                deltaDim0 = v[1:, :] - v[:-1, :]
                if numpy.any(deltaDim1) and not numpy.any(deltaDim0):
                    # positioner varying only along dim1
                    xPositionerName = k
                    # should we check that all delta values are equal?
                    deltaX = numpy.mean(deltaDim1)
                    originX = v[0, 0]
                elif numpy.any(deltaDim0) and not numpy.any(deltaDim1):
                    # positioner varying only along dim0
                    yPositionerName = k
                    deltaY = numpy.mean(deltaDim0)
                    originY = v[0, 0]
        if xPositionerName is not None and yPositionerName is not None:
            self.info["xScale"] = (originX, deltaX)
            self.info["yScale"] = (originY, deltaY)
def load_frame(self, scan_no, frame_no, gz_compressed=True, normalize=False,
               monitor_name=None, monitor_names=None, remove_rows=None,
               remove_cols=None):
    """Load one detector frame of a scan and return it as a DetImage.

    The file location comes either from the scan comments (ccoscan /
    multi-frame EDF) or from the '#UCCD' header template.  Gzipped files
    are transparently decompressed next to the original.
    Python 2 code (uses ``xrange``).

    :raises IndexError: if *frame_no* is out of range for the scan
    """
    # header lines are keyed by their first word (e.g. '#UCCD')
    header = dict()
    for line in self.spec_file[scan_no].header.split('\n'):
        header[line.split(' ')[0]] = line[len(line.split(' ')[0]):]
    comments = []
    for line in self.spec_file[scan_no].comments.split('\n'):
        comments.append(line)
    frames = self.get_no_frames(scan_no)
    if (frame_no >= frames):
        raise IndexError("Frame number does not exist.")
    if ('ccoscan' in header['#S']):
        # zap/cco scan: one multi-frame EDF file described in the comments
        for comment in comments:
            if ('#C DIRECTORY' in comment):
                img_folder = comment.split(':')[-1].strip().split('/')[-1]
            if ('#C RADIX' in comment):
                radix = comment.split(':')[-1].strip()
            if ('#C ZAP SCAN NUMBER' in comment):
                zap_scan_no = comment.split(':')[-1].strip()
            if ('#C ZAP IMAGE NUMBER' in comment):
                zap_image_no = comment.split(':')[-1].strip()
        filename = img_folder + '/' + radix + '_mpx-x4_%s_0000_0000.edf' % (
            str(zap_scan_no).zfill(4))
        edf_frame_no = frame_no
    else:
        # regular scan: one EDF file per frame, path built from the
        # '#UCCD' template (#n = scan, #p = point, #r = running number)
        first_frame = int(header['#UCCD'].split('#r')[-1].split('.')[0])
        img_folder = header['#UCCD'].split('/')[-2] + '/'
        filename_template = header['#UCCD'].split('/')[-1].split('#r')[
            0] + '#r.' + header['#UCCD'].split('/')[-1].split('.')[-1]
        filename = img_folder + filename_template.replace(
            '#n', str(scan_no).zfill(3)).replace(
                '#p', str(frame_no).zfill(3)).replace(
                    '#r', str(first_frame + frame_no).zfill(3))
        edf_frame_no = 0
    img_filename = os.path.join(self.image_foldername, filename)
    # decompress a .edf.gz sibling into place when the .edf is missing
    if (not os.path.exists(img_filename)):
        if (os.path.exists(img_filename.replace('.edf', '.edf.gz'))):
            inF = gzip.open(img_filename.replace('.edf', '.edf.gz'), 'rb')
            outF = open(img_filename, 'wb')
            outF.write(inF.read())
            inF.close()
            outF.close()
    edf = EdfFile.EdfFile(img_filename, 'r')
    edf_header = edf.GetHeader(edf_frame_no)
    # motor mnemonics/positions from the EDF header
    motors = dict()
    motor_mne = edf_header['motor_mne'].split()
    motor_pos = edf_header['motor_pos'].split()
    for i in xrange(len(motor_mne)):
        motors[motor_mne[i]] = float(motor_pos[i])
    counters = dict()
    if (not ('ccoscan' in header['#S'])):
        counter_mne = edf_header['counter_mne'].split()
        counter_pos = edf_header['counter_pos'].split()
        for i in xrange(len(counter_mne)):
            counters[counter_mne[i]] = float(counter_pos[i])
    the_img = np.array(edf.GetData(edf_frame_no), dtype='float')
    if (remove_rows != None):
        the_img = np.delete(the_img, remove_rows, axis=0)
    if (remove_cols != None):
        the_img = np.delete(the_img, remove_cols, axis=1)
    if (normalize):
        if (monitor_name):
            mon_count = float(
                getattr(self.spec_file[scan_no], monitor_name)[frame_no])
            the_img /= mon_count
        if (monitor_names):
            for mon_name in monitor_names:
                mon_count = float(
                    getattr(self.spec_file[scan_no], mon_name)[frame_no])
                the_img /= mon_count
    return DetImage(the_img, motors, counters)
def load_frame(self, scan_no, frame_no, gz_compressed=True, normalize=False,
               monitor_name=None, monitor_names=None, remove_rows=None,
               remove_cols=None):
    """Load one detector frame of a scan and return it as a DetImage.

    Variant that also supports 'zapline' scans and auto-detects gzip
    compression.  Python 2 code (``print`` statement, ``xrange``).

    :raises IndexError: if *frame_no* is out of range for the scan
    """
    # header lines are keyed by their first word (e.g. '#UCCD')
    header = dict()
    print self.spec_file[scan_no].header
    for line in self.spec_file[scan_no].header.split('\n'):
        header[line.split(' ')[0]] = line[len(line.split(' ')[0]):]
    # comments are keyed by the text before the first ':'
    comments = dict()
    for line in self.spec_file[scan_no].comments.split('\n'):
        comments[line.split(':')
                 [0].strip()] = line[len(line.split(':')[0]) + 1:].strip()
    frames = self.get_no_frames(scan_no)
    if ('ccoscan' in header['#S'] or 'zapline' in header['#S']):
        # zap scan: one multi-frame EDF file described in the comments
        img_folder = comments['#C DIRECTORY'].split('/')[-1] + '/'
        if (img_folder == '/'):
            # directory path had a trailing slash
            img_folder = comments['#C DIRECTORY'].split('/')[-2] + '/'
        zap_scan_no = int(comments['#C ZAP SCAN NUMBER'])
        radix = comments['#C RADIX']
        filename = radix + '_mpx-x4_%s_0000_0000.edf' % (
            str(zap_scan_no).zfill(4))
        multiframe_edf_frame_no = frame_no
    else:
        # regular scan: one EDF file per frame, path built from the
        # '#UCCD' template (#n = scan, #p = point, #r = running number)
        first_frame = int(header['#UCCD'].split('#r')[-1].split('.')[0])
        img_folder = header['#UCCD'].replace('//', '/').split(
            '/'
        )[-2] + '/'  #in the beginning the path of MA3886 had '//' for some reason
        filename_template = header['#UCCD'].split('/')[-1].split('#r')[
            0] + '#r.' + header['#UCCD'].split('/')[-1].split('.')[-1]
        filename = filename_template.replace(
            '#n', str(scan_no).zfill(3)).replace(
                '#p', str(frame_no).zfill(3)).replace(
                    '#r', str(first_frame + frame_no).zfill(3))
        multiframe_edf_frame_no = 0
    # automatically detect if frame is compressed or not
    if (not os.path.exists(
            os.path.join(self.image_foldername, img_folder, filename))):
        filename = filename.replace('.edf', '.edf.gz')
    #if(gz_compressed):
    #    filename = filename.replace('.edf', '.edf.gz')
    if (frame_no >= frames):
        raise IndexError("Frame number does not exist.")
    img_filename = os.path.join(self.image_foldername, img_folder, filename)
    edf = EdfFile.EdfFile(img_filename, 'r')
    edf_header = edf.GetHeader(multiframe_edf_frame_no)
    # motor mnemonics/positions from the EDF header
    motors = dict()
    motor_mne = edf_header['motor_mne'].split()
    motor_pos = edf_header['motor_pos'].split()
    for i in xrange(len(motor_mne)):
        motors[motor_mne[i]] = float(motor_pos[i])
    counters = dict()
    if (not ('ccoscan' in header['#S'] or 'zapline' in header['#S'])):
        counter_mne = edf_header['counter_mne'].split()
        counter_pos = edf_header['counter_pos'].split()
        for i in xrange(len(counter_mne)):
            counters[counter_mne[i]] = float(counter_pos[i])
    the_img = np.array(edf.GetData(multiframe_edf_frame_no), dtype='float')
    if (remove_rows != None):
        the_img = np.delete(the_img, remove_rows, axis=0)
    if (remove_cols != None):
        the_img = np.delete(the_img, remove_cols, axis=1)
    if (normalize):
        if (monitor_name):
            mon_count = float(
                getattr(self.spec_file[scan_no], monitor_name)[frame_no])
            the_img /= mon_count
        if (monitor_names):
            for mon_name in monitor_names:
                mon_count = float(
                    getattr(self.spec_file[scan_no], mon_name)[frame_no])
                the_img /= mon_count
    return DetImage(the_img, motors, counters, header)
def refresh(self):
    """Re-open every configured EDF source and reset the cached key info."""
    self._sourceObjectList = [
        EdfFile.EdfFile(name, access='rb', fastedf=self._fastedf)
        for name in self.__sourceNameList
    ]
    self.__lastKeyInfo = {}
def buildOutput(self, inputdir=None, outputdir=None, delete=None):
    """Merge the per-chunk partial batch-fit outputs found in *inputdir*.

    Three families of partial files are combined:
      * ``*_000000_partial.edf`` -> pixel-wise summed EDF images
      * ``*_000000_partial.dat`` -> summed spec-style .dat tables
      * ``*_000000_partial_concentrations.txt`` -> concatenated text files

    :param inputdir: directory scanned for partial files; defaults to
        ``self.inputDir``, then the current working directory
    :param outputdir: where merged files are written; defaults to
        ``self.outputDir``, then *inputdir*
    :param delete: remove partial files after merging; defaults to True
        when output and input directories coincide
    :return: tuple ``(edfoutlist, datoutlist, outconlist)`` with the paths
        of the files written in each category
    """
    if inputdir is None:
        inputdir = self.inputDir
    if inputdir is None:
        inputdir = os.getcwd()
    if outputdir is None:
        outputdir = self.outputDir
    if outputdir is None:
        outputdir = inputdir
    if delete is None:
        # Merging in place: keep only the merged result by default.
        if outputdir == inputdir:
            delete = True
    if DEBUG:
        print("delete option = ", delete)
    # Classify the first-chunk marker files; getIndexedFileList() later
    # expands each one into the full list of chunk files.
    allfiles = os.listdir(inputdir)
    partialedflist = []
    partialdatlist = []
    partialconlist = []
    for filename in allfiles:
        if filename.endswith('000000_partial.edf'):
            partialedflist.append(filename)
        elif filename.endswith('000000_partial.dat'):
            partialdatlist.append(filename)
        elif filename.endswith('000000_partial_concentrations.txt'):
            partialconlist.append(filename)
    #IMAGES
    edfoutlist = []
    for filename in partialedflist:
        if DEBUG:
            print("Dealing with filename %s" % filename)
        edflist = self.getIndexedFileList(os.path.join(inputdir, filename))
        i = 0
        for edfname in edflist:
            edf = EdfFile.EdfFile(edfname, access='rb', fastedf=0)
            nImages = edf.GetNumImages()
            #get always the last image
            data0 = edf.GetData(nImages - 1)
            # Negative pixels would corrupt the sum; clamp them to zero.
            data0[data0 < 0] = 0
            if i == 0:
                # Keep the first chunk's header for the merged output file.
                header = edf.GetHeader(0)
                data = data0.copy()
            else:
                data += data0
            del edf
            i += 1
        edfname = filename.replace('_000000_partial.edf', ".edf")
        edfoutname = os.path.join(outputdir, edfname)
        if DEBUG:
            print("Dealing with output filename %s" % edfoutname)
        if os.path.exists(edfoutname):
            if DEBUG:
                print("Output file already exists, trying to delete it")
            os.remove(edfoutname)
        edfout = EdfFile.EdfFile(edfoutname, access="wb")
        edfout.WriteImage(header, data, Append=0)
        del edfout
        edfoutlist.append(edfoutname)
        if delete:
            # Best-effort cleanup of the partial chunk files.
            for filename in edflist:
                try:
                    os.remove(filename)
                except:
                    print("Cannot delete file %s" % filename)
    #DAT IMAGES
    datoutlist = []
    for filename in partialdatlist:
        edflist = self.getIndexedFileList(os.path.join(inputdir, filename))
        first = True
        for edfname in edflist:
            f = open(edfname)
            lines = f.readlines()
            f.close()
            # j counts trailing empty lines so nrows covers data rows only.
            j = 1
            while (not len(lines[-j].replace("\n", ""))):
                j += 1
            if first:
                first = False
                labels = lines[0].replace("\n", "").split("  ")
                nlabels = len(labels)
                nrows = len(lines) - j
                # data accumulates the sum; inputdata holds the current file.
                data = numpy.zeros((nrows, nlabels), numpy.double)
                inputdata = numpy.zeros((nrows, nlabels), numpy.double)
            chisqIndex = labels.index('chisq')
            for i in range(nrows):
                inputdata[i, :] = [float(x) for x in lines[i + 1].split()]
                # A negative chisq flags an unfitted pixel; zero it out.
                if inputdata[i, chisqIndex] < 0.0:
                    inputdata[i, chisqIndex] = 0.0
            data += inputdata
        outfilename = os.path.join(outputdir,
                                   filename.replace("_000000_partial", ""))
        if os.path.exists(outfilename):
            os.remove(outfilename)
        outfile = open(outfilename, 'w+')
        # Re-use the label line of the last chunk read.
        outfile.write('%s' % lines[0])
        line = ""
        for row in range(nrows):
            #line = "%d" % inputdata[row, 0]
            for col in range(nlabels):
                # Columns 0 and 1 (row/column indices) are taken from the
                # last chunk, not summed; remaining columns use the sum.
                if col == 0:
                    line += "%d" % inputdata[row, col]
                elif col == 1:
                    line += " %d" % inputdata[row, col]
                else:
                    line += " %g" % data[row, col]
            line += "\n"
            outfile.write("%s" % line)
            line = ""
        outfile.write("\n")
        outfile.close()
        datoutlist.append(outfilename)
        if delete:
            for filename in edflist:
                os.remove(filename)
    #CONCENTRATIONS
    outconlist = []
    for filename in partialconlist:
        edflist = self.getIndexedFileList(os.path.join(inputdir, filename))
        i = 0
        for edfname in edflist:
            edf = open(edfname, 'rb')
            if i == 0:
                # First chunk: create the merged output file.
                outfilename = os.path.join(
                    outputdir, filename.replace("_000000_partial", ""))
                if os.path.exists(outfilename):
                    os.remove(outfilename)
                outfile = open(outfilename, 'wb')
            # Concatenate the chunk's text verbatim.
            lines = edf.readlines()
            for line in lines:
                outfile.write(line)
            edf.close()
            i += 1
        # NOTE(review): assumes edflist is non-empty, otherwise `outfile`
        # is unbound here — confirm getIndexedFileList always returns >= 1.
        outfile.close()
        outconlist.append(outfilename)
        if delete:
            for filename in edflist:
                os.remove(filename)
    return edfoutlist, datoutlist, outconlist
def _dynamicAction(self, index):
    """Load the image file at *index* in the list and display it.

    Only EDF files are supported here.
    """
    path = self.imageList[index]
    reader = EdfFile.EdfFile(path)
    self.setImageData(reader.GetData(0))
    self.graphWidget.graph.setGraphTitle(os.path.basename(path))
print "loading of " + nexfile_path + "failed!" #get the total numbe of images in the nex container try: num_of_image = len(data.entry.instrument.detector.data.nxdata) except: num_of_image = data.entry.instrument.detector.data.shape[0] #do only one image if you want to check first # if CHECK: # num_of_image=1 if CHECK: try: data_1D = data.entry.instrument.detector.data.nxdata[ check_image_number] if save_edf: f = EdfFile.EdfFile( nexfile_path.replace('.nxs', str(check_image_number) + '.edf')) f.WriteImage({"potential": 0}, data_1D) except: data_1D = data.entry.instrument.detector.data[check_image_number] if save_edf: f = EdfFile.EdfFile( nexfile_path.replace('.nxs', str(check_image_number) + '.edf')) f.WriteImage({"potential": 0}, data_1D) print "fit frame {} now".format(check_image_number) #remove dead pixes first data_1D_filtered = (data_1D < 10000) * data_1D try: hor_temp, ver_temp = fit_gaussian_peak(data_1D_filtered, plot_it=CHECK)
# -*- coding: utf-8 -*-
if __name__ == "__main__":
    # Sanity check: open every fitted EDF map and report its shape.
    import glob
    from PyMca5.PyMcaIO import EdfFile

    for edf_path in glob.glob(
            "/data/id21/inhouse/15apr/Hiram/results/fXANES5/fXANES5_data/*.edf"):
        reader = EdfFile.EdfFile(edf_path)
        image = reader.GetData(0)
        print(edf_path, image.shape)
# --- ROI definition around the detector peak (pixel coordinates) ---
cen_pix = [388, 338]  # peak centre pixel
dx, dy = 25, 25       # half-widths of the signal ROI
bg_dx = 15            # width of the background strips flanking the ROI
roi = Roi([cen_pix[0] - dx, cen_pix[0] + dx],
          [cen_pix[1] - dy, cen_pix[1] + dy])
# Two background ROIs left and right of the signal ROI, same vertical range.
bg_rois = []
bg_rois.append(
    Roi([cen_pix[0] - dx - bg_dx, cen_pix[0] - dx],
        [cen_pix[1] - dy, cen_pix[1] + dy]))
bg_rois.append(
    Roi([cen_pix[0] + dx, cen_pix[0] + dx + bg_dx],
        [cen_pix[1] - dy, cen_pix[1] + dy]))
integrator = ImageIntegrator(roi, bg_rois)

# Interactive display: step through the first 30 frames of the scan.
plt.ion()
fig = plt.figure()
for frame_no in xrange(30):
    # Frame files: <radix>_<scan>_<frame>_<absolute frame>.edf.gz
    # NOTE(review): assumes edf_path, scan_no and first_frame are defined
    # earlier in this script — confirm.
    img_filename = edf_path + "ma2254_mpx01/ma2254_" + str(scan_no).zfill(
        3) + '_' + str(frame_no).zfill(3) + '_' + str(
            first_frame + frame_no).zfill(3) + '.edf.gz'
    edf = EdfFile.EdfFile(img_filename, 'r')
    img = edf.GetData(0)
    integrator.plot_cuts(img, fig)
    plt.draw()
    time.sleep(0.5)  # give the figure time to refresh between frames
plt.ioff()
plt.show()
ddict['roi_min'] = ddict0['arguments'][1] ddict['roi_max'] = ddict0['arguments'][2] ddict['smoothing'] = ddict0['arguments'][3] self.parametersWidget.setParameters(ddict) else: self.parametersWidget.setParameters(ddict0) if __name__ == "__main__": import numpy app = qt.QApplication([]) if 0: noise = numpy.random.randn(1000.) y = numpy.arange(1000.) w = SNIPDialog(None, y + numpy.sqrt(y) * noise) elif len(sys.argv) > 1: from PyMca5.PyMcaIO import EdfFile edf = EdfFile.EdfFile(sys.argv[1]) data = edf.GetData(0) w = SNIPDialog(None, data) else: x, y = numpy.ogrid[0:200:200j, 0:200:200j] data = 50 * numpy.exp(-(x-64)*(x-64)/20.) +\ 50 * numpy.exp(-(y-128)*(y-128)/20.) +\ 100 * numpy.exp(-(1./20) * ((x-64)*(x-64) + (y-128)*(y-128))) w = SNIPDialog(None, data) w.show() ret = w.exec_() if ret: print(w.getParameters())
import sys
import os
from PyMca5.Object3D import SceneGLWindow
# EdfFile normally comes from PyMca5; fall back to a standalone module.
try:
    from PyMca5.PyMcaIO import EdfFile
except ImportError:
    import EdfFile
app = qt.QApplication(sys.argv)
window = SceneGLWindow.SceneGLWindow()
window.show()
if len(sys.argv) > 1:
    # One Object3DMesh per EDF file passed on the command line; each
    # mesh is named after its file, and `data` holds the last image read.
    flist = []
    for i in range(1, len(sys.argv)):
        flist.append(sys.argv[i])
    for f in flist:
        edf = EdfFile.EdfFile(f, access='rb')
        data = edf.GetData(0)
        object3D = Object3DMesh(os.path.basename(f))
else:
    # No file given: use a small built-in 40x5 ramp as demo data.
    data = numpy.arange(200.).astype(numpy.float32)
    data.shape = [40, 5]
    object3D = Object3DMesh('builtin')
#several options: regular grid, irregular grid
if len(sys.argv) > 1:
    #print "IMPOSSING A 1000 OFFSET"
    #offset = 1000.0
    offset = 0
    #irregular grid
    xSize, ySize = data.shape[0:2]
    zSize = 1
def saveImage(self, ffile=None):
    """Save the batch-fit results to EDF images plus a spec-style .dat file.

    Normal fits write one EDF per peak (area images), a 'chisq' image,
    optional concentration images, and one .dat table with all values.
    ROI fits (``self.roiFit``) write one multi-frame EDF per peak group
    instead. Written EDF paths are collected in ``self.savedImages``.

    :param ffile: output file root; derived from ``self._rootname`` and
        ``self.imgDir`` when None
    """
    self.savedImages = []
    if ffile is None:
        ffile = os.path.splitext(self._rootname)[0]
        ffile = self.os_path_join(self.imgDir, ffile)
    if not self.roiFit:
        # Encode sub-sampling steps in the file names so different
        # step combinations do not overwrite each other.
        if (self.fileStep > 1) or (self.mcaStep > 1):
            trailing = "_filestep_%02d_mcastep_%02d" % (self.fileStep,
                                                        self.mcaStep)
        else:
            trailing = ""
        #speclabel = "#L row column"
        speclabel = "row column"
        # Partial chunks get a distinguishing suffix for later merging.
        if self.chunk is None:
            suffix = ".edf"
        else:
            suffix = "_%06d_partial.edf" % self.chunk
        # Shallow copy of the peak list; chisq (and concentrations, when
        # requested) are appended for iteration purposes only.
        iterationList = self.__peaks * 1
        iterationList += ['chisq']
        if self._concentrations:
            iterationList += self.__concentrationsKeys
        for peak in iterationList:
            # Build both the .dat column label(s) and the EDF file name.
            if peak in self.__peaks:
                # Peaks are "<element> <line>" pairs, e.g. "Fe K".
                a, b = peak.split()
                speclabel += " %s" % (a + "-" + b)
                speclabel += " s(%s)" % (a + "-" + b)
                edfname = ffile + "_" + a + "_" + b + trailing + suffix
            elif peak in self.__concentrationsKeys:
                speclabel += " %s" % peak.replace(" ", "-")
                edfname = ffile + "_" + peak.replace(
                    " ", "_") + trailing + suffix
            elif peak == 'chisq':
                speclabel += " %s" % (peak)
                edfname = ffile + "_" + peak + trailing + suffix
            else:
                print("Unhandled peak name: %s. Not saved." % peak)
                continue
            dirname = os.path.dirname(edfname)
            if not os.path.exists(dirname):
                try:
                    os.mkdir(dirname)
                except:
                    print("I could not create directory %s" % dirname)
            # Overwrite by default; if the old file cannot be removed,
            # fall back to appending the image at the end of it.
            Append = 0
            if os.path.exists(edfname):
                try:
                    os.remove(edfname)
                except:
                    print("I cannot delete output file")
                    print("trying to append image to the end")
                    Append = 1
            edfout = EdfFile.EdfFile(edfname, access='ab')
            edfout.WriteImage({'Title': peak}, self.__images[peak],
                              Append=Append)
            edfout = None
            self.savedImages.append(edfname)
        #save specfile format
        if self.chunk is None:
            specname = ffile + trailing + ".dat"
        else:
            specname = ffile + trailing + "_%06d_partial.dat" % self.chunk
        if os.path.exists(specname):
            try:
                os.remove(specname)
            except:
                pass
        specfile = open(specname, 'w+')
        #specfile.write('\n')
        #specfile.write('#S 1 %s\n' % (file+trailing))
        #specfile.write('#N %d\n' % (len(self.__peaks)+2))
        specfile.write('%s\n' % speclabel)
        specline = ""
        # One .dat row per image pixel: row, col, then per-peak area and
        # sigma, the global chisq, and optional concentrations.
        imageRows = self.__images['chisq'].shape[0]
        imageColumns = self.__images['chisq'].shape[1]
        for row in range(imageRows):
            for col in range(imageColumns):
                specline += "%d" % row
                specline += " %d" % col
                for peak in self.__peaks:
                    #write area
                    specline += " %g" % self.__images[peak][row][col]
                    #write sigma area
                    specline += " %g" % self.__sigmas[peak][row][col]
                #write global chisq
                specline += " %g" % self.__images['chisq'][row][col]
                if self._concentrations:
                    for peak in self.__concentrationsKeys:
                        specline += " %g" % self.__images[peak][row][col]
                specline += "\n"
                specfile.write("%s" % specline)
                specline = ""
        specfile.write("\n")
        specfile.close()
    else:
        # ROI fit: one EDF per peak group, with one frame per ROI
        # (Append=i appends every frame after the first).
        for group in self.__ROIpeaks:
            i = 0
            grouptext = group.replace(" ", "_")
            for roi in self._ROIimages[group].keys():
                #roitext = roi.replace(" ","-")
                if (self.fileStep > 1) or (self.mcaStep > 1):
                    edfname = ffile + "_" + grouptext + (
                        "_%04deVROI_filestep_%02d_mcastep_%02d.edf" %
                        (self.roiWidth, self.fileStep, self.mcaStep))
                else:
                    edfname = ffile + "_" + grouptext + ("_%04deVROI.edf" %
                                                         self.roiWidth)
                dirname = os.path.dirname(edfname)
                if not os.path.exists(dirname):
                    try:
                        os.mkdir(dirname)
                    except:
                        print("I could not create directory %s" % dirname)
                edfout = EdfFile.EdfFile(edfname)
                edfout.WriteImage({'Title': group + " " + roi},
                                  self._ROIimages[group][roi],
                                  Append=i)
                # Record the file once, on its first frame.
                if i == 0:
                    self.savedImages.append(edfname)
                    i = 1