def edf_keywords_completion(edfin, keywords, edfout=None):
    """This procedure takes an EDF file and completes it with the keywords
    given as a CIF dictionary.

    @param edfin: the name or the path of an EDF file to be read
    @type edfin: Python string
    @param keywords: dictionary containing the CIF-like data
    @type keywords: Python dictionary
    @param edfout: name or path of an EDF file to be written. Caution, this
        WILL overwrite the file. If nothing is given, the name will be
        edfin + "+keywords.edf"
    @type edfout: Python string
    """
    if not edfout:
        edfout = os.path.splitext(edfin)[0] + "+keywords.edf"
    print "processing file %s ---> %s" % (edfin, edfout)
    infile = EDF.EdfFile(edfin)
    data = infile.GetData(0)
    headers = infile.GetHeader(0)
    for key in keywords:
        if key.lower() == "loop_":
            # As loops are not possible in EDF headers, we replace them by a kind of list
            loops = keywords[key]
            for oneLoop in loops:
                oneLoopIdx = 0  # index within the loop
                for oneLoopdict in oneLoop[1]:
                    oneLoopIdx += 1
                    for loopKey in oneLoop[0]:
                        headers["%s[%i]" % (loopKey, oneLoopIdx)] = oneLoopdict[loopKey]
        else:
            headers[key] = keywords[key]
    outfile = EDF.EdfFile(edfout)
    outfile.WriteImage(headers, data)
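# A hedged usage sketch for edf_keywords_completion; the file name and keyword values
# below are hypothetical, chosen only to illustrate the expected structure of the
# "keywords" argument. The "loop_" entry mirrors what the function iterates over:
# a list of loops, each loop being (list_of_keys, list_of_row_dicts), which gets
# flattened into "key[index]" entries of the EDF header.
example_keywords = {
    "Wavelength": "1.03321",
    "Distance": "2870.0",
    "loop_": [
        (["PeakPosition", "PeakIntensity"],
         [{"PeakPosition": "1.2", "PeakIntensity": "100"},
          {"PeakPosition": "2.4", "PeakIntensity": "50"}]),
    ],
}
# edf_keywords_completion("myfile.edf", example_keywords)  # would write myfile+keywords.edf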
def getBGarray(self, bg_filename):
    if self.bg_path == '-' and bg_filename == '-':
        self.bg_combined = np.zeros((self.roi[3], self.roi[1]))
    else:
        bg_file_with_path = self.bg_path + '/' + self.bg_files[0]
        bg_class = EdfFile.EdfFile(bg_file_with_path)
        bg_img = bg_class.GetData(0).astype(np.int64)[
            self.roi[2]:self.roi[3], self.roi[0]:self.roi[1]]
        self.bg_combined = np.zeros(np.shape(bg_img))
        if self.rank == 0:
            print "Reading background files (ROI)..."
        for i in range(len(self.bg_files)):
            bg_file_with_path = self.bg_path + '/' + self.bg_files[i]
            bg_class = EdfFile.EdfFile(bg_file_with_path)
            self.bg_combined += bg_class.GetData(0).astype(np.int64)[
                self.roi[2]:self.roi[3], self.roi[0]:self.roi[1]]
        self.bg_combined /= len(self.bg_files)

        bg_img_full = bg_class.GetData(0).astype(np.int64)
        self.bg_combined_full = np.zeros(np.shape(bg_img_full))
        if self.rank == 0:
            print "Reading background files (Full)..."
        for i in range(len(self.bg_files)):
            bg_file_with_path = self.bg_path + '/' + self.bg_files[i]
            bg_class = EdfFile.EdfFile(bg_file_with_path)
            self.bg_combined_full += bg_class.GetData(0).astype(np.int64)
        self.bg_combined_full /= len(self.bg_files)
def testExecute(self):
    """
    """
    self.run()
    plugin = self.getPlugin()

    ################################################################################
    # Compare XSDataResults
    ################################################################################
    strExpectedOutput = self.readAndParseFile(self.getReferenceDataOutputFile())
    # strObtainedOutput = self.readAndParseFile(self.m_edObtainedOutputDataFile)
    EDVerbose.DEBUG("Checking obtained result...")
    xsDataResultReference = XSDataResultBioSaxsMetadatav1_0.parseString(strExpectedOutput)
    xsDataResultObtained = plugin.getDataOutput()
    EDAssert.strAlmostEqual(xsDataResultReference.marshal(), xsDataResultObtained.marshal(),
                            "XSDataResult output are the same", _strExcluded="bioSaxs")

    ################################################################################
    # Compare dictionary:
    ################################################################################
    edfRef = EdfFile.EdfFile(xsDataResultObtained.getOutputImage().getPath().value)
    edfObt = EdfFile.EdfFile(os.path.join(self.getTestsDataImagesHome(), "bioSaxsMetadata.edf"))

    ########################################################################
    # DEPRECATED PLUGIN => DEPRECATED TESTS
    ########################################################################
    # headerRef = edfRef.GetHeader(0)
    # headerObt = edfObt.GetHeader(0)
    # keysRef = headerRef.keys()
    # keysObt = headerObt.keys()
    # keysRef.sort()
    # keysObt.sort()
    # for key in ["HeaderID", "Image", 'EDF_BinarySize', "EDF_DataBlockID", "EDF_HeaderSize",
    #             "filename", "RasterOrientation", "Center_1", "Center_2", "Code", "Comments",
    #             "Concentration", "VersionNumber", 'time_of_day', ]:
    #     if key in keysObt: keysObt.remove(key)
    #     if key in keysRef: keysRef.remove(key)
    # EDAssert.equal(keysRef, keysObt, _strComment="Same keys in the header dictionary")
    # for key in keysRef:
    #     EDAssert.strAlmostEqual(headerRef[key], headerObt[key], _strComment="header value %s are the same" % key, _strExcluded="bioSaxs")

    ################################################################################
    # Compare images
    ################################################################################
    outputData = edfObt.GetData(0)
    referenceData = edfRef.GetData(0)
    EDAssert.arraySimilar(outputData, referenceData, _fAbsMaxDelta=0.1, _fScaledMaxDelta=0.05,
                          _strComment="Images-data are the same")
def getBGarray(self, bg_filename):
    if self.bg_path == '-' and bg_filename == '-':
        self.bg_combined = np.zeros((self.roi[3], self.roi[1]))
    else:
        bg_file_with_path = self.bg_path + '/' + self.bg_files[0]
        bg_class = EdfFile.EdfFile(bg_file_with_path)
        bg_img = bg_class.GetData(0).astype(np.int64)[
            self.roi[2]:self.roi[3], self.roi[0]:self.roi[1]]
        self.bg_combined = np.zeros(np.shape(bg_img))
        if self.rank == 0:
            print "Reading background files (ROI)..."
        for i in range(len(self.bg_files)):
            bg_file_with_path = self.bg_path + '/' + self.bg_files[i]
            bg_class = EdfFile.EdfFile(bg_file_with_path)
            self.bg_combined += bg_class.GetData(0).astype(np.int64)[
                self.roi[2]:self.roi[3], self.roi[0]:self.roi[1]]
        self.bg_combined /= len(self.bg_files)

        bg_img_full = bg_class.GetData(0).astype(np.int64)
        self.bg_combined_full = np.zeros(np.shape(bg_img_full))
        if self.rank == 0:
            print "Reading background files (Full)..."
        for i in range(len(self.bg_files)):
            bg_file_with_path = self.bg_path + '/' + self.bg_files[i]
            bg_class = EdfFile.EdfFile(bg_file_with_path)
            self.bg_combined_full += bg_class.GetData(0).astype(np.int64)
        self.bg_combined_full /= len(self.bg_files)

        bckg = self.bg_combined
        bckg_entire = self.bg_combined_full
        # Check background images
        #fig = plt.figure()
        #a = fig.add_subplot(1, 2, 1)
        #plt.imshow(bckg)
        #a.set_title('Background ROI')
        #b = fig.add_subplot(1, 2, 2)
        #plt.imshow(bckg_entire)
        #b.set_title('All background')
        #plt.show()
        np.save('bckg_roi.npy', bckg)
        np.save('bckg_all.npy', bckg_entire)
def testExecute(self):
    """
    """
    self.run()
    plugin = self.getPlugin()

    ################################################################################
    # Compare XSDataResults
    ################################################################################
    strExpectedOutput = self.readAndParseFile(self.getReferenceDataOutputFile())
    # strObtainedOutput = self.readAndParseFile(self.m_edObtainedOutputDataFile)
    EDVerbose.DEBUG("Checking obtained result...")
    xsDataResultReference = XSDataResultBioSaxsAveragev1_0.parseString(strExpectedOutput)
    xsDataResultObtained = plugin.getDataOutput()
    EDAssert.strAlmostEqual(xsDataResultReference.marshal(), xsDataResultObtained.marshal(),
                            "XSDataResult output are the same", _strExcluded="bioSaxs")

    ################################################################################
    # Compare spectrum ascii files
    ################################################################################
    outputData = open(xsDataResultObtained.getAveragedCurve().getPath().value, "rb").read()
    referenceData = open(os.path.join(self.getTestsDataImagesHome(), "bioSaxsAveraged.dat"), "rb").read()
    EDAssert.strAlmostEqual(referenceData, outputData,
                            _strComment="3-column ascii spectra files are the same",
                            _fRelError=0.1, _fAbsError=0.1, _strExcluded="bioSaxs")

    ################################################################################
    # Compare images
    ################################################################################
    edfObt = EdfFile.EdfFile(xsDataResultObtained.getAveragedImage().getPath().value)
    edfRef = EdfFile.EdfFile(os.path.join(self.getTestsDataImagesHome(), "bioSaxsAveraged.edf"))
    outputData = edfObt.GetData(0)
    referenceData = edfRef.GetData(0)
    EDAssert.arraySimilar(outputData, referenceData, _fAbsMaxDelta=0.1, _fScaledMaxDelta=0.05,
                          _strComment="Averaged images are the same")

    headerRef = edfRef.GetHeader(0)
    headerObt = edfObt.GetHeader(0)
    keysRef = headerRef.keys()
    keysObt = headerObt.keys()
    keysRef.sort()
    keysObt.sort()
    for key in ["HeaderID", "Image", 'EDF_BinarySize', "EDF_DataBlockID", "EDF_HeaderSize",
                "filename", "RasterOrientation", "History-1", "History-1~1"]:
        if key in keysObt:
            keysObt.remove(key)
        if key in keysRef:
            keysRef.remove(key)
    EDAssert.equal(keysRef, keysObt, _strComment="Same keys in the header dictionary for Corrected Images")
    for key in keysRef:
        EDAssert.strAlmostEqual(headerRef[key], headerObt[key],
                                _strComment="header value in Averaged %s are the same" % key,
                                _strExcluded="bioSaxs")
def headeredf(filename, imgn=0):
    if isfile(filename):
        f = EdfFile.EdfFile(filename)
        return f.GetHeader(imgn)
    else:
        print "file ", filename, " does not exist!"
        return
def __saveEDF(self, fileSelected):
    if not fileSelected:
        return
    fileSelected = str(fileSelected)
    data = self._saveEDFPlug._data()
    edf_file = EdfFile.EdfFile(fileSelected)
    edf_file.WriteImage({}, data)
def loadedf(filename, imgn=0):
    if isfile(filename):
        f = EdfFile.EdfFile(filename)
        return f.GetData(imgn)
    else:
        print "file ", filename, " does not exist!"
        return
def read(filename):
    trcfile = EdfFile.EdfFile(filename)
    trcheader = trcfile.GetHeader(0)
    trcdata = asfarray(trcfile.GetData(0))
    trcdata = trcdata / 2**16 * (float(trcheader['MaxValue']) -
                                 float(trcheader['MinValue'])) + float(trcheader['MinValue'])
    return trcdata
def __init__(self, filename):
    try:
        f = EdfFile.EdfFile(filename)
    except:
        raise UserInputException("""Unable to read in the file \
%s because the Object EdfFile that I am using to read in edf data \
raised an error when trying to read in the file. This probably means \
that there is something wrong \
with the file that you are trying to open""" % filename)
    data = f.GetData(0)
    if not alltrue(alltrue(less_equal(data, 2147483647))):
        print """Warning, some of the data stored in the \
file %s has an intensity larger than 2^31-1 which is too big for this \
program to hold. Any of these large values were clipped to have a value of 2^31-1.""" % filename
    # clip any data that is too big
    mask1 = data <= 2147483647
    mask2 = data > 2147483647
    masked_data = data * mask1 + (pow(2, 31) - 1) * mask2
    masked_data = masked_data.astype(Numeric.Int32)
    self.size = max(masked_data.shape[0], masked_data.shape[1])
    # pad values if necessary - create an array to put everything in
    self.data = Numeric.zeros((self.size, self.size), Numeric.Int32)
    # copy the data into the padded array
    self.data[0:masked_data.shape[0], 0:masked_data.shape[1]] = masked_data
    self.data = Numeric.transpose(self.data)
def createAverageWfandDf(self):
    if not (os.path.exists(self.dirname + '/refHST0000.edf')):
        print('NO REF ')
        refBeg = glob.glob(self.dirname + '/ref*_0000.edf')
        print(refBeg)
        if len(refBeg) > 0:
            self.outputFileFFNameBeg = self.dirname + '/refForHST0000.edf'
            vBeg = Averager.Averager(refBeg, self.outputFileFFNameBeg, option=0)
        if (self.ref_on < self.numberOfProjections):
            cpt = self.ref_on
            while cpt < self.numberOfProjections:
                textref = '%4.4d' % cpt
                refBeg = glob.glob(self.dirname + '/ref*_' + textref + '.edf')
                if len(refBeg) > 0:
                    self.outputFileNameFFEnd = self.dirname + '/refForHST' + textref + '.edf'
                    vBeg = Averager.Averager(refBeg, self.outputFileNameFFEnd, option=1)
                cpt += self.ref_on
        else:
            textref = '%4.4d' % self.ref_on
            refBeg = glob.glob(self.dirname + '/ref*_' + textref + '.edf')
            if len(refBeg) > 0:
                self.outputFileNameFFEnd = self.dirname + '/refForHST' + textref + '.edf'
                vBeg = Averager.Averager(refBeg, self.outputFileNameFFEnd, option=0)
        darkFile = self.dirname + '/darkend0000.edf'
        data = edf.EdfFile(darkFile).GetData(0)
        data = np.divide(data, self.numberOfDarkFields)
        self.darkOutputFile = self.dirname + '/darkForHST0000.edf'
        filetoWrite = edf.EdfFile(self.darkOutputFile, access='wb+')
        filetoWrite.WriteImage({}, data, Append=0, DataType='FloatValue')
    else:
        self.outputFileFFNameBeg = self.dirname + '/refHST0000.edf'
        self.outputFileNameFFEnd = self.dirname + '/refHST0000.edf'
        self.darkOutputFile = self.dirname + '/dark.edf'
    self.averageDone = True
def save3DImage(self, outputName):
    self.nbSlices, self.width, self.height = self.data.shape
    for k in range(self.nbSlices):
        textNumSlice = '%4.4d' % k
        finalOutputName = outputName + textNumSlice + '.edf'
        filetoWrite = edf.EdfFile(finalOutputName, access='wb+')
        dataToStore = self.data[k, :, :].squeeze()
        filetoWrite.WriteImage({}, dataToStore)
def saveSino(self, outputName):
    for i in range(self.width):
        textNumSlice = '%4.4d' % i
        finalOutputName = outputName + textNumSlice + '.edf'
        filetoWrite = edf.EdfFile(finalOutputName, access='wb+')
        dataToStore = self.data[:, i, :].squeeze()
        filetoWrite.WriteImage({}, dataToStore)
def createAverageWfandDfPbm(self):
    refBeg = glob.glob(self.dirname + '/RefA*.edf')
    print(refBeg)
    if len(refBeg) > 0:
        self.outputFileFFNameBeg = self.dirname + '/refForHST0000.edf'
        vBeg = Averager.Averager(refBeg, self.outputFileFFNameBeg, option=0)
    ref = edf.EdfFile(refBeg[0], access='r')
    ref = ref.GetData(0)
    self.darkOutputFile = self.dirname + '/darkForHST0000.edf'
    data = np.ones(ref.shape)
    filetoWrite = edf.EdfFile(self.darkOutputFile, access='wb+')
    filetoWrite.WriteImage({}, data, Append=0, DataType='FloatValue')
    self.averageDone = True
def saveedf(filename, data, imgn=0):
    try:
        newf = EdfFile.EdfFile(filename)
        newf.WriteImage({}, data, imgn)
        print "file is saved to ", filename
        return
    except:
        print "file is not saved!"
        return
def makeDarkMean(Darkfiedls):
    nbslices, height, width = Darkfiedls.shape
    meanSlice = np.mean(Darkfiedls, axis=0)
    print('----------------------- mean Dark calculation done ------------------------- ')
    OutputFileName = '/Users/helene/PycharmProjects/spytlab/meanDarkTest.edf'
    outputEdf = edf.EdfFile(OutputFileName, access='wb+')
    outputEdf.WriteImage({}, meanSlice)
    return meanSlice
def readImage(filename):
    # Dispatch on extension: EDF files go through EdfFile, the other formats through PIL.
    if filename.endswith('.edf'):
        edfFile = edf.EdfFile(filename, access='rb')
        im2D = edfFile.GetData(0)
    if filename.endswith('.tiff') or filename.endswith('.tif') or filename.endswith('.png') \
            or filename.endswith('.TIF') or filename.endswith('.TIFF'):
        img = Image.open(filename)
        im2D = np.array(img)
    return im2D
def openImage(filename):
    filename = str(filename)
    if filename.endswith('.edf'):
        im = edf.EdfFile(filename, access='rb')
        imarray = im.GetData(0)
    else:
        imarray = Image.open(filename)
        imarray = np.array(imarray)
    return imarray
def read_edf_slices(imageDimensions, base_dir, edf_base_name, digits, extension, slices_range, crop=None):
    """
    This reads EDF slices and returns a 3D volume.

    INPUT:
    - imageDimensions   size of the EDF images
    - base_dir          the directory inside which the EDF files are
    - edf_base_name     the "base" name of the EDF files
    - digits            pre-padding with zeros in the file number
    - extension         extension of the EDF files
    - slices_range      tuple or list of the range of slices to read
    - crop              list of lists: [ [ x_min, x_max ], [ y_min, y_max ] ]
    """
    import numpy
    import EdfFile

    # ET: there should be a +1 here because if I want to load slices from 1 to 5 this gives 5 slices and not 4
    numberOfSlices = slices_range[1] - slices_range[0] + 1

    # Define array for image loading
    if crop == None:
        # If we don't have a crop, we're going to use the whole slice size.
        outputVolume = numpy.ones((numberOfSlices, imageDimensions[1], imageDimensions[0]), dtype='<f4') * numpy.nan
    else:
        # Slice dimensions from crop
        outputVolume = numpy.ones((numberOfSlices, crop[1][1] - crop[1][0], crop[0][1] - crop[0][0]), dtype='<f4') * numpy.nan

    # Load all images into the big array
    for sliceNumber in range(numberOfSlices):
        try:
            # 2016-04-30 ET: if working in 2D, digits == 0 and the name is constructed differently
            if digits == 0:
                filename = "%s/%s%s" % (base_dir, edf_base_name, extension)
            else:
                filename = "%s/%s%0*i%s" % (base_dir, edf_base_name, digits, sliceNumber + slices_range[0], extension)

            currentImage = numpy.array(EdfFile.EdfFile(filename).GetData(0))

            if crop == None:
                outputVolume[sliceNumber] = currentImage
            else:
                outputVolume[sliceNumber] = currentImage[crop[1][0]:crop[1][1], crop[0][0]:crop[0][1]]
        except:
            try:
                logging.log.warning("read_edf_slices(): File %s not found " % filename)
            except:
                print "read_edf_slices(): File %s not found " % filename
            pass

    try:
        logging.log.debug("read_edf_slices(): Volume mean value = %s" % (outputVolume.mean()))
    except:
        print "read_edf_slices(): Volume mean value = %s" % (outputVolume.mean())

    return outputVolume
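# A hedged usage sketch for read_edf_slices; the directory, base name and sizes are
# hypothetical. This would load scan_0010.edf .. scan_0019.edf (4-digit numbering) and
# crop each slice to x in [100, 600) and y in [50, 550).
volume = read_edf_slices(
    imageDimensions=[2048, 2048],    # [Dim_1, Dim_2] of each EDF image
    base_dir="/data/scan",           # hypothetical directory
    edf_base_name="scan_",
    digits=4,
    extension=".edf",
    slices_range=[10, 19],           # inclusive range, hence the +1 inside the function
    crop=[[100, 600], [50, 550]],
)
# volume.shape would be (10, 500, 500), dtype float32; missing files are left as NaN slices.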
def write(filename, trcdata):
    trcfile = EdfFile.EdfFile(filename)
    maxtrc = trcdata.max()
    mintrc = trcdata.min()
    trcdata = (trcdata - mintrc) / (maxtrc - mintrc) * 2**16
    trcfile.WriteImage({'MaxValue': maxtrc, 'MinValue': mintrc}, trcdata,
                       0, DataType='UnsignedShort')
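# A hedged round-trip sketch for the read()/write() pair above, assuming both live in the
# same module and that numpy is imported as np; "trace.edf" is a hypothetical file name.
# write() rescales the data to a 16-bit range and stores the original extrema in the
# MaxValue/MinValue header keys, which read() then uses to restore the physical scale,
# so the restored array approximates the original up to 16-bit quantization.
import numpy as np

original = np.random.rand(256, 256) * 3.0 - 1.0   # arbitrary float data
write("trace.edf", original)                      # stored as UnsignedShort plus header extrema
restored = read("trace.edf")                      # rescaled back using MaxValue/MinValue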
def edf(self):
    """
    Read data from an edf file.

    Returns
    -------
    ndarray
        Data.
    """
    f = EdfFile.EdfFile(self.fname, access='r')
    d = f.GetStaticHeader(0)
    arr = np.empty((f.NumImages, int(d['Dim_2']), int(d['Dim_1'])))
    for (i, ar) in enumerate(arr):
        arr[i::] = f.GetData(i)
    arr = self._slice_array(arr)
    return arr
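# A standalone sketch of the same frame-stacking pattern, assuming EdfFile and numpy are
# importable and that "stack.edf" (hypothetical) is a multi-frame EDF file; it mirrors the
# method above without the class machinery (self.fname, self._slice_array).
import numpy as np
import EdfFile

f = EdfFile.EdfFile("stack.edf", access='r')
d = f.GetStaticHeader(0)
stack = np.empty((f.NumImages, int(d['Dim_2']), int(d['Dim_1'])))
for i in range(f.NumImages):
    stack[i] = f.GetData(i)   # one 2D frame per EDF image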
def getImage(self, index, full):
    file_with_path = self.path + '/' + self.data_files[index]
    img = EdfFile.EdfFile(file_with_path)
    roi = self.roi
    if full:
        im = img.GetData(0).astype(np.int64) - self.bg_combined_full
    else:
        im = img.GetData(0).astype(np.int64)[roi[2]:roi[3], roi[0]:roi[1]] - self.bg_combined
    im = self.cleanImage(im)
    return im
def prepareFromFile(file_path, annotations):
    edf_file = EdfFile.EdfFile(file_path, annotations)
    epochs = edf_file.signals_list[1].getEpochs()
    outputMatrix = edf_file.createOutput(epochs)
    for e, o in zip(epochs, outputMatrix):
        if max(o) == 0:
            epochs.remove(e)
            outputMatrix.remove(o)
    inputMatrix = edf_file.createInput(epochs, True)
    if (len(inputMatrix) != len(outputMatrix)):
        print("shapes dont match in file %s. input: %d, output: %d" %
              (file_path, len(inputMatrix), len(outputMatrix)))
        return np.array([]), np.array([])
    return inputMatrix, np.array(outputMatrix)
def writeImage(filename, data):
    # EDF files are written through EdfFile; everything else goes through scipy.misc.imsave.
    if filename.endswith('.edf'):
        edfFile = edf.EdfFile(filename, access='wb-')
        edfFile.WriteImage({}, data)
    else:
        typeImage = data.dtype
        if typeImage == np.uint8:
            scipy.misc.imsave(filename, data)
        if typeImage == np.bool:
            toStore = np.zeros(data.shape, dtype=np.uint8)
            toStore[data == True] = 255
            scipy.misc.imsave(filename, toStore)
        if typeImage == np.float32 or typeImage == np.float16 or typeImage == np.float64:
            scipy.misc.imsave(filename, data)
def getDatasFromFile(filepath, fromIndex=0, toIndex=-1):
    returnDatas = []
    try:
        f = EdfFile.EdfFile(filepath)
        if toIndex < 0:
            toIndex = f.GetNumImages()
        for i in range(fromIndex, toIndex):
            a = f.GetData(i)
            header = f.GetHeader(i)
            rData = Core.Processlib.Data()
            rData.buffer = a
            try:
                rData.header = header
            except TypeError:
                pass
            returnDatas.append(rData)
    except:
        import traceback
        traceback.print_exc()
    finally:
        return returnDatas
def getHeader(self, filenumber):
    file_with_path = self.path + '/' + self.data_files[filenumber]
    img = EdfFile.EdfFile(file_with_path)
    header = img.GetHeader(0)
    mot_array = header['motor_mne'].split(' ')
    motpos_array = header['motor_pos'].split(' ')
    try:
        det_array = header['counter_mne'].split(' ')
        detpos_array = header['counter_pos'].split(' ')
    except KeyError:
        det_array = []
        detpos_array = []
    try:
        srcur = float(header['machine current'].split(' ')[0])
    except KeyError:
        srcur = 0
    return mot_array, motpos_array, det_array, detpos_array, srcur
def getFullHeader(self, filenumber):
    file_with_path = self.path + '/' + self.data_files[filenumber]
    img = EdfFile.EdfFile(file_with_path)
    header = img.GetHeader(0)
    metalist = []
    for ind in header.keys():
        if ind != 'motor_pos'\
                and ind != 'motor_mne'\
                and ind != 'counter_pos'\
                and ind != 'counter_mne':
            metalist.append(header[ind])
    try:
        metalist.extend(header['motor_pos'].split(' '))
        metalist.extend(header['counter_pos'].split(' '))
    except KeyError:
        pass
    return metalist
def getIndexList(self):
    file_with_path = self.path + '/' + self.data_files[0]
    img = EdfFile.EdfFile(file_with_path)
    header = img.GetHeader(0)
    indexlist = []
    for ind in header.keys():
        if ind != 'motor_pos'\
                and ind != 'motor_mne'\
                and ind != 'counter_pos'\
                and ind != 'counter_mne':
            indexlist.append(ind)
    try:
        indexlist.extend(header['motor_mne'].split(' '))
        indexlist.extend(header['counter_mne'].split(' '))
    except KeyError:
        pass
    self.indexlist = indexlist
def Iq(input_file, avg):
    info = get_input(input_file)
    qtot = qpattern(info)
    geometry = info['geometry']
    if geometry == 'gisaxs':
        qtot = qtot[1]
    wavelength = float(info['wavelength'])
    distance = float(info['detector sample distance'])
    detector = info['detector']
    if detector == 'princeton' or detector == 'andor 22.5micron':
        pix_size = 0.0225
    if detector == 'medipix':
        pix_size = 0.055
    if detector == 'andor 13micron' or detector == 'andor':
        pix_size = 0.013
    if detector == 'xbpm':
        pix_size = 0.001
    deltaq = 4 * pi / wavelength * sin(arctan(2 * pix_size / distance) / 2)
    mask_file = info['mask file']
    tot = EdfFile.EdfFile(mask_file)
    totmask = tot.GetData(0) + tot.GetData(1)
    q = qtot[totmask == 0]
    indq = argsort(q)
    q = q[indq]
    qr = arange(min(q), max(q) + deltaq, deltaq)
    m = avg[totmask == 0]
    m = m[indq]
    lqv = len(qr)
    radi = zeros((lqv, 2))
    ini = 0
    hh, bins = histogram(q, lqv, new=True)
    radi[:, 0] = bins[:-1] + deltaq / 2
    for i in xrange(lqv):
        radi[i, 1] = mean(m[ini:ini + hh[i]])
        ini = ini + hh[i]
    return radi
def _get_from_file(self, image_nb):
    for ref_data in self.ref_data:
        values = self._get_filenames(ref_data, image_nb)
        filename, path_in_file, image_index, file_format = values[0]
        if file_format in ('EDF', 'EDFGZ', 'EDFConcat'):
            if file_format == 'EDFConcat':
                image_index = 0
            if EdfFile is not None:
                f = EdfFile.EdfFile(filename)
                return f.GetData(image_index)
            else:
                raise RuntimeError("EdfFile module is not available, "
                                   "cannot return image data.")
        elif file_format == 'HDF5':
            if h5py is not None:
                with h5py.File(filename) as f:
                    dataset = f[path_in_file]
                    return dataset[image_index]
        else:
            raise RuntimeError("Format not yet managed")
    else:
        raise RuntimeError("Can't retrieve image %d from file" % image_nb)