def test_mask(self):
    """test the generation of mask"""
    print self.edfPilatus
    data = fabio.open(self.edfPilatus).data
    mask = fabio.open(self.maskFile).data
    assert abs(self.ai.makeMask(data, mask=mask).astype(int) - fabio.open(self.maskRef).data).max() == 0
    assert abs(self.ai.makeMask(data, mask=mask, dummy=-2, delta_dummy=1.1).astype(int) - fabio.open(self.maskDummy).data).max() == 0
def load_all(self):
    try:
        self.ff = os.path.join(self.root, "FILES", "flatfield_%i.edf" % self.resolution)
        self.flatfield = fabio.open(self.ff).data.astype(np.float32)
    except:
        print "Error with flatfield image"
        return False
    try:
        self.d = os.path.join(self.root, "FILES", "dark.edf")
        self.dark = fabio.open(self.d).data.astype(np.float32)
    except:
        print "Please provide a dark image"
        return False
    try:
        self.sf = os.path.join(self.root, "FILES", "distorsion_%i.spline" % self.resolution)
        frelon = pyFAI.detectors.FReLoN(splineFile=self.sf)
        self.dist = pyFAI.distortion.Distortion(frelon)
        print "Calculating Distortion Look-Up Table"
        self.dist.calc_LUT()
        return True
    except:
        print "Error with distortion correction"
        return False
def test_invert(self):
    """Test that the two matrices are inverses of each other"""
    m1 = fabio.open(self.fn["XSDataImage.xml"])
    m2 = fabio.open(self.fn["XSDataImageInv.xml"])
    self.assertAlmostEqual(
        abs((numpy.matrix(m1.data) * numpy.matrix(m2.data)) - numpy.identity(m1.data.shape[0])).max(),
        0, 3, "matrices are inverses of each other")
def get_skiping_chunk(pathToFrames, nameTemplate, frameRange, chunkStart, chunkSize):
    """Reads only every other frame and stacks their data to a chunk.
    pathToFrames ... string, location of the folder which contains the frames
    nameTemplate ... string, format of the frame names
    frameRange ... int, maximum number of frames over which to run the algorithm
    chunkStart ... int, frame number from which to start
    chunkSize ... int, number of frames which should be read
    """
    # creating the file names for iteration
    fileNames = []
    for i in range(chunkSize):
        fileNames.append(pathToFrames + (nameTemplate % int(i * frameRange / chunkSize + chunkStart)))
    # generating the base data
    framePrototype = fabio.open(fileNames[0])
    stack = np.zeros((framePrototype.data.shape[0], framePrototype.data.shape[1], chunkSize))
    # start stacking
    for i in range(chunkSize):
        print("Stacking, using " + str(fileNames[i]), end="\r")
        frame = fabio.open(fileNames[i])
        stack[:, :, i] = frame.data.copy()
        del frame  # freeing memory
    print("\nStacking complete!")
    return stack
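# A minimal usage sketch for get_skiping_chunk; the directory and the name
# template "frame_%04d.cbf" below are hypothetical, not taken from the source.
# The function spreads chunkSize reads evenly over the first frameRange frames.
stack = get_skiping_chunk("/data/scan1/", "frame_%04d.cbf",
                          frameRange=1000, chunkStart=0, chunkSize=50)
print(stack.shape)           # (rows, cols, 50)
print(stack.mean(axis=2))    # pixel-wise mean over the sampled frames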
def test1(self):
    """Testing pilatus tif bug"""
    o1 = fabio.open(self.fn["pilatus2M.tif"]).data
    o2 = fabio.open(self.fn["pilatus2M.edf"]).data
    self.assertEqual(abs(o1 - o2).max(), 0.0)
def main():
    module_desc = '''A simple GUI tool to create a mask for XSVS data analysis.
    Requires an input file with information about the data set and a static file,
    created with the createStatic.py script.'''
    parser = argparse.ArgumentParser(description=module_desc)
    parser.add_argument('-i', '--input', dest='inputFileName', metavar='./input.txt',
                        type=str, help='Input file describing the data', required=True)
    parser.add_argument('-s', '--static', dest='staticFileName', metavar='./static.edf',
                        type=str, help='Static file', required=True)
    args = parser.parse_args()
    inputFile = args.inputFileName
    staticFile = args.staticFileName
    calculator = pyxsvs.pyxsvs(inputFile)
    saveDir = calculator.Parameters['saveDir']
    defaultMaskFile = calculator.Parameters['defaultMaskFile']
    auto_mask = fabio.open(defaultMaskFile).data
    staticImg = fabio.open(staticFile).data
    masker = maskMaker(staticImg, auto_mask, saveDir)
    show()
    if masker.mask_saved:
        print 'Adding mask to input file'
        try:
            calculator.config.set('Main', 'mask', value=saveDir + 'mask.edf')
        except:
            calculator.config.set('Directories', 'mask', value=saveDir + 'mask.edf')
        f = open(inputFile, 'w')
        calculator.config.write(f)
        f.close()
def test1(self):
    """Testing packbits-compressed data tif bug"""
    o1 = fabio.open(self.fn["a0009.tif"]).data
    o2 = fabio.open(self.fn["a0009.edf"]).data
    self.assertEqual(abs(o1 - o2).max(), 0.0)
def read_image_data(image_path):
    """
    Returns a numpy.array image from a file name or a URL.

    :param str image_path: Path of the image file
    :rtype: numpy.ndarray
    :raises IOError: if the data is not reachable
    :raises TypeError: if the data is not an image (wrong size, wrong dimension)
    """
    if fabio is None:
        raise RuntimeError("FabIO is missing")
    if os.path.exists(image_path):
        with fabio.open(image_path) as image:
            data = image.data
    elif image_path.startswith("silx:") or image_path.startswith("fabio:"):
        data = silx.io.get_data(image_path)
    elif "::" in image_path:
        # Could be a fabio path
        with fabio.open(image_path) as image:
            data = image.data
    else:
        raise IOError("Data from path '%s' is not supported or missing" % image_path)
    if len(data.shape) != 2:
        raise TypeError("Path '%s' identifies a %dd-array, but a 2d array is expected" % (image_path, len(data.shape)))
    if data.dtype.kind not in "fui":
        raise TypeError("Path '%s' identifies a %s-kind array, but a numerical kind is expected" % (image_path, data.dtype.kind))
    return data
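# Minimal usage sketch for read_image_data; the file names are hypothetical.
# A plain path on disk is opened with fabio directly, while a "fabio:" or
# "silx:" URL is routed through silx.io.get_data.
data = read_image_data("/tmp/detector_frame.edf")
data_from_url = read_image_data("fabio:/tmp/detector_frame.edf::[0]")
print(data.shape, data.dtype)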
def __init__(self, detector, image, mask=None, pitch=None, invert=False):
    """
    :param detector: instance of Detector or its name
    :param image: 2d array representing the image
    :param mask: 2d array (or image file) of pixels to be masked
    :param pitch: 2-tuple representing the grid spacing in (y, x) coordinates, in meter
    :param invert: set to true if the image of the grid has regular dark spots (instead of bright points)
    """
    if isinstance(detector, detectors.Detector):
        self.detector = detector
    else:
        self.detector = detectors.detector_factory(detector)
    if isinstance(image, numpy.ndarray):
        self.image = image
    else:
        self.image = fabio.open(image).data
    if mask is not None:
        if isinstance(mask, numpy.ndarray):
            self.mask = mask
        else:
            self.mask = fabio.open(mask).data.astype(bool)
        if self.detector.mask is not None:
            self.mask = numpy.logical_or(self.detector.mask, self.mask)
    else:
        self.mask = numpy.zeros_like(self.image, bool)
    if invert:
        self.image = self.image.max() - self.image
    self.pitch = (pitch[0], pitch[-1])
def get_sequential_chunk(pathToFrames, nameTemplate, chunkStart, chunkSize):
    """Reads a defined number of frames in a sequential manner into an array.
    pathToFrames ... string, location of the folder which contains the frames
    nameTemplate ... string, format of the frame names
    chunkStart ... int, frame number from which to start
    chunkSize ... int, number of frames which should be read
    returns numpy.array3d of all the frame data
    """
    # creating the file names for iteration
    fileNames = []
    for i in range(chunkSize):
        fileNames.append(pathToFrames + (nameTemplate % (i + chunkStart)))
    # generating the base data
    framePrototype = fabio.open(fileNames[0])
    stack = np.zeros((framePrototype.data.shape[0], framePrototype.data.shape[1], chunkSize))
    # start stacking
    for i in range(chunkSize):
        print("Stacking, using " + str(fileNames[i]), end="\r")
        frame = fabio.open(fileNames[i])
        stack[:, :, i] = frame.data.copy()
        del frame  # freeing memory
    print("\nStacking complete!")
    return stack
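# Minimal usage sketch for get_sequential_chunk; the path and template are
# hypothetical. It reads 100 consecutive frames starting at frame 200 and
# takes a per-pixel median as a crude background estimate.
import numpy as np
stack = get_sequential_chunk("/data/scan1/", "frame_%04d.cbf", chunkStart=200, chunkSize=100)
background = np.median(stack, axis=2)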
def set_ai(self):
    poni = str(self.poni.text()).strip()
    if poni and os.path.isfile(poni):
        self.ai = pyFAI.load(poni)
    detector = str(self.detector.currentText()).lower().strip() or "detector"
    self.ai.detector = pyFAI.detectors.detector_factory(detector)

    wavelength = str(self.wavelength.text()).strip()
    if wavelength:
        try:
            fwavelength = float(wavelength)
        except ValueError:
            logger.error("Unable to convert wavelength to float: %s" % wavelength)
        else:
            if fwavelength <= 0 or fwavelength > 1e-6:
                logger.warning("Wavelength is in meter ... unlikely value %s" % fwavelength)
            self.ai.wavelength = fwavelength

    splineFile = str(self.splineFile.text()).strip()
    if splineFile and os.path.isfile(splineFile):
        self.ai.detector.splineFile = splineFile

    self.ai.pixel1 = self._float("pixel1", 1)
    self.ai.pixel2 = self._float("pixel2", 1)
    self.ai.dist = self._float("dist", 1)
    self.ai.poni1 = self._float("poni1", 0)
    self.ai.poni2 = self._float("poni2", 0)
    self.ai.rot1 = self._float("rot1", 0)
    self.ai.rot2 = self._float("rot2", 0)
    self.ai.rot3 = self._float("rot3", 0)

    if self.chi_discontinuity_at_0.isChecked():
        self.ai.setChiDiscAtZero()

    mask_file = str(self.mask_file.text()).strip()
    if mask_file and os.path.exists(mask_file) and bool(self.do_mask.isChecked()):
        try:
            mask = fabio.open(mask_file).data
        except Exception as error:
            logger.error("Unable to load mask file %s, error %s" % (mask_file, error))
        else:
            self.ai.mask = mask

    dark_files = [i.strip() for i in str(self.dark_current.text()).split(",") if os.path.isfile(i.strip())]
    if dark_files and bool(self.do_dark.isChecked()):
        d0 = fabio.open(dark_files[0]).data
        darks = numpy.zeros((d0.shape[0], d0.shape[1], len(dark_files)), dtype=numpy.float32)
        for i, f in enumerate(dark_files):
            darks[:, :, i] = fabio.open(f).data
        self.ai.darkcurrent = darks.mean(axis=-1)

    flat_files = [i.strip() for i in str(self.flat_field.text()).split(",") if os.path.isfile(i.strip())]
    if flat_files and bool(self.do_flat.isChecked()):
        d0 = fabio.open(flat_files[0]).data
        flats = numpy.zeros((d0.shape[0], d0.shape[1], len(flat_files)), dtype=numpy.float32)
        for i, f in enumerate(flat_files):
            flats[:, :, i] = fabio.open(f).data
        self.ai.flatfield = flats.mean(axis=-1)
    print self.ai
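# A minimal sketch of the dark-current averaging performed above, outside the
# GUI; the file names are hypothetical.
import numpy
import fabio
dark_files = ["dark_0001.edf", "dark_0002.edf", "dark_0003.edf"]
frames = numpy.stack([fabio.open(f).data.astype(numpy.float32) for f in dark_files], axis=-1)
dark = frames.mean(axis=-1)  # averaged dark current, same shape as a single frame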
def SavGol_filter(pathToFrames, nameTemplate, frameRange, pathToFiltered, namePrefix, maskFrame, subsize, windowLength, polyOrder):
    """Applies a Savitzky-Golay filter along the frame axis of a dataset, tile by tile.
    pathToFrames ... string, location of the folder which contains the frames
    nameTemplate ... string, format of the frame names
    frameRange ... int, number of frames to process
    pathToFiltered ... string, location where the processed frames should be saved
    namePrefix ... string, short text that is added to each newly calculated frame
    maskFrame ... fabio.frame, frame which contains all pixels that should be masked
    subsize ... int, edge length of the square tiles the frames are split into
    windowLength ... int, window length of the Savitzky-Golay filter
    polyOrder ... int, polynomial order of the Savitzky-Golay filter
    """
    helping_tools.check_folder(pathToFiltered)
    print("Reading masks, please wait!")
    # maskUntrusted, maskDefective, maskHot = cbf_tools.generate_all_unwanted_pixel(maskFrame, 1000000)
    print("starting filtering\n")
    # generating frame paths and names for reading
    frameset = cbf_tools.Frameset(pathToFrames, nameTemplate)
    frameset.setSize = frameRange
    fileNames = frameset.generate_frame_names_from_template()
    # generating frame paths and names for writing
    frameset = cbf_tools.Frameset(pathToFiltered, namePrefix + nameTemplate)
    frameset.setSize = frameRange
    newFiles = frameset.generate_frame_names_from_template()
    templateFrame = fabio.open(fileNames[0])
    # determination of how many tiles are necessary for the subdivision of the frames
    tilesx = int(templateFrame.data.shape[0] / subsize) + 1
    tilesy = int(templateFrame.data.shape[1] / subsize) + 1
    for subx in range(tilesx):
        for suby in range(tilesy):
            print("\nWorking on sub %i of %i" % ((suby * (subx + 1) + subx), tilesx * tilesy))
            # generation of the subframe size taking the border regions into account
            if (subx + 2) > tilesx:
                width = templateFrame.data.shape[0] - subx * subsize
            else:
                width = subsize
            if (suby + 2) > tilesy:
                height = templateFrame.data.shape[1] - suby * subsize
            else:
                height = subsize
            print("Width %i, height %i" % (width, height))
            subFrame = np.zeros((width, height, frameRange))
            for i in range(frameRange):
                print("Reading frame " + fileNames[i])  # , end="\r"
                frame = fabio.open(fileNames[i])
                subFrame[:, :, i] = frame.data[subx * subsize:subx * subsize + width, suby * subsize:suby * subsize + height].copy()
                del frame  # cleaning memory
            print("\nApplying SavGol filter...")
            for x in range(subFrame.shape[0]):
                for y in range(subFrame.shape[1]):
                    print(x, y, end="\r")
                    filterLine = subFrame[x, y, :]
                    subFrame[x, y, :] = scipy.signal.savgol_filter(filterLine, windowLength, polyOrder, mode='wrap').copy()
            subFrame = subFrame.astype(np.int32)
            for i in range(frameRange):
                print("Writing frame " + newFiles[i])  # , end="\r"
                frame = fabio.open(newFiles[i])
                frame.data[subx * subsize:subx * subsize + width, suby * subsize:suby * subsize + height] = subFrame[:, :, i]
                frame.save(os.path.join(pathToFiltered, namePrefix + os.path.basename(frame.filename)))
                del frame  # cleaning memory
    print("\nDone!")
def test_mask(self):
    img = fabio.open(UtilsTest.getimage("Pilatus1M.f2d.bz2"))
    cbf = fabio.open(UtilsTest.getimage("Pilatus1M.cbf.bz2"))
    msk = fabio.open(UtilsTest.getimage("Pilatus1M.msk.bz2"))
    diff = abs((img.data).astype("int32") - cbf.data)
    self.assertEqual(diff.sum(), 0)
    diff = abs((msk.data).astype("int32") - img.header["data_mask"].astype("int32"))
    self.assertEqual(diff.sum(), 0)
def run(self):
    # MAIN_LOCK.acquire()
    edf = join(self.edf_folder, self.edf_basename)
    img = fabio.open(edf)
    data = img.data
    header = img.header
    # stdout.write("Loading %s\n" % self.edf_basename)
    # stdout.flush()
    self.Data = (self.order, data, header)
def test_mask(self):
    """test the generation of mask"""
    ai = AzimuthalIntegrator(detector="Pilatus1M")
    ai.wavelength = 1e-10
    data = fabio.open(self.edfPilatus).data
    mask = fabio.open(self.maskFile).data
    self.assertTrue(abs(ai.create_mask(data, mask=mask).astype(int) - fabio.open(self.maskRef).data).max() == 0,
                    "test without dummy")
def test_same(self):
    """test if images are actually the same"""
    o1 = fabio.open(self.fn["b191_1_9_1.img"])
    o2 = fabio.open(self.fn["b191_1_9_1_uncompressed.img"])
    for attr in ["getmin", "getmax", "getmean", "getstddev"]:
        a1 = getattr(o1, attr)()
        a2 = getattr(o2, attr)()
        self.assertEqual(a1, a2, "testing %s: %s | %s" % (attr, a1, a2))
def get_scan_data_process(queue, i, edf_folder, edf_basename):
    edf = join(edf_folder, edf_basename)
    img = fabio.open(edf)
    data = img.data
    header = img.header
    stdout.write("Loading %s\n" % edf_basename)
    stdout.flush()
    DATA_out = (i, data, header)
    queue.put(DATA_out)
def loadimage(filepath=None):
    """
    Loads images

    Parameters
    ----------
    filepath: str or list of str or tuple of str
        If str, load the image; if an iterable of str, load the images and sum them together

    Returns
    -------
    ndarray:
        The image
    str:
        Name of the chi file
    str:
        File(s) loaded
    """
    if filepath is None:
        print 'Open Image'
        filepath = tkFileDialog.askopenfilenames()
        print filepath
    if type(filepath) is tuple or type(filepath) is list:
        xray_image = None
        for path in filepath:
            if path.endswith('.tif'):
                if xray_image is None:
                    if os.path.exists(path):
                        try:
                            xray_image = fabio.open(path).data
                        except AttributeError:
                            print 'File did not load, please double check file. Continuing without ' + path
                    else:
                        print '%s not found' % path
                else:
                    try:
                        xray_image += fabio.open(path).data
                    except AttributeError:
                        print 'File did not load, please double check file. Continuing without ' + path
        if len(filepath) > 1:
            file_path_name = os.path.splitext(filepath[-1])[0] + '_Sum'
        else:
            file_path_name = os.path.splitext(filepath[-1])[0]
    else:
        print filepath
        xray_image = fabio.open(filepath).data
        file_path_name = os.path.splitext(filepath)[0]
    return xray_image, file_path_name, filepath
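# Minimal usage sketch for loadimage; the tif names are hypothetical.
# A list of paths is summed frame by frame, a single path is loaded as-is.
img, name, paths = loadimage(['/data/run1_0001.tif', '/data/run1_0002.tif'])
print name  # ends with '_Sum' because two frames were summed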
def parse(self):
    logger.debug("in parse")
    usage = "usage: %prog [options] -p param.poni image.edf"
    description = """Check_calib is a research tool aiming at validating both the geometric
calibration and everything else like flat-field correction, distortion
correction. Maybe the future lies over there ...
"""
    parser = OptionParser(usage=usage,
                          version="%prog from pyFAI version " + PyFAI_VERSION,
                          description=description)
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
                      help="switch to debug mode")
    parser.add_option("-d", "--dark", dest="dark", metavar="FILE",
                      help="file containing the dark images to subtract", default=None)
    parser.add_option("-f", "--flat", dest="flat", metavar="FILE",
                      help="file containing the flat images to divide", default=None)
    parser.add_option("-m", "--mask", dest="mask", metavar="FILE",
                      help="file containing the mask", default=None)
    parser.add_option("-p", "--poni", dest="poni", metavar="FILE",
                      help="file containing the diffraction parameter (poni-file)", default=None)
    parser.add_option("-e", "--energy", dest="energy", type="float",
                      help="energy of the X-Ray beam in keV (hc=%skeV.A)" % hc, default=None)
    parser.add_option("-w", "--wavelength", dest="wavelength", type="float",
                      help="wavelength of the X-Ray beam in Angstrom", default=None)
    (options, args) = parser.parse_args()
    if options.verbose:
        logger.setLevel(logging.DEBUG)
    if options.mask is not None:
        self.mask = (fabio.open(options.mask).data != 0)
    args = expand_args(args)
    if len(args) > 0:
        f = args[0]
        if os.path.isfile(f):
            self.img = fabio.open(f).data.astype(numpy.float32)
        else:
            print("Please enter diffraction images as arguments")
            sys.exit(1)
        for f in args[1:]:
            self.img += fabio.open(f).data
    if options.dark and os.path.exists(options.dark):
        self.img -= fabio.open(options.dark).data
    if options.flat and os.path.exists(options.flat):
        self.img /= fabio.open(options.flat).data
    if options.poni:
        self.ai = AzimuthalIntegrator.sload(options.poni)
    self.data = [f for f in args if os.path.isfile(f)]
    if options.poni is None:
        logger.error("PONI parameter is mandatory")
        sys.exit(1)
    self.ai = AzimuthalIntegrator.sload(options.poni)
    if options.wavelength:
        self.ai.wavelength = 1e-10 * options.wavelength
    elif options.energy:
        self.ai.wavelength = 1e-10 * hc / options.energy
def test_remove_metadata_header(self):
    filename = UtilsTest.getimage("face.edf.bz2")[0:-4]
    output_filename = os.path.join(UtilsTest.tempdir, "test_remove_metadata_header.edf")
    image = fabio.open(filename)
    del image.header["Dim_1"]
    image.write(output_filename)
    image2 = fabio.open(output_filename)
    self.assertEqual(image.shape, image2.shape)
def setUp(self):
    """Download files"""
    self.fit2dFile = UtilsTest.getimage(self.__class__.fit2d_cor)
    self.halfFrelon = UtilsTest.getimage(self.__class__.halfFrelon)
    self.splineFile = UtilsTest.getimage(self.__class__.splineFile)
    self.det = detectors.FReLoN(self.splineFile)
    self.dis = _distortion.Distortion(self.det)
    self.fit2d = fabio.open(self.fit2dFile).data
    self.raw = fabio.open(self.halfFrelon).data
def test_match(self):
    """test edf and msk are the same"""
    i = fabio.open(self.filename)
    j = fabio.open(self.tiffilename)
    i.read(self.filename)
    self.assertEqual(i.data.shape, j.data.shape)
    diff = j.data - numpy.flipud(i.data)
    sumd = abs(diff).sum(dtype=float)
    self.assertEqual(sumd, 0)
def testExecute(self):
    """
    """
    self.run()
    xsdout = self.getPlugin().getDataOutput().marshal()
    print self.getReferenceDataOutputFile()
    xsdRef = XSDataResultStitchOffsetedImage.parseString(self.readAndParseFile(self.getReferenceDataOutputFile())).marshal()
    EDAssert.strAlmostEqual(xsdRef, xsdout, "Xsd are the same")
    # fabio.edfimage.edfimage(data=fabio.open(self.outFile).data - fabio.open(self.refFile).data).write("/tmp/delta.edf")
    EDAssert.arraySimilar(fabio.open(self.outFile).data, fabio.open(self.refFile).data,
                          "Arrays are the same", _fAbsMaxDelta=1)
def parse(self):
    logger.debug("in parse")
    parser = OptionParser()
    parser.add_option("-V", "--version", dest="version", action="store_true",
                      help="print version of the program and quit", metavar="FILE", default=False)
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
                      help="switch to debug mode")
    parser.add_option("-d", "--dark", dest="dark", metavar="FILE",
                      help="file containing the dark images to subtract", default=None)
    parser.add_option("-f", "--flat", dest="flat", metavar="FILE",
                      help="file containing the flat images to divide", default=None)
    parser.add_option("-m", "--mask", dest="mask", metavar="FILE",
                      help="file containing the mask", default=None)
    parser.add_option("-p", "--poni", dest="poni", metavar="FILE",
                      help="file containing the diffraction parameter (poni-file)", default=None)
    parser.add_option("-e", "--energy", dest="energy", type="float",
                      help="energy of the X-Ray beam in keV (hc=%skeV.A)" % hc, default=None)
    parser.add_option("-w", "--wavelength", dest="wavelength", type="float",
                      help="wavelength of the X-Ray beam in Angstrom", default=None)
    (options, args) = parser.parse_args()
    if options.verbose:
        logger.setLevel(logging.DEBUG)
    if options.version:
        print("Check calibration: version %s" % version)
        sys.exit(0)
    if options.mask is not None:
        self.mask = (fabio.open(options.mask).data != 0)
    args = expand_args(args)
    if len(args) > 0:
        f = args[0]
        if os.path.isfile(f):
            self.img = fabio.open(f).data.astype(numpy.float32)
        else:
            print("Please enter diffraction images as arguments")
            sys.exit(1)
        for f in args[1:]:
            self.img += fabio.open(f).data
    if options.dark and os.path.exists(options.dark):
        self.img -= fabio.open(options.dark).data
    if options.flat and os.path.exists(options.flat):
        self.img /= fabio.open(options.flat).data
    if options.poni:
        self.ai = AzimuthalIntegrator.sload(options.poni)
    self.data = [f for f in args if os.path.isfile(f)]
    if options.poni is None:
        logger.error("PONI parameter is mandatory")
        sys.exit(1)
    self.ai = AzimuthalIntegrator.sload(options.poni)
    if options.wavelength:
        self.ai.wavelength = 1e-10 * options.wavelength
    elif options.energy:
        self.ai.wavelength = 1e-10 * hc / options.energy
def integrate1d(file_list):
    """Azimuthally integrates each image in file_list and stores the curves in a single EDF file."""
    base_dir = raw_input("Enter Base Directory: ")
    poni = base_dir + "/" + raw_input("Enter PONI-File (relative to Base Directory): ")
    mask = base_dir + "/" + raw_input("Enter MASK-File (relative to Base Directory): ")
    savename = base_dir + "/" + raw_input("Enter file name to save the data (relative to Base Directory): ") + ".edf"
    qmin = 62.83185195922852 * float(raw_input("Enter the minimum s-value (A): "))
    qmax = 62.83185195922852 * float(raw_input("Enter the maximum s-value (A): "))
    azmin = float(raw_input("Enter minimum azimuthal angle (deg): "))
    azmax = float(raw_input("Enter maximum azimuthal angle (deg): "))
    npt = int(raw_input("Enter number of points along s axis: "))
    ai = pyFAI.azimuthalIntegrator.AzimuthalIntegrator()
    ai.load(poni)
    ai.set_maskfile(mask)
    I_arr = np.zeros((len(file_list) + 1, npt))
    for i in range(len(file_list)):
        data = fabio.open(file_list[i]).data
        q, I = ai.integrate1d(data, npt=npt,
                              # filename=filename
                              correctSolidAngle=False,
                              variance=None, error_model=None,
                              radial_range=(qmin, qmax),
                              azimuth_range=(azmin, azmax),
                              # mask=None, dark=None, flat=None,
                              dummy=-5,
                              method="bbox", unit="q_nm^-1", safe=True,
                              normalization_factor=None)
        I[I == -5] = np.nan
        I_arr[i + 1, ] = I
    T_arr = I_arr.T
    q = q / 62.83185195922852
    I_arr[0, ] = q
    img = fabio.open(file_list[0])
    img.data = I_arr
    print "Save integrated 2D Data to:", savename
    img.write(savename)
def test_read(self):
    """check whole reader"""
    times = []
    times.append(time.time())
    cbf = fabio.open(self.cbf_filename)
    times.append(time.time())
    edf = fabio.open(self.edf_filename)
    times.append(time.time())
    self.assertAlmostEqual(0, abs(cbf.data - edf.data).max())
    logger.info("Reading CBF took %.3fs whereas the same EDF took %.3fs" % (times[1] - times[0], times[2] - times[1]))
def __getitem__(self, key):
    if self.singleframes:
        imgf = self._files[key]
        img = fabio.open(imgf)
    else:
        (fnum, frame) = self._file_and_frame(key)
        imgf = self._files[fnum]
        img0 = fabio.open(imgf)
        img = img0.getframe(frame)
    return img.data
def testheader(self):
    file2 = self.file1.replace("mb_LP_1_001.img", "mb_LP_1_002.img")
    self.assertTrue(os.path.exists(self.file1))
    if not os.path.exists(file2):
        shutil.copy(self.file1, file2)
    image1 = fabio.open(self.file1)
    image2 = fabio.open(file2)
    self.assertEqual(self.abs_norm(image1.filename), self.abs_norm(self.file1))
    self.assertEqual(self.abs_norm(image2.filename), self.abs_norm(file2))
    self.assertNotEqual(image1.filename, image2.filename)
def setUp(self):
    """Download files"""
    self.fit2dFile = UtilsTest.getimage(self.__class__.fit2d_cor)
    self.halfFrelon = UtilsTest.getimage(self.__class__.halfFrelon)
    self.splineFile = UtilsTest.getimage(self.__class__.splineFile)
    self.det = detectors.FReLoN(self.splineFile)
    self.fit2d = fabio.open(self.fit2dFile).data
    self.ref = _distortion.Distortion(self.det)
    self.raw = fabio.open(self.halfFrelon).data
    self.dis = distortion.Distortion(self.det)
    self.larger = numpy.zeros(self.det.shape)
    self.larger[:-1, :] = self.raw
def test_edf_to_tiff(self):
    tmpdir = os.path.join(UtilsTest.tempdir, self.id())
    os.mkdir(tmpdir)
    filename = UtilsTest.getimage("face.edf")
    output_filename = os.path.join(tmpdir, "face.tif")
    image = fabio.open(filename)
    image2 = image.convert("tiff")
    image2.save(output_filename)
    self.assertEqual(image.shape, image2.shape)
    image3 = fabio.open(output_filename)
    self.assertEqual(image.shape, image3.shape)
def lmw(im, threshold, out=None, tmp=None, nthreads=4):
    im = np.ascontiguousarray(im, dtype=np.float32)
    if out is None:
        out = np.zeros(im.shape, np.int32)
    if tmp is None:
        tmp = np.zeros(im.shape, np.int8)
    n = _lmw(im.ctypes.data, out.ctypes.data, tmp.ctypes.data, threshold, nthreads, im.shape[0], im.shape[1])
    return out, n


if __name__ == "__main__":
    import fabio, pylab as pl, time, os
    import scipy.ndimage
    im = fabio.open("/data/id11/jon/1607/spottysucr/spottysucr0000.edf").data.astype(np.float32).copy()
    im = scipy.ndimage.gaussian_filter(im, 2).astype(np.float32)
    out = np.zeros(im.shape, np.int32)
    tmp = np.zeros(im.shape, np.uint8)
    print(im.shape)
    start = time.time()
    l, n = lmw(im, 0, out=out, tmp=tmp)
    print(time.time() - start, n)
    import multiprocessing
    for i in range(1, multiprocessing.cpu_count() + 1):
        l, n = lmw(im, 0, nthreads=i, out=out, tmp=tmp)
        start = time.time()
        l, n = lmw(im, 0, nthreads=i, out=out, tmp=tmp)
        end = time.time()
        print("%3d %7.3f ms %7.3f fps %7.3f fps/core" %
              (i, 1000.0 * (end - start), 1.0 / (end - start), 1.0 / ((end - start) * i)))
                arrowprops=dict(facecolor='red', shrink=0.05),
                )


if __name__ == "__main__":
    kx = []
    ky = []
    k2x = []
    k2y = []
    dx = []
    dy = []
    import fabio, pylab
    # img = fabio.open("../test/testimages/LaB6_0003.mar3450").data
    # img = fabio.open("../test/testimages/grid2k0000.edf").data
    img = fabio.open("../test/testimages/halfccd.edf").data
    img = numpy.log1p(img)
    # img = img[img.shape[0]/2-256:img.shape[0]/2+256, img.shape[1]/2-256:img.shape[1]/2+256]
    # img = image_test()
    bd = BlobDetection(img)
    kx, ky, dx, dy, sigma = bd._one_octave()
    print(bd.sigmas)

    # # building histogram with the corrected sigmas
    # sigma = numpy.asarray(sigma)
    # pylab.figure(2)
    # pylab.clf()
    # pylab.hist(sigma, bins=500)
    # pylab.show()
def proceed(self):
    with self._sem:
        out = None
        config = self.dump()
        logger.debug("Let's work a bit")
        ai = worker.make_ai(config)

        # Default keyword arguments
        kwarg = {
            "unit": self.__get_unit(),
            "dummy": self.__get_dummy(),
            "delta_dummy": self.__get_delta_dummy(),
            "polarization_factor": self.__get_polarization_factor(),
            "filename": None,
            "safe": False,
            "correctSolidAngle": self.__get_correct_solid_angle(),
            "error_model": self.__get_error_model(),
            "method": self.get_method(),
            "npt_rad": self.__get_nbpt_rad(),
        }
        if kwarg["npt_rad"] is None:
            message = "You must provide the number of output radial bins !"
            QtGui.QMessageBox.warning(self, "PyFAI integrate", message)
            return {}
        if self.do_2D.isChecked():
            kwarg["npt_azim"] = self.__get_nbpt_azim()
        if self.do_radial_range.isChecked():
            kwarg["radial_range"] = self.__get_radial_range()
        if self.do_azimuthal_range.isChecked():
            kwarg["azimuth_range"] = self.__get_azimuth_range()

        logger.info("Parameters for integration:%s%s" %
                    (os.linesep, os.linesep.join(["\t%s:\t%s" % (k, v) for k, v in kwarg.items()])))
        logger.debug("processing %s" % self.input_data)
        start_time = time.time()
        if self.input_data in [None, []]:
            logger.warning("No input data to process")
            return
        elif "ndim" in dir(self.input_data) and self.input_data.ndim == 3:
            # We have a numpy array of dim 3
            w = worker.Worker(azimuthalIntgrator=ai)
            try:
                w.nbpt_rad = self.__get_nbpt_rad()
                w.unit = self.__get_unit()
                w.dummy = self.__get_dummy()
                w.delta_dummy = self.__get_delta_dummy()
                w.polarization_factor = self.__get_polarization_factor()
                # NOTE: previous implementation was using safe=False, the worker uses safe=True
                w.correct_solid_angle = self.__get_correct_solid_angle()
                w.error_model = self.__get_error_model()
                w.method = self.get_method()
                w.is_safe = False
                if self.do_2D.isChecked():
                    w.nbpt_azim = self.__get_nbpt_azim()
                else:
                    w.nbpt_azim = 1
                w.radial_range = self.__get_radial_range()
                w.azimuth_range = self.__get_azimuth_range()
            except RuntimeError as e:
                QtGui.QMessageBox.warning(self, "PyFAI integrate", e.message + ". Action aborted.")
Action aboreded.") return {} if self.do_2D.isChecked(): out = numpy.zeros( (self.input_data.shape[0], w.nbpt_azim, w.nbpt_rad), dtype=numpy.float32) for i in range(self.input_data.shape[0]): self.progressBar.setValue(100.0 * i / self.input_data.shape[0]) data = self.input_data[i] out[i] = w.process(data) else: out = numpy.zeros((self.input_data.shape[0], w.nbpt_rad), dtype=numpy.float32) for i in range(self.input_data.shape[0]): self.progressBar.setValue(100.0 * i / self.input_data.shape[0]) data = self.input_data[i] result = w.process(data) result = result.T[1] out[i] = result elif "__len__" in dir(self.input_data): out = [] if self.hdf5_path: import h5py hdf5 = h5py.File(self.output_path) if self.fast_dim: if "npt_azim" in kwarg: _ds = hdf5.create_dataset( "diffraction", (1, self.fast_dim, kwarg["npt_azim"], kwarg["npt_rad"]), dtype=numpy.float32, chunks=(1, self.fast_dim, kwarg["npt_azim"], kwarg["npt_rad"]), maxshape=(None, self.fast_dim, kwarg["npt_azim"], kwarg["npt_rad"])) else: _ds = hdf5.create_dataset( "diffraction", (1, self.fast_dim, kwarg["npt_rad"]), dtype=numpy.float32, chunks=(1, self.fast_dim, kwarg["npt_rad"]), maxshape=(None, self.fast_dim, kwarg["npt_rad"])) else: if "npt_azim" in kwarg: _ds = hdf5.create_dataset( "diffraction", (1, kwarg["npt_azim"], kwarg["npt_rad"]), dtype=numpy.float32, chunks=(1, kwarg["npt_azim"], kwarg["npt_rad"]), maxshape=(None, kwarg["npt_azim"], kwarg["npt_rad"])) else: _ds = hdf5.create_dataset( "diffraction", (1, kwarg["npt_rad"]), dtype=numpy.float32, chunks=(1, kwarg["npt_rad"]), maxshape=(None, kwarg["npt_rad"])) for i, item in enumerate(self.input_data): self.progressBar.setValue(100.0 * i / len(self.input_data)) logger.debug("processing %s" % item) if isinstance( item, (six.text_type, six.binary_type)) and op.exists(item): fab_img = fabio.open(item) multiframe = (fab_img.nframes > 1) kwarg["data"] = fab_img.data if self.hdf5_path is None: if self.output_path and op.isdir(self.output_path): outpath = op.join( self.output_path, op.splitext(op.basename(item))[0]) else: outpath = op.splitext(item)[0] if "npt_azim" in kwarg and not multiframe: kwarg["filename"] = outpath + ".azim" else: kwarg["filename"] = outpath + ".dat" else: logger.warning( "item is not a file ... guessing it is a numpy array" ) kwarg["data"] = item kwarg["filename"] = None multiframe = False if multiframe: if kwarg["filename"]: outpath = op.splitext(kwarg["filename"])[0] kwarg["filename"] = None writer = HDF5Writer(outpath + "_pyFAI.h5") writer.init(config) for i in range(fab_img.nframes): kwarg["data"] = fab_img.getframe(i).data if "npt_azim" in kwarg: res = ai.integrate2d(**kwarg) else: if "npt_rad" in kwarg: # convert npt_rad -> npt kwarg["npt"] = kwarg.pop("npt_rad") res = ai.integrate1d(**kwarg) writer.write(res, index=i) writer.close() else: if kwarg.get("npt_azim"): res = ai.integrate2d(**kwarg) else: if "npt_rad" in kwarg: # convert npt_rad -> npt kwarg["npt"] = kwarg.pop("npt_rad") res = ai.integrate1d(**kwarg) out.append(res) #TODO manage HDF5 stuff !!! logger.info("Processing Done in %.3fs !" % (time.time() - start_time)) self.progressBar.setValue(100) self.die() return out
def setUp(self):
    im = fabio.open("testoverlaps0000.edf").data.astype(np.float32)
    self.a = np.where(im > 2000.1, im, 0)
    self.s = scipy.sparse.coo_matrix(self.a)
    self.threshold = 2000.1
def setup(model):
    usage = "pyFAI-calib2 [options] input_image.edf"
    parser = ArgumentParser(usage=usage, description=description, epilog=epilog)
    version = "calibration from pyFAI version %s: %s" % (pyFAI.version, pyFAI.date)
    parser.add_argument("-V", "--version", action='version', version=version)
    configure_parser_arguments(parser)

    # Analyse arguments and options
    options = parser.parse_args()
    args = options.args

    if options.debug:
        logging.root.setLevel(logging.DEBUG)

    # Settings
    settings = model.experimentSettingsModel()

    if options.spacing:
        calibrant = None
        if options.spacing in pyFAI.calibrant.CALIBRANT_FACTORY:
            calibrant = pyFAI.calibrant.CALIBRANT_FACTORY(options.spacing)
        elif os.path.isfile(options.spacing):
            calibrant = pyFAI.calibrant.Calibrant(options.spacing)
        else:
            logger.error("No such Calibrant / d-Spacing file: %s", options.spacing)
        if calibrant:
            settings.calibrantModel().setCalibrant(calibrant)

    if options.wavelength:
        value = units.convert(options.wavelength, units.Unit.ANGSTROM, units.Unit.METER_WL)
        settings.wavelength().setValue(value)
    if options.energy:
        value = units.convert(options.energy, units.Unit.ENERGY, units.Unit.METER_WL)
        settings.wavelength().setValue(value)

    if options.polarization_factor:
        settings.polarizationFactor(options.polarization_factor)

    if options.detector_name:
        detector = pyFAI.gui.cli_calibration.get_detector(options.detector_name, args)
        if options.pixel:
            logger.warning("Detector model already specified. Pixel size argument ignored.")
    elif options.pixel:
        pixel_size = parse_pixel_size(options.pixel)
        detector = pyFAI.detectors.Detector(pixel1=pixel_size[0], pixel2=pixel_size[1])
    else:
        detector = None

    if options.spline:
        if detector is None:
            detector = pyFAI.detectors.Detector(splineFile=options.spline)
        elif detector.__class__ is pyFAI.detectors.Detector or detector.HAVE_TAPER:
            detector.set_splineFile(options.spline)
        else:
            logger.warning("Spline file not supported with this kind of detector. Argument ignored.")

    settings.detectorModel().setDetector(detector)

    if options.mask:
        settings.maskFile().setValue(options.mask)
        with fabio.open(options.mask) as mask:
            settings.mask().setValue(mask.data)

    if len(args) == 0:
        pass
    elif len(args) == 1:
        image_file = args[0]
        settings.imageFile().setValue(image_file)
        with fabio.open(image_file) as image:
            settings.image().setValue(image.data)
    else:
        logger.error("Too many images provided. Only one is expected")
Only one is expected") # Geometry # FIXME it will not be used cause the fitted geometry will be overwrited geometry = model.fittedGeometry() if options.distance: geometry.distance().setValue(1e-3 * options.distance) if options.dist: geometry.distance().setValue(options.dist) if options.dist: geometry.poni1().setValue(options.poni1) if options.dist: geometry.poni2().setValue(options.poni2) if options.dist: geometry.rotation1().setValue(options.rot1) if options.dist: geometry.rotation2().setValue(options.rot2) if options.dist: geometry.rotation3().setValue(options.rot3) # Constraints constraints = model.geometryConstraintsModel() if options.fix_wavelength is not None: constraints.wavelength().setFixed(options.fix_wavelength) if options.fix_dist is not None: constraints.distance().setFixed(options.fix_dist) if options.fix_poni1 is not None: constraints.poni1().setFixed(options.fix_poni1) if options.fix_poni2 is not None: constraints.poni2().setFixed(options.fix_poni2) if options.fix_rot1 is not None: constraints.rotation1().setFixed(options.fix_rot1) if options.fix_rot2 is not None: constraints.rotation2().setFixed(options.fix_rot2) if options.fix_rot3 is not None: constraints.rotation3().setFixed(options.fix_rot3) integrationSettingsModel = model.integrationSettingsModel() npt = None if options.npt_1d is not None: npt = options.npt_1d if options.npt_2d_rad is not None: if npt is not None: logger.error( "Both --npt and --npt-rad defined. The biggest is used.") npt = max(npt, options.npt_2d_rad) if npt is not None: integrationSettingsModel.nPointsRadial().setValue(npt) else: integrationSettingsModel.nPointsRadial().setValue(1024) if options.npt_2d_azim is not None: integrationSettingsModel.nPointsAzimuthal().setValue( options.npt_2d_azim) else: integrationSettingsModel.nPointsAzimuthal().setValue(360) # Integration if options.unit: unit = pyFAI.units.to_unit(options.unit) integrationSettingsModel.radialUnit().setValue(unit) if options.outfile: logger.error("outfile option not supported") if options.debug: logger.error("debug option not supported") if options.reconstruct: logger.error("reconstruct option not supported") if options.gaussian: logger.error("gaussian option not supported") if options.square: logger.error("square option not supported") if options.pixel: logger.error("pixel option not supported") # FIXME poni file should be supported if options.poni: logger.error("poni option not supported") if options.background: logger.error("background option not supported") if options.dark: logger.error("dark option not supported") if options.flat: logger.error("flat option not supported") if options.filter: logger.error("filter option not supported") if options.tilt: logger.error("tilt option not supported") if options.saturation: logger.error("saturation option not supported") if options.weighted: logger.error("weighted option not supported") if options.gui is not True: logger.error("gui option not supported") if options.interactive is not True: logger.error("interactive option not supported")
def get_data(url):
    """Returns numpy data from an URL.

    Examples:

    >>> # 1st frame from an EDF using silx.io.open
    >>> data = silx.io.get_data("silx:/users/foo/image.edf::/scan_0/instrument/detector_0/data[0]")

    >>> # 1st frame from an EDF using fabio
    >>> data = silx.io.get_data("fabio:/users/foo/image.edf::[0]")

    Two schemes are supported by the function.

    - If the `silx` scheme is used, the file is opened using
      :meth:`silx.io.open` and the data is reached using the usual NeXus paths.
    - If the `fabio` scheme is used, the file is opened using :meth:`fabio.open`
      from the FabIO library. No data path has to be specified, but each frame
      can be accessed using data slicing. This shortcut of :meth:`silx.io.open`
      allows faster access to the data.

    .. seealso:: :class:`silx.io.url.DataUrl`

    :param Union[str,silx.io.url.DataUrl]: A data URL
    :rtype: Union[numpy.ndarray, numpy.generic]
    :raises ImportError: If the mandatory library to read the file is not available.
    :raises ValueError: If the URL is not valid or does not match the data
    :raises IOError: If the file is not found or in case of an internal error of
        :meth:`fabio.open` or :meth:`silx.io.open`. In this last case more
        information is displayed in debug mode.
    """
    if not isinstance(url, silx.io.url.DataUrl):
        url = silx.io.url.DataUrl(url)

    if not url.is_valid():
        raise ValueError("URL '%s' is not valid" % url.path())

    if not os.path.exists(url.file_path()):
        raise IOError("File '%s' not found" % url.file_path())

    if url.scheme() == "silx":
        data_path = url.data_path()
        data_slice = url.data_slice()
        with open(url.file_path()) as h5:
            if data_path not in h5:
                raise ValueError("Data path from URL '%s' not found" % url.path())
            data = h5[data_path]
            if not silx.io.is_dataset(data):
                raise ValueError("Data path from URL '%s' is not a dataset" % url.path())
            if data_slice is not None:
                data = data[data_slice]
            else:
                # works for scalar and array
                data = data[()]
    elif url.scheme() == "fabio":
        import fabio
        data_slice = url.data_slice()
        if data_slice is None:
            data_slice = (0, )
        if data_slice is None or len(data_slice) != 1:
            raise ValueError("Fabio slice expects a single frame, but %s found" % data_slice)
        index = data_slice[0]
        if not isinstance(index, int):
            raise ValueError("Fabio slice expects a single integer, but %s found" % data_slice)
        try:
            fabio_file = fabio.open(url.file_path())
        except Exception:
            logger.debug("Error while opening %s with fabio", url.file_path(), exc_info=True)
            raise IOError("Error while opening %s with fabio (use debug for more information)" % url.path())
        if fabio_file.nframes == 1:
            if index != 0:
                raise ValueError("Only a single frame available. Slice %s out of range" % index)
            data = fabio_file.data
        else:
            data = fabio_file.getframe(index).data
        # There is no explicit close
        fabio_file = None
    else:
        raise ValueError("Scheme '%s' not supported" % url.scheme())

    return data
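# Minimal usage sketch for get_data; the file paths and the HDF5 data path
# below are hypothetical examples. Both schemes return a numpy array.
frame0 = get_data("fabio:/tmp/scan_0001.edf::[0]")
image = get_data("silx:/tmp/scan_0001.h5::/entry/data/image[0]")
print(frame0.shape, image.shape)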
# loop over the range of edfs from first to last frame with step n
for i in range(f, l, n):
    fn = "%s%04d.edf" % (stem, i + m)  # set the filename
    # loop until a valid filename is found
    while not os.path.isfile(fn) and m < 10:
        # print the filename that does not exist
        print("%s does not exist" % fn)
        # increase correction by one because the frame does not exist
        m = m + 1
        # set the new filename now that m = m + 1
        fn = "%s%04d.edf" % (stem, i + m)
    if m > 9:
        print("Stopping, as too many files do not exist")
        break  # break when too many filenames are missing
    im = fabio.open(fn)  # read the frame data
    s = im.data.astype(np.float32)  # assign floating point data to s, used to determine the average
    lo = s.copy()  # copy for determining minimum
    hi = s.copy()  # copy for determining maximum
    for j in range(n):  # loop over a cycle of frames
        fn = "%s%04d.edf" % (stem, i + j + m)  # set frame filename
        while not os.path.isfile(fn) and m < 10:  # check whether filename exists
            # file does not exist, increase m and try again
            m = m + 1
            # print filename that does not exist
            print("%s does not exist" % fn)
            # set new filename
            fn = "%s%04d.edf" % (stem, i + j + m)
        if m > 9:
            print("Stopping, as too many files do not exist")
                             det_ini_angle=det_ini_angle, det_angle_step=det_angle_step)
    SMI.open_data(path, ['SR_D42_wa13.0_sdd3m_16.1keV_up_000001_SAXS.tif'])

    plt.figure()
    plt.imshow(SMI.imgs[0] * np.logical_not(SMI.masks[0]), vmin=0, vmax=20)
    plt.show()

elif Geometry == 'nexus':
    import numpy as np
    import fabio
    from smi_analysis import export

    # path = 'C:\\Users\\gfreychet\\Desktop\\smi_data\\NEXUS_saving\\'
    img = fabio.open(os.path.join(path, 'q_map_ZG_Z5I_ai0.200deg_.tiff')).data
    qpar = np.loadtxt(os.path.join(path, 'qpar_ZG_Z5S_ai0.200deg_.txt'))
    qver = np.loadtxt(os.path.join(path, 'qver_ZG_Z5S_ai0.200deg_.txt'))
    export.store_saxs_2d(path=path, filename='test_package.hdf5', img=img, qpar=qpar, qver=qver)

elif Geometry == 'giwaxs_vert':
    geometry, sdd, wav, alphai, center, bs_pos, detector = \
        'Reflection', 273.9, 1E-10 * (12.39842 / 2.445), np.deg2rad(7.7), [86., 1300], [[35, 1250]], 'Pilatus300kw'
def test_single_frame(self):
    image = fabio.open(self.single_frame)
    self.assertEqual(image.nframes, 1)
    self.assertEqual(image.data.shape, (512, 476))
    self.assertEqual(image.data[0, 0], 1)
    self.assertEqual(image.data[1, 1], 0)