コード例 #1
0
    def setup_fit(self, y):
        '''
        takes a data shape and returns a fit-primed object
        '''
        # No output directory and no ROI fit are wanted here; the ROI width
        # is taken from the data so the batch treats the array like an image.
        batch = McaAdvancedFitBatch.McaAdvancedFitBatch(
            self.get_conf_path(),
            [y],
            None,         # outputdir
            0,            # roifit
            y.shape[1],   # roiwidth: need this to pretend it is an image
            fitfiles=0)
        # Run processList() once with pleaseBreak set so the object is
        # initialized ("primed") without completing a full fit.
        batch.pleaseBreak = 1
        batch.processList()
        batch.pleaseBreak = 0
        return batch
コード例 #2
0
    def setup_fit(self, y):
        '''
        takes a data shape and returns a fit-primed object
        '''
        # PEP 8 normalization only; the call and its arguments are unchanged.
        outputdir = None  # nothing is written (see nosave=True below)
        roifit = 0  # no ROI fit
        roiwidth = y.shape[1]  # need this to pretend it is an image
        b = McaAdvancedFitBatch.McaAdvancedFitBatch(self.get_conf_path(),
                                                    [y],
                                                    outputdir,
                                                    roifit,
                                                    roiwidth,
                                                    fitfiles=0,
                                                    nosave=True,
                                                    quiet=True)
        # Run processList() once with pleaseBreak set so the object is
        # primed without completing a full fit; quiet=True is a temporary
        # measure to stop processList() printing arrays to screen.
        b.pleaseBreak = 1
        b.processList()
        b.pleaseBreak = 0
        return b
コード例 #3
0
    def testFitHdf5Stack(self):
        """End-to-end stack test.

        Builds a synthetic HDF5 XRF stack from a single reference spectrum
        (with per-point live times), checks it can be read back through
        several HDF5 selection paths (top-level dataset, external link,
        soft link, NeXus-style info group), batch-fits it, and verifies
        that concentrations are corrected with the per-point live time
        by both the batch fit and the fast linear fit.
        """
        import tempfile
        from PyMca5.PyMcaIO import specfilewrapper as specfile
        from PyMca5.PyMcaIO import ConfigDict
        from PyMca5.PyMcaIO import HDF5Stack1D
        from PyMca5.PyMcaPhysics.xrf import McaAdvancedFitBatch
        spe = os.path.join(self.dataDir, "Steel.spe")
        cfg = os.path.join(self.dataDir, "Steel.cfg")
        sf = specfile.Specfile(spe)
        self.assertTrue(len(sf) == 1, "File %s cannot be read" % spe)
        self.assertTrue(sf[0].nbmca() == 1, "Spe file should contain MCA data")
        y = counts = sf[0].mca(1)
        # numpy.float was deprecated in NumPy 1.20 and removed in 1.24;
        # numpy.float64 is the equivalent concrete dtype.
        x = channels = numpy.arange(y.size).astype(numpy.float64)
        sf = None
        configuration = ConfigDict.ConfigDict()
        configuration.read(cfg)
        calibration = configuration["detector"]["zero"], \
                      configuration["detector"]["gain"], 0.0
        initialTime = configuration["concentrations"]["time"]

        # create the data: every pixel holds the same spectrum, but the
        # live time cycles through nTimes distinct values
        nRows = 5
        nColumns = 10
        nTimes = 3
        data = numpy.zeros((nRows, nColumns, counts.size), dtype=numpy.float64)
        live_time = numpy.zeros((nRows * nColumns), dtype=numpy.float64)

        mcaIndex = 0
        for i in range(nRows):
            for j in range(nColumns):
                data[i, j] = counts
                live_time[i * nColumns + j] = initialTime * \
                                              (1 + mcaIndex % nTimes)
                mcaIndex += 1
        self._h5File = os.path.join(tempfile.gettempdir(), "Steel.h5")

        # write the stack to an HDF5 file
        if os.path.exists(self._h5File):
            os.remove(self._h5File)
        h5 = h5py.File(self._h5File, "w")
        h5["/entry/instrument/detector/calibration"] = calibration
        h5["/entry/instrument/detector/channels"] = channels
        h5["/entry/instrument/detector/data"] = data
        h5["/entry/instrument/detector/live_time"] = live_time

        # add nexus conventions
        h5["/entry"].attrs["NX_class"] = u"NXentry"
        h5["/entry/instrument"].attrs["NX_class"] = u"NXinstrument"
        h5["/entry/instrument/detector/"].attrs["NX_class"] = u"NXdetector"
        h5["/entry/instrument/detector/data"].attrs["interpretation"] = \
                                                              u"spectrum"

        # case with softlink
        h5["/entry/measurement/mca_soft/data"] = \
                    h5py.SoftLink("/entry/instrument/detector/data")
        # case with info
        h5["/entry/measurement/mca_with_info/data"] = \
                    h5["/entry/instrument/detector/data"]
        h5["/entry/measurement/mca_with_info/info"] = \
                    h5["/entry/instrument/detector"]
        h5.flush()
        h5.close()
        h5 = None

        # check that the data can be read as a stack as
        # single top level dataset (issue #226)
        external = self._h5File + "external.h5"
        if os.path.exists(external):
            os.remove(external)
        h5 = h5py.File(external, "w")
        h5["/data_at_top"] = h5py.ExternalLink(
            self._h5File, "/entry/measurement/mca_soft/data")
        h5.flush()
        h5.close()
        h5 = None
        stack = HDF5Stack1D.HDF5Stack1D([external], {"y": "/data_at_top"})

        # check that the data can be read as a stack through a external link
        external = self._h5File + "external.h5"
        if os.path.exists(external):
            os.remove(external)
        h5 = h5py.File(external, "w")
        h5["/data_at_top"] = h5py.ExternalLink(
            self._h5File, "/entry/measurement/mca_soft/data")
        h5["/entry/data"] = h5py.ExternalLink(
            self._h5File, "/entry/measurement/mca_soft/data")
        h5.flush()
        h5.close()
        h5 = None
        fileList = [external]
        for selection in [
            {
                "y": "/data_at_top"
            },  # dataset at top level
            {
                "y": "/data"
            },  # GOOD: selection inside /entry
            {
                "y": "/entry/data"
            }
        ]:  # WRONG: complete path
            stack = HDF5Stack1D.HDF5Stack1D(fileList, selection)
            info = stack.info
            for key in ["McaCalib", "McaLiveTime"]:
                # the %s placeholder was previously never filled in
                self.assertTrue(
                    key in info,
                    "Key <%s>  not present but it should be there" % key)

            readCalib = info["McaCalib"]
            readLiveTime = info["McaLiveTime"]
            self.assertTrue(abs(readCalib[0] - calibration[0]) < 1.0e-10,
                    "Calibration zero. Expected %f got %f" % \
                                 (calibration[0], readCalib[0]))
            # message previously reported readCalib[0] instead of the
            # gain term readCalib[1]
            self.assertTrue(abs(readCalib[1] - calibration[1]) < 1.0e-10,
                    "Calibration gain. Expected %f got %f" % \
                                 (calibration[1], readCalib[1]))
            self.assertTrue(abs(readCalib[2] - calibration[2]) < 1.0e-10,
                    "Calibration 2nd order. Expected %f got %f" % \
                                 (calibration[2], readCalib[2]))
            self.assertTrue(live_time.size == readLiveTime.size,
                            "Incorrect size of live time data")
            self.assertTrue(numpy.allclose(live_time, readLiveTime),
                            "Incorrect live time read")
            self.assertTrue(numpy.allclose(stack.x, channels),
                            "Incorrect channels read")
            self.assertTrue(numpy.allclose(stack.data, data),
                            "Incorrect data read")

        # check that the data can be read as a stack
        fileList = [self._h5File]
        for selection in [{
                "y": "/measurement/mca_with_info/data"
        }, {
                "y": "/measurement/mca_soft/data"
        }, {
                "y": "/instrument/detector/data"
        }]:
            stack = HDF5Stack1D.HDF5Stack1D(fileList, selection)
            info = stack.info
            for key in ["McaCalib", "McaLiveTime"]:
                self.assertTrue(
                    key in info,
                    "Key <%s>  not present but it should be there" % key)

            readCalib = info["McaCalib"]
            readLiveTime = info["McaLiveTime"]
            self.assertTrue(abs(readCalib[0] - calibration[0]) < 1.0e-10,
                    "Calibration zero. Expected %f got %f" % \
                                 (calibration[0], readCalib[0]))
            self.assertTrue(abs(readCalib[1] - calibration[1]) < 1.0e-10,
                    "Calibration gain. Expected %f got %f" % \
                                 (calibration[1], readCalib[1]))
            self.assertTrue(abs(readCalib[2] - calibration[2]) < 1.0e-10,
                    "Calibration 2nd order. Expected %f got %f" % \
                                 (calibration[2], readCalib[2]))
            self.assertTrue(live_time.size == readLiveTime.size,
                            "Incorrect size of live time data")
            self.assertTrue(numpy.allclose(live_time, readLiveTime),
                            "Incorrect live time read")
            self.assertTrue(numpy.allclose(stack.x, channels),
                            "Incorrect channels read")
            self.assertTrue(numpy.allclose(stack.data, data),
                            "Incorrect data read")

        # perform the batch fit
        self._outputDir = os.path.join(tempfile.gettempdir(), "SteelTestDir")
        if not os.path.exists(self._outputDir):
            os.mkdir(self._outputDir)
        cfgFile = os.path.join(tempfile.gettempdir(), "SteelNew.cfg")
        if os.path.exists(cfgFile):
            try:
                os.remove(cfgFile)
            except OSError:
                # best effort: fall through and reuse the existing file
                print("Cannot remove file %s" % cfgFile)
        # we need to make sure we use fundamental parameters and
        # the time read from the file
        configuration["concentrations"]["usematrix"] = 0
        configuration["concentrations"]["useautotime"] = 1
        if not os.path.exists(cfgFile):
            configuration.write(cfgFile)
            os.chmod(cfgFile, 0o777)
        # NOTE: `selection` is the last value left over from the loop above
        # ("/instrument/detector/data")
        batch = McaAdvancedFitBatch.McaAdvancedFitBatch(
            cfgFile,
            filelist=[self._h5File],
            outputdir=self._outputDir,
            concentrations=True,
            selection=selection,
            quiet=True)
        batch.processList()

        # recover the results
        imageFile = os.path.join(self._outputDir, "IMAGES", "Steel.dat")
        self.assertTrue(os.path.isfile(imageFile),
                        "Batch fit result file <%s> not present" % imageFile)
        sf = specfile.Specfile(imageFile)
        labels = sf[0].alllabels()
        scanData = sf[0].data()
        sf = None
        self.assertTrue(
            scanData.shape[-1] == (nRows * nColumns),
            "Expected %d values got %d" %
            (nRows * nColumns, scanData.shape[-1]))

        # point 0 is the reference; every other point must agree with it
        # once the live-time correction is undone
        referenceResult = {}
        for point in range(scanData.shape[-1]):
            for label in labels:
                idx = labels.index(label)
                if label in ["Point", "row", "column"]:
                    continue
                elif point == 0:
                    referenceResult[label] = scanData[idx, point]
                elif label.endswith("-mass-fraction"):
                    reference = referenceResult[label]
                    current = scanData[idx, point]
                    if point % nTimes:
                        # live time differs from point 0: concentrations
                        # must differ, but agree after rescaling by the
                        # live-time ratio
                        if abs(reference) > 1.0e-10:
                            self.assertTrue(
                                reference != current,
                                "Incorrect concentration for point %d" % point)
                        corrected = current * \
                                    (readLiveTime[point] / readLiveTime[0])
                        if abs(reference) > 1.0e-10:
                            delta = \
                                100 * abs((reference - corrected) / reference)
                            self.assertTrue(
                                delta < 0.01,
                                "Incorrect concentration(t) for point %d" %
                                point)
                        else:
                            self.assertTrue(
                                abs(reference - corrected) < 1.0e-5,
                                "Incorrect concentration(t) for point %d" %
                                point)
                    else:
                        # same live time as point 0: identical result
                        self.assertTrue(
                            reference == current,
                            "Incorrect concentration for point %d" % point)
                elif label not in ["Point", "row", "column"]:
                    # non-concentration parameters do not depend on time
                    reference = referenceResult[label]
                    current = scanData[idx, point]
                    self.assertTrue(reference == current,
                                    "Incorrect value for point %d" % point)

        # Batch fitting went well
        # Test the fast XRF
        from PyMca5.PyMcaPhysics.xrf import FastXRFLinearFit
        ffit = FastXRFLinearFit.FastXRFLinearFit()
        configuration["concentrations"]["usematrix"] = 0
        configuration["concentrations"]["useautotime"] = 1
        configuration['fit']['stripalgorithm'] = 1
        outputDict = ffit.fitMultipleSpectra(y=stack,
                                             weight=0,
                                             configuration=configuration,
                                             concentrations=True,
                                             refit=0)
        names = outputDict["names"]
        parameters = outputDict["parameters"]
        uncertainties = outputDict["uncertainties"]
        concentrations = outputDict["concentrations"]
        cCounter = 0
        for i in range(len(names)):
            name = names[i]
            if name.startswith("C(") and name.endswith(")"):
                # it is a concentrations parameter
                # verify that concentrations took into account the time
                reference = concentrations[cCounter][0, 0]
                cTime = configuration['concentrations']['time']
                values = concentrations[cCounter][:]
                values.shape = -1
                for point in range(live_time.size):
                    current = values[point]
                    if DEBUG:
                        print(name, point, reference, current, point % nTimes)
                    if (point % nTimes) and (abs(reference) > 1.0e-10):
                        self.assertTrue(
                            reference != current,
                            "Incorrect concentration for point %d" % point)
                    corrected = current * live_time[point] / cTime
                    if abs(reference) > 1.0e-10:
                        delta = 100 * abs((reference - corrected) / reference)
                        self.assertTrue(
                            delta < 0.01,
                            "Incorrect concentration(t) for point %d" % point)
                    else:
                        self.assertTrue(
                            abs(reference - corrected) < 1.0e-5,
                            "Incorrect concentration(t) for point %d" % point)
                cCounter += 1
            else:
                if DEBUG:
                    print(name, parameters[i][0, 0])
                # all pixels hold the same spectrum, so every fitted
                # parameter and uncertainty must be constant over the map
                delta = (parameters[i] - parameters[i][0, 0])
                self.assertTrue(delta.max() == 0,
                    "Different fit value for parameter %s delta %f" % \
                                (name, delta.max()))
                self.assertTrue(delta.min() == 0,
                    "Different fit value for parameter %s delta %f" % \
                                (name, delta.min()))
                delta = (uncertainties[i] - uncertainties[i][0, 0])
                self.assertTrue(delta.max() == 0,
                    "Different sigma value for parameter %s delta %f" % \
                                (name, delta.max()))
                self.assertTrue(delta.min() == 0,
                    "Different sigma value for parameter %s delta %f" % \
                                (name, delta.min()))
        # repeat the fast fit with refit enabled
        outputDict = ffit.fitMultipleSpectra(y=stack,
                                             weight=0,
                                             configuration=configuration,
                                             concentrations=True,
                                             refit=1)
        names = outputDict["names"]
        parameters = outputDict["parameters"]
        uncertainties = outputDict["uncertainties"]
        concentrations = outputDict["concentrations"]
        cCounter = 0
        for i in range(len(names)):
            name = names[i]
            if name.startswith("C(") and name.endswith(")"):
                # it is a concentrations parameter
                # verify that concentrations took into account the time
                reference = concentrations[cCounter][0, 0]
                cTime = configuration['concentrations']['time']
                values = concentrations[cCounter][:]
                values.shape = -1
                for point in range(live_time.size):
                    current = values[point]
                    if DEBUG:
                        print(name, point, reference, current, point % nTimes)
                    if (point % nTimes) and (abs(reference) > 1.0e-10):
                        self.assertTrue(
                            reference != current,
                            "Incorrect concentration for point %d" % point)
                    corrected = current * live_time[point] / cTime
                    if abs(reference) > 1.0e-10:
                        delta = 100 * abs((reference - corrected) / reference)
                        self.assertTrue(
                            delta < 0.01,
                            "Incorrect concentration(t) for point %d" % point)
                    else:
                        self.assertTrue(
                            abs(reference - corrected) < 1.0e-5,
                            "Incorrect concentration(t) for point %d" % point)
                cCounter += 1
            else:
                if DEBUG:
                    print(name, parameters[i][0, 0])
                delta = (parameters[i] - parameters[i][0, 0])
                self.assertTrue(delta.max() == 0,
                    "Different fit value for parameter %s delta %f" % \
                                (name, delta.max()))
                self.assertTrue(delta.min() == 0,
                    "Different fit value for parameter %s delta %f" % \
                                (name, delta.min()))
                delta = (uncertainties[i] - uncertainties[i][0, 0])
                self.assertTrue(delta.max() == 0,
                    "Different sigma value for parameter %s delta %f" % \
                                (name, delta.max()))
                self.assertTrue(delta.min() == 0,
                    "Different sigma value for parameter %s delta %f" % \
                                (name, delta.min()))
コード例 #4
0
    def testFitHdf5Stack(self):
        """Build a synthetic HDF5 XRF stack and verify it reads back
        correctly through several selection paths (top-level dataset,
        external link, soft link, NeXus-style info group).

        The batch-fit verification below the early ``return`` is kept for
        reference but intentionally not executed here (covered by
        PyMcaBatchTest).
        """
        import tempfile
        from PyMca5.PyMcaIO import specfilewrapper as specfile
        from PyMca5.PyMcaIO import ConfigDict
        from PyMca5.PyMcaIO import HDF5Stack1D
        from PyMca5.PyMcaPhysics.xrf import McaAdvancedFitBatch
        from PyMca5.PyMcaPhysics.xrf import LegacyMcaAdvancedFitBatch
        spe = os.path.join(self.dataDir, "Steel.spe")
        cfg = os.path.join(self.dataDir, "Steel.cfg")
        sf = specfile.Specfile(spe)
        self.assertTrue(len(sf) == 1, "File %s cannot be read" % spe)
        self.assertTrue(sf[0].nbmca() == 1, "Spe file should contain MCA data")
        y = counts = sf[0].mca(1)
        # numpy.float was deprecated in NumPy 1.20 and removed in 1.24;
        # numpy.float64 is the equivalent concrete dtype.
        x = channels = numpy.arange(y.size).astype(numpy.float64)
        sf = None
        configuration = ConfigDict.ConfigDict()
        configuration.read(cfg)
        calibration = configuration["detector"]["zero"], \
                      configuration["detector"]["gain"], 0.0
        initialTime = configuration["concentrations"]["time"]

        # create the data: every pixel holds the same spectrum, but the
        # live time cycles through nTimes distinct values
        nRows = 5
        nColumns = 10
        nTimes = 3
        data = numpy.zeros((nRows, nColumns, counts.size), dtype=numpy.float64)
        live_time = numpy.zeros((nRows * nColumns), dtype=numpy.float64)

        mcaIndex = 0
        for i in range(nRows):
            for j in range(nColumns):
                data[i, j] = counts
                live_time[i * nColumns + j] = initialTime * \
                                              (1 + mcaIndex % nTimes)
                mcaIndex += 1
        self._h5File = os.path.join(tempfile.gettempdir(), "Steel.h5")

        # write the stack to an HDF5 file
        if os.path.exists(self._h5File):
            os.remove(self._h5File)
        h5 = h5py.File(self._h5File, "w")
        h5["/entry/instrument/detector/calibration"] = calibration
        h5["/entry/instrument/detector/channels"] = channels
        h5["/entry/instrument/detector/data"] = data
        h5["/entry/instrument/detector/live_time"] = live_time

        # add nexus conventions
        h5["/entry"].attrs["NX_class"] = u"NXentry"
        h5["/entry/instrument"].attrs["NX_class"] = u"NXinstrument"
        h5["/entry/instrument/detector/"].attrs["NX_class"] = u"NXdetector"
        h5["/entry/instrument/detector/data"].attrs["interpretation"] = \
                                                              u"spectrum"

        # case with softlink
        h5["/entry/measurement/mca_soft/data"] = \
                    h5py.SoftLink("/entry/instrument/detector/data")
        # case with info
        h5["/entry/measurement/mca_with_info/data"] = \
                    h5["/entry/instrument/detector/data"]
        h5["/entry/measurement/mca_with_info/info"] = \
                    h5["/entry/instrument/detector"]
        h5.flush()
        h5.close()
        h5 = None

        # check that the data can be read as a stack as
        # single top level dataset (issue #226)
        external = self._h5File + "external.h5"
        if os.path.exists(external):
            os.remove(external)
        h5 = h5py.File(external, "w")
        h5["/data_at_top"] = h5py.ExternalLink(
            self._h5File, "/entry/measurement/mca_soft/data")
        h5.flush()
        h5.close()
        h5 = None
        stack = HDF5Stack1D.HDF5Stack1D([external], {"y": "/data_at_top"})

        # check that the data can be read as a stack through a external link
        external = self._h5File + "external.h5"
        if os.path.exists(external):
            os.remove(external)
        h5 = h5py.File(external, "w")
        h5["/data_at_top"] = h5py.ExternalLink(
            self._h5File, "/entry/measurement/mca_soft/data")
        h5["/entry/data"] = h5py.ExternalLink(
            self._h5File, "/entry/measurement/mca_soft/data")
        h5.flush()
        h5.close()
        h5 = None
        fileList = [external]
        for selection in [
            {
                "y": "/data_at_top"
            },  # dataset at top level
            {
                "y": "/data"
            },  # GOOD: selection inside /entry
            {
                "y": "/entry/data"
            }
        ]:  # WRONG: complete path
            stack = HDF5Stack1D.HDF5Stack1D(fileList, selection)
            info = stack.info
            for key in ["McaCalib", "McaLiveTime"]:
                # the %s placeholder was previously never filled in
                self.assertTrue(
                    key in info,
                    "Key <%s>  not present but it should be there" % key)

            readCalib = info["McaCalib"]
            readLiveTime = info["McaLiveTime"]
            self.assertTrue(abs(readCalib[0] - calibration[0]) < 1.0e-10,
                    "Calibration zero. Expected %f got %f" % \
                                 (calibration[0], readCalib[0]))
            # message previously reported readCalib[0] instead of the
            # gain term readCalib[1]
            self.assertTrue(abs(readCalib[1] - calibration[1]) < 1.0e-10,
                    "Calibration gain. Expected %f got %f" % \
                                 (calibration[1], readCalib[1]))
            self.assertTrue(abs(readCalib[2] - calibration[2]) < 1.0e-10,
                    "Calibration 2nd order. Expected %f got %f" % \
                                 (calibration[2], readCalib[2]))
            self.assertTrue(live_time.size == readLiveTime.size,
                            "Incorrect size of live time data")
            self.assertTrue(numpy.allclose(live_time, readLiveTime),
                            "Incorrect live time read")
            self.assertTrue(numpy.allclose(stack.x, channels),
                            "Incorrect channels read")
            self.assertTrue(numpy.allclose(stack.data, data),
                            "Incorrect data read")

        # check that the data can be read as a stack
        fileList = [self._h5File]
        for selection in [{
                "y": "/measurement/mca_with_info/data"
        }, {
                "y": "/measurement/mca_soft/data"
        }, {
                "y": "/instrument/detector/data"
        }]:
            stack = HDF5Stack1D.HDF5Stack1D(fileList, selection)
            info = stack.info
            for key in ["McaCalib", "McaLiveTime"]:
                self.assertTrue(
                    key in info,
                    "Key <%s>  not present but it should be there" % key)

            readCalib = info["McaCalib"]
            readLiveTime = info["McaLiveTime"]
            self.assertTrue(abs(readCalib[0] - calibration[0]) < 1.0e-10,
                    "Calibration zero. Expected %f got %f" % \
                                 (calibration[0], readCalib[0]))
            self.assertTrue(abs(readCalib[1] - calibration[1]) < 1.0e-10,
                    "Calibration gain. Expected %f got %f" % \
                                 (calibration[1], readCalib[1]))
            self.assertTrue(abs(readCalib[2] - calibration[2]) < 1.0e-10,
                    "Calibration 2nd order. Expected %f got %f" % \
                                 (calibration[2], readCalib[2]))
            self.assertTrue(live_time.size == readLiveTime.size,
                            "Incorrect size of live time data")
            self.assertTrue(numpy.allclose(live_time, readLiveTime),
                            "Incorrect live time read")
            self.assertTrue(numpy.allclose(stack.x, channels),
                            "Incorrect channels read")
            self.assertTrue(numpy.allclose(stack.data, data),
                            "Incorrect data read")

        # TODO: this is done in PyMcaBatchTest on multiple input formats
        # so not needed here
        return

        # --- unreachable reference code below (kept on purpose) ---
        # perform the batch fit
        self._outputDir = os.path.join(tempfile.gettempdir(), "SteelTestDir")
        if not os.path.exists(self._outputDir):
            os.mkdir(self._outputDir)
        cfgFile = os.path.join(tempfile.gettempdir(), "SteelNew.cfg")
        if os.path.exists(cfgFile):
            try:
                os.remove(cfgFile)
            except OSError:
                # best effort: fall through and reuse the existing file
                print("Cannot remove file %s" % cfgFile)
        # we need to make sure we use fundamental parameters and
        # the time read from the file
        configuration["concentrations"]["usematrix"] = 0
        configuration["concentrations"]["useautotime"] = 1
        if not os.path.exists(cfgFile):
            configuration.write(cfgFile)
            os.chmod(cfgFile, 0o777)

        # Test batch fitting (legacy)
        # NOTE: `selection` is the last value left over from the loop above
        batch = LegacyMcaAdvancedFitBatch.McaAdvancedFitBatch(
            cfgFile,
            filelist=[self._h5File],
            outputdir=self._outputDir,
            concentrations=True,
            selection=selection,
            quiet=True)
        batch.processList()
        imageFile = os.path.join(self._outputDir, "IMAGES", "Steel.dat")
        self._verifyBatchFitResult(imageFile,
                                   nRows,
                                   nColumns,
                                   live_time,
                                   nTimes,
                                   legacy=True)

        # Test batch fitting
        batch = McaAdvancedFitBatch.McaAdvancedFitBatch(
            cfgFile,
            filelist=[self._h5File],
            outputdir=self._outputDir,
            concentrations=True,
            selection=selection,
            quiet=True)
        batch.outbuffer.extensions = ['.dat']
        batch.processList()
        imageFile = batch.outbuffer.filename('.dat')
        self._verifyBatchFitResult(imageFile, nRows, nColumns, live_time,
                                   nTimes)

        # Batch fitting went well
        # Test the fast XRF
        configuration["concentrations"]["usematrix"] = 0
        configuration["concentrations"]["useautotime"] = 1
        configuration['fit']['stripalgorithm'] = 1
        self._verifyFastFit(stack, configuration, live_time, nTimes)
コード例 #5
0
ファイル: fit.py プロジェクト: woutdenolf/spectrocrunch
def PerformBatchFitOld(
    filelist,
    outdir,
    outname,
    cfg,
    energy,
    mlines=None,
    quant=None,
    fast=False,
    addhigh=0,
):
    """Fit XRF spectra in batch with one primary beam energy.

        Least-square fitting. If you intend a linear fit, modify the configuration:
          - Get current energy calibration with "Load From Fit"
          - Enable: Perform a Linear Fit
          - Disable: Stripping
          - Strip iterations = 0
        Fast linear least squares:
          - Use SNIP instead of STRIP

    Args:
        filelist(list(str)): spectra to fit
        outdir(str): directory for results
        outname(str): output radix
        cfg(str or ConfigDict): configuration file to use
        energy(num): primary beam energy
        mlines(Optional(dict)): elements (keys) which M line group must be replaced by some M subgroups (values)
        fast(Optional(bool)): fast fitting (linear)
        quant(Optional(dict)):
        addhigh(Optional(int))
    Returns:
        files(list(str)): files produced by pymca
        labels(list(str)): corresponding HDF5 labels
    """
    outdir = localfs.Path(outdir).mkdir()
    if instance.isstring(cfg):
        # a path was given instead of a ConfigDict: load it
        cfg = ConfigDict.ConfigDict(filelist=cfg)

    # adapt the configuration for this energy and write it to a temporary
    # cfg file that lives only for the duration of the fit
    with outdir.temp(name=outname + ".cfg", force=True) as cfgfile:
        AdaptPyMcaConfig(
            cfg, energy, mlines=mlines, quant=quant, fast=fast, addhigh=addhigh
        )
        cfg.write(cfgfile.path)

        # uncertainties are never saved by this function; concentrations
        # only when quantification info was supplied
        buncertainties = False
        bconcentrations = bool(quant)
        if fast:
            # Prepare fit
            fastFit = FastXRFLinearFit.FastXRFLinearFit()
            fastFit.setFitConfiguration(cfg)
            dataStack = EDFStack.EDFStack(filelist, dtype=np.float32)

            # Fit
            result = fastFit.fitMultipleSpectra(
                y=dataStack, refit=1, concentrations=bconcentrations
            )

            # Save result and keep filenames + labels
            names = result["names"]
            if bconcentrations:
                # the trailing entries of "names" belong to concentrations;
                # keep only the peak-area parameter names
                names = names[: -len(result["concentrations"])]
            # parameter names look like "<element><sep><line>", e.g. "Fe K"
            parse = re.compile("^(?P<Z>.+)[_ -](?P<line>.+)$")

            def filename(x):
                # output EDF path for a given label
                return outdir["{}_{}.edf".format(outname, x)].path

            labels = []
            files = []
            j = 0
            for i, name in enumerate(names):
                m = parse.match(name)
                if not m:
                    # not an element/line parameter (e.g. chisq): skip
                    continue
                m = m.groupdict()
                Z, line = m["Z"], m["line"]

                # Peak area
                label = "{}_{}".format(Z, line)
                f = filename(label)
                edf.saveedf(
                    f, result["parameters"][i], {"Title": label}, overwrite=True
                )
                labels.append(label)
                files.append(f)

                # Error on peak area
                if buncertainties:
                    label = "s{}_{}".format(Z, line)
                    f = filename(label)
                    edf.saveedf(
                        f, result["uncertainties"][i], {"Title": label}, overwrite=True
                    )
                    labels.append(label)
                    files.append(f)

                # Mass fraction
                if bconcentrations and Z.lower() != "scatter":
                    label = "w{}_{}".format(Z, line)
                    f = filename(label)
                    edf.saveedf(
                        f, result["concentrations"][j], {"Title": label}, overwrite=True
                    )
                    labels.append(label)
                    files.append(f)
                    j += 1
        else:
            # slow path: let pymca's batch fitter write the IMAGES directory
            b = McaAdvancedFitBatch.McaAdvancedFitBatch(
                cfgfile.path,
                filelist=filelist,
                outputdir=outdir.path,
                fitfiles=0,
                concentrations=bconcentrations,
            )
            b.processList()
            filemask = os.path.join(outdir.path, "IMAGES", "*.dat")

            def basename(x):
                # file name without directory or extension
                return os.path.splitext(os.path.basename(x))[0]

            # common radix length (+1 for the separator) derived from the
            # first .dat file; stripped from each EDF name to get the label
            nbase = len(basename(glob.glob(filemask)[0])) + 1
            filemask = os.path.join(outdir.path, "IMAGES", "*.edf")
            labels = []
            files = []
            for name in sorted(glob.glob(filemask)):
                label = basename(name)[nbase:]
                if label.endswith("mass_fraction"):
                    # "<Z>_<line>_mass_fraction" -> "w<Z>_<line>"
                    label = "w" + label[:-14]
                if label == "chisq":
                    label = "calc_chisq"
                labels.append(label)
                files.append(name)
    return files, labels
コード例 #6
0
ファイル: fit.py プロジェクト: woutdenolf/spectrocrunch
def PerformBatchFitNew(
    filelist,
    outdir,
    outname,
    cfg,
    energy,
    mlines=None,
    quant=None,
    fast=False,
    addhigh=0,
):
    """Batch-fit XRF spectra at a single primary beam energy.

        Least-square fitting. If you intend a linear fit, modify the configuration:
          - Get current energy calibration with "Load From Fit"
          - Enable: Perform a Linear Fit
          - Disable: Stripping
          - Strip iterations = 0
        Fast linear least squares:
          - Use SNIP instead of STRIP

    Args:
        filelist(list(str)): spectra to fit
        outdir(str): directory for results
        outname(str): output radix
        cfg(str or ConfigDict): configuration file to use
        energy(num): primary beam energy
        mlines(Optional(dict)): elements (keys) which M line group must be replaced by some M subgroups (values)
        fast(Optional(bool)): fast fitting (linear)
        quant(Optional(dict)):
        addhigh(Optional(int)):
    Returns:
        files(list(str)): files produced by pymca
        labels(list(str)): corresponding HDF5 labels
    """
    # A string means a path on disk: load it, then adapt the config in memory.
    if instance.isstring(cfg):
        cfg = ConfigDict.ConfigDict(filelist=cfg)
    AdaptPyMcaConfig(
        cfg, energy, mlines=mlines, quant=quant, fast=fast, addhigh=addhigh
    )
    # Uncertainty maps are currently disabled; concentrations only when quantification info is given.
    include_uncertainties = False
    include_concentrations = bool(quant)

    # PyMca wants the configuration as a file: write the adapted config to a
    # temporary .cfg inside the (created-if-needed) output directory.
    outdir = localfs.Path(outdir).mkdir()
    with outdir.temp(name=outname + ".cfg", force=True) as cfgfile:
        cfg.write(cfgfile.path)
        # EDF output (one image per fitted parameter), no HDF5, figures of merit kept.
        outbuffer = OutputBuffer(
            outputDir=outdir.path,
            fileEntry=outname,
            h5=False,
            edf=True,
            multipage=False,
            saveFOM=True,
        )
        if fast:
            # Fast path: linear least squares on a prepared data stack.
            batch = FastXRFLinearFit.FastXRFLinearFit()
            stack = FastXRFLinearFit.prepareDataStack(filelist)
            with outbuffer.saveContext():
                batch.fitMultipleSpectra(
                    y=stack,
                    configuration=cfg,
                    concentrations=include_concentrations,
                    refit=1,
                    weight=None,  # None -> from cfg file
                    outbuffer=outbuffer,
                )
        else:
            # Full non-linear fit, spectrum per spectrum; no per-spectrum fit files.
            batch = McaAdvancedFitBatch.McaAdvancedFitBatch(
                cfgfile.path,
                filelist=filelist,
                concentrations=include_concentrations,
                fitfiles=0,
                fitconcfile=0,
                outbuffer=outbuffer,
            )
            with outbuffer.saveContext():
                batch.processList()

    # Collect the produced EDF files together with their HDF5 labels.
    files, labels = [], []
    groups = ["parameters", "massfractions"]
    if include_uncertainties:
        groups.append("uncertainties")
    for group in groups:
        for label in outbuffer.labels(group, labeltype="filename"):
            labels.append(label)
            files.append(outbuffer.filename(".edf", suffix="_" + label))
    # Chi-square map, when present, is exposed under a fixed label.
    if "chisq" in outbuffer:
        labels.append("calc_chisq")
        files.append(outbuffer.filename(".edf", suffix="_chisq"))
    return files, labels
コード例 #7
0
ファイル: fit.py プロジェクト: woutdenolf/spectrocrunch
def PerformBatchFitHDF5(
    filelist,
    cfg,
    outuri,
    energy=None,
    mlines=None,
    quant=None,
    fast=False,
    addhigh=0,
    **kw
):
    """Fit XRF spectra in batch with one primary beam energy.

        Least-square fitting. If you intend a linear fit, modify the configuration:
          - Get current energy calibration with "Load From Fit"
          - Enable: Perform a Linear Fit
          - Disable: Stripping
          - Strip iterations = 0
        Fast linear least squares:
          - Use SNIP instead of STRIP

    Args:
        filelist(list(str)): spectra to fit; entries may carry an HDF5-internal
            path as "file.h5::/group" (the group must be identical for all)
        cfg(str or ConfigDict): configuration file to use
        outuri(h5fs.Path): directory for results
        energy(num): primary beam energy
        mlines(Optional(dict)): elements (keys) which M line group must be replaced by some M subgroups (values)
        fast(Optional(bool)): fast fitting (linear)
        quant(Optional(dict)):
        addhigh(Optional(int)):
    """
    # A string means a config file path: load it, then adapt it in memory.
    if instance.isstring(cfg):
        cfg = ConfigDict.ConfigDict(filelist=cfg)
    AdaptPyMcaConfig(
        cfg, energy, mlines=mlines, quant=quant, fast=fast, addhigh=addhigh
    )

    # outputDir/outputRoot.h5::/fileEntry/fileProcess
    # Force HDF5 output (no EDF) and derive the four output-location pieces
    # from the target URI: its file's directory, file name (without extension),
    # parent group and final group name.
    kw["h5"] = True
    kw["edf"] = False
    kw["outputDir"] = outuri.device.parent.path
    kw["outputRoot"] = os.path.splitext(outuri.device.name)[0]
    kw["fileEntry"] = outuri.parent.path
    kw["fileProcess"] = outuri.name
    outbuffer = OutputBuffer(**kw)
    if fast:
        # Fast path: linear least squares on a prepared data stack.
        batch = FastXRFLinearFit.FastXRFLinearFit()
        stack = FastXRFLinearFit.prepareDataStack(filelist)
        kwargs = {
            "y": stack,
            "configuration": cfg,
            "concentrations": bool(quant),  # only when quantification info is given
            "refit": 1,
            "outbuffer": outbuffer,
        }
        with outbuffer.saveContext():
            batch.fitMultipleSpectra(**kwargs)
    else:
        # Split "file.h5::/internal/path" entries into file names and
        # in-file paths; plain file names yield a single column (no selection).
        split_results = list(zip(*(filename.split("::") for filename in filelist)))
        if len(split_results) == 1:
            selection = None
        else:
            filelist, path_in_file = split_results
            # All entries must point at the same in-file group, since a single
            # selection is passed to the batch fit below.
            if len(set(path_in_file)) != 1:
                raise ValueError(path_in_file, "HDF5 group must be the same for all")
            filelist = list(filelist)
            selection = {"y": path_in_file[0]}
        kwargs = {
            "filelist": filelist,
            "selection": selection,
            "concentrations": bool(quant),
            "fitfiles": 0,  # no per-spectrum fit files
            "fitconcfile": 0,
            "outbuffer": outbuffer,
        }
        # McaAdvancedFitBatch needs the configuration as a file on disk.
        with tempPyMcaConfigFile(cfg) as cfgfilename:
            batch = McaAdvancedFitBatch.McaAdvancedFitBatch(cfgfilename, **kwargs)
            with outbuffer.saveContext():
                batch.processList()