Пример #1
0
    def report(self):
        """Return a multi-line text report of this path's parameters.

        The report lists the feff.dat file and label, the path geometry,
        reff, and each path parameter value with its standard error
        (when one has been estimated by a fit).
        """
        (deg, s02, e0, ei, delr, ss2, c3, c4) = self._pathparams()

        # put 'reff' into the paramGroup so that it can be used in
        # constraint expressions
        reff = self._feffdat.reff
        self._larch.symtable._sys.paramGroup._feffdat = self._feffdat
        self._larch.symtable._sys.paramGroup.reff = reff


        geomlabel  = '          Atom     x        y        z     ipot'
        geomformat = '           %s   % .4f, % .4f, % .4f  %i'
        out = ['   feff.dat file = %s' % self.filename]
        if self.label != self.filename:
            out.append('     label     = %s' % self.label)
        out.append(geomlabel)

        # one line per atom in the path geometry; ipot 0 is the absorber
        for label, iz, ipot, x, y, z in self.geom:
            s = geomformat % (label, x, y, z, ipot)
            if ipot == 0: s = "%s (absorber)" % s
            out.append(s)

        stderrs = {}
        out.append('     reff   =  %.5f' % self._feffdat.reff)
        # gather stderr for each path parameter; a Parameter may wrap
        # another Parameter, so look one level down for its stderr too
        for param in ('degen', 's02', 'e0', 'ei',
                      'deltar', 'sigma2', 'third', 'fourth'):
            val = getattr(self, param)
            std = 0
            if isParameter(val):
                std = val.stderr
                val = val.value
                if isParameter(val):
                    if val.stderr is not None:
                        std = val.stderr
            # stderr of None means 'no estimate'; use -1 as a sentinel
            if std is None: std = -1
            stderrs[param] = std

        def showval(title, par, val, stderrs, ifnonzero=False):
            """Append '  title= value [+/- stderr]' to the report.

            With ifnonzero=True, zero-valued entries are skipped.
            A title starting with 'R  ' reports reff + deltar.
            """
            if val == 0 and ifnonzero:
                return
            s = '     %s=' % title
            if title.startswith('R  '):
                val = val + self._feffdat.reff
            if stderrs[par] == 0:
                s = '%s % .5f' % (s, val)
            else:
                s = '%s % .5f +/- % .5f' % (s, val, stderrs[par])
            out.append(s)
        showval('Degen  ', 'degen',  deg,  stderrs)
        showval('S02    ', 's02',    s02,  stderrs)
        showval('E0     ', 'e0',     e0,   stderrs)
        showval('R      ', 'deltar', delr, stderrs)
        showval('deltar ', 'deltar', delr, stderrs)
        showval('sigma2 ', 'sigma2', ss2,  stderrs)
        showval('third  ', 'third',  c3,   stderrs, ifnonzero=True)
        showval('fourth ', 'fourth', c4,   stderrs, ifnonzero=True)
        showval('Ei     ', 'ei',     ei,   stderrs, ifnonzero=True)

        return '\n'.join(out)
Пример #2
0
    def report(self):
        """Return a multi-line text report of this path's parameters.

        The report lists the feff.dat file and label, the path geometry,
        reff, and each path parameter value with its standard error
        (when one has been estimated by a fit).
        """
        (deg, s02, e0, ei, delr, ss2, c3, c4) = self._pathparams()

        # put 'reff' into the paramGroup so that it can be used in
        # constraint expressions
        reff = self._feffdat.reff
        self._larch.symtable._sys.paramGroup._feffdat = self._feffdat
        self._larch.symtable._sys.paramGroup.reff = reff

        geomlabel = '          Atom     x        y        z     ipot'
        geomformat = '           %s   % .4f, % .4f, % .4f  %i'
        out = ['   feff.dat file = %s' % self.filename]
        if self.label != self.filename:
            out.append('     label     = %s' % self.label)
        out.append(geomlabel)

        # one line per atom in the path geometry; ipot 0 is the absorber
        for label, iz, ipot, x, y, z in self.geom:
            s = geomformat % (label, x, y, z, ipot)
            if ipot == 0: s = "%s (absorber)" % s
            out.append(s)

        stderrs = {}
        out.append('     reff   =  %.5f' % self._feffdat.reff)
        # gather stderr for each path parameter; a Parameter may wrap
        # another Parameter, so look one level down for its stderr too
        for param in ('degen', 's02', 'e0', 'ei', 'deltar', 'sigma2', 'third',
                      'fourth'):
            val = getattr(self, param)
            std = 0
            if isParameter(val):
                std = val.stderr
                val = val.value
                if isParameter(val):
                    if val.stderr is not None:
                        std = val.stderr
            # stderr of None means 'no estimate'; use -1 as a sentinel
            if std is None: std = -1
            stderrs[param] = std

        def showval(title, par, val, stderrs, ifnonzero=False):
            """Append '  title= value [+/- stderr]' to the report.

            With ifnonzero=True, zero-valued entries are skipped.
            A title starting with 'R  ' reports reff + deltar.
            """
            if val == 0 and ifnonzero:
                return
            s = '     %s=' % title
            if title.startswith('R  '):
                val = val + self._feffdat.reff
            if stderrs[par] == 0:
                s = '%s % .5f' % (s, val)
            else:
                s = '%s % .5f +/- % .5f' % (s, val, stderrs[par])
            out.append(s)

        showval('Degen  ', 'degen', deg, stderrs)
        showval('S02    ', 's02', s02, stderrs)
        showval('E0     ', 'e0', e0, stderrs)
        showval('R      ', 'deltar', delr, stderrs)
        showval('deltar ', 'deltar', delr, stderrs)
        showval('sigma2 ', 'sigma2', ss2, stderrs)
        showval('third  ', 'third', c3, stderrs, ifnonzero=True)
        showval('fourth ', 'fourth', c4, stderrs, ifnonzero=True)
        showval('Ei     ', 'ei', ei, stderrs, ifnonzero=True)

        return '\n'.join(out)
Пример #3
0
 def test_eval_group2(self):
     """Decoded group2 keeps its subgroup label and both Parameters."""
     out = deval(self.group2)
     assert isgroup(out)
     assert out.sub.label == 'a label'
     assert isParameter(out.par1)
     assert isParameter(out.par2)
     assert out.par1.name == 'p1'
     assert out.par2.name == 'p2'
     # PEP8 E712: test truthiness directly instead of '== False/True'
     assert not out.par1.vary
     assert out.par2.vary
     assert out.par1.value == 3.0
     assert out.par2.value == 1.0
     assert out.par1.min == 0.0
Пример #4
0
 def test_eval_group2(self):
     """Decoded group2 keeps its subgroup label and both Parameters."""
     out = deval(self.group2)
     assert isgroup(out)
     assert out.sub.label == 'a label'
     assert isParameter(out.par1)
     assert isParameter(out.par2)
     assert out.par1.name == 'p1'
     assert out.par2.name == 'p2'
     # PEP8 E712: test truthiness directly instead of '== False/True'
     assert not out.par1.vary
     assert out.par2.vary
     assert out.par1.value == 3.0
     assert out.par2.value == 1.0
     assert out.par1.min == 0.0
Пример #5
0
 def test_eval_param1(self):
     """Decoded param1 keeps its name, vary flag, value, and lower bound."""
     out = deval(self.param1)
     assert isParameter(out)
     assert out.name == 'a'
     # PEP8 E712: test truthiness directly instead of '== True'
     assert out.vary
     assert out.value == 2.0
     assert out.min == 0
Пример #6
0
 def _get(expr, _larch=None):
     """Evaluate *expr* in the larch interpreter and return a
     JSON-friendly representation of the result.

     Arrays, Groups, and Parameters become dicts tagged with a
     '__class__' key; strings are coerced via '%s'; anything else is
     returned unchanged.  Returns None when no interpreter is given.
     """
     if _larch is None:
         return None
     obj = _larch.eval(expr)
     print(' .. get ', obj)
     if isinstance(obj, np.ndarray):
         return {'__class__': 'Array',
                 '__shape__': obj.shape,
                 '__dtype__': obj.dtype.name,
                 'value': obj.tolist()}
     if _larch.symtable.isgroup(obj):
         encoded = {'__class__': 'Group'}
         # recurse on each attribute, round-tripping through repr()/eval
         for attr in dir(obj):
             encoded[attr] = _get(repr(getattr(obj, attr)), _larch=_larch)
         return encoded
     if isParameter(obj):
         encoded = {'__class__': 'Parameter'}
         for key in ('value', 'name', 'vary', 'min', 'max', 'expr',
                     'stderr', 'correl'):
             attrval = getattr(obj, key, None)
             # only report attributes that are actually set
             if attrval is not None:
                 encoded[key] = attrval
         return encoded
     if isinstance(obj, basestring):
         return '%s' % obj
     return obj
Пример #7
0
 def add_data(self, group, name, data):
     """Recursively write *data* into the HDF5 *group* under *name*.

     Groups, lists/tuples, dicts, and Parameters each get an HDF5
     subgroup tagged with a 'larchtype' attribute; anything else is
     stored as a plain dataset.
     """
     if self.isgroup(data):
         attrs = {'larchtype': 'group', 'class': data.__class__.__name__}
         sub = self.add_h5group(group, name, attrs=attrs)
         for attr in dir(data):
             self.add_data(sub, attr, getattr(data, attr))
     elif isinstance(data, (list, tuple)):
         seqtype = 'tuple' if isinstance(data, tuple) else 'list'
         sub = self.add_h5group(group, name, attrs={'larchtype': seqtype})
         for idx, element in enumerate(data):
             self.add_data(sub, 'item%i' % idx, element)
     elif isinstance(data, dict):
         sub = self.add_h5group(group, name, attrs={'larchtype': 'dict'})
         for key, val in data.items():
             self.add_data(sub, key, val)
     elif isParameter(data):
         sub = self.add_h5group(group, name, attrs={'larchtype': 'parameter'})
         self.add_h5dataset(sub, 'json', data.asjson())
     else:
         self.add_h5dataset(group, name, data)
Пример #8
0
def spline_eval(x, group, name='spl1', _larch=None):
    """Interpolate the spline stored on *group* (by spline_rep) at *x*.

    arguments:
    ------------
      x       1-d abscissa array at which to evaluate
      group   Group holding the spline representation built by spline_rep()
      name    base name of the spline parameters and subgroup ['spl1']

    returns:
    --------
      1-d array of interpolated values
    """
    details = getattr(group, "{:s}_details".format(name), None)
    if details is None or not isgroup(details):
        raise Warning("spline_eval: subgroup '{:s}' not found".format(name))

    knots = getattr(details, 'knots')
    order = getattr(details, 'order')
    coefs = getattr(details, 'coefs')
    # refresh the interior coefficients from the named fit parameters
    # (the first/last two coefficients are fixed boundary values)
    ncoefs = len(coefs) - 4
    for idx in range(ncoefs):
        pname = "{:s}_c{:d}".format(name, idx)
        par = getattr(group, pname, None)
        if par is None:
            raise Warning("spline_eval: param'{:s}' not found".format(pname))
        coefs[2 + idx] = par.value if isParameter(par) else par
    setattr(details, 'coefs', coefs)
    return splev(x, [knots, coefs, order])
Пример #9
0
    def _define(self, name, shape='gaussian', sigma_params=None):
        """Configure center/sigma Parameters from an emission-line name.

        Parameters:
          name          string like 'Fe Ka' (element symbol + line name);
                        None or an unparsable string leaves the peak as-is.
          shape         line-shape label stored on self ['gaussian']
          sigma_params  optional 2-sequence used to build the constraint
                        expression for sigma; items may be Parameters,
                        in which case their names are used.
        """
        self.shape = shape
        if name is None:
            return
        # 'Fe Ka' -> ('Fe', 'Ka'); give up quietly on malformed names.
        # Narrowed from a bare 'except:' which would also swallow
        # KeyboardInterrupt/SystemExit.
        try:
            elem, line = [w.title() for w in name.split()]
        except Exception:
            return
        if line == 'Ka': line = 'Ka1'

        dat = xray_line(elem, line, _larch=self._larch)
        if dat is not None:
            ecenter = dat[0]
            if self.center is None:
                self.center = Parameter(name='center',
                                        value=ecenter,
                                        vary=False,
                                        _larch=self._larch)

            if sigma_params is not None:
                if len(sigma_params) == 2 and self.sigma is None:
                    if isParameter(sigma_params[0]):
                        sigma_params = (sigma_params[0].name,
                                        sigma_params[1].name)
                    # sigma constrained as linear function of line energy
                    expr = "%s + %s * %f" % (sigma_params[0], sigma_params[1],
                                             ecenter)
                    self.sigma = Parameter(name='sigma',
                                           expr=expr,
                                           _larch=self._larch)
Пример #10
0
 def _get(expr, _larch=None):
     """Evaluate *expr* in the larch interpreter and return a
     JSON-friendly representation of the result.

     Arrays, Groups, and Parameters become dicts tagged with a
     '__class__' key; strings are coerced via '%s'; anything else is
     returned unchanged.  Returns None when no interpreter is given.
     """
     # NOTE: Python-2 print statement below; this variant is py2-only
     if _larch is None:
         return None
     obj = _larch.eval(expr)
     print ' .. get ', obj
     if isinstance(obj, np.ndarray):
         out = {'__class__': 'Array', '__shape__': obj.shape,
                '__dtype__': obj.dtype.name}
         out['value'] = obj.tolist()
         return out
     elif _larch.symtable.isgroup(obj):
         out = {'__class__': 'Group'}
         # recurse on each attribute, round-tripping through repr()/eval
         for item in dir(obj):
             out[item] = _get(repr(getattr(obj, item)), _larch=_larch)
         return out
     elif isParameter(obj):
         out = {'__class__': 'Parameter'}
         for attr in ('value', 'name', 'vary', 'min', 'max',
                      'expr', 'stderr', 'correl'):
             val = getattr(obj, attr, None)
             # only report attributes that are actually set
             if val is not None:
                 out[attr] = val
         return out
     elif isinstance(obj, basestring):
         return '%s' % obj
     else:
         return obj
Пример #11
0
    def _define(self, name, shape='gaussian', sigma_params=None):
        """Configure center/sigma Parameters from an emission-line name.

        Parameters:
          name          string like 'Fe Ka' (element symbol + line name);
                        None or an unparsable string leaves the peak as-is.
          shape         line-shape label stored on self ['gaussian']
          sigma_params  optional 2-sequence used to build the constraint
                        expression for sigma; items may be Parameters,
                        in which case their names are used.
        """
        self.shape = shape
        if name is None:
            return
        # 'Fe Ka' -> ('Fe', 'Ka'); give up quietly on malformed names.
        # Narrowed from a bare 'except:' which would also swallow
        # KeyboardInterrupt/SystemExit.
        try:
            elem, line = [w.title() for w in name.split()]
        except Exception:
            return
        if line == 'Ka': line='Ka1'

        dat = xray_line(elem, line, _larch=self._larch)
        if dat is not None:
            ecenter = dat[0]
            if self.center is None:
                self.center = Parameter(name='center',
                                        value=ecenter,
                                        vary=False,
                                        _larch=self._larch)

            if sigma_params is not None:
                if len(sigma_params) == 2 and self.sigma is None:
                    if isParameter(sigma_params[0]):
                        sigma_params = (sigma_params[0].name,
                                        sigma_params[1].name)
                    # sigma constrained as linear function of line energy
                    expr = "%s + %s * %f" % (sigma_params[0],
                                             sigma_params[1],
                                             ecenter)
                    self.sigma = Parameter(name='sigma',
                                           expr=expr,
                                           _larch=self._larch)
Пример #12
0
 def test_eval_param1(self):
     """Decoded param1 keeps its name, vary flag, value, and lower bound."""
     out = deval(self.param1)
     assert isParameter(out)
     assert out.name == 'a'
     # PEP8 E712: test truthiness directly instead of '== True'
     assert out.vary
     assert out.value == 2.0
     assert out.min == 0
Пример #13
0
def spline_eval(x, group, name='spl1', _larch=None):
    """Interpolate the spline stored on *group* (by spline_rep) at *x*.

    arguments:
    ------------
      x       1-d abscissa array at which to evaluate
      group   Group holding the spline representation built by spline_rep()
      name    base name of the spline parameters and subgroup ['spl1']

    returns:
    --------
      1-d array of interpolated values
    """
    details = getattr(group, "{:s}_details".format(name), None)
    if details is None or not isgroup(details):
        raise Warning("spline_eval: subgroup '{:s}' not found".format(name))

    knots = getattr(details, 'knots')
    order = getattr(details, 'order')
    coefs = getattr(details, 'coefs')
    # refresh the interior coefficients from the named fit parameters
    # (the first/last two coefficients are fixed boundary values)
    ncoefs = len(coefs) - 4
    for idx in range(ncoefs):
        pname = "{:s}_c{:d}".format(name, idx)
        par = getattr(group, pname, None)
        if par is None:
            raise Warning("spline_eval: param'{:s}' not found".format(pname))
        coefs[2 + idx] = par.value if isParameter(par) else par
    setattr(details, 'coefs', coefs)
    return splev(x, [knots, coefs, order])
Пример #14
0
def encode4js(obj):
    """Return *obj* converted to JSON-encodable builtin types.

    Special handling:
      numpy array      -> dict with __class__='Array', shape, dtype, values
                          (complex arrays store [real, imag] lists)
      int/float scalar -> float
      str              -> str
      complex          -> dict with __class__='Complex'
      Larch Group      -> dict with __class__='Group', one entry per attr
      Larch Parameter  -> dict with __class__='Parameter'
      tuple/list       -> dict with __class__='Tuple'/'List'
      dict             -> dict with __class__='Dict'
    Anything else is returned unchanged.
    """
    if isinstance(obj, np.ndarray):
        out = {'__class__': 'Array', '__shape__': obj.shape,
               '__dtype__': obj.dtype.name}
        out['value'] = obj.flatten().tolist()
        if 'complex' in obj.dtype.name:
            # complex data stored as separate real and imaginary lists
            out['value'] = [(obj.real).tolist(), (obj.imag).tolist()]
        return out
    elif isinstance(obj, (int, float, np.integer, np.floating)):
        # was isinstance(obj, (np.float, np.int)): those aliases were
        # removed in numpy 1.24, and they also missed numpy integer
        # scalars (np.int64 is not a subclass of int on most platforms)
        return float(obj)
    elif isinstance(obj, str):
        # was six.string_types; plain str is equivalent on Python 3
        return str(obj)
    elif isinstance(obj, (complex, np.complexfloating)):
        # was np.complex (removed numpy 1.24 alias for builtin complex)
        return {'__class__': 'Complex', 'value': (obj.real, obj.imag)}
    elif isgroup(obj):
        out = {'__class__': 'Group'}
        for item in dir(obj):
            out[item] = encode4js(getattr(obj, item))
        return out
    elif isParameter(obj):
        out = {'__class__': 'Parameter'}
        for attr in ('value', 'name', 'vary', 'min', 'max',
                     'expr', 'stderr', 'correl'):
            val = getattr(obj, attr, None)
            if val is not None:
                # only report attributes that are actually set
                out[attr] = val
        return out
    elif isinstance(obj, (tuple, list)):
        ctype = 'List'
        if isinstance(obj, tuple):
            ctype = 'Tuple'
        val = [encode4js(item) for item in obj]
        return {'__class__': ctype, 'value': val}
    elif isinstance(obj, dict):
        out = {'__class__': 'Dict'}
        for key, val in obj.items():
            out[encode4js(key)] = encode4js(val)
        return out
    return obj
Пример #15
0
    def onFitPeak(self, evt=None):
        """Run a peak fit on the current group and display a summary.

        Reads model/background/step choices from the GUI controls, calls
        fit_peak() on the group's _x1_/_y1_ arrays, and puts a parameter
        summary into the fit_report label.
        """
        gname = self.groupname
        if self.dtcorr.IsChecked():
            # TODO: deadtime correction requested but not implemented
            print('fit needs to dt correct!')

        dtext = []
        model = self.fit_model.GetStringSelection().lower()
        dtext.append('Fit Model: %s' % model)
        bkg = self.fit_bkg.GetStringSelection()
        if bkg == 'None':
            bkg = None
        if bkg is None:
            dtext.append('No Background')
        else:
            dtext.append('Background: %s' % bkg)

        step = self.fit_step.GetStringSelection().lower()
        if model in ('step', 'rectangle'):
            dtext.append('Step form: %s' % step)
        lgroup = getattr(self.larch.symtable, gname)
        x = lgroup._x1_
        y = lgroup._y1_
        pgroup = fit_peak(x,
                          y,
                          model,
                          background=bkg,
                          step=step,
                          _larch=self.larch)
        # NOTE(review): 'text' is computed twice in this method but never
        # used; the label below is set from 'dtext' -- confirm whether the
        # full fit_report output was meant to be shown instead
        text = fit_report(pgroup.params, _larch=self.larch)
        dtext.append('Parameters: ')
        for pname in dir(pgroup.params):
            par = getattr(pgroup.params, pname)
            if isParameter(par):
                ptxt = "    %s= %.4f" % (par.name, par.value)
                if (hasattr(par, 'stderr') and par.stderr is not None):
                    ptxt = "%s(%.4f)" % (ptxt, par.stderr)
                dtext.append(ptxt)

        dtext = '\n'.join(dtext)
        # plotframe = self.get_plotwindow()
        # plotframe.oplot(x, pgroup.fit, label='fit (%s)' % model)
        text = fit_report(pgroup.params, _larch=self.larch)
        self.fit_report.SetLabel(dtext)
Пример #16
0
    def onFitPeak(self, evt=None):
        """Run a peak fit on the current group and display a summary.

        Reads model/background/step choices from the GUI controls, calls
        fit_peak() on the group's _x1_/_y1_ arrays, and puts a parameter
        summary into the fit_report label.
        """
        gname = self.groupname
        if self.dtcorr.IsChecked():
            # TODO: deadtime correction requested but not implemented
            print('fit needs to dt correct!')

        dtext = []
        model = self.fit_model.GetStringSelection().lower()
        dtext.append('Fit Model: %s' % model)
        bkg =  self.fit_bkg.GetStringSelection()
        if bkg == 'None':
            bkg = None
        if bkg is None:
            dtext.append('No Background')
        else:
            dtext.append('Background: %s' % bkg)

        step = self.fit_step.GetStringSelection().lower()
        if model in ('step', 'rectangle'):
            dtext.append('Step form: %s' % step)
        lgroup =  getattr(self.larch.symtable, gname)
        x = lgroup._x1_
        y = lgroup._y1_
        pgroup = fit_peak(x, y, model, background=bkg, step=step,
                          _larch=self.larch)
        # NOTE(review): 'text' is computed twice in this method but never
        # used; the label below is set from 'dtext' -- confirm whether the
        # full fit_report output was meant to be shown instead
        text = fit_report(pgroup.params, _larch=self.larch)
        dtext.append('Parameters: ')
        for pname in dir(pgroup.params):
            par = getattr(pgroup.params, pname)
            if isParameter(par):
                ptxt = "    %s= %.4f" % (par.name, par.value)
                if (hasattr(par, 'stderr') and par.stderr is not None):
                    ptxt = "%s(%.4f)" % (ptxt, par.stderr)
                dtext.append(ptxt)

        dtext = '\n'.join(dtext)
        # plotframe = self.get_plotwindow()
        # plotframe.oplot(x, pgroup.fit, label='fit (%s)' % model)
        text = fit_report(pgroup.params, _larch=self.larch)
        self.fit_report.SetLabel(dtext)
Пример #17
0
 def add_data(self, group, name, data):
     """Recursively write *data* into the HDF5 *group* under *name*.

     The name is sanitized with fix_varname() first.  Groups,
     lists/tuples, dicts, and Parameters each get an HDF5 subgroup
     tagged with a 'larchtype' attribute; anything else is stored as
     a plain dataset.
     """
     name = fix_varname(name)
     if self.isgroup(data):
         attrs = {'larchtype': 'group', 'class': data.__class__.__name__}
         sub = self.add_h5group(group, name, attrs=attrs)
         for attr in dir(data):
             self.add_data(sub, attr, getattr(data, attr))
     elif isinstance(data, (list, tuple)):
         seqtype = 'tuple' if isinstance(data, tuple) else 'list'
         sub = self.add_h5group(group, name, attrs={'larchtype': seqtype})
         for idx, element in enumerate(data):
             self.add_data(sub, 'item%i' % idx, element)
     elif isinstance(data, dict):
         sub = self.add_h5group(group, name, attrs={'larchtype': 'dict'})
         for key, val in data.items():
             self.add_data(sub, key, val)
     elif isParameter(data):
         sub = self.add_h5group(group, name, attrs={'larchtype': 'parameter'})
         self.add_h5dataset(sub, 'json', data.asjson())
     else:
         self.add_h5dataset(group, name, data)
Пример #18
0
def feffit_report(result, min_correl=0.1, with_paths=True, _larch=None):
    """return a printable report of fit for feffit

    Parameters:
    ------------
      result:      Feffit result, output group from feffit()
      min_correl:  minimum correlation to report [0.1]
      with_paths:  boolean (True/False) for whether to list all paths [True]

    Returns:
    ---------
      printable string of report.

    """
    # validate input: must be the Group returned by feffit()
    input_ok = False
    try:
        fit = result.fit
        params = result.params
        datasets = result.datasets
        input_ok = True
    except:
        pass
    if not input_ok:
        print('must pass output of feffit()!')
        return
    topline = '=================== FEFFIT RESULTS ===================='
    header = '[[%s]]'
    varformat = '   %12s = % f +/- %s   (init= % f)'
    exprformat = '   %12s = % f +/- %s  = \'%s\''
    out = [topline, header % 'Statistics']

    npts = len(params.residual)

    # fit statistics section
    out.append('   npts, nvarys, nfree= %i, %i, %i' %
               (npts, params.nvarys, params.nfree))
    out.append('   chi_square         = %.8g' % (params.chi_square))
    out.append('   reduced chi_square = %.8g' % (params.chi_reduced))
    out.append('   r-factor           = %.8g' % (params.rfactor))
    out.append(' ')
    # per-dataset section: transform ranges, epsilon estimates, paths
    if len(datasets) == 1:
        out.append(header % 'Data')
    else:
        out.append(header % 'Datasets (%i)' % len(datasets))
    for i, ds in enumerate(datasets):
        tr = ds.transform
        if len(datasets) > 1:
            out.append(' dataset %i:' % (i + 1))
        # multiple k-weights: epsilon_k has one entry per k-weight
        if isinstance(tr.kweight, Iterable):
            if isinstance(ds.epsilon_k[0], np.ndarray):
                msg = []
                for eps in ds.epsilon_k:
                    msg.append('Array(mean=%.6f, std=%.6f)' %
                               (eps.mean(), eps.std()))
                eps_k = ', '.join(msg)
            else:
                eps_k = ', '.join(['%.6f' % eps for eps in ds.epsilon_k])
            eps_r = ', '.join(['%.6f' % eps for eps in ds.epsilon_r])
            kweigh = ', '.join(['%i' % kwe for kwe in tr.kweight])
        else:
            if isinstance(ds.epsilon_k, np.ndarray):
                eps_k = 'Array(mean=%.6f, std=%.6f)' % (ds.epsilon_k.mean(),
                                                        ds.epsilon_k.std())
            else:
                eps_k = '%.6f' % ds.epsilon_k
            eps_r = '%.6f' % ds.epsilon_r
            kweigh = '%i' % tr.kweight

        out.append('   fit space          = \'%s\'' % (tr.fitspace))
        out.append('   r-range            = %.3f, %.3f' % (tr.rmin, tr.rmax))
        out.append('   k-range            = %.3f, %.3f' % (tr.kmin, tr.kmax))
        kwin = '   k window, dk       = \'%s\', %.3f' % (tr.window, tr.dk)
        if tr.dk2 is not None:
            kwin = "%s, %.3f" % (kwin, tr.dk2)
        out.append(kwin)
        pathfiles = [p.filename for p in ds.pathlist]
        out.append('   paths used in fit  = %s' % (repr(pathfiles)))
        out.append('   k-weight           = %s' % kweigh)
        out.append('   epsilon_k          = %s' % eps_k)
        out.append('   epsilon_r          = %s' % eps_r)
        out.append('   n_independent      = %.3f' % (ds.n_idp))

        #
    out.append(' ')
    out.append(header % 'Variables')

    # varied parameters are reported directly; constrained (expression)
    # parameters are collected and reported in their own section
    exprs = []
    for name in dir(params):
        var = getattr(params, name)
        if len(name) < 14:
            name = (name + ' ' * 14)[:14]
        if isParameter(var):
            if var.vary:
                stderr = 'unknown'
                if var.stderr is not None: stderr = "%f" % var.stderr
                out.append(varformat % (name, var.value, stderr, var._initval))

            elif var.expr is not None:
                stderr = 'unknown'
                if var.stderr is not None: stderr = "%f" % var.stderr
                exprs.append(exprformat % (name, var.value, stderr, var.expr))
    if len(exprs) > 0:
        out.append(header % 'Constraint Expressions')
        out.extend(exprs)

    # correlations between varied parameters, largest magnitude first,
    # truncated below min_correl
    covar_vars = getattr(params, 'covar_vars', [])
    if len(covar_vars) > 0:
        out.append(' ')
        out.append(header % 'Correlations' +
                   '    (unreported correlations are < % .3f)' % min_correl)
        correls = {}
        for i, name in enumerate(covar_vars):
            par = getattr(params, name)
            if not par.vary:
                continue
            if hasattr(par, 'correl') and par.correl is not None:
                for name2 in covar_vars[i + 1:]:
                    if name != name2 and name2 in par.correl:
                        correls["%s, %s" % (name, name2)] = par.correl[name2]

        sort_correl = sorted(correls.items(), key=lambda it: abs(it[1]))
        sort_correl.reverse()
        for name, val in sort_correl:
            if abs(val) < min_correl:
                break
            if len(name) < 20:
                name = (name + ' ' * 20)[:20]
            out.append('   %s = % .3f ' % (name, val))

    # optional per-path reports for each dataset
    if with_paths:
        out.append(' ')
        out.append(header % 'Paths')
        for ids, ds in enumerate(datasets):
            if len(datasets) > 1:
                out.append(' dataset %i:' % (ids + 1))
            for p in ds.pathlist:
                out.append('%s\n' % p.report())
    out.append('=' * len(topline))
    return '\n'.join(out)
Пример #19
0
def feffit(params,
           datasets,
           _larch=None,
           rmax_out=10,
           path_outputs=True,
           **kws):
    """execute a Feffit fit: a fit of feff paths to a list of datasets

    Parameters:
    ------------
      params:       group containing parameters for fit
      datasets:     Feffit Dataset group or list of Feffit Dataset group.
      rmax_out:     maximum R value to calculate output arrays.
      path_outputs: Flag to set whether all Path outputs should be written.

    Returns:
    ---------
      a fit results group.  This will contain subgroups of:

        datasets: an array of FeffitDataSet groups used in the fit.
        params:   This will be identical to the input parameter group.
        fit:      an object which points to the low-level fit.

     Statistical parameters will be put into the params group.  Each
     dataset will have a 'data' and 'model' subgroup, each with arrays:
        k            wavenumber array of k
        chi          chi(k).
        kwin         window Omega(k) (length of input chi(k)).
        r            uniform array of R, out to rmax_out.
        chir         complex array of chi(R).
        chir_mag     magnitude of chi(R).
        chir_pha     phase of chi(R).
        chir_re      real part of chi(R).
        chir_im      imaginary part of chi(R).
    """
    def _resid(params, datasets=None, _larch=None, **kwargs):
        """ this is the residual function"""
        # residuals of all datasets concatenated into a single array
        return concatenate([d._residual() for d in datasets])

    # accept either a single dataset or a list of them
    if isNamedClass(datasets, FeffitDataSet):
        datasets = [datasets]
    for ds in datasets:
        if not isNamedClass(ds, FeffitDataSet):
            print("feffit needs a list of FeffitDataSets")
            return
    fitkws = dict(datasets=datasets)
    fit = Minimizer(_resid,
                    params,
                    fcn_kws=fitkws,
                    scale_covar=True,
                    _larch=_larch,
                    **kws)

    fit.leastsq()
    # r-factor: sum of squared residuals relative to the data itself
    dat = concatenate([d._residual(data_only=True) for d in datasets])
    params.rfactor = (params.fit_details.fvec**2).sum() / (dat**2).sum()

    # remove temporary parameters for _feffdat and reff
    # that had been placed by _pathparams()
    #for pname in ('_feffdat', 'reff'):
    #    if hasattr(params, pname):
    #        delattr(params, pname)

    # total number of independent points over all datasets
    n_idp = 0
    for ds in datasets:
        n_idp += ds.n_idp

    # here we rescale chi-square and reduced chi-square to n_idp
    npts = len(params.residual)
    params.chi_square *= n_idp * 1.0 / npts
    params.chi_reduced = params.chi_square / (n_idp * 1.0 - params.nvarys)

    # With scale_covar = True, Minimizer() scales the uncertainties
    # by reduced chi-square assuming params.nfree is the correct value
    # for degrees-of-freedom. But n_idp-params.nvarys is a better measure,
    # so we rescale uncertainties here.

    covar = getattr(params, 'covar', None)
    if covar is not None:
        err_scale = (params.nfree / (n_idp - params.nvarys))
        for name in dir(params):
            p = getattr(params, name)
            if isParameter(p) and p.vary:
                p.stderr *= sqrt(err_scale)

        # next, propagate uncertainties to constraints and path parameters.
        params.covar *= err_scale
        vsave, vbest = {}, []
        # 1. save current params
        for vname in params.covar_vars:
            par = getattr(params, vname)
            vsave[vname] = par
            vbest.append(par.value)

        # 2. get correlated uncertainties, set params accordingly
        uvars = correlated_values(vbest, params.covar)
        # 3. evaluate constrained params, save stderr
        for nam in dir(params):
            obj = getattr(params, nam)
            eval_stderr(obj, uvars, params.covar_vars, vsave, _larch)

        # 3. evaluate path params, save stderr
        for ds in datasets:
            for p in ds.pathlist:
                _larch.symtable._sys.paramGroup._feffdat = copy(p._feffdat)
                _larch.symtable._sys.paramGroup.reff = p._feffdat.reff

                for param in ('degen', 's02', 'e0', 'ei', 'deltar', 'sigma2',
                              'third', 'fourth'):
                    obj = getattr(p, param)
                    eval_stderr(obj, uvars, params.covar_vars, vsave, _larch)

        # restore saved parameters again
        for vname in params.covar_vars:
            setattr(params, vname, vsave[vname])

        # clear any errors evaluting uncertainties
        if len(_larch.error) > 0:
            _larch.error = []

    # here we create outputs arrays for chi(k), chi(r):
    for ds in datasets:
        ds.save_ffts(rmax_out=rmax_out, path_outputs=path_outputs)
    return Group(name='feffit fit results',
                 fit=fit,
                 params=params,
                 datasets=datasets)
Пример #20
0
 def test_eval_param2(self):
     """Decoded param2 keeps its name and constraint expression."""
     result = deval(self.param2)
     assert isParameter(result)
     assert result.name == 'b'
     assert result.expr.startswith('sqrt(x/2)')
Пример #21
0
def feffit(paramgroup,
           datasets,
           rmax_out=10,
           path_outputs=True,
           _larch=None,
           **kws):
    """execute a Feffit fit: a fit of feff paths to a list of datasets

    Parameters:
    ------------
      paramgroup:   group containing parameters for fit
      datasets:     Feffit Dataset group or list of Feffit Dataset groups.
      rmax_out:     maximum R value to calculate output arrays.
      path_outputs: Flag to set whether all Path outputs should be written.
      _larch:       optional larch interpreter instance; only used to clear
                    errors raised while evaluating uncertainties.

    Returns:
    ---------
      a fit results group.  This will contain:

        datasets:      the FeffitDataSet groups used in the fit.
        fitter:        the Minimizer object used to run the fit.
        fit_details:   the low-level fit result (params, residual, etc).
        chi_square, chi_reduced, rfactor, aic, bic, covar, n_independent:
                       fit statistics, rescaled to the number of
                       independent points.
        params, nvarys, nfree, ndata, var_names, nfev, success,
        errorbars, message, lmdif_message:
                       copied from the low-level fit result.

     Each dataset will have a 'data' and 'model' subgroup, each with arrays:
        k            wavenumber array of k
        chi          chi(k).
        kwin         window Omega(k) (length of input chi(k)).
        r            uniform array of R, out to rmax_out.
        chir         complex array of chi(R).
        chir_mag     magnitude of chi(R).
        chir_pha     phase of chi(R).
        chir_re      real part of chi(R).
        chir_im      imaginary part of chi(R).
    """
    def _resid(params, datasets=None, paramgroup=None, _larch=None, **kwargs):
        """residual function: push current fit parameters back to the
        user-facing paramgroup, then concatenate all dataset residuals"""
        params2group(params, paramgroup)
        return concatenate([d._residual(paramgroup) for d in datasets])

    # allow a single dataset to be passed bare
    if isNamedClass(datasets, FeffitDataSet):
        datasets = [datasets]

    params = group2params(paramgroup, _larch=_larch)

    for ds in datasets:
        if not isNamedClass(ds, FeffitDataSet):
            print("feffit needs a list of FeffitDataSets")
            return
        ds.prepare_fit()

    fit = Minimizer(_resid,
                    params,
                    fcn_kws=dict(datasets=datasets, paramgroup=paramgroup),
                    scale_covar=True,
                    **kws)

    result = fit.leastsq()

    params2group(result.params, paramgroup)
    # data-only residual, needed for the R-factor below
    dat = concatenate(
        [d._residual(paramgroup, data_only=True) for d in datasets])

    n_idp = 0
    for ds in datasets:
        n_idp += ds.n_idp

    # here we rescale chi-square and reduced chi-square to n_idp
    npts = len(result.residual)
    chi_square = result.chisqr * n_idp * 1.0 / npts
    chi_reduced = chi_square / (n_idp * 1.0 - result.nvarys)
    rfactor = (result.residual**2).sum() / (dat**2).sum()
    # calculate 'aic', 'bic' rescaled to n_idp
    # note that neg2_loglikel is -2*log(likelihood)
    neg2_loglikel = n_idp * np.log(chi_square / n_idp)
    aic = neg2_loglikel + 2 * result.nvarys
    bic = neg2_loglikel + np.log(n_idp) * result.nvarys

    # With scale_covar = True, Minimizer() scales the uncertainties
    # by reduced chi-square assuming result.nfree is the correct value
    # for degrees-of-freedom. But n_idp-result.nvarys is a better measure,
    # so we rescale uncertainties here.
    covar = getattr(result, 'covar', None)
    if covar is not None:
        err_scale = (result.nfree / (n_idp - result.nvarys))
        for name in result.var_names:
            p = result.params[name]
            if isParameter(p) and p.vary:
                p.stderr *= sqrt(err_scale)

        # next, propagate uncertainties to constraints and path parameters.
        result.covar *= err_scale
        vsave, vbest = {}, []

        # 1. save current params
        for vname in result.var_names:
            par = result.params[vname]
            vsave[vname] = par
            vbest.append(par.value)

        # 2. get correlated uncertainties, set params accordingly
        uvars = correlated_values(vbest, result.covar)

        # 3. evaluate constrained params, save stderr
        for nam, obj in result.params.items():
            eval_stderr(obj, uvars, result.var_names, result.params)

        # 4. evaluate path params, save stderr
        for ds in datasets:
            for p in ds.pathlist:
                p.store_feffdat()
                for pname in ('degen', 's02', 'e0', 'ei', 'deltar', 'sigma2',
                              'third', 'fourth'):
                    obj = p.params[PATHPAR_FMT % (pname, p.label)]
                    eval_stderr(obj, uvars, result.var_names, result.params)

        # 5. restore saved parameters again
        for vname in result.var_names:
            params[vname] = vsave[vname]

        # clear any errors raised while evaluating uncertainties.
        # NOTE: _larch may legitimately be None (its default), so guard
        # before touching _larch.error
        if _larch is not None and len(_larch.error) > 0:
            _larch.error = []

    # reset the parameters group with the newly updated uncertainties
    params2group(result.params, paramgroup)

    # here we create outputs arrays for chi(k), chi(r):
    for ds in datasets:
        ds.save_ffts(rmax_out=rmax_out, path_outputs=path_outputs)

    out = Group(name='feffit results',
                datasets=datasets,
                fitter=fit,
                fit_details=result,
                chi_square=chi_square,
                n_independent=n_idp,
                chi_reduced=chi_reduced,
                rfactor=rfactor,
                aic=aic,
                bic=bic,
                covar=covar)

    # mirror the basic fit attributes onto the output group for convenience
    for attr in ('params', 'nvarys', 'nfree', 'ndata', 'var_names', 'nfev',
                 'success', 'errorbars', 'message', 'lmdif_message'):
        setattr(out, attr, getattr(result, attr, None))
    return out
Пример #22
0
def feffit(params, datasets, _larch=None, rmax_out=10, path_outputs=True, **kws):
    """execute a Feffit fit: a fit of feff paths to a list of datasets

    Parameters:
    ------------
      params:       group containing parameters for fit
      datasets:     Feffit Dataset group or list of Feffit Dataset groups.
      rmax_out:     maximum R value to calculate output arrays.
      path_outputs: Flag to set whether all Path outputs should be written.
      _larch:       larch interpreter instance; used to hold temporary
                    path data and to clear errors raised while evaluating
                    uncertainties.

    Returns:
    ---------
      a fit results group.  This will contain subgroups of:

        datasets: an array of FeffitDataSet groups used in the fit.
        params:   This will be identical to the input parameter group.
        fit:      an object which points to the low-level fit.

     Statistical parameters will be put into the params group.  Each
     dataset will have a 'data' and 'model' subgroup, each with arrays:
        k            wavenumber array of k
        chi          chi(k).
        kwin         window Omega(k) (length of input chi(k)).
        r            uniform array of R, out to rmax_out.
        chir         complex array of chi(R).
        chir_mag     magnitude of chi(R).
        chir_pha     phase of chi(R).
        chir_re      real part of chi(R).
        chir_im      imaginary part of chi(R).
    """

    def _resid(params, datasets=None, _larch=None, **kwargs):
        """residual function: concatenated residuals of all datasets"""
        return concatenate([d._residual() for d in datasets])

    # allow a single dataset to be passed bare
    if isNamedClass(datasets, FeffitDataSet):
        datasets = [datasets]
    for ds in datasets:
        if not isNamedClass(ds, FeffitDataSet):
            print("feffit needs a list of FeffitDataSets")
            return
    fitkws = dict(datasets=datasets)
    fit = Minimizer(_resid, params, fcn_kws=fitkws,
                    scale_covar=True, _larch=_larch, **kws)

    fit.leastsq()
    # data-only residual, needed for the R-factor below
    dat = concatenate([d._residual(data_only=True) for d in datasets])
    params.rfactor = (params.fit_details.fvec**2).sum() / (dat**2).sum()

    n_idp = 0
    for ds in datasets:
        n_idp += ds.n_idp

    # here we rescale chi-square and reduced chi-square to n_idp
    npts = len(params.residual)
    params.chi_square *= n_idp*1.0 / npts
    params.chi_reduced = params.chi_square/(n_idp*1.0 - params.nvarys)

    # With scale_covar = True, Minimizer() scales the uncertainties
    # by reduced chi-square assuming params.nfree is the correct value
    # for degrees-of-freedom. But n_idp-params.nvarys is a better measure,
    # so we rescale uncertainties here.
    covar = getattr(params, 'covar', None)
    if covar is not None:
        err_scale = (params.nfree / (n_idp - params.nvarys))
        for name in dir(params):
            p = getattr(params, name)
            if isParameter(p) and p.vary:
                p.stderr *= sqrt(err_scale)

        # next, propagate uncertainties to constraints and path parameters.
        params.covar *= err_scale
        vsave, vbest = {}, []

        # 1. save current params
        for vname in params.covar_vars:
            par = getattr(params, vname)
            vsave[vname] = par
            vbest.append(par.value)

        # 2. get correlated uncertainties, set params accordingly
        uvars = correlated_values(vbest, params.covar)

        # 3. evaluate constrained params, save stderr
        for nam in dir(params):
            obj = getattr(params, nam)
            eval_stderr(obj, uvars, params.covar_vars, vsave, _larch)

        # 4. evaluate path params, save stderr.  Each path's feff data
        # and reff are placed in the interpreter's paramGroup so that
        # constraint expressions referencing them can be re-evaluated.
        for ds in datasets:
            for p in ds.pathlist:
                _larch.symtable._sys.paramGroup._feffdat = copy(p._feffdat)
                _larch.symtable._sys.paramGroup.reff = p._feffdat.reff

                for param in ('degen', 's02', 'e0', 'ei',
                              'deltar', 'sigma2', 'third', 'fourth'):
                    obj = getattr(p, param)
                    eval_stderr(obj, uvars, params.covar_vars, vsave, _larch)

        # 5. restore saved parameters again
        for vname in params.covar_vars:
            setattr(params, vname, vsave[vname])

        # clear any errors raised while evaluating uncertainties.
        # NOTE: _larch may be None (its default), so guard before
        # touching _larch.error
        if _larch is not None and len(_larch.error) > 0:
            _larch.error = []

    # here we create outputs arrays for chi(k), chi(r):
    for ds in datasets:
        ds.save_ffts(rmax_out=rmax_out, path_outputs=path_outputs)
    return Group(name='feffit fit results', fit=fit, params=params,
                 datasets=datasets)
Пример #23
0
 def test_eval_param2(self):
     """deval on param2 should give a Parameter named 'b' whose
     constraint expression begins with 'sqrt(x/2)'."""
     result = deval(self.param2)
     assert isParameter(result)
     assert result.name == 'b'
     assert result.expr.startswith('sqrt(x/2)')
Пример #24
0
def feffit_report(result, min_correl=0.1, with_paths=True,
                  _larch=None):
    """return a printable report of fit for feffit

    Parameters:
    ------------
      result:      Feffit result, output group from feffit()
      min_correl:  minimum correlation to report [0.1]
      with_paths:  boolean (True/False) for whether to list all paths [True]

    Returns:
    ---------
      printable string of report.

    """
    # validate the input: a feffit() output group must carry
    # 'fit', 'params' and 'datasets' attributes
    input_ok = False
    try:
        fit    = result.fit
        params = result.params
        datasets = result.datasets
        input_ok = True
    except AttributeError:
        # narrow except: only a missing attribute means "not a feffit result"
        pass
    if not input_ok:
        print( 'must pass output of feffit()!')
        return
    topline = '=================== FEFFIT RESULTS ===================='
    header = '[[%s]]'
    varformat  = '   %12s = % f +/- %s   (init= % f)'
    exprformat = '   %12s = % f +/- %s  = \'%s\''
    out = [topline, header % 'Statistics']

    npts = len(params.residual)

    # overall fit statistics
    out.append('   npts, nvarys, nfree= %i, %i, %i' % (npts, params.nvarys,
                                                       params.nfree))
    out.append('   chi_square         = %.8g'  % (params.chi_square))
    out.append('   reduced chi_square = %.8g'  % (params.chi_reduced))
    out.append('   r-factor           = %.8g'  % (params.rfactor))
    out.append(' ')
    if len(datasets) == 1:
        out.append(header % 'Data')
    else:
        out.append(header % 'Datasets (%i)' % len(datasets))
    for i, ds in enumerate(datasets):
        tr = ds.transform
        if len(datasets) > 1:
            out.append(' dataset %i:' % (i+1))
        # epsilon_k / kweight may be scalars or per-kweight sequences;
        # epsilon_k entries may themselves be arrays, summarized here
        # by mean and standard deviation
        if isinstance(tr.kweight, Iterable):
            if isinstance(ds.epsilon_k[0], np.ndarray):
                msg = []
                for eps in ds.epsilon_k:
                    msg.append('Array(mean=%.6f, std=%.6f)' % (eps.mean(), eps.std()))
                eps_k = ', '.join(msg)
            else:
                eps_k = ', '.join(['%.6f' % eps for eps in ds.epsilon_k])
            eps_r = ', '.join(['%.6f' % eps for eps in ds.epsilon_r])
            kweigh = ', '.join(['%i' % kwe for kwe in tr.kweight])
        else:
            if isinstance(ds.epsilon_k, np.ndarray):
                eps_k = 'Array(mean=%.6f, std=%.6f)' % (ds.epsilon_k.mean(),
                                                        ds.epsilon_k.std())
            else:
                eps_k = '%.6f' % ds.epsilon_k
            eps_r = '%.6f' % ds.epsilon_r
            kweigh = '%i' % tr.kweight

        out.append('   fit space          = \'%s\''  % (tr.fitspace))
        out.append('   r-range            = %.3f, %.3f' % (tr.rmin, tr.rmax))
        out.append('   k-range            = %.3f, %.3f' % (tr.kmin, tr.kmax))
        kwin = '   k window, dk       = \'%s\', %.3f'   % (tr.window, tr.dk)
        if tr.dk2 is not None:
            kwin = "%s, %.3f" % (kwin, tr.dk2)
        out.append(kwin)
        pathfiles = [p.filename for p in ds.pathlist]
        out.append('   paths used in fit  = %s' % (repr(pathfiles)))
        out.append('   k-weight           = %s' % kweigh)
        out.append('   epsilon_k          = %s'  % eps_k)
        out.append('   epsilon_r          = %s'  % eps_r)
        out.append('   n_independent      = %.3f'  % (ds.n_idp))

    out.append(' ')
    out.append(header % 'Variables')

    # varied parameters are reported immediately; constrained
    # (expression-defined) parameters are collected and shown afterwards
    exprs = []
    for name in dir(params):
        var = getattr(params, name)
        if len(name) < 14:
            name = (name + ' '*14)[:14]
        if isParameter(var):
            if var.vary:
                stderr = 'unknown'
                if var.stderr is not None: stderr = "%f" % var.stderr
                out.append(varformat % (name, var.value,
                                        stderr, var._initval))

            elif var.expr is not None:
                stderr = 'unknown'
                if var.stderr is not None: stderr = "%f" % var.stderr
                exprs.append(exprformat % (name, var.value,
                                           stderr, var.expr))
    if len(exprs) > 0:
        out.append(header % 'Constraint Expressions')
        out.extend(exprs)

    # correlations between varied parameters, strongest first,
    # omitting those below min_correl
    covar_vars = getattr(params, 'covar_vars', [])
    if len(covar_vars) > 0:
        out.append(' ')
        out.append(header % 'Correlations' +
                   '    (unreported correlations are < % .3f)' % min_correl)
        correls = {}
        for i, name in enumerate(covar_vars):
            par = getattr(params, name)
            if not par.vary:
                continue
            if hasattr(par, 'correl') and par.correl is not None:
                for name2 in covar_vars[i+1:]:
                    if name != name2 and name2 in par.correl:
                        correls["%s, %s" % (name, name2)] = par.correl[name2]

        sort_correl = sorted(correls.items(), key=lambda it: abs(it[1]))
        sort_correl.reverse()
        for name, val in sort_correl:
            if abs(val) < min_correl:
                break
            if len(name) < 20:
                name = (name + ' '*20)[:20]
            out.append('   %s = % .3f ' % (name, val))

    # per-path reports, one per path per dataset
    if with_paths:
        out.append(' ')
        out.append(header % 'Paths')
        for ids, ds in enumerate(datasets):
            if len(datasets) > 1:
                out.append(' dataset %i:' % (ids+1))
            for p in ds.pathlist:
                out.append('%s\n' % p.report())
    out.append('='*len(topline))
    return '\n'.join(out)