Code Example #1
File: test_NIST_Strd.py    Project: Tillsten/lmfit-py
def build_usage():
    modelnames = []
    ms = ''
    for d in sorted(Models.keys()):
        ms = ms + ' %s ' % d
        if len(ms) > 55:
            modelnames.append(ms)
            ms = '    '
    modelnames.append(ms)
    modelnames = '\n'.join(modelnames)

    usage = """
 === Test Fit to NIST StRD Models ===

usage:
------
    python fit_NIST.py [options] Model Start

where Start is one of 'start1','start2' or 'cert', for different
starting values, and Model is one of

    %s

if Model = 'all', all models and starting values will be run.

options:
--------
  -m  name of fitting method.  One of:
          leastsq, nelder, powell, lbfgsb, bfgs,
          tnc, cobyla, slsqp, cg, newton-cg
      leastsq (Levenberg-Marquardt) is the default
""" % modelnames
    return usage
Code Example #2
def test_NIST(self):
    # Run all the NIST standard tests with leastsq
    for model in Models.keys():
        try:
            NIST_runner(model)
        except Exception:
            print(model)
            raise
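
NIST_runner itself is not shown in this excerpt. Judging only from the call above, a hypothetical stand-in that delegates to NIST_Dataset (used in the examples below) and asserts success might look like this; the real implementation in test_NIST_Strd.py may take additional arguments:

# Hypothetical stand-in for NIST_runner, matching only the call site above;
# the real implementation lives in test_NIST_Strd.py and may differ.
def NIST_runner(dataset, method='leastsq', start='start2'):
    ok = NIST_Dataset(dataset, method=method, start=start,
                      plot=False, verbose=False)
    assert ok, "NIST StRD fit failed for '%s'" % dataset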
Code Example #3
def run_interactive():
    usage = build_usage()
    parser = OptionParser(usage=usage, prog="fit-NIST.py")

    parser.add_option("-m",
                      "--method",
                      dest="method",
                      metavar='METH',
                      default='leastsq',
                      help="set method name, default = 'leastsq'")

    (opts, args) = parser.parse_args()
    dset = ''
    start = 'start2'
    if len(args) > 0:
        dset = args[0]
    if len(args) > 1:
        start = args[1]

    if dset.lower() == 'all':
        tpass = 0
        tfail = 0
        failures = []
        dsets = sorted(Models.keys())
        for dset in dsets:
            for start in ('start1', 'start2', 'cert'):
                if NIST_Dataset(dset,
                                method=opts.method,
                                start=start,
                                plot=False,
                                verbose=True):
                    tpass += 1
                else:
                    tfail += 1
                    failures.append("   %s (starting at '%s')" % (dset, start))
        print('--------------------------------------')
        print(' Fit Method: %s ' % opts.method)
        print(' Final Results: %i pass, %i fail.' % (tpass, tfail))
        print(' Tests Failed for:\n %s' % '\n '.join(failures))
        print('--------------------------------------')
    elif dset not in Models:
        print(usage)
    else:
        return NIST_Dataset(dset,
                            method=opts.method,
                            start=start,
                            plot=False,
                            verbose=True)
Code Example #4
File: test_NIST_Strd.py    Project: Tillsten/lmfit-py
def run_interactive():
    usage = build_usage()
    parser = OptionParser(usage=usage, prog="fit-NIST.py")

    parser.add_option("-m", "--method", dest="method",
                      metavar='METH',
                      default='leastsq',
                      help="set method name, default = 'leastsq'")

    (opts, args) = parser.parse_args()
    dset = ''
    start = 'start2'
    if len(args) > 0:
        dset = args[0]
    if len(args) > 1:
        start = args[1]

    if dset.lower() == 'all':
        tpass = 0
        tfail = 0
        failures = []
        dsets = sorted(Models.keys())
        for dset in dsets:
            for start in ('start1', 'start2', 'cert'):
                if NIST_Dataset(dset, method=opts.method, start=start,
                                plot=False, verbose=True):
                    tpass += 1
                else:
                    tfail += 1
                    failures.append("   %s (starting at '%s')" % (dset, start))
        print('--------------------------------------')
        print(' Fit Method: %s ' % opts.method)
        print(' Final Results: %i pass, %i fail.' % (tpass, tfail))
        print(' Tests Failed for:\n %s' % '\n '.join(failures))
        print('--------------------------------------')
    elif dset not in Models:
        print(usage)
    else:
        return NIST_Dataset(dset, method=opts.method,
                            start=start, plot=True, verbose=True)
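
Both versions of run_interactive use optparse, which has been deprecated since Python 2.7/3.2 in favour of argparse. A rough, hypothetical equivalent of the option definitions (not part of the original file) is sketched below; `usage` is the string returned by build_usage() above, and this block would replace the OptionParser section inside run_interactive():

import argparse

# Sketch: the same options expressed with argparse instead of the
# deprecated optparse.OptionParser.
parser = argparse.ArgumentParser(prog="fit-NIST.py", usage=usage)
parser.add_argument("-m", "--method", dest="method", metavar="METH",
                    default="leastsq",
                    help="set method name, default = 'leastsq'")
parser.add_argument("args", nargs="*",
                    help="Model and Start positional arguments")
opts = parser.parse_args()
args = opts.args   # mirrors (opts, args) from the optparse version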
Code Example #5
"""

msg2 = """
That is, use
    python fit_NIST.py Bennett5 start1
or go through all models and starting points with:
    python fit_NIST.py all
"""

if __name__ == '__main__':
    dset = 'Bennett5'
    start = 'start2'
    if len(sys.argv) < 2:
        print(msg1)
        out = ''
        for d in sorted(Models.keys()):
            out = out + ' %s ' % d
            if len(out) > 55:
                print(out)
                out = ''
        print(out)
        print(msg2)

        sys.exit()

    if len(sys.argv) > 1:
        dset = sys.argv[1]
    if len(sys.argv) > 2:
        start = sys.argv[2]
    if dset.lower() == 'all':
        tpass = 0
Code Example #6
        cval.append(NISTdata['cert_values'][count])
        cerr.append(NISTdata['cert_stderr'][count])
        pval1 = NISTdata[start][count]
        start_param.append(pval1)
    try:
        out = curve_fit2(func, x, y, p0=start_param, full_output=True)
    except RuntimeError as e:
        print(e)
    else:
        popt = out[0]
        nvarys = len(popt)

        print('Optimized -- Certified')
        for el in zip(popt, cval):
            print(str(el[0]) + '   ' + str(el[1]))

        errors = np.array(popt) - np.array(cval)
        rel_errors = errors / np.array(cval)
        print('Highest relative error:', np.max(rel_errors))


if __name__ == '__main__':

    # Get the names of the datasets
    datasets = sorted(Models.keys())
    # Run test
    for dataset in datasets:
        start = 'start1'  # start1 or start2...
        test_curvefit(dataset, start)
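
curve_fit2 above is presumably a local wrapper around scipy.optimize.curve_fit; it is not shown in this excerpt. A self-contained sketch of the same optimized-versus-certified comparison, using synthetic data and the true parameters in place of the NIST certified values, could look like this:

import numpy as np
from scipy.optimize import curve_fit

# Self-contained sketch of the comparison pattern above, with synthetic data
# and the true parameters playing the role of the certified values.
def model(x, b1, b2):
    return b1 * np.exp(-b2 * x)

true_params = np.array([2.5, 0.4])
x = np.linspace(0.0, 10.0, 50)
y = model(x, *true_params)

popt, pcov = curve_fit(model, x, y, p0=[1.0, 1.0])
rel_errors = (popt - true_params) / true_params

print('Optimized -- Certified')
for opt_val, cert_val in zip(popt, true_params):
    print(str(opt_val) + '   ' + str(cert_val))
print('Highest relative error:', np.max(np.abs(rel_errors)))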