Example #1
def fit_allXY(data, time_pi_pulse, plot=False):
    pfit = Parameters()

    offset = np.average(data)
    visibility = np.max(data)-np.min(data)

    pfit.add(name='visibility', value=visibility, min = 0, max=1, vary=False)
    pfit.add(name='offset', value=offset, min=0, max=0.5)
    pfit.add(name='rotation_error', value=0.01, min=-np.pi/10, max=np.pi/10) #20deg error max
    pfit.add(name='detuning_error', value=0.01)

    mini = Minimizer(fit_func_allXY, pfit, fcn_args=(data,))
    intermediate_result = mini.minimize(method='Nelder')
    result = mini.minimize(method='leastsq', params=intermediate_result.params)

    best_fit = data + result.residual

    if plot:
        plt.figure()
        plt.plot(data, 'bo')
        plt.plot(best_fit, 'r--', label='best fit')
        plt.xlabel('nth gate')
        plt.ylabel('spin prob (%)')
        plt.legend(loc='best')
        plt.show()
    
    # approximate errors.
    print(f'Change pi time by {round(-result.params["rotation_error"].value/np.pi*100,2)} %')
    print(f'Off resonant by {round(result.params["detuning_error"].value/time_pi_pulse*1e-6, 3)} MHz')

    return -result.params["rotation_error"].value/np.pi, result.params["detuning_error"].value/time_pi_pulse/2
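The two-stage minimize call used above (a derivative-free Nelder-Mead pass followed by leastsq to get uncertainties) is a general lmfit pattern. A minimal, self-contained sketch of the same pattern on a toy quadratic model; the model, data and parameter names below are illustrative and not part of the example above:

import numpy as np
from lmfit import Minimizer, Parameters

def quad_residual(params, x, data):
    # residual of a simple quadratic model, a*x**2 + b, against the data
    return params['a'].value * x**2 + params['b'].value - data

x = np.linspace(-1, 1, 50)
data = 2.0 * x**2 + 0.5 + np.random.normal(scale=0.05, size=x.size)

pars = Parameters()
pars.add('a', value=1.0)
pars.add('b', value=0.0)

mini = Minimizer(quad_residual, pars, fcn_args=(x, data))
coarse = mini.minimize(method='nelder')                          # robust first pass
result = mini.minimize(method='leastsq', params=coarse.params)   # refine and get stderr
print(result.params['a'].value, result.params['a'].stderr)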
def ctrendlogs(data, z, wlog):
    print('BUG: Vs prediction is wrong')
    print('--------------------------')
    indnan = np.argwhere(np.isnan(data))
    if (indnan.size > 0):
        msg = wlog + ' still has NaN values, but they are removed for now'
        print('BUG: inside ctrendlog module')
        print(msg)
        print('--------------------------------------------------')
        data = np.delete(data, indnan)
        z = np.delete(z, indnan)
        dtold = z[1] - z[0]
        data = ft.Filt(data, 10, dtold)

    params = Parameters()
    lskws = dict(ftol=1.e-20, xtol=1.e-20)
    # create Minimizer
    if (wlog == 'AI'):  # not tested
        scal = 1
        data = data / scal
        params.add('a', value=1.0000e-10, min=0, max=300, vary=True)
        params.add('b', value=1.0000e-10, min=0, max=300, vary=True)
        params.add('c', value=1.0000e-10, min=0, max=300, vary=True)
        mini = Minimizer(residual, params, fcn_args=(data, z, wlog))
        result = mini.minimize(method='least_squares', **lskws)
    elif (wlog == 'Vp'):
        scal = 1
        data = data / scal
        params.add('a', value=1.0000e-10, min=0, max=300, vary=True)
        params.add('b', value=1.0000e-10, min=0, max=300, vary=True)
        params.add('c', value=1.0000e-10, min=0, max=300, vary=True)
        mini = Minimizer(residual, params, fcn_args=(data, z, wlog))
        result = mini.minimize(method='least_squares', **lskws)
    elif (wlog == 'Vs'):
        scal = 1
        data = data / scal
        params.add('a', value=1.0000e-10, min=0, max=0.005, vary=True)
        params.add('b', value=1.0000e-10, min=0, max=0.001, vary=True)
        params.add('c', value=1.0000e-10, min=0, max=0.900, vary=True)
        params.add('d', value=1.0000e-10, min=0, max=0.002, vary=True)
        mini = Minimizer(residual, params, fcn_args=(data, z, wlog))
        result = mini.minimize(method='least_squares', **lskws)
    elif (wlog == 'GR'):
        scal = 1
        data = data / scal
        params.add('a', value=1.0000e-10, min=0, max=0.005, vary=True)
        params.add('b', value=1.0000e-10, min=0, max=0.001, vary=True)
        mini = Minimizer(residual, params, fcn_args=(data, z, wlog))
        result = mini.minimize(method='least_squares', **lskws)
    elif (wlog == 'Rhob'):
        params.add('a', value=1.0000e-10, min=0, max=1.0000, vary=True)
        params.add('b', value=1.0000e-10, min=0, max=0.00012, vary=True)
        mini = Minimizer(residual, params, fcn_args=(data, z, wlog))
        result = mini.minimize(method='least_squares', **lskws)

    else:
        raise ValueError('unknown log type: ' + wlog)

    return result.params
Example #3
def fitTwoCauchy(x, data, err, xbin):
    data1 = np.copy(data)  #[9,10,11]
    #data1[len(data)/2-2] = float('NaN')
    data1[len(data) // 2 - 1] = float('NaN')
    data1[len(data) // 2] = float('NaN')
    data1[len(data) // 2 + 1] = float('NaN')
    #data1[len(data)/2+2] = float('NaN')

    params1 = Parameters()
    params1.add('amp', value=0.3, min=0.1)
    params1.add('gamma_1', value=0.4, min=0.3, max=1.0)

    minner1 = Minimizer(cauchy_no_const,
                        params1,
                        fcn_args=(x, data1, err),
                        nan_policy='omit')
    result1 = minner1.minimize()
    gamma_2 = result1.params['gamma_1']
    amp2 = result1.params['amp']

    params = Parameters()
    params.add('amp', value=0.3, min=0.1, max=0.5)
    params.add('nonamp', value=0.6, min=0.0, max=1.0)
    params.add('gamma_1', value=0.04, min=0.02, max=0.2)

    minner = Minimizer(twoCauchy,
                       params,
                       fcn_args=(x, data, err, gamma_2, amp2),
                       nan_policy='omit')
    result = minner.minimize()
    #report_fit(result)

    a = result1.params['amp'].value * result.params[
        'nonamp'].value * result.params['amp'].value + result.params[
            'amp'].value
    n = result.params['amp'].value / a
    fit_out = {
        'amplitude': a,
        'constant': 0.0,
        'gamma_1': result.params['gamma_1'].value,
        'gamma_1 error': result.params['gamma_1'].stderr,
        'gamma_2': result1.params['gamma_1'].value,
        'gamma_2 error': result1.params['gamma_1'].stderr,
        'reduced chi': result.redchi,
        'N_1': n
    }

    return fit_out
def fit_theory(x, y, vary=True):

    # fits a sum of gaussians to a data set
    # my_lines is a list of frequency offsets

    params = Parameters()

    #params.add('a', value = np.min(y), min = 0*np.min(y), max = np.max(y), vary = vary)
    #params.add('freq_offset', value = freq_offset, min = np.min(x), max = np.max(x), vary = vary)

    params.add('a', value=1.0, vary=vary)
    params.add('b', value=1.0, vary=vary)
    params.add('c', value=1.0, vary=vary)

    # do fit, here with leastsq model
    minner = Minimizer(fcn2min, params, fcn_args=(x, y))
    result = minner.minimize()

    # Store the Confidence data from the fit
    con_report = lmfit.fit_report(result.params)

    (x_plot, model) = fcn2min(result.params, x, y, plot_fit=True)

    # get residuals
    (residuals) = fcn2min(result.params, x, y)

    #:print(result.params)

    return (x_plot, model, result, residuals)
Example #5
def lmfit(fcm_weights, agg_weights, const, func):
    flat_weights = np.concatenate(
        (fcm_weights.flatten(), agg_weights.flatten()), axis=None)

    params = Parameters()

    for i, w in enumerate(flat_weights):
        params.add(f'w{i}', value=w, min=-1, max=1)

    fitter = Minimizer(func, params)
    result = fitter.minimize(method='nelder')

    n, m = const

    err = func(result.params)

    fcm_weights = np.reshape(
        np.fromiter([result.params[f'w{i}'] for i in range(n * n)],
                    dtype=float), (n, n))
    agg_weights = np.reshape(
        np.fromiter(
            [result.params[f'w{i}'] for i in range(n * n, len(flat_weights))],
            dtype=float), (m, n))

    return fcm_weights, agg_weights, err
Example #6
def minimisation(next_peak, fit_y, total_spectral_ydata, corr_distance):

    region = np.arange(max(0, next_peak - 100),
                       min(next_peak + 100, len(total_spectral_ydata)))

    params = Parameters()

    params.add('amp' + str(next_peak),
               value=total_spectral_ydata[next_peak],
               vary=False,
               min=0)
    params.add('width' + str(next_peak),
               value=4 * corr_distance,
               vary=True,
               min=1 * corr_distance,
               max=8 * corr_distance)
    params.add('pos' + str(next_peak), value=next_peak, vary=False)

    # print('minimising')

    out = Minimizer(residual,
                    params,
                    fcn_args=(fit_y[region], next_peak, region,
                              total_spectral_ydata[region]))

    results = out.minimize()

    # append the results params to the total params

    fit_yc = lorentzian(np.arange(
        len(total_spectral_ydata)), results.params['width' + str(next_peak)],
                        results.params['pos' + str(next_peak)],
                        results.params['amp' + str(next_peak)]) + fit_y

    return fit_yc
def fit_yb_T(x, y):
        params = Parameters()
 
        params.add('a', value=-5.0, min=-10.0, max=0.0, vary = True)
        #params.add('w', value=50.0, min=1.0, max=2000, vary = True)
        params.add('x_offset', value=50, min=np.min(x), max = np.max(x), vary = True)
        params.add('y_offset', value=0.0, min=-2.0, max=2.0, vary = True)
        params.add('T', value = 1.0, min=0.0, max=100.0, vary = True)

         
        iso_abund = np.array([12.887, 31.896, 16.098, 16.098, 21.754, 14.216, 14.216, 3.023, 0.126])
        for k in range(len(iso_abund)):
            params.add('a' + str(k), value = 1.0, min = 0.0, max = 10.0, vary = True)


        # print(params)
        # do fit, here with leastsq model
        minner = Minimizer(fcn2min, params, fcn_args=(x, y))
        result = minner.minimize()
        
        # Store the Confidence data from the fit
        con_report = lmfit.fit_report(result.params)
        
        (x_plot, model) = fcn2min(result.params, x, y, plot_fit = True)

        return (x_plot, model, result)
Example #8
    def Poly_fitting_Er(self):
        def fcn2min(prms, rs, Es):
            A = prms['A']
            B = prms['B']
            C = prms['C']
            D = prms['D']
            E_calc = []
            E_poly = lambda r: A + B * r + C * r**2 + D * r**3
            for ri in rs:
                E_calc.append(E_poly(ri))
            return np.array(E_calc) - Es

        params = Parameters()
        params.add('A', 0, vary=True)
        params.add('B', 0, vary=True)
        params.add('C', 0, vary=True)
        params.add('D', 0, vary=True)
        minner = Minimizer(fcn2min,
                           params=params,
                           fcn_args=(self._rexp, self._Eexp))
        result = minner.minimize()
        final = self._Eexp + result.residual
        report_fit(result)
        self.__A = result.params['A'].value
        self.__B = result.params['B'].value
        self.__C = result.params['C'].value
        self.__D = result.params['D'].value
        return final
    def cov_quasiparticle_occupation_fit_thermal(self, C):

        n_qp = self.cov_calc_quasiparticle_occupation(C)

        def objective_function(params, e, n_qp):
            beta = params['beta']
            mu = params['mu']
            FD = np.exp(-beta * e + mu) / (1 + np.exp(-beta * e + mu))
            return FD - n_qp

        params = Parameters()
        params.add('beta', value=3, min=0, max=100)
        params.add('mu', value=0, min=-10, max=10)

        minner = Minimizer(objective_function, params, fcn_args=(self.e, n_qp))
        result = minner.minimize()

        final = n_qp + result.residual
        beta_opt = result.params['beta'].value
        mu_opt = result.params['mu'].value
        print "Inverse temp fit: ", beta_opt
        print "Chemical potential fit fit: ", mu_opt

        n_qp_thermal = [self.Fermi_Dirac(E, beta_opt, mu_opt) for E in self.e]
        report_fit(result)
        return [n_qp, n_qp_thermal, beta_opt, mu_opt]
Example #10
def fitter(model, params, args, mcmc=False, pos=None, nwalkers=100,
           steps=1000, burn=0.2, progress=True, get_ci=False,
           nan_policy='raise', max_nfev=None, thin=10, is_weighted=True):

    # Do fit
    maxfev = 0 if max_nfev is None else int(max_nfev)
    func = Minimizer(model, params, fcn_args=args, nan_policy=nan_policy,
                     max_nfev=maxfev)
    results = func.minimize()
    if mcmc:
        func = Minimizer(model, results.params, fcn_args=args)
        mcmc_results = func.emcee(nwalkers=nwalkers, steps=steps,
                                  burn=int(burn * steps), pos=pos,
                                  is_weighted=is_weighted, progress=progress,
                                  thin=thin)
        results = mcmc_results

    if get_ci:
        if results.errorbars:
            ci = conf_interval(func, results)
        else:
            ci = ''
        return results, ci
    else:
        return results
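A minimal usage sketch for the wrapper above, assuming a simple exponential-decay residual; the model, data and parameter names here are illustrative only:

import numpy as np
from lmfit import Parameters

def decay_residual(params, x, data):
    # residual of amp * exp(-x / tau) against the data
    return params['amp'].value * np.exp(-x / params['tau'].value) - data

x = np.linspace(0, 10, 200)
data = 3.0 * np.exp(-x / 2.5) + np.random.normal(scale=0.05, size=x.size)

pars = Parameters()
pars.add('amp', value=1.0, min=0)
pars.add('tau', value=1.0, min=0.01)

res = fitter(decay_residual, pars, args=(x, data))            # plain leastsq fit
res_mcmc = fitter(decay_residual, pars, args=(x, data),       # optional emcee sampling
                  mcmc=True, nwalkers=50, steps=500, burn=0.2)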
Example #11
    def run(config, input, output):
        dataFile = os.path.join(config['cwd'], input['fit.datafile'][0][0])
        x, data = np.loadtxt(dataFile, usecols=(0, 1), unpack=True)
        params = Parameters()
        # Loop over parameters defined in input. Later, we might loop over
        # all possible parameters and set which ones vary instead.
        for param in input['fit.parameters']:
            #minval = param[1]*0.5 if param[1] != 0 else -0.5
            #maxval = param[1]*1.5 if param[1] != 0 else 0.5
            #params.add(param[0],value=param[1],min=minval,max=maxval)
            params.add(param[0], value=param[1])

        #open('fitconvergence.dat', 'ab')
        # do fit, here with the default leastsq algorithm
        minner = Minimizer(Xanes2Min,
                           params,
                           fcn_args=(x, data, input, config, output))
        #result = minner.minimize(epsfcn=0.0001,method='differential_evolution',workers=6)
        result = minner.minimize(epsfcn=0.0001)
        final = data + result.residual

        report_fit(result)
        with open('fit_result.txt', 'w') as fh:
            fh.write(fit_report(result))

        output['fit'] = [
            x.tolist(), final.tolist(),
            data.tolist()
        ]  # For now set this as fit. Later we may want to make a statement about what is implemented and what is not for fit.
Example #12
def test_multidimensional_fit_GH205():
    # test that you don't need to flatten the output from the objective
    # function. Tests regression for GH205.
    pos = np.linspace(0, 99, 100)
    xv, yv = np.meshgrid(pos, pos)
    f = lambda xv, yv, lambda1, lambda2: (np.sin(xv * lambda1) + np.cos(
        yv * lambda2))

    data = f(xv, yv, 0.3, 3)
    assert_(data.ndim, 2)

    def fcn2min(params, xv, yv, data):
        """ model decaying sine wave, subtract data"""
        lambda1 = params['lambda1'].value
        lambda2 = params['lambda2'].value
        model = f(xv, yv, lambda1, lambda2)
        return model - data

    # create a set of Parameters
    params = Parameters()
    params.add('lambda1', value=0.4)
    params.add('lambda2', value=3.2)

    mini = Minimizer(fcn2min, params, fcn_args=(xv, yv, data))
    res = mini.minimize()
Example #13
def fit_data_sample():
    # create a set of Parameters
    params = Parameters()
    params.add('amp', value=10, min=0)
    params.add('decay', value=0.1)
    params.add('shift', value=0.0, min=-np.pi / 2., max=np.pi / 2.)
    params.add('omega', value=3.0)

    # create data to be fitted
    x = np.linspace(0, 15, 301)
    data = (5.0 * np.sin(2.0 * x - 0.1) * np.exp(-x * x * 0.025) +
            np.random.normal(size=x.size, scale=0.2))

    # do fit, here with the default leastsq algorithm
    minner = Minimizer(fcn2min, params, fcn_args=(x, data))
    result = minner.minimize()

    # calculate final result
    final = data + result.residual

    # write error report
    report_fit(result)

    # try to plot results
    try:
        import matplotlib.pyplot as plt
        plt.plot(x, data, 'k+')
        plt.plot(x, final, 'r')
        plt.show()
    except ImportError:
        pass
Example #14
    def E_total_fitting_Er(
            self):  # fitting DFT data of E_V with the above function
        def fcn2min(prms, rs, Es):
            ls = prms['ls']
            E_calc = []
            for ri in rs:
                E_calc.append(self.E_total(ri, ls))
            return np.array(E_calc) - Es

        params = Parameters()
        _last_fitting = "/_last_fitting.json"
        filename = current_folder + _last_fitting
        if os.path.exists(filename):
            with open(filename, 'r') as f:
                dct = json.load(f)
            if 'ls' in dct:
                params.add('ls', dct['ls'], vary=True)
        else:
            params.add('ls', 0.5, vary=True)
        minner = Minimizer(fcn2min,
                           params=params,
                           fcn_args=(self._rexp, self._Eexp))
        result = minner.minimize()
        final = self._Eexp + result.residual
        _dct = {'ls': result.params['ls'].value}
        with open(filename, 'w') as f:
            json.dump(_dct, f)
        report_fit(result)
        self.LengthScale = result.params['ls'].value
        return final
Example #15
def do_fit(d, init_guesses, lines=35):

    # init_guesses = [Ug, Ue] = Dunham coefficients
    (Yg35, Ye35, Yg37, Ye37) = get_dunham(init_guesses[0], init_guesses[1])

    params = get_params(Yg35, Ye35)

    #print_params(params)

    #params['Yg11'].min = -1
    #params['Yg11'].max = 1
    #
    #params['Yg20'].min = -1
    #params['Yg20'].max = 1

    # do fit, here with leastsq model
    minner = Minimizer(fcn2min, params, fcn_args=([], d))
    result = minner.minimize()

    # Store the Confidence data from the fit
    #con_report = lmfit.fit_report(result.params)

    #report_fit(result)

    #print(params)
    #print(result.params)

    return (result.params, params)
Example #16
def Gaussian_beam_propagation(meas_points,widths,lambda_beam,plot=False):
    
    params_BeamPropagation = Parameters()
    params_BeamPropagation.add('omega_zero',value=widths[0])
    params_BeamPropagation.add('z0',value=0)
    
    fit = Minimizer(__w_Gauss_freespace_residual,params_BeamPropagation,fcn_args=(meas_points,),\
        fcn_kws={"meas_beamwidths":widths,"lam":lambda_beam})
    fit_res = fit.minimize(maxfev=10**8)
    print(fit_report(fit_res))

    if plot is True:
        print("Let's plot it")
        fitted_w0 = fit_res.params.valuesdict()["omega_zero"]
        fitted_z0 = fit_res.params.valuesdict()["z0"]
        if fitted_z0<meas_points[0]:
            plotpoints = np.linspace(fitted_z0,meas_points[-1],100)
        elif meas_points[0]<fitted_z0<meas_points[-1]:
            plotpoints = np.linspace(meas_points[0],meas_points[-1],100)
        else:
            plotpoints = np.linspace(meas_points[0],fitted_z0,100)
        model_beamwidth = __w_Gauss_freespace_residual(fit_res.params,plotpoints,meas_beamwidths=None,lam=lambda_beam)
        plt.plot(plotpoints,model_beamwidth,'b')
        plt.scatter(meas_points,widths,c="r")
        plt.show()
Example #17
 def _process(dataproc):
     y = dataproc[offset_time:]
     lstEch = self.listEcho[offset_time:]
     ymax = y[-1]
     if ymax < min_amp:
         t1 = -1.0
         magn = 0.0
         shift = 0.0
         return t1, magn, shift
     yT1 = ymax * (1 - np.exp(-1))
     idx = (np.abs(y - yT1)).argmin()
     t1_init = lstEch[idx]
     try:
         params.add('amp', value=ymax)
         params.add('decay', value=t1_init)
         params.add('shift', value=0.0)
         minner = Minimizer(_fcn2min,
                            params,
                            fcn_args=(lstEch, y),
                            max_nfev=iteration)
         result = minner.minimize()
         t1 = result.params['decay'].value
         magn = result.params['amp'].value
         shift = result.params['shift'].value
         if t1 > 5000.0:
             t1 = -1.0
             magn = 0.0
             shift = 0.0
     except Exception as e:
         t1 = -1.0
         magn = 0.0
         shift = 0.0
     return t1, magn, shift
Example #18
def do_fit(x, y):
    params = Parameters()

    params.add('Te_1', value=0.0, min=0.0, max=340.0, vary=False)
    params.add('Te_2', value=38237.0, min=0.0, max=40000.0, vary=True)

    params.add('we_1', value=480.0, min=0.0, max=500.0, vary=True)
    params.add('we_2', value=440.0, min=0.0, max=500.0, vary=True)

    params.add('wexe_1', value=2.037, min=0.0, max=5.0, vary=True)
    params.add('wexe_2', value=2.81, min=0.0, max=8.0, vary=True)

    params.add('weye_1', value=0.5, min=0.0, max=5.0, vary=True)
    params.add('weye_2', value=0.1, min=0.0, max=8.0, vary=True)

    params.add('weze_1', value=0.5, min=0.0, max=5.0, vary=True)
    params.add('weze_2', value=0.1, min=0.0, max=8.0, vary=True)

    # do fit, here with leastsq model
    minner = Minimizer(fcn2min, params, fcn_args=(x, y))
    result = minner.minimize()

    # Store the Confidence data from the fit
    con_report = lmfit.fit_report(result.params)

    return (result)
Example #20
        def _process(dataproc):
            y = dataproc[offset_time:]
            lstEch = self.listEcho[offset_time:]
            ymax = y[-1]
            if ymax < min_amp:
                ti = -1.0
                magn = 0.0
                shift = 1.0
                return ti, magn, shift

            idx = (np.abs(y)).argmin()
            ti_init = lstEch[idx] / np.log(2)
            try:
                params.add('amp', value=ymax)
                params.add('decay', value=ti_init)
                params.add('shift', value=1.0)
                minner = Minimizer(_fcn2min,
                                   params,
                                   fcn_args=(lstEch, y),
                                   max_nfev=iteration)
                result = minner.minimize()
                ti = result.params['decay'].value
                magn = result.params['amp'].value
                shift = result.params['shift'].value
            except Exception as e:
                ti = -1.0
                magn = 0.0
                shift = 1.0
            return ti, magn, shift
Example #21
def asymmetric_samples(mean, plus, minus, size=5000):
    if (plus == 0.0) and (minus == 0.0):
        samples = np.ones(size)*mean
        return samples

    x = np.array([mean-minus, mean, mean+plus])
    data = np.array([0.16, 0.5, 0.84])
    
    params = Parameters()
    params.add('mu', value=mean)
    params.add('sigma', value=np.mean([minus, plus]))
    params.add('alpha', value=0.0)
    
    try:
        minner = Minimizer(fnc2min, params, fcn_args=(x, data))
        result = minner.minimize()
    
        mu = result.params['mu'].value
        sigma = result.params['sigma'].value
        alpha = result.params['alpha'].value
    
        samples = skewnorm.rvs(a=alpha, loc=mu, scale=sigma, size=size)
    
    except ValueError:
        print("Problem producing the distribution ot sampling from it.")
        print("\nReversing to a Normal Distribution")
        print("\nwith scale given by the average between the maximum and minimum errors.")
        samples = norm.rvs(loc=mean, scale=np.mean([minus, plus]), size=size)
    
    return samples
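A minimal usage sketch, assuming fnc2min compares the skew-normal quantiles to the 16/50/84 percent levels as this helper expects; the measurement values below are illustrative:

samples = asymmetric_samples(mean=10.0, plus=0.8, minus=0.5, size=5000)
print(samples.mean(), samples.std())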
Example #22
def fit_nl_orthorhombic_cell(data_df, a, b, c, wavelength, verbose=True):
    """
    perform non-linear fit
    data_df = data in pandas DataFrame
    a, b, c = cell parameter
    wavelength = this can be replaced with .get_base_ptn_wavelength()
    """
    h = data_df['h']
    k = data_df['k']
    l = data_df['l']
    param = Parameters()
    param.add('a', value=a, min=0)
    param.add('b', value=b, min=0)
    param.add('c', value=c, min=0)
    twoth_data = data_df['twoth']
    minner = Minimizer(fcn2min_orthorhombic,
                       param,
                       fcn_args=(h, k, l, twoth_data, wavelength))
    result = minner.minimize()
    # calculate final result

    # write error report
    if verbose:
        report_fit(result)

    return result
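A minimal usage sketch, assuming fcn2min_orthorhombic is defined elsewhere in the same module; the reflections, starting cell and wavelength below are illustrative only:

import pandas as pd

data_df = pd.DataFrame({
    'h': [1, 0, 1, 2],
    'k': [0, 1, 1, 0],
    'l': [1, 1, 0, 0],
    'twoth': [10.21, 11.48, 13.05, 14.90],  # measured two-theta values in degrees
})
result = fit_nl_orthorhombic_cell(data_df, a=4.9, b=5.1, c=6.9, wavelength=0.3344)
print(result.params['a'].value, result.params['a'].stderr)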
def fit_dunham(q, d):
    print('Starting fit...')
    molids, mols = read_in_dunham_config()
    Ys = mols['AlCl62X_Bernath'].keys()
    allowed_Ys = [
        'y00', 'y01', 'y10', 'y11', 'y20', 'y21', 'y22', 'y12', 'y02'
    ]
    params = Parameters()
    for p in Ys:
        if p != 'matrix':
            #if p in allowed_Ys:
            if p == 'y00':
                params.add(p + 'A', value=0.0, vary=True)
            elif p == 'y10':
                params.add(p + 'A', value=0.0, max=600.0, vary=True)
            elif p == 'y20':
                params.add(p + 'A', value=0.0, vary=True)
            else:
                params.add(p + 'A', value=0.0, vary=True)

        #params.add(p+'X', value = 0.0, min = -500, max = 500, vary = True)

    # do fit, here with leastsq model
    minner = Minimizer(fcn2min, params, fcn_args=(q, d))
    result = minner.minimize()

    # Store the Confidence data from the fit
    con_report = lmfit.fit_report(result.params)
    model = fcn2min(result.params, q, d, get_fit=True)

    return (result.params, params, con_report, model)
Example #24
def test_scalar_minimize_has_no_uncertainties():
    # scalar_minimize doesn't calculate uncertainties.
    # when a scalar_minimize is run the stderr and correl for each parameter
    # should be None. (stderr and correl are set to None when a Parameter is
    # initialised).
    # This requires a reset after a leastsq fit has been done.
    # Only when scalar_minimize calculates stderr and correl can this test
    # be removed.

    np.random.seed(1)
    x = np.linspace(0, 15, 301)
    data = (5. * np.sin(2 * x - 0.1) * np.exp(-x * x * 0.025) +
            np.random.normal(size=len(x), scale=0.2))

    # define objective function: returns the array to be minimized
    def fcn2min(params, x, data):
        """ model decaying sine wave, subtract data"""
        amp = params['amp'].value
        shift = params['shift'].value
        omega = params['omega'].value
        decay = params['decay'].value

        model = amp * np.sin(x * omega + shift) * np.exp(-x * x * decay)
        return model - data

    # create a set of Parameters
    params = Parameters()
    params.add('amp', value=10, min=0)
    params.add('decay', value=0.1)
    params.add('shift', value=0.0, min=-pi / 2., max=pi / 2)
    params.add('omega', value=3.0)

    mini = Minimizer(fcn2min, params, fcn_args=(x, data))
    out = mini.minimize()
    assert_(np.isfinite(out.params['amp'].stderr))
    print(out.errorbars)
    assert_(out.errorbars == True)
    out2 = mini.minimize(method='nelder-mead')
    assert_(out2.params['amp'].stderr is None)
    assert_(out2.params['decay'].stderr is None)
    assert_(out2.params['shift'].stderr is None)
    assert_(out2.params['omega'].stderr is None)
    assert_(out2.params['amp'].correl is None)
    assert_(out2.params['decay'].correl is None)
    assert_(out2.params['shift'].correl is None)
    assert_(out2.params['omega'].correl is None)
    assert_(out2.errorbars == False)
Example #26
def estimate_autocorrelation(total_spectral_ydata):
    # note this region may have a baseline distortion

    y = np.real(total_spectral_ydata[0:10000])

    params = Parameters()

    # define a baseline polynomial

    order = 6

    for p in range(order + 1):
        params.add('p' + str(p), value=0)

    def poly(params, order, y):

        bl = np.zeros(len(y))
        x = np.arange(len(y))

        for p in range(order + 1):
            bl += params['p' + str(p)] * x ** (p)

        return bl

    def res(params, order, y):

        bl = poly(params, order, y)

        r = abs(y - bl)

        return r

    out = Minimizer(res, params,
                    fcn_args=(order, y))

    results = out.minimize()

    bl = poly(results.params, order, y)

    y = y - bl

    t0 = np.sum(y * y)

    c = 1

    tc = 1

    t = []

    while tc > 0.36:
        tc = np.sum(np.roll(y, c) * y) / t0

        t.append(tc)

        c += 1

    print('autocorrelation distance = ' + str(c))

    return c
Example #27
    def button_click(self):
        print('Fit Button Pressed')
        self.x = np.array([])
        self.y = np.array([])

        for k in range(self.no_of_rows):

            hlp = self.tableWidget.item(k, 0)
            if not hlp is None:
                self.x = np.append(self.x, float(hlp.text()))
            else:
                break
            hlp = self.tableWidget.item(k, 1)
            if not hlp is None:
                self.y = np.append(self.y, float(hlp.text()))

        print(self.x)
        print(self.y)

        params = Parameters()
        params.add('amplitude',
                   value=np.max(self.y),
                   min=(np.max(self.y) - np.min(self.y)) / 2.0,
                   max=(np.max(self.y) - np.min(self.y)))
        params.add('waist',
                   value=(np.max(self.x) - np.min(self.x)) / 2.0,
                   min=10.0,
                   max=2000)
        params.add('x_offset',
                   value=np.mean(self.x),
                   min=np.min(self.x),
                   max=np.max(self.x))
        params.add('y_offset',
                   value=np.min(self.y),
                   min=0.00,
                   max=np.max(self.y),
                   vary=False)

        # do fit, here with leastsq model
        minner = Minimizer(fcn2min, params, fcn_args=(self.x, self.y))
        result = minner.minimize()

        # Store the Confidence data from the fit
        con_report = lmfit.fit_report(result.params)

        # write error report
        self.textbox.setText("")
        for k in params.keys():
            my_str = str(result.params[k].value)
            self.textbox.append(str(k) + " = " + my_str + "\n")
        self.textbox.append(
            con_report)  # include the confidence data in the textbox

        self.canvas.x = self.x
        self.canvas.y = self.y

        self.canvas.plot(fit_plot=result)
        print(params)
Example #28
def fit_model_2_fixed_n0(turns, DA):
    params = Parameters()
    params.add("rho", value=1, min=0, vary=True)
    params.add("n0", value=1, vary=False)
    params.add("k", value=1, min=0, vary=True)
    minner = Minimizer(fit.model_2_lmfit, params, fcn_args=(turns, DA))
    result = minner.minimize(method="basinhopping")
    final = DA + result.residual
    return result, final
Example #29
def fit_rb(x, y, my_lines):
    params = Parameters()

    cnt_rb = 384.230115e12

    params.add('w0', value=295.0e6, min=100.0e6, max=350e6, vary=True)
    params.add('w1', value=290.48e6, min=0.0e6, max=350e6, vary=True)

    params.add('wlamb', value=10.48e6, min=0.0e6, max=20e6, vary=False)

    params.add('x_offset', value=-15.0e6, min=-100.0e6, max=100.0e6,
               vary=True)  # wavemeter offset
    params.add('y_offset', value=1.0, min=-2.0, max=2.0, vary=True)

    params.add('ab0', value=+0.23, min=0.0, max=2.0, vary=True)
    params.add('ab1', value=+0.92, min=0.0, max=2.0, vary=True)
    params.add('cnt0',
               value=cnt_rb - 1914.0e6,
               min=cnt_rb - 3000.0e6,
               max=cnt_rb - 1000e6,
               vary=True)
    params.add('cnt1',
               value=cnt_rb - 785.0e6,
               min=cnt_rb - 1000.0e6,
               max=cnt_rb + 0e6,
               vary=True)

    #for k in range(len(my_lines)):
    #    params.add('a' + str(k), value = 0.1, min = 0.0, max = 1.0, vary = False)

    params.add('a0', value=0.025, min=0.0, max=1.0, vary=True)
    params.add('a1', value=0.2, min=0.0, max=1.0, vary=True)
    params.add('a2', value=0.2, min=0.0, max=1.0, vary=True)
    params.add('a3', value=0.025, min=0.0, max=1.0, vary=True)
    params.add('a4', value=0.2, min=0.0, max=1.0, vary=True)
    params.add('a5', value=0.2, min=0.0, max=1.0, vary=True)

    params.add('a6', value=0.2, min=0.0, max=1.0, vary=True)
    params.add('a7', value=0.2, min=0.0, max=1.0, vary=True)
    params.add('a8', value=0.2, min=0.0, max=1.0, vary=True)
    params.add('a9', value=0.2, min=0.0, max=1.0, vary=True)

    # do fit, here with leastsq model
    minner = Minimizer(fcn2min, params, fcn_args=(x, y, my_lines))
    result = minner.minimize()

    # Store the Confidence data from the fit
    con_report = lmfit.fit_report(result.params)

    (x_plot, model) = fcn2min(result.params, x, y, my_lines, plot_fit=True)

    # get residuals
    (residuals) = fcn2min(result.params, x, y, my_lines)

    #:print(result.params)

    return (x_plot, model, result, residuals)
Example #30
    def NLLSR(self, LMparams):
        """ Returns the result of the NLLSR using LMFit
        """
        # uses the least_squares method to minimize the parameters given by LMparams according to the residuals given by self.fnc2min
        LMFitmin = Minimizer(self.fnc2min, LMparams)
        LMFitResult = LMFitmin.minimize(method='least_squares')
        lmfit.printfuncs.report_fit(LMFitResult.params)

        return LMFitResult
Example #31
def perform_fit(exp_volumes,
                exp_pressures,
                starting_params,
                interval,
                fix_vo=False,
                bm2=False):

    volumes = exp_volumes[:, 0]
    sigV = exp_volumes[:, 1]
    p = exp_pressures[:, 0]
    sigP = exp_pressures[:, 1]

    uncert = np.column_stack((sigV, sigP))

    #initial starting parameters
    fit_params = Parameters()
    fit_params.add('vo', value=starting_params['vo'], min=0.0)
    fit_params.add('ko', value=starting_params['ko'], min=0.0)
    fit_params.add('kp', value=starting_params['kp'])

    if fix_vo:
        fit_params['vo'].vary = False
    if bm2:
        fit_params['kp'].vary = False

    mini = Minimizer(bm3, fit_params, fcn_args=(volumes,), fcn_kws={'data': p,\
                                                          'uncert': uncert})
    out1 = mini.minimize(method='nelder')

    out2 = mini.minimize(method='leastsq', params=out1.params)

    results = out2.params.valuesdict()

    plot_volumes = np.linspace((np.min(volumes) - 10), results['vo'], 1000)
    fit = bm3(out2.params, plot_volumes)

    report_fit(out2, show_correl=True, min_correl=0.001)

    PVs = np.column_stack((fit, plot_volumes))

    ci = conf_interval(mini, out2)
    report_ci(ci)

    return ci, mini, out2, PVs
Example #32
def autofit_fp_poly(turns, losses, dt, I0, I_max, iter_step, exp_0, method):
    params = Parameters()
    params.add("exponent", value=exp_0, min=0, vary=True)

    minner = Minimizer(fp_lmfit_poly,
                       params,
                       fcn_args=(turns, losses, dt, I0, I_max, iter_step))
    result = minner.minimize(method=method)
    final = losses + result.residual
    return result, final
Example #33
def fit_axis(image_nparray2D,axis,minim_method="nelder"):
	axis_data = np.sum(image_nparray2D,axis = 1) if (axis == 0) else np.sum(image_nparray2D,axis = 0)
	axis_points = np.linspace(1,len(axis_data),len(axis_data))
	param_estimates = startparams_estimate(axis_data)
	params_for_fit = Parameters()
	params_for_fit.add('I_zero',value=param_estimates[0],min=0,max=np.amax(axis_data))
	params_for_fit.add('r_zero',value=param_estimates[1],min=1,max=len(axis_data))
	params_for_fit.add('omega_zero',value=param_estimates[2],min=1,max=len(axis_data))
	params_for_fit.add('backgr',value=param_estimates[3])
	fit = Minimizer(residual,params_for_fit,fcn_args=(axis_points,),\
		fcn_kws={"data":axis_data})
	fit_res = fit.minimize(minim_method)
	return (axis_points,axis_data,fit_res)
Example #34
    def __fit2D(self,minim_method="nelder",rotation=False):
        
        self.__fit_axis(0,minim_method)
        self.__fit_axis(1,minim_method)

        # we first take all the initial parameters from 1D fits
        bgr2D_est = self.axis0fitparams.valuesdict()["backgr"]/len(self.axis0pts)
        x2D_est = self.axis0fitparams.valuesdict()["r_zero"]
        omegaX2D_est = self.axis0fitparams.valuesdict()["omega_zero"]
        y2D_est = self.axis1fitparams.valuesdict()["r_zero"]
        omegaY2D_est = self.axis1fitparams.valuesdict()["omega_zero"]

        smoothened_image = gaussian_filter(self.image_array,50)
        peakheight2D_est = np.amax(smoothened_image)
        #now we need to programmatically cut out the region of interest out of the
        #whole picture so that fitting takes way less time

        # NOTE! In this implementation, if the beam is small compared to picture size
        # and is very close to the edge, the fitting will fail, because the x and y
        # center position estimates will be off

        self.__format_picture(x2D_est,omegaX2D_est,y2D_est,omegaY2D_est)
        cropped_data = self.formatted_array
        xvals = np.linspace(1,cropped_data.shape[0],cropped_data.shape[0])
        yvals = np.linspace(1,cropped_data.shape[1],cropped_data.shape[1])
        x, y = np.meshgrid(yvals,xvals)
        # NOTE! there's apparently some weird convention, this has to do with
        # Cartesian vs. matrix indexing, which is explained in the numpy.meshgrid manual

        estimates_2D = Parameters()
        estimates_2D.add("I_zero",value=peakheight2D_est,min=bgr2D_est)
        estimates_2D.add("x_zero",value=0.5*len(yvals),min=0,max=len(yvals)) # NOTE! weird indexing conventions
        estimates_2D.add("y_zero",value=0.5*len(xvals),min=0,max=len(xvals)) # NOTE! weird indexing conventions
        estimates_2D.add("omegaX_zero",value=omegaX2D_est)
        estimates_2D.add("omegaY_zero",value=omegaY2D_est)
        estimates_2D.add("theta_rot",value=0*np.pi,min = 0,max = np.pi) #just starting with 0
        estimates_2D.add("backgr",value=bgr2D_est)


        if rotation:
            fit2D = Minimizer(residual_G2D,estimates_2D,fcn_args=(x,y),fcn_kws={"data":cropped_data})
            print("Including rotation")
        else:
            fit2D = Minimizer(residual_G2D_norotation,estimates_2D,fcn_args=(x,y),fcn_kws={"data":cropped_data})
            print("Not including rotation")

        fit_res2D = fit2D.minimize(minim_method)

        self.x2Dgrid = x
        self.y2Dgrid = y
        self.fit2Dparams = fit_res2D.params
    def fit(self, image):
        """Fit a image of a hologram with the current attribute 
        parameters.

        Example:
        >>> p = {'x':0, 'y':0, 'z':100, 'a_p':0.5, 'n_p':1.5, 'n_m':1.337, 
        ...      'mpp':0.135, 'lamb':0.447}
        >>> mie_fit = Mie_Fitter(p)
        >>> mie_fit.fit(image)
        """
        dim = image.shape
        minner = Minimizer(mie_loss, self.p, fcn_args=(image, dim))
        self.result = minner.minimize()
        return self.result
Example #36
class MinimizerClassSuite:
    """
    Benchmarks for the Minimizer class
    """
    def setup(self):
        self.x = np.linspace(1, 10, 250)
        np.random.seed(0)
        self.y = (3.0 * np.exp(-self.x / 2)
                  - 5.0 * np.exp(-(self.x - 0.1) / 10.)
                  + 0.1 * np.random.randn(len(self.x)))

        self.p = Parameters()
        self.p.add_many(('a1', 4., True, 0., 10.),
                        ('a2', 4., True, -10., 10.),
                        ('t1', 3., True, 0.01, 10.),
                        ('t2', 3., True, 0.01, 20.))

        self.p_emcee = deepcopy(self.p)
        self.p_emcee.add('noise', 0.2, True, 0.001, 1.)

        self.mini_de = Minimizer(Minimizer_Residual,
                                 self.p,
                                 fcn_args=(self.x, self.y),
                                 kws={'seed': 1,
                                      'polish': False,
                                      'maxiter': 100})

        self.mini_emcee = Minimizer(Minimizer_lnprob,
                                    self.p_emcee,
                                    fcn_args=(self.x, self.y))

    def time_differential_evolution(self):
        self.mini_de.minimize(method='differential_evolution')

    def time_emcee(self):
        self.mini_emcee.emcee(self.p_emcee, steps=100, seed=1)
Example #37
def fit2D(image_nparray2D,fit_axis0=None,fit_axis1=None,minim_method="nelder"):
	if fit_axis0 is None:
		fit_axis0 = fit_axis(image_nparray2D,0,minim_method)
	if fit_axis1 is None:
		fit_axis1 = fit_axis(image_nparray2D,1,minim_method)
		
	# we first take all the initial parameters from 1D fits
	bgr2D_est = fit_axis0[2].params.valuesdict()["backgr"]/len(fit_axis1[0])
	x2D_est = fit_axis0[2].params.valuesdict()["r_zero"]
	omegaX2D_est = fit_axis0[2].params.valuesdict()["omega_zero"]
	y2D_est = fit_axis1[2].params.valuesdict()["r_zero"]
	omegaY2D_est = fit_axis1[2].params.valuesdict()["omega_zero"]

	smoothened_image = gaussian_filter(image_nparray2D,50)
	peakheight2D_est = np.amax(smoothened_image)
	#now we need to programmatically cut out the region of interest out of the
	#whole picture so that fitting takes way less time
		
	# NOTE! In this implementation, if the beam is small compared to picture size 
	# and is very close to the edge, the fitting will fail, because the x and y 
	# center position estimates will be off

	cropped_data = format_picture(image_nparray2D,x2D_est,omegaX2D_est,y2D_est,omegaY2D_est)
	xvals = np.linspace(1,cropped_data.shape[0],cropped_data.shape[0])
	yvals = np.linspace(1,cropped_data.shape[1],cropped_data.shape[1])
	x, y = np.meshgrid(yvals,xvals) 
	# NOTE! there's apparently some weird convention, this has to do with 
	# Cartesian vs. matrix indexing, which is explained in the numpy.meshgrid manual

	estimates_2D = Parameters()
	estimates_2D.add("I_zero",value=peakheight2D_est,min=bgr2D_est)
	estimates_2D.add("x_zero",value=0.5*len(yvals),min=0,max=len(xvals)) # NOTE! weird indexing conventions
	estimates_2D.add("y_zero",value=0.5*len(xvals),min=0,max=len(yvals)) # NOTE! weird indexing conventions
	estimates_2D.add("omegaX_zero",value=omegaX2D_est)
	estimates_2D.add("omegaY_zero",value=omegaY2D_est)
	estimates_2D.add("backgr",value=bgr2D_est)
		

	fit2D = Minimizer(residual_2D,estimates_2D,fcn_args=(x,y),fcn_kws={"data":cropped_data})
	fit_res2D = fit2D.minimize(minim_method)
	print(estimates_2D.valuesdict()["x_zero"])
	return (x,y,fit_res2D)
Example #38
def fit_axis(image_nparray2D,axis,minim_method="nelder"):
	"""
	This function fits one axis of a 2D array representing an image by doing 
	a summation along the other axis 

	fit_axis(image_nparray2D,axis,minim_method="nelder")
	"""


	axis_data = np.sum(image_nparray2D,axis = 1) if (axis == 0) else np.sum(image_nparray2D,axis = 0)
	axis_points = np.linspace(1,len(axis_data),len(axis_data))
	param_estimates = startparams_estimate(axis_data)
	params_for_fit = Parameters()
	params_for_fit.add('I_zero',value=param_estimates[0],min=0,max=np.amax(axis_data))
	params_for_fit.add('r_zero',value=param_estimates[1],min=1,max=len(axis_data))
	params_for_fit.add('omega_zero',value=param_estimates[2],min=1,max=len(axis_data))
	params_for_fit.add('backgr',value=param_estimates[3])
	fit = Minimizer(residual_G1D,params_for_fit,fcn_args=(axis_points,),\
		fcn_kws={"data":axis_data})
	fit_res = fit.minimize(minim_method)
	return (axis_points,axis_data,fit_res)
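A minimal usage sketch, assuming residual_G1D and startparams_estimate are defined in the same module and the image is a 2-D NumPy array; the array below is only a placeholder:

import numpy as np

image = np.random.rand(480, 640)                 # placeholder image, illustrative only
points, profile, fit_res = fit_axis(image, 0)    # fit the row profile (summed along axis 1)
print(fit_res.params['omega_zero'].value)        # fitted width parameter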
Example #39
def minimize(fcn, paramgroup, method='leastsq', args=None, kws=None,
             scale_covar=True, iter_cb=None, reduce_fcn=None, nan_policy='omit',
             _larch=None, **fit_kws):
    """
    wrapper around lmfit minimizer for Larch
    """
    fiteval = _larch.symtable._sys.fiteval
    if isinstance(paramgroup, ParameterGroup):
        params = paramgroup.__params__
    elif isgroup(paramgroup):
        params = group2params(paramgroup, _larch=_larch)
    elif isinstance(paramgroup, Parameters):
        params = paramgroup
    else:
        raise ValueError('minimize takes ParameterGroup or Group as first argument')

    if args is None:
        args = ()
    if kws is None:
        kws = {}

    def _residual(params):
        params2group(params, paramgroup)
        return fcn(paramgroup, *args,  **kws)

    fitter = Minimizer(_residual, params, iter_cb=iter_cb,
                       reduce_fcn=reduce_fcn, nan_policy=nan_policy, **fit_kws)

    result = fitter.minimize(method=method)
    params2group(result.params, paramgroup)

    out = Group(name='minimize results', fitter=fitter, fit_details=result,
                chi_square=result.chisqr, chi_reduced=result.redchi)

    for attr in ('aic', 'bic', 'covar', 'params', 'nvarys',
                 'nfree', 'ndata', 'var_names', 'nfev', 'success',
                 'errorbars', 'message', 'lmdif_message', 'residual'):
        setattr(out, attr, getattr(result, attr, None))
    return out
Example #40
    def __fit_axis(self,axis,minim_method="nelder"):
        """
        This function fits one axis of a 2D array representing an image by doing
        a summation along the other axis

        fit_axis(axis,minim_method="nelder")
        """

        if axis not in [0,1]:
            sys.exit("The axis can only be 0 or 1 in fit_axis function")


        axis_data = np.sum(self.image_array,axis = 1) if (axis == 0) else np.sum(self.image_array,axis = 0)
        axis_points = np.linspace(1,len(axis_data),len(axis_data))
        param_estimates = self.__startparams_estimate(axis_data)

        # using lmfit package for fitting (based on Scipy)
        # https://lmfit.github.io/lmfit-py/
        params_for_fit = Parameters()
        params_for_fit.add('I_zero',value=param_estimates[0],min=0,max=np.amax(axis_data))
        params_for_fit.add('r_zero',value=param_estimates[1],min=1,max=len(axis_data))
        params_for_fit.add('omega_zero',value=param_estimates[2],min=1,max=len(axis_data))
        params_for_fit.add('backgr',value=param_estimates[3])
        fit = Minimizer(residual_G1D,params_for_fit,fcn_args=(axis_points,),\
            fcn_kws={"data":axis_data})
        fit_res = fit.minimize(minim_method)

        if axis == 0:
            self.axis0pts = axis_points
            self.axis0data = axis_data
            self.axis0fitparams = fit_res.params

        elif axis == 1:
            self.axis1pts = axis_points
            self.axis1data = axis_data
            self.axis1fitparams = fit_res.params
Example #41
class CommonMinimizerTest(unittest.TestCase):

    def setUp(self):
        """
        test scale minimizers except newton-cg (needs jacobian) and
        anneal (doesn't work out of the box).
        """
        p_true = Parameters()
        p_true.add('amp', value=14.0)
        p_true.add('period', value=5.33)
        p_true.add('shift', value=0.123)
        p_true.add('decay', value=0.010)
        self.p_true = p_true

        n = 2500
        xmin = 0.
        xmax = 250.0
        noise = np.random.normal(scale=0.7215, size=n)
        self.x = np.linspace(xmin, xmax, n)
        self.data = self.residual(p_true, self.x) + noise

        fit_params = Parameters()
        fit_params.add('amp', value=11.0, min=5, max=20)
        fit_params.add('period', value=5., min=1., max=7)
        fit_params.add('shift', value=.10, min=0.0, max=0.2)
        fit_params.add('decay', value=6.e-3, min=0, max=0.1)
        self.fit_params = fit_params

        self.mini = Minimizer(self.residual, fit_params, [self.x, self.data])

    def residual(self, pars, x, data=None):
        amp = pars['amp']
        per = pars['period']
        shift = pars['shift']
        decay = pars['decay']

        if abs(shift) > pi/2:
            shift = shift - np.sign(shift) * pi
        model = amp*np.sin(shift + x/per) * np.exp(-x*x*decay*decay)
        if data is None:
            return model
        return model - data

    def test_diffev_bounds_check(self):
        # You need finite (min, max) for each parameter if you're using
        # differential_evolution.
        self.fit_params['decay'].min = -np.inf
        self.fit_params['decay'].vary = True
        self.minimizer = 'differential_evolution'
        pytest.raises(ValueError, self.scalar_minimizer)

        # but only if a parameter is not fixed
        self.fit_params['decay'].vary = False
        self.mini.scalar_minimize(method='differential_evolution', maxiter=1)

    def test_scalar_minimizers(self):
        # test all the scalar minimizers
        for method in SCALAR_METHODS:
            if method in ['newton', 'dogleg', 'trust-ncg', 'cg', 'trust-exact',
                          'trust-krylov', 'trust-constr']:
                continue
            self.minimizer = SCALAR_METHODS[method]
            if method == 'Nelder-Mead':
                sig = 0.2
            else:
                sig = 0.15
            self.scalar_minimizer(sig=sig)

    def scalar_minimizer(self, sig=0.15):
        out = self.mini.scalar_minimize(method=self.minimizer)

        self.residual(out.params, self.x)

        for para, true_para in zip(out.params.values(), self.p_true.values()):
            check_wo_stderr(para, true_para.value, sig=sig)

    def test_nan_policy(self):
        # check that an error is raised if there are nan in
        # the data returned by userfcn
        self.data[0] = np.nan

        major, minor, _micro = scipy_version.split('.', 2)
        for method in SCALAR_METHODS:
            if (method == 'differential_evolution' and int(major) > 0 and
                    int(minor) >= 2):
                pytest.raises(RuntimeError, self.mini.scalar_minimize,
                              SCALAR_METHODS[method])
            else:
                pytest.raises(ValueError, self.mini.scalar_minimize,
                              SCALAR_METHODS[method])

        pytest.raises(ValueError, self.mini.minimize)

        # now check that the fit proceeds if nan_policy is 'omit'
        self.mini.nan_policy = 'omit'
        res = self.mini.minimize()
        assert_equal(res.ndata, np.size(self.data, 0) - 1)

        for para, true_para in zip(res.params.values(), self.p_true.values()):
            check_wo_stderr(para, true_para.value, sig=0.15)

    def test_nan_policy_function(self):
        a = np.array([0, 1, 2, 3, np.nan])
        pytest.raises(ValueError, _nan_policy, a)
        assert_(np.isnan(_nan_policy(a, nan_policy='propagate')[-1]))
        assert_equal(_nan_policy(a, nan_policy='omit'), [0, 1, 2, 3])

        a[-1] = np.inf
        pytest.raises(ValueError, _nan_policy, a)
        assert_(np.isposinf(_nan_policy(a, nan_policy='propagate')[-1]))
        assert_equal(_nan_policy(a, nan_policy='omit'), [0, 1, 2, 3])
        assert_equal(_nan_policy(a, handle_inf=False), a)

    @dec.slow
    def test_emcee(self):
        # test emcee
        if not HAS_EMCEE:
            return True

        np.random.seed(123456)
        out = self.mini.emcee(nwalkers=100, steps=200, burn=50, thin=10)

        check_paras(out.params, self.p_true, sig=3)

    @dec.slow
    def test_emcee_method_kwarg(self):
        # test with emcee as method keyword argument
        if not HAS_EMCEE:
            return True

        np.random.seed(123456)
        out = self.mini.minimize(method='emcee', nwalkers=100, steps=200,
                                 burn=50, thin=10)
        assert out.method == 'emcee'
        assert out.nfev == 100*200

        check_paras(out.params, self.p_true, sig=3)

    @dec.slow
    def test_emcee_PT(self):
        # test emcee with parallel tempering
        if not HAS_EMCEE:
            return True

        np.random.seed(123456)
        self.mini.userfcn = residual_for_multiprocessing
        out = self.mini.emcee(ntemps=4, nwalkers=50, steps=200,
                              burn=100, thin=10, workers=2)

        check_paras(out.params, self.p_true, sig=3)

    @dec.slow
    def test_emcee_multiprocessing(self):
        # test multiprocessing runs
        if not HAS_EMCEE:
            return True

        np.random.seed(123456)
        self.mini.userfcn = residual_for_multiprocessing
        self.mini.emcee(steps=10, workers=4)

    def test_emcee_bounds_length(self):
        # the log-probability functions check if the parameters are
        # inside the bounds. Check that the bounds and parameters
        # are the right lengths for comparison. This can be done
        # if nvarys != nparams
        if not HAS_EMCEE:
            return True
        self.mini.params['amp'].vary = False
        self.mini.params['period'].vary = False
        self.mini.params['shift'].vary = False

        self.mini.emcee(steps=10)

    @dec.slow
    def test_emcee_partial_bounds(self):
        # mcmc with partial bounds
        if not HAS_EMCEE:
            return True

        np.random.seed(123456)
        # test mcmc output vs lm, some parameters not bounded
        self.fit_params['amp'].max = np.inf
        # self.fit_params['amp'].min = -np.inf
        out = self.mini.emcee(nwalkers=100, steps=300, burn=100, thin=10)

        check_paras(out.params, self.p_true, sig=3)

    def test_emcee_init_with_chain(self):
        # can you initialise with a previous chain
        if not HAS_EMCEE:
            return True

        out = self.mini.emcee(nwalkers=100, steps=5)
        # can initialise with a chain
        self.mini.emcee(nwalkers=100, steps=1, pos=out.chain)

        # can initialise with a correct subset of a chain
        self.mini.emcee(nwalkers=100, steps=1, pos=out.chain[..., -1, :])

        # but you can't initialise if the shape is wrong.
        pytest.raises(ValueError,
                      self.mini.emcee,
                      nwalkers=100,
                      steps=1,
                      pos=out.chain[..., -1, :-1])

    def test_emcee_reuse_sampler(self):
        if not HAS_EMCEE:
            return True

        self.mini.emcee(nwalkers=100, steps=5)

        # if you've run the sampler the Minimizer object should have a _lastpos
        # attribute
        assert_(hasattr(self.mini, '_lastpos'))

        # now try and re-use sampler
        out2 = self.mini.emcee(steps=10, reuse_sampler=True)
        assert_(out2.chain.shape[1] == 15)

        # you shouldn't be able to reuse the sampler if nvarys has changed.
        self.mini.params['amp'].vary = False
        pytest.raises(ValueError, self.mini.emcee, reuse_sampler=True)

    def test_emcee_lnpost(self):
        # check ln likelihood is calculated correctly. It should be
        # -0.5 * chi**2.
        result = self.mini.minimize()

        # obtain the numeric values
        # note - in this example all the parameters are varied
        fvars = np.array([par.value for par in result.params.values()])

        # calculate the cost function with scaled values (all parameters have
        # lower and upper bounds).
        scaled_fvars = []
        for par, fvar in zip(result.params.values(), fvars):
            par.value = fvar
            scaled_fvars.append(par.setup_bounds())

        val = self.mini.penalty(np.array(scaled_fvars))

        # calculate the log-likelihood value
        bounds = np.array([(par.min, par.max)
                           for par in result.params.values()])
        val2 = _lnpost(fvars,
                       self.residual,
                       result.params,
                       result.var_names,
                       bounds,
                       userargs=(self.x, self.data))

        assert_almost_equal(-0.5 * val, val2)

    def test_emcee_output(self):
        # test mcmc output
        if not HAS_EMCEE:
            return True
        try:
            from pandas import DataFrame
        except ImportError:
            return True
        out = self.mini.emcee(nwalkers=10, steps=20, burn=5, thin=2)
        assert_(isinstance(out, MinimizerResult))
        assert_(isinstance(out.flatchain, DataFrame))

        # check that we can access the chains via parameter name
        assert_(out.flatchain['amp'].shape[0] == 80)
        assert out.errorbars
        assert_(np.isfinite(out.params['amp'].correl['period']))

        # lnprob should contain one entry per sample in the chain
        assert_(np.size(out.chain)//out.nvarys == np.size(out.lnprob))

        # test chain output shapes
        assert_(out.lnprob.shape == (10, (20-5+1)//2))
        assert_(out.chain.shape == (10, (20-5+1)//2, out.nvarys))
        assert_(out.flatchain.shape == (10*(20-5+1)//2, out.nvarys))

    def test_emcee_PT_output(self):
        # test mcmc output when using parallel tempering
        if not HAS_EMCEE:
            return True
        try:
            from pandas import DataFrame
        except ImportError:
            return True
        out = self.mini.emcee(ntemps=6, nwalkers=10, steps=20, burn=5, thin=2)
        assert_(isinstance(out, MinimizerResult))
        assert_(isinstance(out.flatchain, DataFrame))

        # check that we can access the chains via parameter name
        assert_(out.flatchain['amp'].shape[0] == 80)
        assert out.errorbars
        assert_(np.isfinite(out.params['amp'].correl['period']))

        # lnprob should contain one entry per sample in the chain
        assert_(np.size(out.chain)//out.nvarys == np.size(out.lnprob))

        # test chain output shapes
        assert_(out.lnprob.shape == (6, 10, (20-5+1)//2))
        assert_(out.chain.shape == (6, 10, (20-5+1)//2, out.nvarys))
        # only the 0th temperature is returned in flatchain
        assert_(out.flatchain.shape == (10*(20-5+1)//2, out.nvarys))

    @dec.slow
    def test_emcee_float(self):
        # test that it works if the residuals returns a float, not a vector
        if not HAS_EMCEE:
            return True

        def resid(pars, x, data=None):
            return -0.5 * np.sum(self.residual(pars, x, data=data)**2)

        # just return chi2
        def resid2(pars, x, data=None):
            return np.sum(self.residual(pars, x, data=data)**2)

        self.mini.userfcn = resid
        np.random.seed(123456)
        out = self.mini.emcee(nwalkers=100, steps=200, burn=50, thin=10)
        check_paras(out.params, self.p_true, sig=3)

        self.mini.userfcn = resid2
        np.random.seed(123456)
        out = self.mini.emcee(nwalkers=100, steps=200,
                              burn=50, thin=10, float_behavior='chi2')
        check_paras(out.params, self.p_true, sig=3)

    @dec.slow
    def test_emcee_seed(self):
        # test emcee seeding can reproduce a sampling run
        if not HAS_EMCEE:
            return True

        out = self.mini.emcee(params=self.fit_params,
                              nwalkers=100,
                              steps=1, seed=1)
        out2 = self.mini.emcee(params=self.fit_params,
                               nwalkers=100,
                               steps=1, seed=1)

        assert_almost_equal(out.chain, out2.chain)
Exemplo n.º 42
0
class Fitter(PlotFit):
    u"""Wrapper for LMFIT, which is a high-level extension for
    scipy.optimize.leastsq. Performs Non-Linear Least Squares fitting using
    the Levenberg-Marquardt method.

    Parameters
    ----------
    residuals : func
        The residuals function, see description below.

    derivatives : func, optional
        Derivatives function, to compute the Jacobian of the residuals
        function with derivatives across the rows. If this is None, the
        Jacobian will be estimated.

    data : tuple, optional
        Default: None

    params0 : list, optional
        Default: None

    parinfo : list, optional
        Default: None

    ftol : float, optional
        Default: 1e-10

    xtol : float, optional
        Default: 1e-10

    epsfcn : float, optional
        Default: 2.2204460492503131e-16

    stepfactor : float, optional
        Default: 100.0

    covtol : float, optional
        Default: 1e-14

    maxiter : int, optional
        Default: 200

    maxfev : int, optional
        Default: None (treated as 0, i.e. no explicit limit)

    nofinitecheck : bool, optional
        Default: False

    nan_policy : str, optional
        Default: 'omit'. Determines how NaN values are handled: 'raise',
        'propagate' or 'omit'.

    Notes
    -----
    Objects of this class are callable, returning the fitted parameters.

    **Residuals function**
    The residuals function must return an ndarray with weighted deviations
    between the model and the data. It takes two arguments, a list of the
    parameter values and a reference for the attribute :attr:`data`, a tuple
    e.g. ``(x, y, err)``. In a typical scientific problem the residuals should
    be weighted so that each deviate has a Gaussian sigma of 1.0.  If ``x``
    represents the independent variable, ``y`` represents an intensity for
    each value of ``x``, and ``err`` represents the error, then the deviates
    could be calculated as follows:

    .. math::

       d = (y - f(x)) / err

    where *f* is the model function. If *err* are 1-sigma uncertainties in
    ``y``, then

    .. math::

       \sum d^2

    is the total chi-squared.  :py:meth:`Fitter.fit` will minimize this value.
    ``x``, ``y`` and ``err`` are passed to the residuals function from
    :attr:`data`.

    Attributes
    ----------
    parinfo
    params0
    data
    ftol
    xtol
    gtol
    epsfcn
    stepfactor
    covtol
    maxiter
    maxfev
    params
    xerror
    covar
    chi2_min
    orignorm
    rchi2_min
    stderr
    npar
    nfree
    npegged
    dof
    resid
    niter
    nfev
    status
    message
    residuals
    nofinitecheck

    Methods
    -------
    fit
    plot
    build_param_table
    __call__
    """

    def __init__(self, residuals, derivatives=None, data=None, params0=None, parinfo=None, ftol=1e-10, xtol=1e-10,
                 gtol=1e-10, epsfcn=None, stepfactor=100.0, covtol=1e-14, maxiter=200, maxfev=None,
                 nofinitecheck=False, nan_policy='omit'):

        self._m = 0
        self.result = namedtuple('result', [])
        self.config = namedtuple('config', [])

        self.residuals = residual_wrapper(residuals)
        if derivatives is not None:
            self.deriv = residual_wrapper(derivatives)
        else:
            self.deriv = None

        self.data = data
        self.params0 = params0
        self.parinfo = parinfo
        self.ftol = ftol
        self.xtol = xtol
        self.gtol = gtol
        self.epsfcn = epsfcn
        self.stepfactor = stepfactor
        self.covtol = covtol
        self.maxiter = maxiter
        self.maxfev = maxfev
        self.nofinitecheck = nofinitecheck
        self.nan_policy = nan_policy

    def __call__(self, params0=None):
        if hasattr(self, 'params'):
            return self.params
        elif self.params0 is not None and params0 is None:
            self.fit(self.params0)
            return self.params
        elif params0 is not None:
            self.fit(params0)
            return self.params
        else:
            raise ValueError('params0 is undefined, no fit can be performed')

    @property
    def parinfo(self):
        r"""A list of dicts with parameter constraints, one dict per
        parameter, or None if not given.

        Each dict can have zero or more items with the following keys and
        values:

         ``'fixed'``: bool
            Parameter to be fixed. Default: False.

         ``'limits'``: list
            Two-element list with the lower and upper parameter limits; either
            element may be None, meaning the parameter is unbounded on that
            side. Default: None.
        """
        return self._parinfo

    @parinfo.setter
    def parinfo(self, value):
        if isinstance(value, (list, tuple)):
            if np.all([isinstance(item, (type(None), dict)) for item in value]):
                self._parinfo = value
            else:
                raise ValueError
        elif value is None:
            self._parinfo = None
        else:
            raise ValueError
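
    # A hypothetical parinfo list, illustrating the format described in the
    # property docstring above, for a three-parameter fit: the first parameter
    # is bounded between 0 and 10, the second is held fixed, and the third is
    # unconstrained. The values are assumptions for illustration only.
    #
    #     parinfo = [{'limits': [0.0, 10.0]}, {'fixed': True}, None]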

    @property
    def params(self):
        r"""The fitted parameters. This attribute has the same type as
        :attr:`params0`.
        """
        return self.result.params

    @property
    def params0(self):
        r"""Required attribute. A NumPy array, a tuple or a list with the
        initial parameters values.
        """
        return self._params0

    @params0.setter
    def params0(self, value):
        if isinstance(value, (list, tuple, np.ndarray)):
            self._params0 = value
        elif value is None:
            self._params0 = None
        else:
            raise ValueError

    @property
    def data(self):
        r"""Required attribute. Python object with information for the
        residuals function and the derivatives function. See above.
        """
        return self._data

    @data.setter
    def data(self, value):
        if isinstance(value, tuple):
            self._data = value
        elif value is None:
            self._data = None
        else:
            raise ValueError

    @property
    def deriv(self):
        return self._deriv

    @deriv.setter
    def deriv(self, value):
        self._deriv = value

    @property
    def ftol(self):
        r"""Relative :math:`\chi^2` convergence criterium. Default: 1e-10
        """
        return self.config.ftol

    @ftol.setter
    def ftol(self, value):
        if isinstance(value, numbers.Number):
            self.config.ftol = value
        else:
            raise ValueError

    @property
    def xtol(self):
        r"""Relative parameter convergence criterium. Default: 1e-10
        """
        return self.config.xtol

    @xtol.setter
    def xtol(self, value):
        if isinstance(value, numbers.Number):
            self.config.xtol = value
        else:
            raise ValueError

    @property
    def gtol(self):
        r"""Orthogonality convergence criterium. Default: 1e-10
        """
        return self.config.gtol

    @gtol.setter
    def gtol(self, value):
        if isinstance(value, numbers.Number):
            self.config.gtol = value
        else:
            raise ValueError

    @property
    def epsfcn(self):
        r"""Finite derivative step size. Default: 2.2204460492503131e-16
        """
        return self.config.epsfcn

    @epsfcn.setter
    def epsfcn(self, value):
        if value is None:
            value = np.finfo(np.float64).eps
        if isinstance(value, numbers.Number):
            self.config.epsfcn = value
        else:
            raise ValueError

    @property
    def stepfactor(self):
        r"""Initial step bound. Default: 100.0
        """
        return self.config.stepfactor

    @stepfactor.setter
    def stepfactor(self, value):
        if isinstance(value, numbers.Number):
            self.config.stepfactor = value
        else:
            raise ValueError

    @property
    def covtol(self):
        r"""(DEPRECIATED) Range tolerance for covariance calculation.
        Default: 1e-14
        """
        warnings.warn('covtol is Depreciated and has no effect', DeprecationWarning)
        return self.config.covtol

    @covtol.setter
    def covtol(self, value):
        if isinstance(value, numbers.Number):
            self.config.covtol = value
        else:
            raise ValueError

    @property
    def maxiter(self):
        r"""(DEPRECIATED) Maximum number of iterations. Default: 200
        """
        warnings.warn('maxiter is Depreciated and has no effect', DeprecationWarning)
        return self.config.maxiter

    @maxiter.setter
    def maxiter(self, value):
        if isinstance(value, int):
            self.config.maxiter = value
        else:
            raise ValueError

    @property
    def maxfev(self):
        r"""Maximum number of function evaluations. Default: 0
        """
        return self.config.maxfev

    @maxfev.setter
    def maxfev(self, value):
        if isinstance(value, int):
            self.config.maxfev = value
        elif value is None:
            self.config.maxfev = 0
        else:
            raise ValueError

    @property
    def nofinitecheck(self):
        r"""(DEPRECIATED) Does not check for finite values. Default: False
        """
        warnings.warn('nofinitecheck is Depreciated and has no effect', DeprecationWarning)
        return self._nofinitecheck

    @nofinitecheck.setter
    def nofinitecheck(self, value):
        if isinstance(value, bool):
            self.config.nofinitecheck = value
        else:
            raise ValueError

    @property
    def nan_policy(self):
        r"""Determines how NaN's are handled by minimizer. Default: 'omit'
        """
        return self._nan_policy

    @nan_policy.setter
    def nan_policy(self, value):
        if isinstance(value, str):
            self._nan_policy = value
        else:
            raise ValueError

    @property
    def npar(self):
        r"""Number of parameters
        """
        try:
            return len(self.params0)
        except TypeError:
            return None

    @property
    def message(self):
        """Success/error message
        """
        return self.result.message

    @property
    def chi2_min(self):
        """Final :math:`\chi^2`
        """
        return self.result.bestnorm

    @property
    def orignorm(self):
        """Initial :math:`\chi^2`.
        """
        return self.result.orignorm

    @property
    def niter(self):
        """Number of iterations
        """
        return self.result.niter

    @property
    def nfev(self):
        """Number of function evaluations
        """
        return self.result.nfev

    @property
    def status(self):
        """Status code of fit passed from scipy.optimize.leastsq
        """
        return self.result.status

    @property
    def nfree(self):
        """Number of free parameters
        """
        return self.result.nfree

    @property
    def npegged(self):
        """Number of fixed parameters
        """
        return self.result.npegged

    @property
    def covar(self):
        """Parameter covariance matrix
        """
        return self.result.covar

    @property
    def resid(self):
        """Residuals
        """
        return self.result.resid

    @property
    def xerror(self):
        """Parameter uncertainties (:math:`1 \sigma`)
        """
        return self.result.xerror

    @property
    def dof(self):
        """Degrees of freedom
        """
        return self._m - self.nfree

    @property
    def rchi2_min(self):
        """Minimum reduced :math:`\chi^2`.
        """
        return self.result.redchi

    @property
    def stderr(self):
        """Standard errors estimated from
        :math:`\sqrt{diag(covar) * \chi^{2}_{reduced}`
        """
        return np.sqrt(np.diagonal(self.covar) * self.rchi2_min)



    def fit(self, params0):
        r"""Perform a fit with the provided parameters.

        Parameters
        ----------
        params0 : list
            Initial fitting parameters

        """
        self.params0 = params0
        p = Parameters()

        if self.parinfo is None:
            self.parinfo = [None] * len(self.params0)
        else:
            assert (len(self.params0) == len(self.parinfo))

        for i, (p0, parin) in enumerate(zip(self.params0, self.parinfo)):
            p.add(name='p{0}'.format(i), value=p0)

            if parin is not None:
                if 'limits' in parin:
                    p['p{0}'.format(i)].set(min=parin['limits'][0])
                    p['p{0}'.format(i)].set(max=parin['limits'][1])
                if 'fixed' in parin:
                    p['p{0}'.format(i)].set(vary=not parin['fixed'])

        if np.all([not value.vary for value in p.values()]):
            raise Exception('All parameters are fixed!')

        self.lmfit_minimizer = Minimizer(self.residuals, p, nan_policy=self.nan_policy, fcn_args=(self.data,))

        self.result.orignorm = np.sum(self.residuals(params0, self.data) ** 2)

        result = self.lmfit_minimizer.minimize(Dfun=self.deriv, method='leastsq', ftol=self.ftol,
                                               xtol=self.xtol, gtol=self.gtol, maxfev=self.maxfev, epsfcn=self.epsfcn,
                                               factor=self.stepfactor)

        self.result.bestnorm = result.chisqr
        self.result.redchi = result.redchi
        self._m = result.ndata
        # lmfit's ``nvarys`` is the number of free (varying) parameters; its
        # ``nfree`` is the number of degrees of freedom (ndata - nvarys)
        self.result.nfree = result.nvarys
        self.result.resid = result.residual
        self.result.status = result.ier
        self.result.covar = result.covar
        self.result.xerror = [result.params['p{0}'.format(i)].stderr for i in range(len(result.params))]

        self.result.params = [result.params['p{0}'.format(i)].value for i in range(len(result.params))]

        self.result.message = result.message

        self.lmfit_result = result

        if not result.errorbars or not result.success:
            warnings.warn(self.result.message)

        return result.success
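

# Usage sketch (illustration only; not part of the original example). It shows
# the residuals contract described in the Fitter docstring: the function gets
# the parameter values and the ``data`` tuple and returns the weighted
# deviations (y - f(x)) / err. The straight-line model, the synthetic data and
# the starting values below are assumptions made purely for illustration.
import numpy as np


def line_residuals(params, data):
    """Weighted residuals of a straight-line model a*x + b."""
    a, b = params
    x, y, err = data
    return (y - (a * x + b)) / err


x = np.linspace(0.0, 10.0, 50)
y = 2.0 * x + 1.0 + np.random.normal(scale=0.1, size=x.size)
err = np.full_like(x, 0.1)

fitter = Fitter(line_residuals, data=(x, y, err), params0=[1.0, 0.0])
best_params = fitter()          # Fitter objects are callable
print(best_params, fitter.stderr, fitter.rchi2_min)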
Exemplo n.º 43
0
# define objective function: returns the array to be minimized
def fcn2min(params, x, data):
    """Model a decaying sine wave and subtract data."""
    v = params.valuesdict()

    model = v['amp'] * np.sin(x * v['omega'] + v['shift']) * np.exp(-x*x*v['decay'])
    return model - data


# create a set of Parameters
params = Parameters()
params.add('amp', value=10, min=0)
params.add('decay', value=0.1)
params.add('shift', value=0.0, min=-np.pi/2., max=np.pi/2)
params.add('omega', value=3.0)

# do fit, here with leastsq model
minner = Minimizer(fcn2min, params, fcn_args=(x, data))
result = minner.minimize()

# calculate final result
final = data + result.residual

# write error report
report_fit(result)

# try to plot results
try:
    import matplotlib.pyplot as plt
    plt.plot(x, data, 'k+')
    plt.plot(x, final, 'r')
    plt.show()
except ImportError:
    pass


if __name__ == '__main__':
    p = Parameters()
    p.add('a', value=0.01)
    p.add('b', value=0.01)
    p.add('c', value=0.1)
    p.add('d', value=0.01, min=-0.9999, max=0.9999)
    p.add('e', value=0.01)
    # p.add('f', value=0.010)
    x = np.linspace(0, 200, 20)
    data = np.sin(x)
    # do fit, here with leastsq model
    minner = Minimizer(residual, p, fcn_args=(x, data))
    result = minner.minimize(method='least_squares')

    # calculate final result
    final = data + result.residual

    # write error report
    report_fit(result)

    # try to plot results
    try:
        import pylab

        pylab.plot(x, data, 'k+')
        pylab.plot(x, final, 'r')
        pylab.show()
    except ImportError:
        pass
Exemplo n.º 45
0
class CommonMinimizerTest(unittest.TestCase):

    def setUp(self):
        """
        test scalar minimizers except newton-cg (needs a Jacobian) and
        anneal (doesn't work out of the box).
        """
        p_true = Parameters()
        p_true.add('amp', value=14.0)
        p_true.add('period', value=5.33)
        p_true.add('shift', value=0.123)
        p_true.add('decay', value=0.010)
        self.p_true = p_true

        n = 2500
        xmin = 0.
        xmax = 250.0
        noise = np.random.normal(scale=0.7215, size=n)
        self.x = np.linspace(xmin, xmax, n)
        self.data = self.residual(p_true, self.x) + noise

        fit_params = Parameters()
        fit_params.add('amp', value=11.0, min=5, max=20)
        fit_params.add('period', value=5., min=1., max=7)
        fit_params.add('shift', value=.10,  min=0.0, max=0.2)
        fit_params.add('decay', value=6.e-3, min=0, max=0.1)
        self.fit_params = fit_params

        self.mini = Minimizer(self.residual, fit_params, [self.x, self.data])

    def residual(self, pars, x, data=None):
        amp = pars['amp'].value
        per = pars['period'].value
        shift = pars['shift'].value
        decay = pars['decay'].value

        if abs(shift) > pi/2:
            shift = shift - np.sign(shift) * pi
        model = amp*np.sin(shift + x/per) * np.exp(-x*x*decay*decay)
        if data is None:
            return model
        return model - data
        
    def test_diffev_bounds_check(self):
        # You need finite (min, max) for each parameter if you're using
        # differential_evolution.
        self.fit_params['decay'].min = None
        self.minimizer = 'differential_evolution'
        np.testing.assert_raises(ValueError, self.scalar_minimizer)

    def test_scalar_minimizers(self):
        # test all the scalar minimizers
        for method in SCALAR_METHODS:
            if method in ['newton', 'dogleg', 'trust-ncg']:
                continue
            self.minimizer = SCALAR_METHODS[method]
            if method == 'Nelder-Mead':
                sig = 0.2
            else:
                sig = 0.15
            self.scalar_minimizer(sig=sig)
        
    def scalar_minimizer(self, sig=0.15):
        try:
            from scipy.optimize import minimize as scipy_minimize
        except ImportError:
            raise SkipTest

        print(self.minimizer)
        out = self.mini.scalar_minimize(method=self.minimizer)

        self.residual(out.params, self.x)

        for name, par in out.params.items():
            nout = "%s:%s" % (name, ' '*(20-len(name)))
            print("%s: %s (%s) " % (nout, par.value, self.p_true[name].value))

        for para, true_para in zip(out.params.values(),
                                   self.p_true.values()):
            check_wo_stderr(para, true_para.value, sig=sig)

    @decorators.slow
    def test_emcee(self):
        # test emcee
        if not HAS_EMCEE:
            return True

        np.random.seed(123456)
        out = self.mini.emcee(nwalkers=100, steps=200,
                                      burn=50, thin=10)

        check_paras(out.params, self.p_true, sig=3)

    @decorators.slow
    def test_emcee_PT(self):
        # test emcee with parallel tempering
        if not HAS_EMCEE:
            return True

        np.random.seed(123456)
        self.mini.userfcn = residual_for_multiprocessing
        out = self.mini.emcee(ntemps=4, nwalkers=50, steps=200,
                              burn=100, thin=10, workers=2)

        check_paras(out.params, self.p_true, sig=3)

    @decorators.slow
    def test_emcee_multiprocessing(self):
        # test multiprocessing runs
        if not HAS_EMCEE:
            return True

        np.random.seed(123456)
        self.mini.userfcn = residual_for_multiprocessing
        out = self.mini.emcee(steps=10, workers=4)

    def test_emcee_bounds_length(self):
        # the log-probability functions check whether the parameters are
        # inside the bounds. Check that the bounds and parameters are the
        # right lengths for comparison; the lengths can only differ when
        # nvarys != nparams.
        if not HAS_EMCEE:
            return True
        self.mini.params['amp'].vary = False
        self.mini.params['period'].vary = False
        self.mini.params['shift'].vary = False

        self.mini.emcee(steps=10)

    @decorators.slow
    def test_emcee_partial_bounds(self):
        # mcmc with partial bounds
        if not HAS_EMCEE:
            return True

        np.random.seed(123456)
        # test mcmc output vs lm, some parameters not bounded
        self.fit_params['amp'].max = None
        # self.fit_params['amp'].min = None
        out = self.mini.emcee(nwalkers=100, steps=300,
                                      burn=100, thin=10)

        check_paras(out.params, self.p_true, sig=3)

    def test_emcee_init_with_chain(self):
        # can you initialise with a previous chain
        if not HAS_EMCEE:
            return True

        out = self.mini.emcee(nwalkers=100, steps=5)
        # can initialise with a chain
        out2 = self.mini.emcee(nwalkers=100, steps=1, pos=out.chain)

        # can initialise with a correct subset of a chain
        out3 = self.mini.emcee(nwalkers=100,
                               steps=1,
                               pos=out.chain[..., -1, :])

        # but you can't initialise if the shape is wrong.
        assert_raises(ValueError,
                      self.mini.emcee,
                      nwalkers=100,
                      steps=1,
                      pos=out.chain[..., -1, :-1])

    def test_emcee_reuse_sampler(self):
        if not HAS_EMCEE:
            return True

        self.mini.emcee(nwalkers=100, steps=5)

        # if you've run the sampler the Minimizer object should have a _lastpos
        # attribute
        assert_(hasattr(self.mini, '_lastpos'))

        # now try and re-use sampler
        out2 = self.mini.emcee(steps=10, reuse_sampler=True)
        assert_(out2.chain.shape[1] == 15)

        # you shouldn't be able to reuse the sampler if nvarys has changed.
        self.mini.params['amp'].vary = False
        assert_raises(ValueError, self.mini.emcee, reuse_sampler=True)

    def test_emcee_lnpost(self):
        # check ln likelihood is calculated correctly. It should be
        # -0.5 * chi**2.
        result = self.mini.minimize()

        # obtain the numeric values
        # note - in this example all the parameters are varied
        fvars = np.array([par.value for par in result.params.values()])

        # calculate the cost function with scaled values (all parameters have
        # lower and upper bounds).
        scaled_fvars = []
        for par, fvar in zip(result.params.values(), fvars):
            par.value = fvar
            scaled_fvars.append(par.setup_bounds())

        val = self.mini.penalty(np.array(scaled_fvars))

        # calculate the log-likelihood value
        bounds = np.array([(par.min, par.max)
                           for par in result.params.values()])
        val2 = _lnpost(fvars,
                       self.residual,
                       result.params,
                       result.var_names,
                       bounds,
                       userargs=(self.x, self.data))

        assert_almost_equal(-0.5 * val, val2)

    def test_emcee_output(self):
        # test mcmc output
        if not HAS_EMCEE:
            return True
        try:
            from pandas import DataFrame
        except ImportError:
            return True
        out = self.mini.emcee(nwalkers=10, steps=20, burn=5, thin=2)
        assert_(isinstance(out, MinimizerResult))
        assert_(isinstance(out.flatchain, DataFrame))

        # check that we can access the chains via parameter name
        assert_(out.flatchain['amp'].shape[0] == 80)
        assert_(out.errorbars is True)
        assert_(np.isfinite(out.params['amp'].correl['period']))

        # lnprob should contain one entry per sample in the chain
        # (chain size divided by the number of varying parameters, 4 here)
        assert_(np.size(out.chain)//4 == np.size(out.lnprob))

    @decorators.slow
    def test_emcee_float(self):
        # test that it works if the residuals returns a float, not a vector
        if not HAS_EMCEE:
            return True

        def resid(pars, x, data=None):
            return -0.5 * np.sum(self.residual(pars, x, data=data)**2)

        # just return chi2
        def resid2(pars, x, data=None):
            return np.sum(self.residual(pars, x, data=data)**2)

        self.mini.userfcn = resid
        np.random.seed(123456)
        out = self.mini.emcee(nwalkers=100, steps=200,
                                      burn=50, thin=10)
        check_paras(out.params, self.p_true, sig=3)

        self.mini.userfcn = resid2
        np.random.seed(123456)
        out = self.mini.emcee(nwalkers=100, steps=200,
                              burn=50, thin=10, float_behavior='chi2')
        check_paras(out.params, self.p_true, sig=3)
Exemplo n.º 46
0
def f2(p):
    par = p.valuesdict()
    return (-1.0*par['g']*np.exp(-((par['x']-par['h'])**2 +
            (par['y']-par['i'])**2) / par['scale']))

def f3(p):
    par = p.valuesdict()
    return (-1.0*par['j']*np.exp(-((par['x']-par['k'])**2 +
            (par['y']-par['l'])**2) / par['scale']))

# define objective function: returns the array to be minimized
def f(params):
    return f1(params) + f2(params) + f3(params)


# 1. show the effect of 'Ns': finite bounds for varying parameters ('x' and 'y'),
# and brute_step = 0.25
fitter = Minimizer(f, params)
result = fitter.minimize(method='brute', Ns=10, keep=5)
grid_x = np.unique([par.ravel() for par in result.brute_grid][0])
grid_y = np.unique([par.ravel() for par in result.brute_grid][1])

print("==========================================="
      "\nExample 1, taken from scipy.optimize.brute:\n"
      "===========================================\n\n"
      "Varying parameters with finite bounds and brute_step = 0.25:")
print("   {}\n   {}".format(params['x'], params['y']))
print("\nUsing the brute method with"
      "\n\tresult = fitter.minimize(method='brute', keep=5) "
      "\nwill generate a 2-dimensional grid, where 'x' and 'y' vary between "
      "\n'min' and 'max' (exclusive) spaced by 'brute_step': "
      "\n\nx: {}\ny: {}".format(grid_x, grid_y))
print("\nThe objective function is evaluated on this grid, and the raw output "
      "\nfrom scipy.optimize.brute is stored in brute_<parname> attributes.")
Exemplo n.º 47
0
#params.add('M', value= 0.53, min=.5,max=1.5)
#params.add('I', value= 0.002, min=.00001)
#params.add('a', value= 5.6, min=3,max=20)
#params.add('b', value= 2.10, min=.0001,max=7)
#params.add('c', value= 1.505, min=.0001,max=7)

params.add('l', value=0.45, min=0.2, max=2)
#params.add('m', value=0.35, min=0.2, max=1)
#params.add('M', value=0.8, min=0.5, max=1.5)
params.add('I', value=0.0007, min=0.00001)
params.add('a', value=8.75, min=3, max=20)
params.add('b', value=3.17, min=0.0001, max=7)
params.add('c', value=2.37, min=0.0001, max=7)

minner = Minimizer(predict, params, fcn_args=(time, np.concatenate((x_dot,theta,theta_dot))))
result = minner.minimize(method='powell')
report_fit(result)

fig = plt.figure()
ax1 = fig.add_subplot(111)

ax1.set_title("Pendulum parameters fitting")
ax1.set_xlabel('Time, sec')


ax1.plot(time, x,      color='blue', label='cart position, m')
ax1.plot(time, x_dot,  color='cyan', label='cart speed, m/s')

ax1.plot(time, theta,     color='red', label='pendulum angle, rad')
ax1.plot(time, theta_dot, color='magenta', label='pendulum speed, rad/s')