Example #1
def test_confidence_warnings(data, pars):
    """Make sure the proper warnings are emitted when needed."""
    minimizer = lmfit.Minimizer(residual, pars, fcn_args=data)
    out = minimizer.minimize(method='leastsq')

    with pytest.warns(UserWarning) as record:
        lmfit.conf_interval(minimizer, out, maxiter=1)
        assert 'maxiter=1 reached and prob' in str(record[0].message)
Example #2
def test_confidence_sigma_vs_prob(data, pars):
    """Calculate confidence by specifying sigma or probability."""
    minimizer = lmfit.Minimizer(residual, pars, fcn_args=(data))
    out = minimizer.leastsq()

    ci_sigmas = lmfit.conf_interval(minimizer, out, sigmas=[1, 2, 3])
    ci_1sigma = lmfit.conf_interval(minimizer, out, sigmas=[1])
    ci_probs = lmfit.conf_interval(minimizer, out, sigmas=[0.68269, 0.9545,
                                                           0.9973])

    assert_allclose(ci_sigmas['a'][0][1], ci_probs['a'][0][1], rtol=0.01)
    assert_allclose(ci_sigmas['b'][2][1], ci_probs['b'][2][1], rtol=0.01)
    assert len(ci_1sigma['a']) == 3
    assert len(ci_probs['a']) == 7
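The agreement tested above is the usual Gaussian coverage: conf_interval treats sigmas greater than or equal to 1 as multiples of sigma and converts them to two-sided probabilities, while values below 1 are taken as probabilities directly. A minimal sketch of that conversion, assuming scipy is available (not part of the test):

import numpy as np
from scipy.special import erf

for s in (1, 2, 3):
    # two-sided probability covered by +/- s sigma of a normal distribution
    print(s, 'sigma ->', round(erf(s / np.sqrt(2)), 5))
# 1 sigma -> 0.68269, 2 sigma -> 0.9545, 3 sigma -> 0.9973, the probabilities used above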
Example #3
File: dynspec.py Project: gitj/dynISM
 def __init__(self,dt,df,acf,maxf=None,tdif0=None,fdif0=None):
     self.tdif0=tdif0
     self.fdif0=fdif0
     nt,nf = acf.shape
     self.facf = acf[nt//2+1, nf//2+1:]  # integer indices (Python 3)
     self.fr = df*np.arange(1,self.facf.shape[0]+1)
     if maxf:
         maxidx = int(maxf/df)-1
         self.fr = self.fr[:maxidx]
         self.facf = self.facf[:maxidx]
     self.tacf = acf[nt//2+1:, nf//2]
     self.t = dt*np.arange(1,self.tacf.shape[0]+1)
     if maxf is None:
         mi = ismfit.simFitAcf(self.t, self.tacf, self.fr, self.facf,tdif0=tdif0,fdif0=fdif0)
     else:
         mi = ismfit.fitIf3(self.fr, self.facf, fdif0=fdif0)
     try:
         self.ci,self.trace = lmfit.conf_interval(mi,trace=True)
     except Exception:
         self.ci = None
         self.trace = None
     self.params = dict([(x,makePickleableParam(mi.params[x])) for x in mi.params.keys()])
     self.fitfacf = ismfit.gammaIf3(self.params,self.fr)
     self.offset = self.params['offs'].value
     self.scale = self.params['scale'].value
     if maxf is None:
         self.fittacf = ismfit.gammaIs3(self.params,self.t)
         self.fittacfn = self.normalize(self.fittacf)
     self.fitfacfn = self.normalize(self.fitfacf)
     self.facfn = self.normalize(self.facf)
     self.tacfn = self.normalize(self.tacf)
Example #4
def test_confidence1():
    x = np.linspace(0.3,10,100)
    np.random.seed(0)

    y = 1/(0.1*x)+2+0.1*np.random.randn(x.size)

    pars = lmfit.Parameters()
    pars.add_many(('a', 0.1), ('b', 1))

    minimizer = lmfit.Minimizer(residual, pars, fcn_args=(x, y) )
    out = minimizer.leastsq()
    # lmfit.report_fit(out)

    assert(out.nfev >   5)
    assert(out.nfev < 500)
    assert(out.chisqr < 3.0)
    assert(out.nvarys == 2)

    assert_paramval(out.params['a'],  0.1, tol=0.1)
    assert_paramval(out.params['b'], -2.0, tol=0.1)

    ci = lmfit.conf_interval(minimizer, out)
    assert_allclose(ci['b'][0][0],  0.997,  rtol=0.01)
    assert_allclose(ci['b'][0][1], -2.022,  rtol=0.01)
    assert_allclose(ci['b'][2][0],  0.674,  rtol=0.01)
    assert_allclose(ci['b'][2][1], -1.997,  rtol=0.01)
    assert_allclose(ci['b'][5][0],  0.95,   rtol=0.01)
    assert_allclose(ci['b'][5][1], -1.96,   rtol=0.01)
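A note on the indexing used in these tests: conf_interval returns a dictionary mapping each parameter name to a list of (probability, value) pairs, ordered from the lowest bound through the best-fit value to the highest bound. With three sigma levels that gives seven entries, so ci['b'][0] is the lower bound at the highest probability, ci['b'][3] the best-fit value, and ci['b'][5] the upper bound at the middle probability. The probabilities asserted here (0.997, 0.674, 0.95) reflect older lmfit defaults; newer releases report 0.9973, 0.6827 and 0.9545, as in Example #19. A small sketch, assuming the ci computed above:

for prob, val in ci['b']:
    # seven rows: -3/-2/-1 sigma bounds, best value (prob 0), +1/+2/+3 sigma bounds
    print('p = %.4f   b = %+.4f' % (prob, val))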
Example #5
def test_confidence2():
    x = np.linspace(0.3,10,100)
    np.random.seed(0)

    y = 1/(0.1*x)+2+0.1*np.random.randn(x.size)

    pars = lmfit.Parameters()
    pars.add_many(('a', 0.1), ('b', 1), ('c', 1.0))
    pars['a'].max = 0.25
    pars['a'].min = 0.00
    pars['a'].value = 0.2
    pars['c'].vary = False

    minimizer = lmfit.Minimizer(residual2, pars, fcn_args=(x, y) )
    out = minimizer.minimize(method='nelder')
    out = minimizer.minimize(method='leastsq', params=out.params)
    # lmfit.report_fit(out)

    assert(out.nfev >   5)
    assert(out.nfev < 500)
    assert(out.chisqr < 3.0)
    assert(out.nvarys == 2)

    assert_paramval(out.params['a'],  0.1, tol=0.1)
    assert_paramval(out.params['b'], -2.0, tol=0.1)

    ci = lmfit.conf_interval(minimizer, out)
    assert_allclose(ci['b'][0][0],  0.997,  rtol=0.01)
    assert_allclose(ci['b'][0][1], -2.022,  rtol=0.01)
    assert_allclose(ci['b'][2][0],  0.674,  rtol=0.01)
    assert_allclose(ci['b'][2][1], -1.997,  rtol=0.01)
    assert_allclose(ci['b'][5][0],  0.95,   rtol=0.01)
    assert_allclose(ci['b'][5][1], -1.96,   rtol=0.01)

    lmfit.printfuncs.report_ci(ci)
Example #6
 def calc_ci(self, sigmas=[0.68, 0.90]):
     # `conf_interval' requires the fitted results have valid `stderr',
     # so we need to re-fit the model with method `leastsq'.
     fitted = self.fitter.minimize(method="leastsq",
             params=self.fitted.params)
     self.ci, self.trace = lmfit.conf_interval(self.fitter, fitted,
             sigmas=sigmas, trace=True)
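The comment above points at a general requirement: lmfit.conf_interval needs stderr estimates for the varying parameters, and derivative-free solvers such as 'nelder' do not provide them. A minimal sketch of the usual guard, assuming an lmfit.Minimizer instance named mini:

res = mini.minimize(method='nelder')
if not res.errorbars:
    # no stderr estimates -> re-fit with leastsq so conf_interval can work
    res = mini.minimize(method='leastsq', params=res.params)
ci, trace = lmfit.conf_interval(mini, res, trace=True)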
Example #7
def confidence_intervals(fit_result, sigmas=(1, 2, 3), _larch=None,  **kws):
    """calculate the confidence intervals from a fit
    for supplied sigma values

    wrapper around lmfit.conf_interval
    """
    fitter = getattr(fit_result, 'fitter', None)
    result = getattr(fit_result, 'fit_details', None)
    return conf_interval(fitter, result, sigmas=sigmas, **kws)
Example #8
def main():
    with open('Ihfit4_taufast_a.p', 'rb') as f_tf:  # binary mode for pickle files
        tf = pickle.load(f_tf)  # needs `import pickle`; file objects have no .load()
    p = lmfit.Parameters()
    p.add_many(('dc', 0), ('a', 1), ('vh1', -40), ('t1', 100.), ('b', 1), ('vh2', -100), ('t2', 100.))
    mi = lmfit.minimize(taucurve, p)
    mi.leastsq()
    lmfit.printfuncs.report_fit(mi.params)
    ci = lmfit.conf_interval(mi)
    lmfit.printfuncs.report_ci(ci)
Example #9
def test_ci():
    np.random.seed(1)
    p_true = Parameters()
    p_true.add('amp', value=14.0)
    p_true.add('period', value=5.33)
    p_true.add('shift', value=0.123)
    p_true.add('decay', value=0.010)

    def residual(pars, x, data=None):
        amp = pars['amp']
        per = pars['period']
        shift = pars['shift']
        decay = pars['decay']

        if abs(shift) > np.pi / 2:
            shift = shift - np.sign(shift) * np.pi
        model = amp * np.sin(shift + x / per) * np.exp(-x * x * decay * decay)
        if data is None:
            return model
        return model - data


    n = 2500
    xmin = 0.
    xmax = 250.0
    noise = np.random.normal(scale=0.7215, size=n)
    x = np.linspace(xmin, xmax, n)
    data = residual(p_true, x) + noise

    fit_params = Parameters()
    fit_params.add('amp', value=13.0)
    fit_params.add('period', value=4)
    fit_params.add('shift', value=0.1)
    fit_params.add('decay', value=0.02)

    out = minimize(residual, fit_params, args=(x,), kws={'data': data})

    fit = residual(fit_params, x)

    print( ' N fev = ', out.nfev)
    print( out.chisqr, out.redchi, out.nfree)

    report_errors(fit_params)
    ci, tr = conf_interval(out, sigmas=[0.674], trace=True)
    report_ci(ci)
    for p in out.params:
        diff1 = ci[p][1][1] - ci[p][0][1]
        diff2 = ci[p][2][1] - ci[p][1][1]
        stderr = out.params[p].stderr
        assert(abs(diff1 - stderr) / stderr < 0.05)
        assert(abs(diff2 - stderr) / stderr < 0.05)
Example #10
def test_confidence_with_trace(data, pars):
    """Test calculation of confidence intervals with trace."""
    minimizer = lmfit.Minimizer(residual, pars, fcn_args=(data))
    out = minimizer.leastsq()

    ci, tr = lmfit.conf_interval(minimizer, out, sigmas=[0.6827], trace=True)
    for p in out.params:
        diff1 = ci[p][1][1] - ci[p][0][1]
        diff2 = ci[p][2][1] - ci[p][1][1]
        stderr = out.params[p].stderr
        assert abs(diff1 - stderr) / stderr < 0.05
        assert abs(diff2 - stderr) / stderr < 0.05

        assert p in tr.keys()
        assert 'prob' in tr[p].keys()
Example #11
    def time_confinterval(self):
        np.random.seed(0)
        x = np.linspace(0.3,10,100)
        y = 1/(0.1*x)+2+0.1*np.random.randn(x.size)

        p = Parameters()
        p.add_many(('a', 0.1), ('b', 1))

        def residual(p):
            a = p['a'].value
            b = p['b'].value

            return 1/(a*x)+b-y

        minimizer = Minimizer(residual, p)
        out = minimizer.leastsq()
        return conf_interval(minimizer, out)
Example #12
def fitter(model,
           params,
           args,
           mcmc=False,
           pos=None,
           nwalkers=100,
           steps=1000,
           burn=0.2,
           progress=True,
           get_ci=False,
           nan_policy='raise',
           max_nfev=None,
           thin=10,
           is_weighted=True):

    # Do fit
    maxfev = 0 if max_nfev is None else int(max_nfev)
    func = Minimizer(model,
                     params,
                     fcn_args=args,
                     nan_policy=nan_policy,
                     maxfev=maxfev)
    results = func.minimize()
    if mcmc:
        func = Minimizer(model, results.params, fcn_args=args)
        mcmc_results = func.emcee(nwalkers=nwalkers,
                                  steps=steps,
                                  burn=int(burn * steps),
                                  pos=pos,
                                  is_weighted=is_weighted,
                                  progress=progress,
                                  thin=thin)
        results = mcmc_results

    if get_ci:
        if results.errorbars:
            ci = conf_interval(func, results)
        else:
            ci = ''
        return results, ci
    else:
        return results
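A hedged usage sketch for the helper above, assuming lmfit is imported; `objective`, `params`, `x` and `data` are placeholder names, not taken from the original project:

# objective(params, x, data) should return the residual array
result, ci = fitter(objective, params, args=(x, data), get_ci=True)
print(lmfit.fit_report(result))
if ci:                       # ci is '' when the fit produced no error bars
    lmfit.printfuncs.report_ci(ci)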
Example #13
def graph_profile(profile, fit, sky_error):
	ci = lm.conf_interval(fit)
	profile.confs.update(ci)
	fig, main, res = prepare_single_figure()
	plot_fit(profile, main, fit, sky_error)
	plot_residuals(profile, res, fit, False)
	# plot_residuals(profile, res, fit, True, 'g.')
	plot_ci(profile, fit, ci, 2, main)
	plot_ci_residual(profile, fit, ci, 2, res)
	main.yaxis.set_major_locator(MaxNLocator(prune='upper'))
	main.set_ylabel(r'$\mu$ [mag arcsec$^{-1}$]')
	res.set_xlabel('$R$ [arcsec]')
	res.set_ylabel(r'$\Delta\mu$ [mag arcsec$^{-1}$]')
	title = ''
	if profile.gal_name: title += profile.gal_name
	if profile.cam_name: title += profile.cam_name
	if profile.name: title += profile.name
	main.set_title(title)
	return fig
Example #14
def fit_quadlimb(time, flux, flux_err, stretch=0.5):
    """
    Perform a fit of time/flux data to the quadratic limb darkening model
    """
    params = Parameters()
    params.add('r_p', value=0.12, min=0)
    params.add('r_s', value=1.4, min=0)
    params.add('stretch', value=stretch, min=0, max=1)
    params.add('shift', value=0, min=-0.9, max=1.1)

    mf = ModelFit(len(time), flux, flux_err)
    result = minimize(mf.residual, params)

    f = mf.model(params)
    ci = conf_interval(result, sigmas=[0.95])
    rp_err = ci['r_p'][1][1] - ci['r_p'][0][1]

    print(params['r_p'], rp_err)
    return f, params['r_p'].value, abs(rp_err)
Example #15
def test_ci_report():
    """test confidence interval report"""
    def residual(pars, x, data=None):
        argu = (x * pars['decay'])**2
        shift = pars['shift']
        if abs(shift) > np.pi / 2:
            shift = shift - np.sign(shift) * np.pi
        model = pars['amp'] * np.sin(shift +
                                     x / pars['period']) * np.exp(-argu)
        if data is None:
            return model
        return model - data

    p_true = Parameters()
    p_true.add('amp', value=14.0)
    p_true.add('period', value=5.33)
    p_true.add('shift', value=0.123)
    p_true.add('decay', value=0.010)

    n = 2500
    xmin = 0.
    xmax = 250.0
    x = np.linspace(xmin, xmax, n)
    data = residual(p_true, x) + np.random.normal(scale=0.7215, size=n)

    fit_params = Parameters()
    fit_params.add('amp', value=13.0)
    fit_params.add('period', value=2)
    fit_params.add('shift', value=0.0)
    fit_params.add('decay', value=0.02)

    mini = Minimizer(residual,
                     fit_params,
                     fcn_args=(x, ),
                     fcn_kws={'data': data})
    out = mini.leastsq()
    report = fit_report(out)
    assert (len(report) > 500)

    ci, tr = conf_interval(mini, out, trace=True)
    report = ci_report(ci)
    assert (len(report) > 250)
Example #16
    def get_errs(self, res, appr, method='manual', ci_vals=(0.683,), **kwargs):
        """Find dictionary of confidence intervals

        Input:
            res = lmfit.Minimizer() from fit
            appr = 'full' or 'simp'
            method = 'manual' (default), 'lmfit', or 'stderr'

            **kwargs are passed to relevant functions
                method='lmfit': maxiter=200, prob_func=None
                                (see lmfit.conf_interval docstring)
                method='manual': adapt=True, anneal=True, eps=None;
                                 remaining **kwargs sent to brentq
                                 (anneal keyword only if appr='full')
        Output:
            confidence interval dictionary
        """
        self._vprint('Finding {} fit errors; method={}'.format(appr, method))

        if method == 'lmfit':
            return lmfit.conf_interval(res, sigmas=ci_vals,
                                       verbose=self._verbose, **kwargs)
        if method == 'stderr':
            def f(conf_intv, res, pstr):
                if conf_intv != 0.683:
                    print('Warning: requested CI={}. But, stderr only '
                          'gives 68.3% errors'.format(conf_intv))
                v, s = res.params[pstr].value, res.params[pstr].stderr
                return v - s, v + s
        elif method == 'manual':
            f = lambda *args: self.get_bounds(*args, appr=appr,
                                verbose=self._verbose, **kwargs)
        else:
            raise Exception('ERROR: method must be one of lmfit/stderr/manual')

        ci = build_ci_dict(res, f, ci_vals=ci_vals)
        if self._verbose:
            self._vprint('Finished finding fit errors:')
            print(lmfit.printfuncs.ci_report(ci))
            self._vprint('')
        return ci
Example #17
    def confidence_intervals(self, p_names=None, sigmas=(1, 2, 3)):
        """Prints a confidence intervals.

        Parameters
        ----------
        p_names : {list, None}, optional
            Names of the parameters for which the confidence intervals are calculated. If None (default),
            the confidence intervals are calculated for every parameter.
        sigmas : {list, tuple}, optional
            The sigma-levels to find (default is [1, 2, 3]). See Note below.

        Note
        ----
        The values for sigma are taken as the number of standard deviations for a normal distribution
        and converted to probabilities. That is, the default sigma=[1, 2, 3] will use probabilities of
        0.6827, 0.9545, and 0.9973. If any of the sigma values is less than 1, that will be interpreted
        as a probability. That is, a value of 1 and 0.6827 will give the same results, within precision.

        """
        ci = conf_interval(self.minimizer, self.result, p_names=p_names, sigmas=sigmas)
        report_ci(ci)
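A hedged usage sketch for this method (the owning class is not shown in this excerpt, so `fitres` is a hypothetical instance carrying the .minimizer and .result attributes used above):

fitres.confidence_intervals(p_names=['amp', 'decay'], sigmas=(2,))
# per the Note, a probability below 1 is equivalent to the sigma multiple:
fitres.confidence_intervals(p_names=['amp', 'decay'], sigmas=(0.9545,))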
Example #18
def test_ci_report():
    """test confidence interval report"""

    def residual(pars, x, data=None):
        argu = (x*pars['decay'])**2
        shift = pars['shift']
        if abs(shift) > np.pi/2:
            shift = shift - np.sign(shift)*np.pi
        model = pars['amp']*np.sin(shift + x/pars['period']) * np.exp(-argu)
        if data is None:
            return model
        return model - data

    p_true = Parameters()
    p_true.add('amp', value=14.0)
    p_true.add('period', value=5.33)
    p_true.add('shift', value=0.123)
    p_true.add('decay', value=0.010)

    n = 2500
    xmin = 0.
    xmax = 250.0
    x = np.linspace(xmin, xmax, n)
    data = residual(p_true, x) + np.random.normal(scale=0.7215, size=n)

    fit_params = Parameters()
    fit_params.add('amp', value=13.0)
    fit_params.add('period', value=2)
    fit_params.add('shift', value=0.0)
    fit_params.add('decay', value=0.02)

    mini = Minimizer(residual, fit_params, fcn_args=(x,),
                     fcn_kws={'data': data})
    out = mini.leastsq()
    report = fit_report(out)
    assert(len(report) > 500)

    ci, tr = conf_interval(mini, out, trace=True)
    report = ci_report(ci)
    assert(len(report) > 250)
Example #19
def test_confidence_leastsq(data, pars, verbose, capsys):
    """Calculate confidence interval after leastsq minimization."""
    minimizer = lmfit.Minimizer(residual, pars, fcn_args=(data))
    out = minimizer.leastsq()

    assert 5 < out.nfev < 500
    assert out.chisqr < 3.0
    assert out.nvarys == 2
    assert_paramval(out.params['a'], 0.1, tol=0.1)
    assert_paramval(out.params['b'], -2.0, tol=0.1)

    ci = lmfit.conf_interval(minimizer, out, verbose=verbose)
    assert_allclose(ci['b'][0][0], 0.997, rtol=0.01)
    assert_allclose(ci['b'][0][1], -2.022, rtol=0.01)
    assert_allclose(ci['b'][2][0], 0.683, rtol=0.01)
    assert_allclose(ci['b'][2][1], -1.997, rtol=0.01)
    assert_allclose(ci['b'][5][0], 0.95, rtol=0.01)
    assert_allclose(ci['b'][5][1], -1.96, rtol=0.01)

    if verbose:
        captured = capsys.readouterr()
        assert 'Calculating CI for' in captured.out
Example #20
def test_confidence_leastsq(data, pars, verbose, capsys):
    """Calculate confidence interval after leastsq minimization."""
    minimizer = lmfit.Minimizer(residual, pars, fcn_args=(data))
    out = minimizer.leastsq()

    assert 5 < out.nfev < 500
    assert out.chisqr < 3.0
    assert out.nvarys == 2
    assert_paramval(out.params['a'], 0.1, tol=0.1)
    assert_paramval(out.params['b'], -2.0, tol=0.1)

    ci = lmfit.conf_interval(minimizer, out, verbose=verbose)
    assert_allclose(ci['b'][0][0], 0.997, rtol=0.01)
    assert_allclose(ci['b'][0][1], -2.022, rtol=0.01)
    assert_allclose(ci['b'][2][0], 0.683, rtol=0.01)
    assert_allclose(ci['b'][2][1], -1.997, rtol=0.01)
    assert_allclose(ci['b'][5][0], 0.95, rtol=0.01)
    assert_allclose(ci['b'][5][1], -1.96, rtol=0.01)

    if verbose:
        captured = capsys.readouterr()
        assert 'Calculating CI for' in captured.out
Example #21
def test_confidence_exceptions(data, pars):
    """Make sure the proper exceptions are raised when needed."""
    minimizer = lmfit.Minimizer(residual, pars, calc_covar=False,
                                fcn_args=data)
    out = minimizer.minimize(method='nelder')
    out_lsq = minimizer.minimize(params=out.params, method='leastsq')

    # no uncertainty estimated
    msg = 'Cannot determine Confidence Intervals without sensible uncertainty'
    with pytest.raises(lmfit.MinimizerException, match=msg):
        lmfit.conf_interval(minimizer, out)

    # uncertainty is NaN
    out_lsq.params['a'].stderr = np.nan
    with pytest.raises(lmfit.MinimizerException, match=msg):
        lmfit.conf_interval(minimizer, out_lsq)

    # only one varying parameter
    out_lsq.params['a'].vary = False
    msg = r'Cannot determine Confidence Intervals with < 2 variables'
    with pytest.raises(lmfit.MinimizerException, match=msg):
        lmfit.conf_interval(minimizer, out_lsq)
Example #22
def test_confidence_bounds_reached(data, pars):
    """Check if conf_interval handles bounds correctly"""

    # Should work
    pars['a'].max = 0.2
    minimizer = lmfit.Minimizer(residual, pars, fcn_args=(data))
    out = minimizer.leastsq()
    out.params['a'].stderr = 1
    lmfit.conf_interval(minimizer, out, verbose=True)

    # Should warn (i.e., limit < para.min)
    pars['b'].max = 2.03
    pars['b'].min = 1.97
    minimizer = lmfit.Minimizer(residual, pars, fcn_args=(data))
    out = minimizer.leastsq()
    out.params['b'].stderr = 0.005
    out.params['a'].stderr = 0.01
    with pytest.warns(UserWarning, match="Bound reached"):
        lmfit.conf_interval(minimizer, out, verbose=True)

    # Should warn (i.e., limit > para.max)
    out.params['b'].stderr = 0.1
    with pytest.warns(UserWarning, match="Bound reached"):
        lmfit.conf_interval(minimizer, out, verbose=True)
Example #23
def test_confidence_exceptions(data, pars):
    """Make sure the proper exceptions are raised when needed."""
    minimizer = lmfit.Minimizer(residual,
                                pars,
                                calc_covar=False,
                                fcn_args=data)
    out = minimizer.minimize(method='nelder')
    out_lsq = minimizer.minimize(params=out.params, method='leastsq')

    # no uncertainty estimated
    msg = 'Cannot determine Confidence Intervals without sensible uncertainty'
    with pytest.raises(lmfit.MinimizerException, match=msg):
        lmfit.conf_interval(minimizer, out)

    # uncertainty is NaN
    out_lsq.params['a'].stderr = np.nan
    with pytest.raises(lmfit.MinimizerException, match=msg):
        lmfit.conf_interval(minimizer, out_lsq)

    # only one varying parameter
    out_lsq.params['a'].vary = False
    msg = r'Cannot determine Confidence Intervals with < 2 variables'
    with pytest.raises(lmfit.MinimizerException, match=msg):
        lmfit.conf_interval(minimizer, out_lsq)
Example #24
def fit_v06_model_joint(r, sb_src, sb_src_err, instruments, theta, energy, results_pickle=None):
    """
    Fit a v06 x psf model to any combination of instruments via joint
    likelihood

    Arguments:
    """

    # settings
    APPLY_PSF = True
    DO_ZERO_PAD = True
    DO_FIT = True
    FIT_METHOD = 'simplex'
    # FIT_METHOD = 'leastsq'     # 'leastsq' - Levemberg-Markquardt,
                               # 'simplex' - simplex
    CALC_1D_CI = False         # in most cases standard error is good
                               # enough, this is not needed then
    CALC_2D_CI = False
    PLOT_PROFILE = True
    PRINT_FIT_DIAGNOSTICS = True

    ######################################################################
    # modelling is done in 2D and then projected - setup here the 2D
    # parameters

    size = 2.0 * r.max()
    xsize = size
    ysize = xsize
    xcen = xsize/2
    ycen = ysize/2
    # imsize = input_im.shape         # FIXME: is this necessary? I could just use it inside the model
    imsize = (size, size)         # FIXME: is this necessary? I could just use it inside the model

    xsize_obj = xsize # 100             # if running t1.fits set to 100 else xsize
    ysize_obj = xsize_obj
    xcen_obj = xsize_obj / 2
    ycen_obj = ysize_obj / 2
    r_aper = xsize_obj  / 2        # aperture for the fitting

    # pre-calculate distmatrix for speedup - it is same for all
    # instruments
    distmatrix = distance_matrix(zeros((imsize[0]-2, imsize[1]-2)), xcen_obj, ycen_obj).astype(int) # need int for bincount

    # set the ancillary parameters
    # +1 bc of the central divergence
    data = empty(imsize)
    distmatrix_input = distance_matrix(data, xcen_obj, ycen_obj).astype('int') + 1
    bgrid = unique(distmatrix_input.flat)

    # r contains the start of the innermost bin for integration, but not needed for plotting
    rplt = r[1:]

    ######################################################################
    # scale the data

    # scale_sb_src = {}
    # scale_sb_src_err = {}
    psf_dict = {}
    ndata = 0

    for instrument in instruments:
        ndata += len(sb_src[instrument])
        psf_dict[instrument] = make_2d_king(distmatrix_input, instrument, theta[instrument], energy)

    ######################################################################
    # init beta model

    n0 = 1e+0
    rc = 20.0
    beta = 4.0/3.0
    rs = 20.0
    alpha = 1.5
    gamma = 3.0
    epsilon = 1.5
    r500_pix = r.max()

    # v06 pars lmfit structure
    pars = lm.Parameters()
    pars.add('rc'      , value=rc, vary=True, min=0.05, max=r.max())
    pars.add('beta'    , value=beta, vary=True, min=0.05, max=2.0)
    pars.add('rs'      , value=rs, vary=True, min=0.05, max=2*r.max())
    pars.add('alpha'   , value=alpha, vary=True, min=0.01, max=3.0)
    pars.add('epsilon' , value=epsilon, vary=True, min=0.0, max=5.0)
    pars.add('gamma'   , value=gamma, vary=False)

    # FIXME: reasonable initial value and bounds!
    for instrument in instruments:
        pars.add('n0_'+instrument, value=n0, #value=mean(sb_src[instrument]),
                 vary=True, min=1.0e-9, max=1.0e3)

    # non-fit arguments
    nonfit_args = (distmatrix_input, bgrid, r500_pix, instruments, psf_dict,
                   xcen_obj, ycen_obj, r, sb_src, sb_src_err)

    # fit stop criteria
    if FIT_METHOD == 'leastsq':
        # leastsq_kws={'xtol': 1.0e-7, 'ftol': 1.0e-7, 'maxfev': 1.0e+0} # debug set; quickest
        # leastsq_kws={'xtol': 1.0e-7, 'ftol': 1.0e-7, 'maxfev': 1.0e+4} # debug set; some evol
        leastsq_kws={'xtol': 1.0e-7, 'ftol': 1.0e-7, 'maxfev': 1.0e+7}
        # leastsq_kws={'xtol': 1.0e-8, 'ftol': 1.0e-8, 'maxfev': 1.0e+9}

    if FIT_METHOD == 'simplex':
        # leastsq_kws={'xtol': 1.0e-7, 'ftol': 1.0e-7, 'maxfun': 1.0e+1} # debug set; quickest
        # leastsq_kws={'xtol': 1.0e-7, 'ftol': 1.0e-7, 'maxfun': 1.0e+4} # debug set; some evol
        leastsq_kws={'xtol': 1.0e-7, 'ftol': 1.0e-7, 'maxfun': 1.0e+7}
        # leastsq_kws={'xtol': 1.0e-8, 'ftol': 1.0e-8, 'maxfun': 1.0e+9}

    ######################################################################
    # do the fit: beta

    if DO_FIT:
        print "starting v06 fit with method :: ", FIT_METHOD
        t1 = time.time()  # time.clock() was removed in Python 3.8

        result = lm.minimize(v06_psf_2d_lmfit_profile_joint,
                             pars,
                             args=nonfit_args,
                             method=FIT_METHOD,
                             **leastsq_kws)

        t2 = time.time()

        print()
        print()
        print("fitting took: ", t2-t1, " s")
        print()
        print()

        ######################################################################
        # get the output model

        (r_model, profile_norm_model) = \
            v06_psf_2d_lmfit_profile_joint(pars, distmatrix_input, bgrid,
                                           r500_pix, instruments, psf_dict,
                                           xcen_obj, ycen_obj)

        ######################################################################
        # save structures

        if results_pickle:
            outstrct = lmfit_result_to_dict(result, pars)

            with open(results_pickle, 'wb') as output:
                pickle.dump(outstrct, output, pickle.HIGHEST_PROTOCOL)

                print "results written to:: ", results_pickle

        ######################################################################
        # output

        if PRINT_FIT_DIAGNOSTICS:
            print_fit_diagnostics(result, t2-t1, ndata, leastsq_kws)

        # print_result_tab(pars_true, pars)
        lm.printfuncs.report_errors(result.params)

        with open(results_pickle+'.txt', 'w') as f:
            sys.stdout = f

            if PRINT_FIT_DIAGNOSTICS:
                print_fit_diagnostics(result, t2-t1, ndata, leastsq_kws)

            print()
            print()
            lm.printfuncs.report_errors(result.params)
            print()
            print()

            sys.stdout = sys.__stdout__

        print()
        print("fitting subroutine done!")

    ######################################################################
    # plot beta fit and data profiles

    if DO_FIT and PLOT_PROFILE:
        for instrument in instruments:
            output_figure = results_pickle+'.'+instrument+'.beta_psf.png'

            print "result plot :: ", output_figure

            # FIXME: implement plotter for joint fits
            plot_data_model_resid(rplt, sb_src[instrument],
                              r_model, profile_norm_model[instrument],
                              output_figure, sb_src_err[instrument])

    ######################################################################
    # calculate confidence intervals

    if DO_FIT and CALC_1D_CI:
        print "Calculating 1D confidence intervals"
        sigmas = [0.682689492137, 0.954499736104, 0.997300203937]
        # sigmas = [0.682689492137, 0.954499736104]
        # sigmas = [0.997300203937]
        # sigmas = [0.954499736104]
        # sigmas = [0.682689492137]
        # ci_pars = ['rc', 'beta']
        # ci_pars = ['rc']
        # ci_pars = ['n0_pn', 'rc']
        ci_pars = ['n0_'+instruments[0]]

        t1 = time.time()
        ci, trace = lm.conf_interval(result, p_names=ci_pars, sigmas=sigmas,
                                     trace=True, verbose=True, maxiter=1e3)

        t2 = time.time()

        # save to file
        with open(results_pickle+'.ci', 'wb') as output:
            pickle.dump(ci, output, pickle.HIGHEST_PROTOCOL)

        print()
        print("Confidence interval calculation took : ", t2 - t1)

        lm.printfuncs.report_ci(ci)

    ######################################################################
    # FIXME: not done yet: Calculate 2D confidence intervals

    if DO_FIT and  CALC_2D_CI:
        output_figure = results_pickle+'.2d_like_beta_psf.png'
        from timer import Timer

        with Timer() as t:
            print "Calculating 2D confidence intervals"
            x, y, likelihood = lm.conf_interval2d(result,'rcore','beta', 10, 10)
            plt_like_surface(x, y, likelihood, output_figure, 'rcore', 'beta')

        print "elasped time:", t.secs, " s"

    return 0
Example #25
def process(filename, post, post2_start, fit=True):
    kin = [-600, post[1]]
    fig_width = 14
    fs = 18
    meth = 'em'   # by default use data generated from this fitting method
    def savefig(title, **kwargs):
        plt.savefig("figures/%s %s.png" % (filename, title))

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Load Data
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    methods, windows, step = _get_methods_windows_step(filename)
    bursts = load_bursts_data(filename, windows[1], step)
    bursts_pre = bursts[bursts.tstop < 0].copy()
    bursts_post = bursts[(bursts.tstart > post[0]) & (bursts.tstart < post[1])].copy()
    bursts_post2 = bursts[bursts.tstart > post2_start].copy()
    params_all = load_fit_data(filename)
    params, params_pre, params_post, params_post2 = partition_fit_data(params_all, kin, post, post2_start)
    p = {window: params[meth, window, step] for window in windows}

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Number of Bursts
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    fig, ax = plt.subplots(figsize=(fig_width, 3))
    ax.plot(bursts.tstart, bursts.num_bursts)
    ax.axvline(0, color='k', ls='--')
    ax.axvline(post[1], color='k', ls='--')
    ax.axvline(post2_start, color='k', ls='--')
    title = 'Number of Bursts - Full measurement'
    ax.set_title(title, fontsize=fs)
    savefig(title)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Burst Duration
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    fig, ax = plt.subplots(figsize=(fig_width, 3))
    ax.plot(bursts.tstart, bursts.burst_width)
    ax.axvline(0, color='k', ls='--')
    ax.axvline(post[1], color='k', ls='--')
    ax.axvline(post2_start, color='k', ls='--')
    title = 'Burst Duration - Full measurement'
    ax.set_title(title, fontsize=fs)
    savefig(title)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Number of Bursts in PRE, POST, POST2 time ranges
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    for nb, label in zip((bursts_pre, bursts_post, bursts_post2),
                         ('PRE', 'POST', 'POST2')):
        slope, intercept, r_value, p_value, std_err = linregress(nb.tstart, nb.num_bursts)
        y_model = nb.tstart*slope + intercept
        nb_corr = (nb.num_bursts - y_model) + nb.num_bursts.mean()
        nb['num_bursts_corr'] = nb_corr
        nb['linregress'] = y_model

        nbc = nb.num_bursts_corr
        nbm = nb.num_bursts.mean()
        print("%5s Number of bursts (detrended): %7.1f MEAN, %7.1f VAR, %6.3f VAR/MEAN" %
              (label, nbm, nbc.var(), nbc.var()/nbm))
        fig, ax = plt.subplots(1, 2, figsize=(fig_width, 4))
        ax[0].plot(nb.tstart, nb.num_bursts)
        ax[0].plot(nb.tstart, nb.linregress, 'r')
        ax[1].plot(nb.tstart, nb.num_bursts_corr)
        ax[1].plot(nb.tstart, np.repeat(nbm, nb.shape[0]), 'r')
        title = 'Number of bursts - %s-kinetics' % label
        fig.text(0.35, 0.95, title, fontsize=fs)
        savefig(title)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Full Kinetic Curve (Population Fraction)
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    fig, ax = plt.subplots(figsize=(fig_width, 3))
    ax.plot('kinetics', data=params_all[meth, windows[0], step], marker='h', lw=0, color='gray', alpha=0.2)
    ax.plot('kinetics', data=params_all[meth, windows[1], step], marker='h', lw=0, alpha=0.5)
    ax.axvline(0, color='k', ls='--')
    ax.axvline(post[1], color='k', ls='--')
    ax.axvline(post2_start, color='k', ls='--')
    title = 'Population Fraction - Full measurement'
    ax.set_title(title, fontsize=fs)
    savefig(title)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Kinetic Curve Auto-Correlation
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    w = windows[0]
    d = np.array(p[w].kinetics.loc[post[0] : post[1]])
    delta_t_max = 600  # seconds
    corr, t_corr = autocorrelation(d, t_step=step, delta_t_max=delta_t_max)

    fig, ax = plt.subplots(figsize=(fig_width, 3))
    ax.plot(t_corr, corr, '-o')
    ax.set_xlabel(r'$\Delta t$ (seconds)')
    title = 'Kinetic Curve Auto-Correlation - window = %d s' % w
    ax.set_title(title, fontsize=fs)
    savefig(title)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Kinetic Curve in Stationary Time Ranges
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    for px, label in zip([params_pre, params_post, params_post2],  ('PRE', 'POST', 'POST2')):
        fig, ax = plt.subplots(1, 2, figsize=(fig_width, 4))
        ax[0].plot('kinetics', data=px[meth, windows[0], step], marker='h', lw=0, color='gray', alpha=0.2)
        ax[0].plot('kinetics', data=px[meth, windows[1], step], marker='h', lw=0, alpha=0.5)
        ax[0].plot('kinetics_linregress', data=px[meth, windows[1], step], color='r')
        s1, s2 = slice(None, None, windows[0]//step), slice(None, None, windows[1]//step)
        ax[1].plot(px[meth, windows[0], step].index[s1], px[meth, windows[0], step].kinetics[s1],
                   marker='h', lw=0, color='gray', alpha=0.2)
        ax[1].plot(px[meth, windows[1], step].index[s2], px[meth, windows[1], step].kinetics[s2],
                   marker='h', lw=0, alpha=1)
        ax[1].plot('kinetics_linregress', data=px[meth, windows[1], step], color='r')
        print('%5s Kinetics 30s:     %.3f STD, %.3f STD detrended.' %
              (label, (100*px[meth, windows[1], step].kinetics).std(),
               (100*px[meth, windows[1], step].kinetics_linregress).std()))
        title = 'Population Fraction - %s-kinetics' % label
        fig.text(0.40, 0.95, title, fontsize=fs)
        savefig(title)

    if not fit:
        return None, params
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Exploratory Fit
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    #method = 'nelder'
    decimation = 5

    t0_vary = False
    model = models.factory_model_exp(t0_vary=t0_vary)
    rest0f, tau = {}, {}
    for window, px in p.items():
        #____ = model.fit(np.array(px.kinetics), t=px.tstart, verbose=False, method=method)
        resx = model.fit(np.array(px.kinetics), t=px.tstart, verbose=False)
        rest0f[window] = resx
        tau[window] = resx.best_values['tau']
    tau0, tau1 = tau[windows[0]], tau[windows[1]]
    print(' FIT  Simple Exp (t0_vary=%s):  tau(w=%ds) = %.1fs  tau(w=%ds) = %.1fs  Delta = %.1f%%' %
          (t0_vary, windows[0], tau0, windows[1], tau1, 100*(tau0 - tau1)/tau0), flush=True)

    t0_vary = False
    reswt0f, tauw = {}, {}
    for window, px in p.items():
        modelw1 = models.factory_model_expwin(tau=150, t_window=window, decimation=decimation, t0_vary=t0_vary)
        #____ = modelw1.fit(np.array(px.kinetics), t=px.tstart, verbose=False, method=method)
        resx = modelw1.fit(np.array(px.kinetics), t=px.tstart, verbose=False)
        reswt0f[window] = resx
        tauw[window] = resx.best_values['tau']
    tauw0, tauw1 = tauw[windows[0]], tauw[windows[1]]
    print(' FIT  Window Exp (t0_vary=%s):  tau(w=%ds) = %.1fs  tau(w=%ds) = %.1fs  Delta = %.1f%%' %
          (t0_vary, windows[0], tauw0, windows[1], tauw1, 100*(tauw0 - tauw1)/tauw0), flush=True)

    t0_vary = True
    model = models.factory_model_exp(t0_vary=t0_vary)
    res, tau, ci = {}, {}, {}
    for window, px in p.items():
        #____ = model.fit(np.array(px.kinetics), t=px.tstart, verbose=False, method=method)
        resx = model.fit(np.array(px.kinetics), t=px.tstart, verbose=False)
        res[window] = resx
        tau[window] = resx.best_values['tau']
        ci[window] = lmfit.conf_interval(resx, resx)
    tau0, tau1 = tau[windows[0]], tau[windows[1]]
    print(' FIT  Simple Exp (t0_vary=%s):  tau(w=%ds) = %.1fs  tau(w=%ds) = %.1fs  Delta = %.1f%%' %
          (t0_vary, windows[0], tau0, windows[1], tau1, 100*(tau0 - tau1)/tau0), flush=True)

    t0_vary = True
    resw, tauw, ciw = {}, {}, {}
    for window, px in p.items():
        modelw1 = models.factory_model_expwin(tau=150, t_window=window, decimation=decimation, t0_vary=t0_vary)
        #____ = modelw1.fit(np.array(px.kinetics), t=px.tstart, verbose=False, method=method)
        resx = modelw1.fit(np.array(px.kinetics), t=px.tstart, verbose=False)
        resw[window] = resx
        tauw[window] = resx.best_values['tau']
        ciw[window] = lmfit.conf_interval(resx, resx)
    tauw0, tauw1 = tauw[windows[0]], tauw[windows[1]]
    print(' FIT  Window Exp (t0_vary=%s):  tau(w=%ds) = %.1fs  tau(w=%ds) = %.1fs  Delta = %.1f%%' %
          (t0_vary, windows[0], tauw0, windows[1], tauw1, 100*(tauw0 - tauw1)/tauw0), flush=True)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Kinetic Curve During Transient
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    t = params[meth, windows[0], step].tstart
    fig, ax = plt.subplots(1, 2, figsize=(fig_width, 6))
    ax[0].plot('tstart', 'kinetics', data=params[meth, windows[0], step], marker='h', lw=0, color='gray', alpha=0.2)
    ax[0].plot('tstart', 'kinetics', data=params[meth, windows[1], step], marker='h', lw=0, alpha=0.5)
    ax[0].plot(t, models.expwindec_func(t, **resw[windows[1]].best_values), 'm')
    ax[0].set_xlim(kin[0], kin[1])

    s1, s2 = slice(None, None, windows[0]//step), slice(None, None, windows[1]//step)
    ax[1].plot(params[meth, windows[0], step].index[s1], params[meth, windows[0], step].kinetics[s1],
               marker='h', lw=0, color='gray', alpha=0.2)
    ax[1].plot(params[meth, windows[1], step].index[s2], params[meth, windows[1], step].kinetics[s2],
               marker='h', lw=0, alpha=1)
    ax[1].plot(t, models.expwindec_func(t, **resw[windows[1]].best_values), 'm')
    title = 'Population Fraction - kinetics'
    fig.text(0.40, 0.95, title, fontsize=fs)
    savefig(title)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Plot Fitted Kinetic Curves
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    fitcycler = cycler('color', ('k', 'm', red))
    datacycler = cycler('color', ('grey', blue, green)) + cycler('alpha', (0.2, 0.5, 0.5))
    fig, ax = plt.subplots(2, 1, figsize=(fig_width, 8), sharex=True)

    tau_str = r'$\tau_{%ds} = %.1f s (%.1f, %.1f)$'
    for i, ((w, px), fitsty, datsty) in enumerate(zip(sorted(p.items()), fitcycler, datacycler)):
        if i == 2: break
        ax[0].plot('tstart', 'kinetics', 'o', data=px, label='', **datsty)
        label = tau_str % (w, tau[w], ci[w]['tau'][2][1], ci[w]['tau'][4][1])
        ax[0].plot(px.tstart, models.exp_func(px.tstart, **res[w].best_values),
                   label=label, **fitsty)
    ax[0].legend(loc='lower right', fontsize=fs)
    w = windows[1]
    ax[1].plot(p[w].tstart, p[w].kinetics - models.exp_func(p[w].tstart, **res[w].best_values),
               'o', color=purple)
    ax[1].set_title(r'Residuals - $\chi_\mathrm{red}^2 = %.4f \, 10^{-3}$' %
                    (res[w].redchi*1e3), fontsize=fs)
    ax[0].set_xlim(kin[0], kin[1])
    title = 'Kinetics Fit - Simple Exponential (t0_vary=%s)' % t0_vary
    ax[0].set_title(title, fontsize=fs)
    savefig(title)

    fig, ax = plt.subplots(2, 1, figsize=(fig_width, 8), sharex=True)
    for i, ((w, px), fitsty, datsty) in enumerate(zip(sorted(p.items()), fitcycler, datacycler)):
        if i == 2: break
        ax[0].plot('tstart', 'kinetics', 'o', data=px, label='', **datsty)
        label = tau_str % (w, tauw[w], ciw[w]['tau'][2][1], ciw[w]['tau'][4][1])
        ax[0].plot(px.tstart, models.expwindec_func(px.tstart, **resw[w].best_values),
                   label=label, **fitsty)
    ax[0].legend(loc='lower right', fontsize=fs)
    w = windows[1]
    ax[1].plot(p[w].tstart, p[w].kinetics - models.exp_func(p[w].tstart, **res[w].best_values),
               'o', color=purple)
    ax[1].set_title(r'Residuals - $\chi_\mathrm{red}^2 = %.4f \, 10^{-3}$' %
                    (resw[w].redchi*1e3), fontsize=fs)
    ax[0].set_xlim(kin[0], kin[1])
    title = 'Kinetics Fit - Integrated Exponential (t0_vary=%s)' % t0_vary
    ax[0].set_title(title, fontsize=fs)
    savefig(title)
    return (res, resw, rest0f, reswt0f, ci, ciw), params
Example #26
def thindiskcurve_fitter(xsep,
                         velo,
                         error=None,
                         mguess=20 * u.M_sun,
                         rinner=20 * u.au,
                         router=50 * u.au,
                         fixedmass=False,
                         conf_int=False,
                         **kwargs):

    parameters = lmfit.Parameters()
    parameters.add(
        'mass',
        value=u.Quantity(mguess, u.M_sun).value,
        min=min([10, mguess.value]),
        max=25,
        vary=not fixedmass,
    )
    parameters.add('rinner',
                   value=u.Quantity(rinner, u.au).value,
                   min=3,
                   max=50)
    parameters.add('delta', value=20, min=10, max=50)
    parameters.add('router',
                   value=u.Quantity(router, u.au).value,
                   min=20,
                   max=100,
                   expr='rinner+delta')
    parameters.add('vcen', value=vcen.value, min=3.5, max=7.5)

    fcn_kws = kwargs
    fcn_kws.update({
        'xsep': u.Quantity(xsep, u.au),
        'velo': u.Quantity(velo, u.km / u.s),
        'error': error
    })

    minimizer = lmfit.Minimizer(thindiskcurve_residual,
                                parameters,
                                epsfcn=0.005,
                                fcn_kws=fcn_kws)

    result = minimizer.minimize()

    result.params.pretty_print()

    if fixedmass:
        assert parameters['mass'].value == mguess.value

    if conf_int:
        lmfit.report_fit(result.params, min_correl=0.5)

        ci, trace = lmfit.conf_interval(minimizer,
                                        result,
                                        sigmas=[1, 2],
                                        trace=True,
                                        verbose=False)
        lmfit.printfuncs.report_ci(ci)
        return result, ci, trace, minimizer

    return result
Example #27
def residual(p):
    v = p.valuesdict()
    return v['a1'] * np.exp(-x / v['t1']) + v['a2'] * np.exp(
        -(x - 0.1) / v['t2']) - y


# first solve with Nelder-Mead
mi = lmfit.minimize(residual, p, method='Nelder')

mi = lmfit.minimize(residual, p)

lmfit.printfuncs.report_fit(mi.params, min_correl=0.5)

ci, trace = lmfit.conf_interval(mi,
                                sigmas=[0.68, 0.95],
                                trace=True,
                                verbose=False)
lmfit.printfuncs.report_ci(ci)

plot_type = 3
if plot_type == 0:
    plt.plot(x, y)
    plt.plot(x, residual(p) + y)

elif plot_type == 1:
    cx, cy, grid = lmfit.conf_interval2d(mi, 'a2', 't2', 30, 30)
    plt.contourf(cx, cy, grid, np.linspace(0, 1, 11))
    plt.xlabel('a2')
    plt.colorbar()
    plt.ylabel('t2')
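The trace requested above is a dictionary keyed by parameter name; for each scanned parameter it stores arrays of the scanned values, the corresponding values of the other parameters, and the probability under the key 'prob'. A minimal plotting sketch, assuming the ci and trace computed above and matplotlib imported as plt as elsewhere in this example:

t_a2 = trace['a2']
plt.figure()
plt.scatter(t_a2['a2'], t_a2['prob'], c=t_a2['t2'])
plt.xlabel('a2')
plt.ylabel('probability')
plt.colorbar(label='t2')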
Example #28
fit_params = Parameters()
fit_params.add('amp', value=13.0)
fit_params.add('period', value=2)
fit_params.add('shift', value=0.0)
fit_params.add('decay', value=0.02)

out = minimize(residual, fit_params, args=(x, ), kws={'data': data})

fit = residual(fit_params, x)

print(' N fev = ', out.nfev)
print(out.chisqr, out.redchi, out.nfree)

report_fit(fit_params)
#ci=calc_ci(out)
ci, tr = conf_interval(out, trace=True)
report_ci(ci)

if HASPYLAB:
    names = fit_params.keys()
    i = 0
    gs = pylab.GridSpec(4, 4)
    sx = {}
    sy = {}
    for fixed in names:
        j = 0
        for free in names:
            if j in sx and i in sy:
                ax = pylab.subplot(gs[i, j], sharex=sx[j], sharey=sy[i])
            elif i in sy:
                ax = pylab.subplot(gs[i, j], sharey=sy[i])
Example #29
#     # standard error is sigma/sqrt(n)
#     se = numpy.std(y)/numpy.sqrt(len(y))
#     return out.params['gradient'] /

params = Parameters()
params.add('gradient', value=1)
params.add('intercept', value=1)

mini = lmfit.Minimizer(residual, params, args=(x, y))
out = mini.minimize()  #residual, params, args=(x,y))
print(fit_report(out))
print('Residual Standard Error = sqrt(reduced chi-sqr)',
      np.sqrt(out.redchi))  # RSE = sqrt(redchi); redchi = chisqr / (N_values - N_varys)
print('R-squared', rsqr(out, y))

modm = out.params['gradient']
modc = out.params['intercept']
moddata = modm * x + modc

print('testing gradient standard error', out.params['gradient'].stderr)
print('manually this is', np.sqrt(np.std(y)**2 / np.sum((x - np.mean(x))**2)))
import matplotlib.pyplot as plt

# plt.figure()
# plt.scatter(x,y)
# plt.plot(x,moddata)
# plt.show()

ci, trace = lmfit.conf_interval(out, sigmas=[1, 2], trace=True)
lmfit.printfuncs.report_ci(ci)
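The manual check above uses np.std(y), which mixes the spread of the data with the spread of the model; the textbook standard error of the slope uses the residual variance instead. A hedged cross-check against the lmfit stderr, assuming the x, y and out defined above:

resid = y - (out.params['gradient'].value*x + out.params['intercept'].value)
dof = len(y) - out.nvarys                        # N - number of varied parameters
s2 = np.sum(resid**2) / dof                      # residual variance
slope_se = np.sqrt(s2 / np.sum((x - np.mean(x))**2))
print('analytic slope stderr:', slope_se)
print('lmfit slope stderr:   ', out.params['gradient'].stderr)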
Example #30
fit_params = Parameters()
fit_params.add('amp', value=13.0)
fit_params.add('period', value=2)
fit_params.add('shift', value=0.0)
fit_params.add('decay', value=0.02)

out = minimize(residual, fit_params, args=(x,), kws={'data':data})

fit = residual(fit_params, x)

print( ' N fev = ', out.nfev)
print( out.chisqr, out.redchi, out.nfree)

report_fit(fit_params)
#ci=calc_ci(out)
ci, tr = conf_interval(out, trace=True)
report_ci(ci)
    
if HASPYLAB:
    names = fit_params.keys()
    i = 0
    gs = pylab.GridSpec(4, 4)
    sx = {}
    sy = {}
    for fixed in names:
        j = 0
        for free in names:
            if j in sx and i in sy:
                ax = pylab.subplot(gs[i, j], sharex=sx[j], sharey=sy[i])
            elif i in sy:
                ax = pylab.subplot(gs[i, j], sharey=sy[i])
Example #31
import lmfit
import numpy as np

x = np.linspace(0.3,10,100)
np.random.seed(0)

y = 1/(0.1*x)+2+0.1*np.random.randn(x.size)

p = lmfit.Parameters()
p.add_many(('a', 0.1), ('b', 1))

def residual(p):
   a = p['a'].value
   b = p['b'].value

   return 1/(a*x)+b-y

mi = lmfit.minimize(residual, p)
lmfit.printfuncs.report_fit(mi.params)

ci = lmfit.conf_interval(mi)
lmfit.printfuncs.report_ci(ci)


Example #32
#!/usr/bin/env python

# <examples/doc_confidence_basic.py>
import numpy as np

import lmfit

x = np.linspace(0.3, 10, 100)
np.random.seed(0)
y = 1/(0.1*x) + 2 + 0.1*np.random.randn(x.size)

pars = lmfit.Parameters()
pars.add_many(('a', 0.1), ('b', 1))


def residual(p):
    return 1/(p['a']*x) + p['b'] - y


mini = lmfit.Minimizer(residual, pars)
result = mini.minimize()
print(lmfit.fit_report(result.params))

ci = lmfit.conf_interval(mini, result)
lmfit.printfuncs.report_ci(ci)
# <end examples/doc_confidence_basic.py>
Example #33
File: test_lmfit.py Project: rsuhada/code
print('=' * 70)
print('=' * 70)

# create parameter container
pars=lm.Parameters()
pars.add_many(('a', 2.0),('b', 3.0))

FIT_METHOD='leastsq'
leastsq_kws={'xtol': 1.0e-7, 'ftol': 1.0e-7, 'maxfev': 1.0e+7}
result = do_fit()

# confidence interval
# sigmas = [0.682689492137]
sigmas = [0.682689492137, 0.954499736104, 0.997300203937]
ci_pars = ['a', 'b']
ci, trace = lm.conf_interval(result, p_names=ci_pars, sigmas=sigmas,
                      trace=True, verbose=True, maxiter=1e3)
lm.printfuncs.report_ci(ci)

print()

######################################################################
# do the fitting: simplex

print()
print()
print('=' * 70)
print('=' * 70)

# create parameter container
pars=lm.Parameters()
pars.add_many(('a', 2.0),('b', 3.0))
Example #34
    minus = []
    for i in range(nParameters):
        plus.append(np.percentile(pSamples[i], 97.5) - mean[i])
        minus.append(mean[i] - np.percentile(pSamples[i], 2.5))

    # Note that the limits are not symmetric
    print('Computed 95 percent limits (2.5th and 97.5th percentiles) from the Monte Carlo run:')
    for i in range(nParameters):
        print(toFit[i] + ': ', mean[i], "+/- ", plus[i], minus[i])

    print('Time for monte carlo = ', time.time() - to)
    print('Number of simulations = ', nsims)

if False:
    to = time.time()
    ci = lmfit.conf_interval(minimizer, result)
    print('Time for conf Interval = ', time.time() - to)
    lmfit.printfuncs.report_ci(ci)
    #
    cx, cy, grid = lmfit.conf_interval2d(minimizer, result, toFit[0], toFit[1],
                                         100, 100)
    plt.contourf(cx, cy, grid, np.linspace(0, 1, 11))
    plt.xlabel(toFit[0])
    plt.colorbar()
    plt.ylabel(toFit[1])

if False:
    np.set_printoptions(precision=4, linewidth=150)

    print(result.covar)
Example #35
def OBS(kic):
    path = 'TEMP/' + str(kic) + '/'

    color = "#ff7f0e"
    c1 = "#00ff00"
    c2 = '#66cccc'
    c3 = '#cc00ff'
    c4 = '#ee0000'
    labels = {
        "l0": '$l=0$',
        "l1": '$l=1$',
        "l2": '$l=2$',
        "lz": 'Lorentz peak'
    }

    ############################################################################################################
    def separacao(freq, psd):
        """This function will calculate the distance between two frequencies
         It is good to automate the calculation of large and small separations"""
        global X
        X = [], []
        fig = plt.figure(figsize=(17, 5), dpi=130)
        plt.plot(freq, psd, 'k-', lw=0.5, alpha=0.5)
        plt.title("Select the high frequency region")
        plt.xlim(min(freq), max(freq))
        plt.ylim(min(psd), max(psd))

        def onclick(event):
            global X
            x = event.xdata
            X = np.append(X, x)
            if len(X) == 1:
                plt.plot((x, x), (min(psd), max(psd)),
                         'r--',
                         alpha=0.5,
                         lw=0.8)
                plt.xlim(min(freq), max(freq))
                plt.ylim(min(psd), max(psd))
                plt.draw()
            if len(X) == 2:
                plt.plot((X[-1], X[-1]), (min(psd), max(psd)),
                         'r--',
                         alpha=0.5,
                         lw=0.8)
                plt.fill_betweenx((min(psd), max(psd)), (X[-2], X[-2]),
                                  (X[-1], X[-1]),
                                  color='red',
                                  alpha=0.2)
                plt.xlim(min(freq), max(freq))
                plt.ylim(min(psd), max(psd))
                plt.draw()
            if len(X) > 2:
                plt.clf()
                plt.plot(freq, psd, 'k', lw=0.5, alpha=0.5)
                plt.plot((X[-2], X[-2]), (min(psd), max(psd)),
                         'r--',
                         alpha=0.5,
                         lw=0.8)
                plt.plot((X[-1], X[-1]), (min(psd), max(psd)),
                         'r--',
                         alpha=0.5,
                         lw=0.8)
                plt.fill_betweenx((min(psd), max(psd)), (X[-2], X[-2]),
                                  (X[-1], X[-1]),
                                  color='red',
                                  alpha=0.2)
                plt.xlim(min(freq), max(freq))
                plt.ylim(min(psd), max(psd))
                plt.draw()
            print('Last click: x = ', x)

        fig.canvas.mpl_connect('button_press_event', onclick)
        plt.show()
        plt.clf()
        return abs(X[-2] - X[-1]), X[-2], X[-1]

    def filtragem(x, y, ajuste):

        plt.subplots_adjust(bottom=0.2)
        box_kernel = Box1DKernel(25)
        yha = convolve(y, box_kernel)
        yhat = ((y - yha)**2)
        yhat = (yhat / yhat.mean())
        yoriginal = copy.copy(yhat)
        original, = plt.plot(x, yoriginal, 'b-', alpha=0.3)
        l, = plt.plot(x, yhat, 'k-', alpha=0.8)
        tresh = 0.05
        j = peakutils.indexes(yhat, thres=tresh)
        inf = min(yhat[j])
        linha, = plt.plot((min(x), max(x)), (inf, inf),
                          ls='--',
                          color='blue',
                          lw=0.8)

        k = peakutils.indexes(yhat, thres=tresh)
        xx = x[k]
        yy = yhat[k]
        s = Lorentz1D(xx, yy, fwhm=0.025)
        freqs = np.array(s.amplitude)
        psds = np.array(s.x_0)
        modos, = plt.plot(freqs,
                          psds,
                          'x',
                          color='green',
                          alpha=0.4,
                          label=labels['lz'])
        plt.annotate(str(tresh), xy=(np.mean(xx), max(yy) / 2))

        class Index(object):
            ajuste = 25
            ajuste2 = 0.05

            def next(self, event):
                global yhat
                self.ajuste += 5
                print(self.ajuste)
                box_kernel = Box1DKernel(self.ajuste)
                yha = convolve(y, box_kernel)
                yhat = ((y - yha)**2)
                yhat = (yhat / yhat.mean())
                original.set_ydata(yoriginal)
                l.set_ydata(yhat)
                plt.draw()

            def prev(self, event):
                global yhat
                self.ajuste -= 5
                if self.ajuste < 1:
                    self.ajuste = 5
                print(self.ajuste)
                box_kernel = Box1DKernel(self.ajuste)
                yha = convolve(y, box_kernel)
                yhat = ((y - yha)**2)
                yhat = (yhat / yhat.mean())
                original.set_ydata(yoriginal)
                l.set_ydata(yhat)
                plt.draw()

            def up(self, event):
                global inf, limite, yhat
                self.ajuste2 += 0.01
                j = peakutils.indexes(yhat, thres=self.ajuste2)
                inf = min(yhat[j])
                infinito = (inf, inf)
                k = peakutils.indexes(yhat, thres=self.ajuste2)
                xx = x[k]
                yy = yhat[k]
                s = Lorentz1D(xx, yy, fwhm=0.025)
                freqs = np.array(s.amplitude)
                psds = np.array(s.x_0)
                print('With tresh: ', np.round(self.ajuste2, 3), ' Found ',
                      len(psds), ' mods')
                original.set_ydata(yoriginal)
                linha.set_ydata(infinito)
                modos.set_ydata(psds)
                modos.set_xdata(freqs)
                plt.annotate(str(self.ajuste2),
                             xy=(np.mean(xx), max(yy) / 2))
                limite = self.ajuste2
                plt.draw()

            def down(self, event):
                global inf, limite, yhat
                self.ajuste2 -= 0.01
                if self.ajuste2 < 0:
                    self.ajuste2 = 0
                j = peakutils.indexes(yhat, thres=self.ajuste2)
                inf = min(yhat[j])
                infinito = (inf, inf)
                k = peakutils.indexes(yhat, thres=self.ajuste2)
                xx = x[k]
                yy = yhat[k]
                s = Lorentz1D(xx, yy, fwhm=0.025)
                freqs = np.array(s.amplitude)
                psds = np.array(s.x_0)
                print('With thresh:', np.round(self.ajuste2, 3), 'found',
                      len(psds), 'modes')
                original.set_ydata(yoriginal)
                linha.set_ydata(infinito)
                modos.set_ydata(psds)
                modos.set_xdata(freqs)
                limite = self.ajuste2
                plt.draw()

        callback = Index()
        axprev = plt.axes([0.7, 0.05, 0.1, 0.075])
        axnext = plt.axes([0.81, 0.05, 0.1, 0.075])
        axup = plt.axes([0.2, 0.05, 0.1, 0.075])
        axdown = plt.axes([0.31, 0.05, 0.1, 0.075])
        bnext = Button(axnext, 'more')
        bnext.on_clicked(callback.next)
        bprev = Button(axprev, 'less')
        bprev.on_clicked(callback.prev)

        bup = Button(axup, 'up')
        bup.on_clicked(callback.up)

        down = Button(axdown, 'down')
        down.on_clicked(callback.down)
        plt.show()

        return x, yhat, limite

    f = np.loadtxt(str(path) + 'PS_' + str(kic) + '.txt')

    freq = f[:, 0]
    psd = f[:, 1]

    diff, primeiro, segundo = separacao(freq, psd)

    l = (primeiro < freq) & (freq < segundo)
    x, y = freq[l], psd[l]
    mod = GaussianModel(prefix='gauss_')
    pars = mod.guess(y, x=x)
    out = mod.fit(y, pars, x=x)
    data = out.fit_report()
    print(data)
    result = out.minimize(method='leastsq')
    ci = conf_interval(out,
                       result,
                       p_names=['gauss_center', 'gauss_amplitude'],
                       sigmas=(1, 2, 3, 5))
    report = lmfit.printfuncs.ci_report(ci)
    print(report)
    numax = np.array(out.params['gauss_center'])

    plt.annotate('',
                 xy=(primeiro, 0.8 * max(psd)),
                 xytext=(primeiro, max(psd)),
                 arrowprops=dict(shrink=0.025,
                                 alpha=0.8,
                                 fc=c1,
                                 ec='k',
                                 headwidth=5))
    plt.annotate('',
                 xy=(segundo, 0.8 * max(psd)),
                 xytext=(segundo, max(psd)),
                 arrowprops=dict(shrink=0.025,
                                 alpha=0.8,
                                 fc=c1,
                                 ec='k',
                                 headwidth=5))
    plt.annotate(r'$\nu_{max}$',
                 xy=(numax, 0.7 * max(psd)),
                 xytext=(numax, max(psd)),
                 arrowprops=dict(alpha=0.5,
                                 fc='r',
                                 ec='r',
                                 headwidth=2.4,
                                 width=2))
    plt.grid(alpha=0.4)
    plt.ylabel('PSD [Amplitude Units]')
    plt.xlabel(r'Frequency [$\mu$Hz]')
    plt.title('KIC {}'.format(kic))
    plt.plot(freq, psd, 'k-', lw=0.5, alpha=0.5)
    plt.tight_layout()
    plt.show()
    plt.clf()
    plt.close()

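    # asteroseismic scaling relation (solar reference values): estimate the
    # large frequency separation from numax as DeltaNu ~ 135*(numax/3050)**0.8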
    deltanu = 135 * ((numax / 3050)**0.8)
    np.savetxt(str(path) + 'Data_asteroseismic',
               np.c_[deltanu, numax],
               header='DeltaNu and Numax')
    print('DeltaNu: {}\nNumax: {}'.format(deltanu, numax))

    j = peakutils.indexes(y, thres=0.015)
    inf = min(y[j])
    plt.axhline(inf, xmin=0.045, xmax=0.95, ls='--', color='blue', lw=0.8)

    x, yhat, tresh = filtragem(x, y, 25)
    print(tresh)
    j = peakutils.indexes(yhat, thres=tresh)
    inf = min(yhat[j])
    plt.axhline(inf, xmin=0.045, xmax=0.95, ls='--', color='blue', lw=0.8)
    plt.plot(freq[l], yhat, 'k-', lw=0.5)
    k = peakutils.indexes(yhat, thres=tresh)
    xx = x[k]
    yy = yhat[k]
    s = Lorentz1D(xx, yy, fwhm=0.025)
    freqs = np.array(s.amplitude)
    psds = np.array(s.x_0)
    erro = s.fwhm * psds
    dados_lorenz = np.c_[freqs, psds, erro]
    Freq_Region = np.c_[freq[l], yhat]
    np.savetxt(
        str(path) + str(kic) + '_data_modos_obs.txt',
        dados_lorenz,
        header=
        'all peaks (frequencies) of modes detected by lorentz profile, power, error'
    )
    np.savetxt(str(path) + 'High_Freq_Region_' + str(kic) + '.txt',
               Freq_Region,
               header='High-frequency region with filter Box1DKernel')
    for i in range(0, len(freqs)):
        plt.plot([freqs[i]], [psds[i]],
                 'x',
                 color='green',
                 alpha=0.4,
                 label=labels['lz'])
        labels['lz'] = "_nolegend_"
    plt.tight_layout()
    plt.legend(loc='best', scatterpoints=1)
    plt.savefig(str(path) + 'peak_ident_KIC' + str(kic) + '_oversample.png',
                dpi=170)
    idx = np.argmax(psd[l])
    print('Greater power in the Gaussian region: {}'.format(x[idx]))
    plt.show()
예제 #36
0
def main():

    # sets up command line argument parser
    args = init_argparse()

    #       The file format is:
    #           X values of size n
    #           Number of replicates
    #           A values
    #           Y values with n values on each line
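    #
    #       For instance (hypothetical numbers), n = 3 X values, 2 replicates
    #       and 2 A values would be laid out as:
    #           0.0 1.0 2.0
    #           2
    #           0.5 1.5
    #           0.10 0.42 0.77
    #           0.12 0.40 0.80
    #           0.30 0.90 1.60
    #           0.28 0.95 1.55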
    if (args.input):  # has i flag, read from specified file
        f = open(args.input, 'r')
        x = array([float(val) for val in f.readline().split()])
        num_rep = int(f.readline())
        a = array([float(val) for val in f.readline().split()])
        y = zeros((len(a), len(x)))
        stddev = zeros((len(a), len(x)))
        for i in range(len(a)):
            all_y = zeros((num_rep, len(x)))
            for j in range(num_rep):
                all_y[j] = array([float(val) for val in f.readline().split()])
            y[i] = average(all_y, axis=0)
            stddev[i] = std(all_y, axis=0)
        f.close()

    else:  #read from cmdline
        x = array([float(val) for val in raw_input().split()])
        num_rep = int(raw_input())
        a = array([float(val) for val in raw_input().split()])
        y = zeros((len(a), len(x)))
        stddev = zeros((len(a), len(x)))
        for i in range(len(a)):
            all_y = zeros((num_rep, len(x)))
            for j in range(num_rep):
                all_y[j] = array([float(val) for val in raw_input().split()])
            y[i] = average(all_y, axis=0)
            stddev[i] = std(all_y, axis=0)

    #adding parameters, initial guesses, and constraints
    params = Parameters()
    params.add('Kd', value=1, min=0)
    params.add('EmFRETMAX', value=1, min=0)
    return_data = {}
    ci = []
    emfretmax = []
    emfretmax_error = []

    #run fitting procedure and display results
    #this needs to be repeated for each A value. Note that A[i] corresponds to Y[i]
    #X is assumed to hold constant across all of these, so it remains unchanged across iterations
    for i in range(len(a)):
        result = minimize(residuals, params, args=(x, y[i], a[i]))
        ci.append(conf_interval(result, maxiter=1000))
        emfretmax.append(params['EmFRETMAX'].value)
        emfretmax_error.append(params['EmFRETMAX'].stderr)

        # collect the fit results for this A value (A, Kd, Kd stderr,
        # EmFRETMAX, EmFRETMAX stderr, R^2)
        return_data[a[i]] = [
            a[i],
            round(params['Kd'].value, 4),
            round(params['Kd'].stderr, 4),
            round(params['EmFRETMAX'].value, 4),
            round(params['EmFRETMAX'].stderr, 4),
            round(1 - result.residual.var() / var(y), 4)
        ]

        # plots data and curve on graph and displays if output file is given
        if (args.scatter):
            create_scatter(args.scatter, result, x, y[i], a[i], stddev[i], i,
                           args.unit)
    if (args.bar):
        create_bar(args.bar, emfretmax, emfretmax_error, a)

    print(json.dumps(return_data))
예제 #37
0
import lmfit
import numpy as np

x = np.linspace(0.3,10,100)
np.random.seed(0)

y = 1/(0.1*x)+2+0.1*np.random.randn(x.size)

p = lmfit.Parameters()
p.add_many(('a', 0.1), ('b', 1))

def residual(p):
    return 1/(p['a']*x)+p['b']-y

minimizer = lmfit.Minimizer(residual, p)
out = minimizer.leastsq()
lmfit.printfuncs.report_fit(out.params)

ci = lmfit.conf_interval(minimizer, out)
lmfit.printfuncs.report_ci(ci)
예제 #38
0
# <examples/doc_confidence_basic.py>
import numpy as np

import lmfit

x = np.linspace(0.3, 10, 100)
np.random.seed(0)
y = 1/(0.1*x) + 2 + 0.1*np.random.randn(x.size)

pars = lmfit.Parameters()
pars.add_many(('a', 0.1), ('b', 1))


def residual(p):
    return 1/(p['a']*x) + p['b'] - y


mini = lmfit.Minimizer(residual, pars)
result = mini.minimize()

print(lmfit.fit_report(result.params))

ci = lmfit.conf_interval(mini, result)
lmfit.printfuncs.report_ci(ci)
# <end examples/doc_confidence_basic.py>
예제 #39
0
    plot_q_u_p(qspec, sqspec, uspec, suspec, p, sp, thetp, sthetp, polarnu, polarwave, plotout, qmjd, oldq, oldu, restnu)
    normflux = 10.0**np.fix(np.log10(np.mean(polarflux)))
    normnu = 10.0**np.fix(np.log10(np.mean(polarnu)))

#################### Fit the polarized flux with a power-law####################
##### use the lmfit package to determine alpha##################################
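# (find_plaw / find_plaw_resid are defined elsewhere in this script; a minimal
#  sketch of what they are assumed to do, given the parameters added below:
#      def find_plaw(params, nu):
#          return params['norm'].value * nu**(-params['alpha'].value)
#      def find_plaw_resid(params, nu, flux, err):
#          return (find_plaw(params, nu) - flux) / err
# )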
    plaw_params = Parameters() 
    initialnormal = np.float64(np.median(polarflux)*np.median(polarnu))/normflux
    plaw_params.add('norm', value=initialnormal, min=np.float64(0.0))
    plaw_params.add('alpha', value=np.float64(1.5))
    output = minimize(find_plaw_resid, plaw_params, args=(polarnu, polarflux/normflux, 4.0*spflux/normflux))
    model = find_plaw(plaw_params, polarnu)*normflux
    lmfit.printfuncs.report_fit(output.params)
    name='justplaw'
    plot_polarized_flux_fit(polarflux, spflux, polarnu, qmjd, plotout, restnu, oldq, oldu, nuorig, model, name)
    ci = lmfit.conf_interval(output, maxiter=1000)
#################################################################################    

########### Fit the polarized flux with a modified power-law###################
##########  F = (A * nu + B) * Norm * nu^(-alpha)##############################
########## Start with fitting the polarization with P = (A nu + B)##############
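# (find_line / find_line_resid are likewise defined elsewhere; presumably a
#  straight line P = slope*nu + intercept and its error-weighted residual:
#      def find_line(params, nu):
#          return params['slope'].value * nu + params['intercept'].value
#      def find_line_resid(params, nu, pol, pol_err):
#          return (find_line(params, nu) - pol) / pol_err
# )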
    linepol_params = Parameters()
    linepol_params.add('slope', value = 1/gom(np.median(restnu)))
    linepol_params.add('intercept', value = 1.0)
    linepol_output = minimize(find_line_resid, linepol_params, args=(polarnu, p, sp))
    linepol_model = find_line(linepol_params, polarnu)
    ### Use the Slope and Intercept as fixed parameters for the modified power-law
    modified_plaw_params = Parameters()
    minslope = linepol_params['slope'].value - 2*linepol_params['slope'].stderr
    maxslope = linepol_params['slope'].value + 2*linepol_params['slope'].stderr
    minint = linepol_params['intercept'].value - 2*linepol_params['intercept'].stderr
예제 #40
0
fit_params.add('period', value=2)
fit_params.add('shift', value=0.0)
fit_params.add('decay', value=0.02)

###############################################################################
# Set-up the minimizer and perform the fit using ``leastsq`` algorithm, and
# show the report:
mini = Minimizer(residual, fit_params, fcn_args=(x, ), fcn_kws={'data': data})
out = mini.leastsq()

fit = residual(out.params, x)
report_fit(out)

###############################################################################
# Calculate the confidence intervals for parameters and display the results:
ci, tr = conf_interval(mini, out, trace=True)

report_ci(ci)

names = out.params.keys()
i = 0
gs = plt.GridSpec(4, 4)
sx = {}
sy = {}
for fixed in names:
    j = 0
    for free in names:
        if j in sx and i in sy:
            ax = plt.subplot(gs[i, j], sharex=sx[j], sharey=sy[i])
        elif i in sy:
            ax = plt.subplot(gs[i, j], sharey=sy[i])
예제 #41
0
def process(filename, post, post2_start, fit=True):
    kin = [-600, post[1]]
    fig_width = 14
    fs = 18
    meth = 'em'  # by default use data generated from this fitting method

    def savefig(title, **kwargs):
        plt.savefig("figures/%s %s.png" % (filename, title))

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Load Data
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    methods, windows, step = _get_methods_windows_step(filename)
    bursts = load_bursts_data(filename, windows[1], step)
    bursts_pre = bursts[bursts.tstop < 0].copy()
    bursts_post = bursts[(bursts.tstart > post[0])
                         & (bursts.tstart < post[1])].copy()
    bursts_post2 = bursts[bursts.tstart > post2_start].copy()
    params_all = load_fit_data(filename)
    params, params_pre, params_post, params_post2 = partition_fit_data(
        params_all, kin, post, post2_start)
    p = {window: params[meth, window, step] for window in windows}

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Number of Bursts
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    fig, ax = plt.subplots(figsize=(fig_width, 3))
    ax.plot(bursts.tstart, bursts.num_bursts)
    ax.axvline(0, color='k', ls='--')
    ax.axvline(post[1], color='k', ls='--')
    ax.axvline(post2_start, color='k', ls='--')
    title = 'Number of Bursts - Full measurement'
    ax.set_title(title, fontsize=fs)
    savefig(title)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Burst Duration
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    fig, ax = plt.subplots(figsize=(fig_width, 3))
    ax.plot(bursts.tstart, bursts.burst_width)
    ax.axvline(0, color='k', ls='--')
    ax.axvline(post[1], color='k', ls='--')
    ax.axvline(post2_start, color='k', ls='--')
    title = 'Burst Duration - Full measurement'
    ax.set_title(title, fontsize=fs)
    savefig(title)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Number of Bursts in PRE, POST, POST2 time ranges
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    for nb, label in zip((bursts_pre, bursts_post, bursts_post2),
                         ('PRE', 'POST', 'POST2')):
        slope, intercept, r_value, p_value, std_err = linregress(
            nb.tstart, nb.num_bursts)
        y_model = nb.tstart * slope + intercept
        nb_corr = (nb.num_bursts - y_model) + nb.num_bursts.mean()
        nb['num_bursts_corr'] = nb_corr
        nb['linregress'] = y_model

        nbc = nb.num_bursts_corr
        nbm = nb.num_bursts.mean()
        print(
            "%5s Number of bursts (detrended): %7.1f MEAN, %7.1f VAR, %6.3f VAR/MEAN"
            % (label, nbm, nbc.var(), nbc.var() / nbm))
        fig, ax = plt.subplots(1, 2, figsize=(fig_width, 4))
        ax[0].plot(nb.tstart, nb.num_bursts)
        ax[0].plot(nb.tstart, nb.linregress, 'r')
        ax[1].plot(nb.tstart, nb.num_bursts_corr)
        ax[1].plot(nb.tstart, np.repeat(nbm, nb.shape[0]), 'r')
        title = 'Number of bursts - %s-kinetics' % label
        fig.text(0.35, 0.95, title, fontsize=fs)
        savefig(title)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Full Kinetic Curve (Population Fraction)
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    fig, ax = plt.subplots(figsize=(fig_width, 3))
    ax.plot('kinetics',
            data=params_all[meth, windows[0], step],
            marker='h',
            lw=0,
            color='gray',
            alpha=0.2)
    ax.plot('kinetics',
            data=params_all[meth, windows[1], step],
            marker='h',
            lw=0,
            alpha=0.5)
    ax.axvline(0, color='k', ls='--')
    ax.axvline(post[1], color='k', ls='--')
    ax.axvline(post2_start, color='k', ls='--')
    title = 'Population Fraction - Full measurement'
    ax.set_title(title, fontsize=fs)
    savefig(title)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Kinetic Curve Auto-Correlation
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    w = windows[0]
    d = np.array(p[w].kinetics.loc[post[0]:post[1]])
    delta_t_max = 600  # seconds
    corr, t_corr = autocorrelation(d, t_step=step, delta_t_max=delta_t_max)
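    # ``autocorrelation`` is provided elsewhere in this module; it is assumed
    # to return the (normalized) auto-correlation of ``d`` at lags
    # 0, t_step, 2*t_step, ... up to delta_t_max seconds, together with the
    # corresponding lag times, roughly equivalent to:
    #     lags = np.arange(delta_t_max // t_step + 1)
    #     dc = d - d.mean()
    #     corr = np.array([np.corrcoef(dc[:dc.size - k], dc[k:])[0, 1]
    #                      for k in lags])
    #     t_corr = lags * t_step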

    fig, ax = plt.subplots(figsize=(fig_width, 3))
    ax.plot(t_corr, corr, '-o')
    ax.set_xlabel(r'$\Delta t$ (seconds)')
    title = 'Kinetic Curve Auto-Correlation - window = %d s' % w
    ax.set_title(title, fontsize=fs)
    savefig(title)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Kinetic Curve in Stationary Time Ranges
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    for px, label in zip([params_pre, params_post, params_post2],
                         ('PRE', 'POST', 'POST2')):
        fig, ax = plt.subplots(1, 2, figsize=(fig_width, 4))
        ax[0].plot('kinetics',
                   data=px[meth, windows[0], step],
                   marker='h',
                   lw=0,
                   color='gray',
                   alpha=0.2)
        ax[0].plot('kinetics',
                   data=px[meth, windows[1], step],
                   marker='h',
                   lw=0,
                   alpha=0.5)
        ax[0].plot('kinetics_linregress',
                   data=px[meth, windows[1], step],
                   color='r')
        s1, s2 = slice(None, None,
                       windows[0] // step), slice(None, None,
                                                  windows[1] // step)
        ax[1].plot(px[meth, windows[0], step].index[s1],
                   px[meth, windows[0], step].kinetics[s1],
                   marker='h',
                   lw=0,
                   color='gray',
                   alpha=0.2)
        ax[1].plot(px[meth, windows[1], step].index[s2],
                   px[meth, windows[1], step].kinetics[s2],
                   marker='h',
                   lw=0,
                   alpha=1)
        ax[1].plot('kinetics_linregress',
                   data=px[meth, windows[1], step],
                   color='r')
        print('%5s Kinetics 30s:     %.3f STD, %.3f STD detrended.' %
              (label, (100 * px[meth, windows[1], step].kinetics).std(),
               (100 * px[meth, windows[1], step].kinetics_linregress).std()))
        title = 'Population Fraction - %s-kinetics' % label
        fig.text(0.40, 0.95, title, fontsize=fs)
        savefig(title)

    if not fit:
        return None, params
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Exploratory Fit
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    #method = 'nelder'
    decimation = 5

    t0_vary = False
    model = models.factory_model_exp(t0_vary=t0_vary)
    rest0f, tau = {}, {}
    for window, px in p.items():
        #____ = model.fit(np.array(px.kinetics), t=px.tstart, verbose=False, method=method)
        resx = model.fit(np.array(px.kinetics), t=px.tstart, verbose=False)
        rest0f[window] = resx
        tau[window] = resx.best_values['tau']
    tau0, tau1 = tau[windows[0]], tau[windows[1]]
    print(
        ' FIT  Simple Exp (t0_vary=%s):  tau(w=%ds) = %.1fs  tau(w=%ds) = %.1fs  Delta = %.1f%%'
        % (t0_vary, windows[0], tau0, windows[1], tau1, 100 *
           (tau0 - tau1) / tau0),
        flush=True)

    t0_vary = False
    reswt0f, tauw = {}, {}
    for window, px in p.items():
        modelw1 = models.factory_model_expwin(tau=150,
                                              t_window=window,
                                              decimation=decimation,
                                              t0_vary=t0_vary)
        #____ = modelw1.fit(np.array(px.kinetics), t=px.tstart, verbose=False, method=method)
        resx = modelw1.fit(np.array(px.kinetics), t=px.tstart, verbose=False)
        reswt0f[window] = resx
        tauw[window] = resx.best_values['tau']
    tauw0, tauw1 = tauw[windows[0]], tauw[windows[1]]
    print(
        ' FIT  Window Exp (t0_vary=%s):  tau(w=%ds) = %.1fs  tau(w=%ds) = %.1fs  Delta = %.1f%%'
        % (t0_vary, windows[0], tauw0, windows[1], tauw1, 100 *
           (tauw0 - tauw1) / tauw0),
        flush=True)

    t0_vary = True
    model = models.factory_model_exp(t0_vary=t0_vary)
    res, tau, ci = {}, {}, {}
    for window, px in p.items():
        #____ = model.fit(np.array(px.kinetics), t=px.tstart, verbose=False, method=method)
        resx = model.fit(np.array(px.kinetics), t=px.tstart, verbose=False)
        res[window] = resx
        tau[window] = resx.best_values['tau']
        ci[window] = lmfit.conf_interval(resx, resx)
    tau0, tau1 = tau[windows[0]], tau[windows[1]]
    print(
        ' FIT  Simple Exp (t0_vary=%s):  tau(w=%ds) = %.1fs  tau(w=%ds) = %.1fs  Delta = %.1f%%'
        % (t0_vary, windows[0], tau0, windows[1], tau1, 100 *
           (tau0 - tau1) / tau0),
        flush=True)

    t0_vary = True
    resw, tauw, ciw = {}, {}, {}
    for window, px in p.items():
        modelw1 = models.factory_model_expwin(tau=150,
                                              t_window=window,
                                              decimation=decimation,
                                              t0_vary=t0_vary)
        #____ = modelw1.fit(np.array(px.kinetics), t=px.tstart, verbose=False, method=method)
        resx = modelw1.fit(np.array(px.kinetics), t=px.tstart, verbose=False)
        resw[window] = resx
        tauw[window] = resx.best_values['tau']
        ciw[window] = lmfit.conf_interval(resx, resx)
    tauw0, tauw1 = tauw[windows[0]], tauw[windows[1]]
    print(
        ' FIT  Window Exp (t0_vary=%s):  tau(w=%ds) = %.1fs  tau(w=%ds) = %.1fs  Delta = %.1f%%'
        % (t0_vary, windows[0], tauw0, windows[1], tauw1, 100 *
           (tauw0 - tauw1) / tauw0),
        flush=True)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Kinetic Curve During Transient
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    t = params[meth, windows[0], step].tstart
    fig, ax = plt.subplots(1, 2, figsize=(fig_width, 6))
    ax[0].plot('tstart',
               'kinetics',
               data=params[meth, windows[0], step],
               marker='h',
               lw=0,
               color='gray',
               alpha=0.2)
    ax[0].plot('tstart',
               'kinetics',
               data=params[meth, windows[1], step],
               marker='h',
               lw=0,
               alpha=0.5)
    ax[0].plot(t, models.expwindec_func(t, **resw[windows[1]].best_values),
               'm')
    ax[0].set_xlim(kin[0], kin[1])

    s1, s2 = slice(None, None,
                   windows[0] // step), slice(None, None, windows[1] // step)
    ax[1].plot(params[meth, windows[0], step].index[s1],
               params[meth, windows[0], step].kinetics[s1],
               marker='h',
               lw=0,
               color='gray',
               alpha=0.2)
    ax[1].plot(params[meth, windows[1], step].index[s2],
               params[meth, windows[1], step].kinetics[s2],
               marker='h',
               lw=0,
               alpha=1)
    ax[1].plot(t, models.expwindec_func(t, **resw[windows[1]].best_values),
               'm')
    title = 'Population Fraction - kinetics'
    fig.text(0.40, 0.95, title, fontsize=fs)
    savefig(title)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Plot Fitted Kinetic Curves
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    fitcycler = cycler('color', ('k', 'm', red))
    datacycler = cycler('color', ('grey', blue, green)) + cycler(
        'alpha', (0.2, 0.5, 0.5))
    fig, ax = plt.subplots(2, 1, figsize=(fig_width, 8), sharex=True)

    tau_str = r'$\tau_{%ds} = %.1f s (%.1f, %.1f)$'
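    # conf_interval() returns, per parameter, a list of (confidence level,
    # value) pairs ordered from the lowest to the highest bound; with the
    # default sigmas [1, 2, 3] that is 7 entries (3, 2, 1 sigma lower bounds,
    # the best value, then the 1, 2, 3 sigma upper bounds), so indices 2 and 4
    # below pick out the -1 sigma and +1 sigma bounds on tau.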
    for i, ((w, px), fitsty,
            datsty) in enumerate(zip(sorted(p.items()), fitcycler,
                                     datacycler)):
        if i == 2: break
        ax[0].plot('tstart', 'kinetics', 'o', data=px, label='', **datsty)
        label = tau_str % (w, tau[w], ci[w]['tau'][2][1], ci[w]['tau'][4][1])
        ax[0].plot(px.tstart,
                   models.exp_func(px.tstart, **res[w].best_values),
                   label=label,
                   **fitsty)
    ax[0].legend(loc='lower right', fontsize=fs)
    w = windows[1]
    ax[1].plot(p[w].tstart,
               p[w].kinetics -
               models.exp_func(p[w].tstart, **res[w].best_values),
               'o',
               color=purple)
    ax[1].set_title(r'Residuals - $\chi_\mathrm{red}^2 = %.4f \, 10^{-3}$' %
                    (res[w].redchi * 1e3),
                    fontsize=fs)
    ax[0].set_xlim(kin[0], kin[1])
    title = 'Kinetics Fit - Simple Exponential (t0_vary=%s)' % t0_vary
    ax[0].set_title(title, fontsize=fs)
    savefig(title)

    fig, ax = plt.subplots(2, 1, figsize=(fig_width, 8), sharex=True)
    for i, ((w, px), fitsty,
            datsty) in enumerate(zip(sorted(p.items()), fitcycler,
                                     datacycler)):
        if i == 2: break
        ax[0].plot('tstart', 'kinetics', 'o', data=px, label='', **datsty)
        label = tau_str % (w, tauw[w], ciw[w]['tau'][2][1],
                           ciw[w]['tau'][4][1])
        ax[0].plot(px.tstart,
                   models.expwindec_func(px.tstart, **resw[w].best_values),
                   label=label,
                   **fitsty)
    ax[0].legend(loc='lower right', fontsize=fs)
    w = windows[1]
    ax[1].plot(p[w].tstart,
               p[w].kinetics -
               models.expwindec_func(p[w].tstart, **resw[w].best_values),
               'o',
               color=purple)
    ax[1].set_title(r'Residuals - $\chi_\mathrm{red}^2 = %.4f \, 10^{-3}$' %
                    (resw[w].redchi * 1e3),
                    fontsize=fs)
    ax[0].set_xlim(kin[0], kin[1])
    title = 'Kinetics Fit - Integrated Exponential (t0_vary=%s)' % t0_vary
    ax[0].set_title(title, fontsize=fs)
    savefig(title)
    return (res, resw, rest0f, reswt0f, ci, ciw), params
예제 #42
0
# create Minimizer
mini = lmfit.Minimizer(residual, p, nan_policy='omit')

# first solve with Nelder-Mead
out1 = mini.minimize(method='Nelder')

# then solve with Levenberg-Marquardt using the
# Nelder-Mead solution as a starting point
out2 = mini.minimize(method='leastsq', params=out1.params)

lmfit.report_fit(out2.params, min_correl=0.5)

ci, trace = lmfit.conf_interval(mini,
                                out2,
                                sigmas=[1, 2],
                                trace=True,
                                verbose=False)
lmfit.printfuncs.report_ci(ci)

plot_type = 2

if plot_type == 0:
    plt.plot(x, y)
    plt.plot(x, residual(out2.params) + y)

elif plot_type == 1:
    cx, cy, grid = lmfit.conf_interval2d(mini, out2, 'a2', 't2', 30, 30)
    plt.contourf(cx, cy, grid, np.linspace(0, 1, 11))
    plt.xlabel('a2')
    plt.colorbar()
예제 #43
0
def residual(p):
    return (p['a1'] * np.exp(-x / p['t1']) + p['a2'] * np.exp(
        -(x - 0.1) / p['t2']) - y)


# create Minimizer
mini = lmfit.Minimizer(residual, p, nan_policy='propagate')

# first solve with Nelder-Mead algorithm
out1 = mini.minimize(method='Nelder')

# then solve with Levenberg-Marquardt using the
# Nelder-Mead solution as a starting point
out2 = mini.minimize(method='leastsq', params=out1.params)

lmfit.report_fit(out2.params, min_correl=0.5)

ci, trace = lmfit.conf_interval(mini, out2, sigmas=[1, 2], trace=True)
lmfit.printfuncs.report_ci(ci)

# plot data and best fit
plt.figure()
plt.plot(x, y, 'b')
plt.plot(x, residual(out2.params) + y, 'r-')

# plot confidence intervals (a1 vs t2 and a2 vs t2)
fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8))
cx, cy, grid = lmfit.conf_interval2d(mini, out2, 'a1', 't2', 30, 30)
ctp = axes[0].contourf(cx, cy, grid, np.linspace(0, 1, 11))
fig.colorbar(ctp, ax=axes[0])
axes[0].set_xlabel('a1')
axes[0].set_ylabel('t2')
예제 #44
0
def residual(p):
    return p['a1']*np.exp(-x/p['t1']) + p['a2']*np.exp(-(x-0.1)/p['t2'])-y

# create Minimizer
mini = lmfit.Minimizer(residual, p)

# first solve with Nelder-Mead
out1 = mini.minimize(method='Nelder')

# then solve with Levenberg-Marquardt using the
# Nelder-Mead solution as a starting point
out2 = mini.minimize(method='leastsq', params=out1.params)

lmfit.report_fit(out2.params, min_correl=0.5)

ci, trace = lmfit.conf_interval(mini, out2, sigmas=[0.68,0.95],
                                trace=True, verbose=False)
lmfit.printfuncs.report_ci(ci)

plot_type = 2
if plot_type == 0:
    plt.plot(x, y)
    plt.plot(x, residual(out2.params)+y )

elif plot_type == 1:
    cx, cy, grid = lmfit.conf_interval2d(mini, out2, 'a2','t2',30,30)
    plt.contourf(cx, cy, grid, np.linspace(0,1,11))
    plt.xlabel('a2')
    plt.colorbar()
    plt.ylabel('t2')

elif plot_type == 2:
예제 #45
0
import lmfit
import numpy as np

x = np.linspace(0.3, 10, 100)
np.random.seed(0)

y = 1 / (0.1 * x) + 2 + 0.1 * np.random.randn(x.size)

p = lmfit.Parameters()
p.add_many(('a', 0.1), ('b', 1))


def residual(p):
    return 1 / (p['a'] * x) + p['b'] - y


minimizer = lmfit.Minimizer(residual, p)
out = minimizer.leastsq()
lmfit.printfuncs.report_fit(out.params)

ci = lmfit.conf_interval(minimizer, out)
lmfit.printfuncs.report_ci(ci)
예제 #46
0
np.save('2014_04_13_freq.npy',y)
## assume quantum projection noise
yerr = np.arcsin(1/np.sqrt(4*100)/0.35)/(2*np.pi*ramsey_time)

params = lmfit.Parameters()

params.add('A', value = 0, vary = True) ##cos ### -3 cXZ sin 2 chi: 7 +- 23 mHz
params.add('B', value = 0, vary = True) ##sin ### -3 cYZ sin 2 chi: 32 +- 56 mHz
params.add('C', value = 0, vary = True) ##cos2 ### -1.5 (cXX-cYY) sin^2 chi: 15 +- 22 mHz
params.add('D', value = 0, vary = True) ##sin2 ### -3 cXY sin^2 chi: 8 +- 20 mHz
params.add('offset', value = 0.069818)
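# NOTE: cosine_fit / cosine_model are defined elsewhere; judging from the
# parameter comments above, the model is presumably of the form
#     offset + A*cos(th) + B*sin(th) + C*cos(2*th) + D*sin(2*th)
# with th a phase derived from x, and cosine_fit returning the error-weighted
# residual (cosine_model(params, x) - y) / yerr.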

result = lmfit.minimize(cosine_fit, params, args = (x, y, yerr))

fit_values  = y + result.residual

lmfit.report_errors(params)

print "Reduced chi-squared = ", result.redchi

ci = lmfit.conf_interval(result)

lmfit.report_ci(ci)

x_plot = np.linspace(x.min(),x.max()+100000,1000)

figure = pyplot.figure(1)
figure.clf()
pyplot.plot(x,y,'o')
pyplot.plot(x_plot,cosine_model(params,x_plot),linewidth = 3.0)
pyplot.show()
예제 #47
0
def fit_plots(x, y, minimizer, minimizer_result, residual,
              model, title='',
              contour_level=0.6827, cmap=plt.cm.coolwarm,
              xlabel='x', ylabel='y', xlim=None, ylim=None):

    params = minimizer_result.params

    ci, trace = lmfit.conf_interval(minimizer, minimizer_result, trace=True)
    param_names = list(params.keys())

    figs = []

    ##################################
    # Figure 1: Fit Parameters as text
    ##################################

    fig = plt.figure()

    s = '%s\n\n' % model
    if title:
        s += title + '\n'
    for ndx, k in enumerate(param_names):
        s += '%s: %.3f ± %.3f' % (k, minimizer_result.params[k].value, minimizer_result.params[k].stderr)
        s += '\n'

    plt.text(0.5, 0.5, s, fontsize=12, ha='center', va='center')
    plt.axis('off')

    figs.append(fig)

    #############################
    # Figure 2: Fit and residuals
    #############################

    fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, gridspec_kw={'height_ratios': [3, 1]})

    ax1.plot(x, y, '.')
    xs = np.linspace(x[0], x[-1])
    ax1.plot(xs, residual(minimizer_result.params, xs), '-')
    ax1.set_ylabel(ylabel)
    if ylim:
        ax1.set_ylim(ylim)

    r = residual(minimizer_result.params, x, data=y)
    ax2.plot(x, r)
    ax2.axhline(y=0, color='k', linestyle=':')
    mx = np.max(np.abs(r))
    ax2.set_ylim([-mx, mx])
    ax2.set_ylabel('R')
    ax2.set_xlabel(xlabel)

    if xlim:
        ax2.set_xlim(xlim)

    figs.append(fig)

    #############################
    # Figure 3: Probability plots
    #############################

    contours = {}

    fig, axs = plt.subplots(len(param_names), len(param_names))

    for ndx1 in range(len(param_names)):
        for ndx2 in range(len(param_names)):
            ax = axs[ndx2][ndx1]

            if ndx1 > ndx2:
                ax.set_axis_off()
                continue

            if ndx1 == ndx2:
                x = trace[param_names[ndx1]][param_names[ndx1]]
                y = trace[param_names[ndx1]]['prob']

                t, s = np.unique(x, True)
                f = interp1d(t, y[s], 'slinear')
                xn = np.linspace(x.min(), x.max(), 50)
                ax.plot(xn, f(xn), 'g', lw=1)

                contours[ndx1] = (x, y)

            else:
                x, y, m = lmfit.conf_interval2d(minimizer, minimizer_result, param_names[ndx1], param_names[ndx2], 20, 20)
                ax.contourf(x, y, m, np.linspace(0, 1, 10), cmap=cmap)

                ch = QuadContourGenerator.from_rectilinear(x, y, m, numpy_formatter)

                contours[(ndx1, ndx2)] = ch.contour(contour_level)

            if ndx1 == 0:
                if ndx2 > 0:
                    ax.set_ylabel(param_names[ndx2])
                else:
                    ax.set_ylabel('prob')
            else:
                ax.set_yticks([])

            if ndx2 == len(param_names) - 1:
                ax.set_xlabel(param_names[ndx1])
            else:
                ax.set_xticks([])

    figs.append(fig)

    return figs, contours
예제 #48
0
    def conf_int(self):
        """Return confidence intervals."""
        result = lmfit.conf_interval(self.lm_model, self._result)
        return result
예제 #49
0
def fit_beta_model_joint(r, sb_src, sb_src_err, instruments, theta, energy, results_pickle=None):
    """
    Fit a beta x psf model to any combination of instruments via joint
    likelihood

    Arguments:
    """
    # settings
    APPLY_PSF = True
    DO_ZERO_PAD = True
    DO_FIT = True
    # FIT_METHOD = 'simplex'
    FIT_METHOD = 'leastsq'     # 'leastsq' - Levenberg-Marquardt,
                                 # 'simplex' - simplex
    CALC_1D_CI = False           # in most cases the standard error is good
                                # enough, so this is not needed
    CALC_2D_CI = False
    PLOT_PROFILE = True
    PRINT_FIT_DIAGNOSTICS = True

    ######################################################################
    # modelling is done in 2D and then projected - setup here the 2D
    # parameters

    # FIXME:
    # 2013-07-11 - lot of vars are deprecated, but it works ok, just
    # redundant

    size = 2.0 * r.max()
    xsize = size
    ysize = xsize
    xcen = xsize/2
    ycen = ysize/2
    # imsize = input_im.shape         # FIXME: is this necessary? I could just use it inside the model
    imsize = (size, size)         # FIXME: is this necessary? I could just use it inside the model

    xsize_obj = xsize # 100             # if running t1.fits set to 100 else xsize
    ysize_obj = xsize_obj
    xcen_obj = xsize_obj / 2
    ycen_obj = ysize_obj / 2
    r_aper = xsize_obj  / 2        # aperture for the fitting

    # pre-calculate distmatrix for speedup - it is same for all
    # instruments
    distmatrix = distance_matrix(zeros((imsize[0]-2, imsize[1]-2)), xcen_obj, ycen_obj).astype(int) # need int for bincount
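    # distance_matrix() is assumed to return, for an array of the given shape,
    # the radial distance of each pixel from (xcen_obj, ycen_obj); cast to int
    # so the radii can serve as bin indices (e.g. for np.bincount).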

    # r contains the start of the innermost bin for integration, but not needed for plotting
    rplt = r[1:]

    ######################################################################
    # scale the data - not really necessary? do val-min/(max-min)

    # scale_sb_src = {}
    # scale_sb_src_err = {}
    ndata = 0
    psf_dict = {}

    for instrument in instruments:
        # scale_sb_src[instrument] = median(sb_src[instrument])
        # sb_src[instrument] = sb_src[instrument] / scale_sb_src[instrument]
        # sb_src_err[instrument] = sb_src_err[instrument] / scale_sb_src[instrument]
        ndata += len(sb_src[instrument])

        # calculate the PSF
        # could be changed to the newer function make_2d_king but need small changes
        # to the centering/size - not worth at the moment
        psf_dict[instrument] = make_2d_king_old(imsize, xcen, ycen, instrument, theta[instrument], energy)


    ######################################################################
    # init beta model

    pars = lm.Parameters()
    pars.add('rcore', value=5.0, vary=True, min=0.05, max=80.0) # [rcore]
    pars.add('beta', value=0.67, vary=True, min=0.1, max=2.0)
    pars.add('xcen', value=xcen_obj, vary=False)   # fitting not fully supported yet [imcoord]
    pars.add('ycen', value=ycen_obj, vary=False)   # fitting not fully supported yet [imcoord]

    for instrument in instruments:
        pars.add('norm_'+instrument, value=mean(sb_src[instrument]),
                 vary=True, min=min(sb_src[instrument]),
                 max=sum(abs(sb_src[instrument])))


    nonfit_args = (imsize, xsize_obj, ysize_obj, distmatrix, instruments,
                   psf_dict, APPLY_PSF, DO_ZERO_PAD, r, sb_src,
                   sb_src_err)

    # fit stop criteria
    if FIT_METHOD == 'leastsq':
        # leastsq_kws={'xtol': 1.0e-7, 'ftol': 1.0e-7, 'maxfev': 1.0e+0} # debug set; quickest
        # leastsq_kws={'xtol': 1.0e-7, 'ftol': 1.0e-7, 'maxfev': 1.0e+4} # debug set; some evol
        leastsq_kws={'xtol': 1.0e-7, 'ftol': 1.0e-7, 'maxfev': int(1.0e+7)}
        # leastsq_kws={'xtol': 1.0e-8, 'ftol': 1.0e-8, 'maxfev': 1.0e+9}

    if FIT_METHOD == 'simplex':
        # leastsq_kws={'xtol': 1.0e-7, 'ftol': 1.0e-7, 'maxfun': 1.0e+0} # debug set; quickest
        # leastsq_kws={'xtol': 1.0e-7, 'ftol': 1.0e-7, 'maxfun': 1.0e+4} # debug set; some evol
        leastsq_kws={'xtol': 1.0e-7, 'ftol': 1.0e-7, 'maxfun': int(1.0e+7)}
        # leastsq_kws={'xtol': 1.0e-8, 'ftol': 1.0e-8, 'maxfun': 1.0e+9}

    ######################################################################
    # do the fit: beta

    if DO_FIT:
        print "starting beta fit"
        t1 = time.clock()

        result = lm.minimize(beta_psf_2d_lmfit_profile_joint,
                             pars,
                             args=nonfit_args,
                             method=FIT_METHOD,
                             **leastsq_kws)

        t2 = time.clock()

        print
        print
        print "fitting took: ", t2-t1, " s"
        print
        print

        ######################################################################
        # scale the data back

        # for instrument in instruments:
        #     sb_src[instrument] = sb_src[instrument] * scale_sb_src[instrument]
        #     sb_src_err[instrument] = sb_src_err[instrument] * scale_sb_src[instrument]

        #     # scale also the fitted norm
        #     pars['norm_'+instrument].value = pars['norm_'+instrument].value * scale_sb_src[instrument]
        #     pars['norm_'+instrument].stderr = pars['norm_'+instrument].stderr * scale_sb_src[instrument]
        #     pars['norm_'+instrument].max = pars['norm_'+instrument].max * scale_sb_src[instrument]
        #     pars['norm_'+instrument].min = pars['norm_'+instrument].min * scale_sb_src[instrument]

        ######################################################################
        # get the output model

        (r_model, profile_norm_model) = \
            beta_psf_2d_lmfit_profile_joint(pars, imsize,
                                            xsize_obj, ysize_obj,
                                            distmatrix,
                                            instruments,
                                            psf_dict,
                                            APPLY_PSF, DO_ZERO_PAD)

        ######################################################################
        # save structures

        if results_pickle:
            outstrct = lmfit_result_to_dict(result, pars)

            with open(results_pickle, 'wb') as output:
                pickle.dump(outstrct, output, pickle.HIGHEST_PROTOCOL)

                print "results written to:: ", results_pickle

        ######################################################################
        # output

        if PRINT_FIT_DIAGNOSTICS:
            print_fit_diagnostics(result, t2-t1, ndata, leastsq_kws)

        # print_result_tab(pars_true, pars)
        lm.printfuncs.report_errors(result.params)

        with open(results_pickle+'.txt', 'w') as f:
            sys.stdout = f

            if PRINT_FIT_DIAGNOSTICS:
                print_fit_diagnostics(result, t2-t1, ndata, leastsq_kws)

            print
            print
            lm.printfuncs.report_errors(result.params)
            print
            print

            sys.stdout = sys.__stdout__

        print
        print "fitting subroutine done!"

    ######################################################################
    # plot beta fit and detcprofiles

    if DO_FIT and PLOT_PROFILE:
        for instrument in instruments:
            output_figure = results_pickle+'.'+instrument+'.beta_psf.png'

            print "result plot :: ", output_figure

            # FIXME: implement plotter for joint fits
            plot_data_model_resid(rplt, sb_src[instrument],
                              r_model, profile_norm_model[instrument],
                              output_figure, sb_src_err[instrument])

    ######################################################################
    # FIXME: not yet ported, confidence intervals

    if DO_FIT and CALC_1D_CI:
        print "Calculating 1D confidence intervals"
        # sigmas = [0.682689492137, 0.954499736104, 0.997300203937]
        # sigmas = [0.682689492137, 0.954499736104]
        # sigmas = [0.997300203937]
        sigmas = [0.954499736104]
        # sigmas = [0.682689492137]
        # ci_pars = ['rc', 'beta']
        # ci_pars = ['rc']
        # ci_pars = ['norm_pn', 'rc']
        ci_pars = ['norm_'+instruments[0]]

        t1 = time.clock()
        ci, trace = lm.conf_interval(result, p_names=ci_pars, sigmas=sigmas,
                                     trace=True, verbose=True, maxiter=1e3)

        t2 = time.clock()

        # save to file
        with open(results_pickle+'.ci', 'wb') as output:
            pickle.dump(ci, output, pickle.HIGHEST_PROTOCOL)

        print
        print "Confidence interval calculation took : ", t2 - t1

        lm.printfuncs.report_ci(ci)

    return 0
예제 #50
0
fit_params.add('shift', value=0.0)
fit_params.add('decay', value=0.02)

mini = Minimizer(residual, fit_params, fcn_args=(x,),
                 fcn_kws={'data':data})
out = mini.leastsq()

fit = residual(out.params, x)

print( ' N fev = ', out.nfev)
print( out.chisqr, out.redchi, out.nfree)

report_fit(out)
#ci=calc_ci(out)

ci, tr = conf_interval(mini, out, trace=True)
report_ci(ci)

if HASPYLAB:
    names=out.params.keys()
    i=0
    gs=pylab.GridSpec(4,4)
    sx={}
    sy={}
    for fixed in names:
        j=0
        for free in names:
            if j in sx and i in sy:
                ax=pylab.subplot(gs[i,j],sharex=sx[j],sharey=sy[i])
            elif i in sy:
                ax=pylab.subplot(gs[i,j],sharey=sy[i])
예제 #51
0
def fit_v06_model(r, sb_src, sb_src_err, instrument, theta, energy, results_pickle=None):
    """
    Fit of v06 model with psf convolution
    """
    # settings
    APPLY_PSF = True
    DO_ZERO_PAD = True
    DO_FIT = True
    FIT_METHOD = 'simplex'
    # FIT_METHOD = 'leastsq'     # 'leastsq' - Levenberg-Marquardt,
                                 # 'simplex' - simplex
    CALC_1D_CI = True           # in most cases the standard error is good
                                # enough, so this is not needed
    CALC_2D_CI = False
    PLOT_PROFILE = True
    PRINT_FIT_DIAGNOSTICS = True

    ######################################################################
    # modelling is done in 2D and then projected - setup here the 2D
    # parameters

    size = 2.0 * r.max()
    xsize = size
    ysize = xsize
    xcen = xsize/2
    ycen = ysize/2
    # imsize = input_im.shape     # FIXME: is this necessary? I could just use it inside the model
    imsize = (size, size)         # FIXME: is this necessary? I could just use it inside the model

    xsize_obj = xsize # 100             # if running t1.fits set to 100 else xsize
    ysize_obj = xsize_obj
    xcen_obj = xsize_obj / 2
    ycen_obj = ysize_obj / 2
    r_aper = xsize_obj  / 2        # aperture for the fitting

    ######################################################################
    # init model

    n0 = 1.0
    rc = 20.0
    beta = 4.0/3.0
    rs = 20.0
    alpha = 1.5
    gamma = 3.0
    epsilon = 1.5

    # rmax = 2*r500_pix
    r500_pix = r.max()
    ndata = len(sb_src)

    # v06 pars lmfit structure
    pars = lm.Parameters()
    pars.add('n0_'+instrument, value=n0, vary=True, min=1.0e-9, max=1.0e3)
    pars.add('rc'      , value=rc, vary=True, min=0.05, max=r.max())
    pars.add('beta'    , value=beta, vary=True, min=0.05, max=2.0)
    pars.add('rs'      , value=rs, vary=True, min=0.05, max=2*r.max())
    pars.add('alpha'   , value=alpha, vary=True, min=0.01, max=3.0)
    pars.add('epsilon' , value=epsilon, vary=True, min=0.0, max=5.0)
    pars.add('gamma'   , value=gamma, vary=False)

    # set the ancillary parameters
    # +1 bc of the central divergence
    data = empty(imsize)
    distmatrix_input = distance_matrix(data, xcen_obj, ycen_obj).astype('int') + 1
    bgrid = unique(distmatrix_input.flat)

    ######################################################################
    # do the fit: v06

    nonfit_args = (distmatrix_input, bgrid, r500_pix, instrument, theta, energy,
                   xcen_obj, ycen_obj, r, sb_src, sb_src_err)

    # fit stop criteria
    if FIT_METHOD == 'leastsq':
        leastsq_kws={'xtol': 1.0e-7, 'ftol': 1.0e-7, 'maxfev': 1.0e+0} # debug set; quickest
        # leastsq_kws={'xtol': 1.0e-7, 'ftol': 1.0e-7, 'maxfev': 1.0e+4} # debug set; some evol
        # leastsq_kws={'xtol': 1.0e-7, 'ftol': 1.0e-7, 'maxfev': 1.0e+7}
        # leastsq_kws={'xtol': 1.0e-8, 'ftol': 1.0e-8, 'maxfev': 1.0e+9}

    if FIT_METHOD == 'simplex':
        # leastsq_kws={'xtol': 1.0e-7, 'ftol': 1.0e-7, 'maxfun': 1.0e+0} # debug set; quickest
        # leastsq_kws={'xtol': 1.0e-7, 'ftol': 1.0e-7, 'maxfun': 1.0e+4} # debug set; some evol
        leastsq_kws={'xtol': 1.0e-7, 'ftol': 1.0e-7, 'maxfun': 1.0e+7}
        # leastsq_kws={'xtol': 1.0e-8, 'ftol': 1.0e-8, 'maxfun': 1.0e+9}

    ######################################################################
    # do the actual fitting

    if DO_FIT:
        print "starting v06 fit with method :: ", FIT_METHOD
        t1 = time.clock()

        result = lm.minimize(v06_psf_2d_lmfit_profile,
                             pars,
                             args=nonfit_args,
                             method=FIT_METHOD,
                             **leastsq_kws)

        t2 = time.clock()
        print "fitting took: ", t2-t1, " s"

        # get the output model
        (r_model, profile_norm_model) = v06_psf_2d_lmfit_profile(pars,
                                                                 distmatrix_input,
                                                                 bgrid,
                                                                 r500_pix,
                                                                 instrument, theta, energy,
                                                                 xcen_obj,
                                                                 ycen_obj)

        ######################################################################
        # save structures

        if results_pickle:
            outstrct = lmfit_result_to_dict(result, pars)

            with open(results_pickle, 'wb') as output:
                pickle.dump(outstrct, output, pickle.HIGHEST_PROTOCOL)

                print "results written to:: ", results_pickle

        ######################################################################
        # output

        if PRINT_FIT_DIAGNOSTICS:
            print_fit_diagnostics(result, t2-t1, ndata, leastsq_kws)

        # print_result_tab(pars_true, pars)
        lm.printfuncs.report_errors(result.params)

        with open(results_pickle+'.txt', 'w') as f:
            sys.stdout = f

            if PRINT_FIT_DIAGNOSTICS:
                print_fit_diagnostics(result, t2-t1, ndata, leastsq_kws)

            print
            print
            lm.printfuncs.report_errors(result.params)
            print
            print

            sys.stdout = sys.__stdout__

        print
        print "fitting subroutine done!"

    ######################################################################
    # plot v06 fit and data profiles

    if DO_FIT and PLOT_PROFILE:
        output_figure = results_pickle+'.'+instrument+'.v06_psf.png'

        print "result plot :: ", output_figure

        # FIXME: implement plotter for joint fits
        plot_data_model_resid(r, sb_src,
                              r_model, profile_norm_model,
                              output_figure, sb_src_err)

    ######################################################################
    # FIXME: not yet ported, confidence intervals

    if DO_FIT and CALC_1D_CI:
        print "Calculating 1D confidence intervals"
        # sigmas = [0.682689492137, 0.954499736104, 0.997300203937]
        sigmas = [0.682689492137, 0.954499736104]
        # just an example
        ci_pars = ['rc', 'beta']

        ci, trace = lm.conf_interval(result, p_names=ci_pars, sigmas=sigmas,
                              trace=True, verbose=True, maxiter=1e3)

        lm.printfuncs.report_ci(ci)

    # FIXME: this seems to fail for beta and rc (i think problem is
    # due to parameter degeneracy no code issue)
    if DO_FIT and  CALC_2D_CI:
        output_figure = results_pickle+'.2d_like_v06_psf.png'
        from timer import Timer

        with Timer() as t:
            print "Calculating 2D confidence intervals"
            x, y, likelihood = lm.conf_interval2d(result,'rc','beta', 10, 10)
            plt_like_surface(x, y, likelihood, output_figure, 'rc', 'beta')

        print "elasped time:", t.secs, " s"


    # import IPython
    # IPython.embed()

    return 0
예제 #52
0
                                      x=T_min_plus1,
                                      weights=weight_mp1)  #±U Ts luminosity
                resultn4 = model1.fit(lum_min_plus2 / 1e50,
                                      par3,
                                      x=T_min_plus2,
                                      weights=weight_mp2)  #±30 percent lum

                norma3, normN3 = resultn3.best_values[
                    'pow_exponent'], resultn3.best_values[
                        'pow_amplitude']  #±U Ts luminosity params
                norma4, normN4 = resultn4.best_values[
                    'pow_exponent'], resultn4.best_values[
                        'pow_amplitude']  #±30 percent lum params

                #finding the errors on the best fit
                ci1 = lmfit.conf_interval(result1, result1, sigmas=[0.68])
                ci2 = lmfit.conf_interval(result2, result2, sigmas=[0.68])
                ci3 = lmfit.conf_interval(result3, result3, sigmas=[0.68])
                ci4 = lmfit.conf_interval(result4, result4, sigmas=[0.68])

                normci5 = lmfit.conf_interval(resultn5,
                                              resultn5,
                                              sigmas=[0.68])
                normci3 = lmfit.conf_interval(resultn3,
                                              resultn3,
                                              sigmas=[0.68])
                normci4 = lmfit.conf_interval(resultn4,
                                              resultn4,
                                              sigmas=[0.68])
                print(normci4)
예제 #53
0
def run_PSF_analysis(sel_PNe,
                     PNe_spectra,
                     obj_err,
                     wavelength,
                     x_fit,
                     y_fit,
                     z,
                     n_pixels=9.,
                     run_CI=False):
    """Fits multiple PNe simultaneously to evaluate the Point Spread Function (PSF) and Line Spread Function,
    of the galaxy (pointing dependant).

    Parameters
    ----------
    sel_PNe : list
        list of selected PNe, by index, for simultaneous PSF fitting.
    PNe_spectra : list / array
        residual data containing the emission lines of PNe. A list of minicubes, one for each PNe to be fitted for the PSF.
    obj_err : list / array
        objective function error, made during the spaxel-by-spaxel fitting stage.
    wavelength : array
        Wavelength array
    x_fit : list /array
        x array of matrix sized n_pixel x n_pixel.
    y_fit : list / array
        y array of matrix sized n_pixel x n_pixel.
    z : float
        Redshift value
    n_pixels : int, optional
        pixel width of PNe minicubes, by default 9.
    run_CI : bool, optional
        if True, also calculate confidence intervals for FWHM, beta and LSF, by default False.

    Returns
    -------
    PSF_results
        LMfit minimisation results, dictionary object.
    PSF_ci
        PSF confidence intervals, from LMfit.
    """

    print("\n")
    print("################################################################")
    print("########################## Fitting PSF #########################")
    print("################################################################")
    print("\n")
    print(f"Using PNe: {sel_PNe}")

    selected_PNe = PNe_spectra[sel_PNe]
    selected_PNe_err = obj_err[sel_PNe]

    # Create parameters for each PNe: one set of entries per PN
    PSF_params = generate_PSF_params(sel_PNe,
                                     amp=750.0,
                                     mean=5006.77 * (1 + z))

    # Add in the PSF and LSF paramters
    PSF_params.add('FWHM', value=4.0, min=0.01, vary=True)
    PSF_params.add("beta", value=2.5, min=1.00, vary=True)
    PSF_params.add("LSF", value=3.0, min=0.01, vary=True)
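    # FWHM and beta parameterise the PSF (presumably a Moffat profile, beta
    # being the usual Moffat shape parameter); LSF is the line-spread width
    # along the wavelength axis.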

    # minimisation of the PSF functions
    PSF_min = lmfit.Minimizer(PSF_residuals_3D,
                              PSF_params,
                              fcn_args=(wavelength, x_fit, y_fit, selected_PNe,
                                        selected_PNe_err, z),
                              nan_policy="propagate")
    PSF_results = PSF_min.minimize()
    # use LMfit's confidence interval functionality to map out the errors between the 3 parameters.
    print("Calculating Confidence Intervals for FWHM, beta and LSF")
    if run_CI:
        PSF_ci = lmfit.conf_interval(PSF_min,
                                     PSF_results,
                                     p_names=["FWHM", "beta", "LSF"],
                                     sigmas=[1, 2])
    else:
        PSF_ci = []

    print(
        "FWHM: ", round(PSF_results.params["FWHM"].value, 4), "+/-",
        round(PSF_results.params["FWHM"].stderr, 4), "(",
        round(
            PSF_results.params["FWHM"].stderr /
            PSF_results.params["FWHM"].value, 4) * 100, "%)")
    print(
        "Beta: ", round(PSF_results.params["beta"].value, 4), "+/-",
        round(PSF_results.params["beta"].stderr, 4), "(",
        round(
            PSF_results.params["beta"].stderr /
            PSF_results.params["beta"].value, 4) * 100, "%)")
    print(
        "LSF: ", round(PSF_results.params["LSF"].value, 4), "+/-",
        round(PSF_results.params["LSF"].stderr, 4), "(",
        round(
            PSF_results.params["LSF"].stderr / PSF_results.params["LSF"].value,
            4) * 100, "%)")

    return PSF_results, PSF_ci
예제 #54
0
## assume quantum projection noise
yerr = np.arcsin(1 / np.sqrt(4 * 100) / 0.35) / (2 * np.pi * ramsey_time)

params = lmfit.Parameters()

params.add('A', value=0, vary=True)  ##cos ### -3 cXZ sin 2 chi: 7 +- 23 mHz
params.add('B', value=0, vary=True)  ##sin ### -3 cYZ sin 2 chi: 32 +- 56 mHz
params.add('C', value=0,
           vary=True)  ##cos2 ### -1.5 (cXX-cYY) sin^2 chi: 15 +- 22 mHz
params.add('D', value=0, vary=True)  ##sin2 ### -3 cXY sin^2 chi: 8 +- 20 mHz
params.add('offset', value=0.069818)

result = lmfit.minimize(cosine_fit, params, args=(x, y, yerr))

fit_values = y + result.residual

lmfit.report_errors(params)

print "Reduced chi-squared = ", result.redchi

ci = lmfit.conf_interval(result)

lmfit.report_ci(ci)

x_plot = np.linspace(x.min(), x.max() + 100000, 1000)

figure = pyplot.figure(1)
figure.clf()
pyplot.plot(x, y, 'o')
pyplot.plot(x_plot, cosine_model(params, x_plot), linewidth=3.0)
pyplot.show()
예제 #55
0
import lmfit
import numpy as np

x = np.linspace(0.3, 10, 100)
np.random.seed(0)

y = 1 / (0.1 * x) + 2 + 0.1 * np.random.randn(x.size)

p = lmfit.Parameters()
p.add_many(('a', 0.1), ('b', 1))


def residual(p):
    a = p['a'].value
    b = p['b'].value

    return 1 / (a * x) + b - y


mi = lmfit.minimize(residual, p)
lmfit.printfuncs.report_fit(mi.params)

ci = lmfit.conf_interval(mi)
lmfit.printfuncs.report_ci(ci)
예제 #56
0
def confidence_intervals(minout, sigmas=(1, 2, 3), **kws):
    """explicitly calculate the confidence intervals from a fit
    for supplied sigma values"""
    return conf_interval(minout, sigmas=sigmas, **kws)
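
# A minimal usage sketch (assuming ``minout`` is the fit output object that
# this wrapper's conf_interval call accepts, e.g. from a prior leastsq fit):
#     ci = confidence_intervals(minout, sigmas=(1, 2))
#     report_ci(ci)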