Code example #1
    def fitModel(self, params: lmfit.Parameters = None, max_nfev: int = 100):
        """
        Fits the model by adjusting values of parameters based on
        differences between simulated and provided values of
        floating species.

        Parameters
        ----------
        params: starting values of parameters
        max_nfev: maximum number of function evaluations

        Example
        -------
        f.fitModel()
        """
        ParameterDescriptor = collections.namedtuple(
            "ParameterDescriptor",
            "params method std minimizer minimizerResult")
        block = Logger.join(self._loggerPrefix, "fitModel")
        guid = self.logger.startBlock(block)
        self._initializeRoadrunnerModel()
        if self.parametersToFit is None:
            # Compute fit and residuals for base model
            self.params = None
        else:
            if params is None:
                params = self.mkParams()
            # Fit the model to the data using one or more methods.
            # Choose the result with the lowest residual standard deviation
            paramDct = {}
            for method in self._fitterMethods:
                for _ in range(self._numFitRepeat):
                    minimizer = lmfit.Minimizer(self._residuals,
                                                params,
                                                max_nfev=max_nfev)
                    try:
                        minimizerResult = minimizer.minimize(method=method,
                                                             max_nfev=max_nfev)
                    except Exception as excp:
                        msg = "Error minimizing for method: %s" % method
                        self.logger.error(msg, excp)
                        continue
                    params = minimizerResult.params
                    std = np.std(self._residuals(params))
                    if method in paramDct:
                        if std >= paramDct[method].std:
                            continue
                    paramDct[method] = ParameterDescriptor(
                        params=params.copy(),
                        method=method,
                        std=std,
                        minimizer=minimizer,
                        minimizerResult=minimizerResult,
                    )
            if len(paramDct) == 0:
                msg = "*** Minimizer failed for this model and data."
                raise ValueError(msg)
            # Select the result that has the smallest residuals
            sortedMethods = sorted(paramDct.keys(),
                                   key=lambda m: paramDct[m].std)
            bestMethod = sortedMethods[0]
            self.params = paramDct[bestMethod].params
            self.minimizer = paramDct[bestMethod].minimizer
            self.minimizerResult = paramDct[bestMethod].minimizerResult
        # Ensure that residualsTS and fittedTS match the parameters
        self.updateFittedAndResiduals(params=self.params)
        self.logger.endBlock(guid)
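
The pattern above, trying several optimizer methods and keeping the one with the smallest residual standard deviation, can be reduced to a minimal self-contained sketch (the data and residual function below are hypothetical stand-ins):

import lmfit
import numpy as np

x = np.linspace(0, 10, 50)
y = 3.0 * x + 1.0 + np.random.normal(scale=0.5, size=x.size)

def residuals(params):
    # difference between the linear model and the data
    return params["a"] * x + params["b"] - y

params = lmfit.Parameters()
params.add("a", value=1.0)
params.add("b", value=0.0)

best = None
for method in ("leastsq", "nelder"):
    result = lmfit.Minimizer(residuals, params).minimize(method=method)
    std = np.std(result.residual)
    # keep the result with the smallest residual scatter
    if best is None or std < best[0]:
        best = (std, result)

print(best[1].params.valuesdict())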
Code example #2
def run_mcmc(log_like_fcn,
             params,
             error_param_dict,
             comparisons,
             nworkers=8,
             ntemps=1,
             nsteps=1000,
             nwalk=100,
             nburn=500,
             thin=5,
             start='optimum',
             fcn_args=None,
             fcn_kws=None):
    """ Sample from the posterior using emcee. 
    
        NOTE: The code saves the "raw" chains (i.e. no burn-in or thinning) for later use.
    
    Args:
        log_like_fcn:     Obj. Function returning total likelihood as a Float. The first argument must
                          accept an LMFit 'Parameters' object
        params:           Obj. LMFit 'Parameters' object
        error_param_dict: Dict. Maps observed series to error terms e.g.
                              
                              {'Observed Q':'err_q'}
                          
                          Error terms must be named 'err_XXX'
        comparisons:      List. Datasets to be compared
        nworkers:         Int. Number of processes to use for parallelisation
        ntemps:           Int. Number of temperatures for parallel tempering. Use 1
                          to run the standard 'ensemble sampler'
        nsteps:           Int. Number of steps per chain
        nwalk:            Int. Number of chains/walkers
        nburn:            Int. Number of steps to discard from the start of each chain as 'burn-in'
        thin:             Int. Keep only every 'thin' steps
        start:            Str. Either 'optimum' or 'uniform'. If 'optimum', MCMC chains will be 
                          randomly initialised from within a small "Gaussian ball" in the vicinity of
                          the supplied parameter values; if 'uniform', the chains will start from 
                          random locations sampled uniformly from the prior parameter ranges
        fcn_args:         List. Additional positional arguments to pass to log_like_fcn
        fcn_kws:          Dict. Additional keyword arguments to pass to log_like_fcn
        
    Returns:
        LMFit emcee result object.
    """
    # Check user input
    assert start in (
        'optimum', 'uniform'), "'start' must be either 'optimum' or 'uniform'."

    error_params = [i for i in params.keys() if i.split('_')[0] == 'err']
    for error_param in error_params:
        if np.isfinite(params[error_param].min):
            assert params[error_param].min > 0, \
                'Minimum bound for %s must be >0.' % error_param

    # Set starting locations for chains
    varying = np.asarray([par.vary for par in params.values()])
    lower_bounds = np.asarray([i.min for i in params.values()])[varying]
    upper_bounds = np.asarray([i.max for i in params.values()])[varying]

    if (ntemps == 1) and (start == 'uniform'):
        starting_guesses = list(
            np.random.uniform(low=lower_bounds,
                              high=upper_bounds,
                              size=(nwalk, len(lower_bounds))))
    elif (ntemps > 1) and (start == 'uniform'):
        starting_guesses = list(
            np.random.uniform(low=lower_bounds,
                              high=upper_bounds,
                              size=(ntemps, nwalk, len(lower_bounds))))
    else:
        starting_guesses = None

    # Run MCMC
    t_start = time.time()

    mcmc = lmfit.Minimizer(
        log_like_fcn,
        params,
        fcn_args=fcn_args,
        fcn_kws=fcn_kws,
        nan_policy='omit',
    )

    result = mcmc.emcee(
        params=params,
        burn=nburn,
        steps=nsteps,
        nwalkers=nwalk,
        thin=thin,
        ntemps=ntemps,
        workers=nworkers,
        pos=starting_guesses,
        float_behavior='posterior',
    )

    t_end = time.time()
    print('Time elapsed running emcee: %.2f minutes.\n' % ((t_end - t_start) / 60))

    #print('EMCEE average acceptance rate: %.2f' % np.mean(sampler.acceptance_fraction))

    return result
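
For reference, a stripped-down version of the same Minimizer.emcee call, with a toy log-likelihood in place of log_like_fcn (this sketch requires the emcee package; ntemps > 1 additionally needs an emcee release that still ships PTSampler):

import lmfit
import numpy as np

x = np.linspace(0, 1, 30)
y = 2.0 * x + 0.5 + np.random.normal(scale=0.1, size=x.size)

def log_likelihood(params, x, y):
    # Gaussian log-likelihood with unit errors (toy example)
    model = params["a"] * x + params["b"]
    return -0.5 * np.sum((model - y) ** 2)

params = lmfit.Parameters()
params.add("a", value=1.0, min=0, max=5)
params.add("b", value=0.0, min=-2, max=2)

mini = lmfit.Minimizer(log_likelihood, params, fcn_args=(x, y))
res = mini.emcee(steps=500, nwalkers=50, burn=100, thin=5,
                 float_behavior='posterior')
res.params.pretty_print()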
Code example #3
def fit_models(
    distances,
    sig,
    fit="log",
    n_iter=1,
    method=["nelder", "leastsq", "least-squares"],
    p_power=None,
    p_exp=None,
    p_pow_exp=None,
    p_exp_exp=None,
):

    # Parameters.add_many takes tuples of (name, value, vary, min, max)
    if p_power is None:
        p_power = lmfit.Parameters()
        p_power.add_many(
            ("p_init", 0.5, True, 1e-10),
            ("p_decay_const", -0.5, True, -np.inf, -1e-10),
            ("intercept", 1e-5, True, 1e-10),
        )
    if p_exp is None:
        p_exp = lmfit.Parameters()
        p_exp.add_many(
            ("e_init", 0.5, True, 1e-10),
            ("e_decay_const", 0.1, True, 1e-10),
            ("intercept", 1e-5, True, 1e-10),
        )
    if p_pow_exp is None:
        p_pow_exp = lmfit.Parameters()
        p_pow_exp.add_many(
            ("e_init", 0.5, True, 1e-19),
            ("e_decay_const", 0.1, True, 1e-10),
            ("p_init", 0.5, True, 1e-10),
            ("p_decay_const", -0.5, True, -np.inf, -1e-10),
            ("intercept", 1e-5, True, 1e-10),
        )

    results_power_min = lmfit.Minimizer(
        model_res,
        p_power,
        fcn_args=(distances, sig, fit, powerlaw_decay),
        nan_policy="omit",
    )

    results_power = [
        fit_model_iter(results_power_min, n_iter=n_iter, method=meth)
        for meth in method
    ]
    results_power = results_power[np.argmin([i.aic for i in results_power])]

    results_exp_min = lmfit.Minimizer(model_res,
                                      p_exp,
                                      fcn_args=(distances, sig, fit,
                                                exp_decay),
                                      nan_policy="omit")
    results_exp = [
        fit_model_iter(results_exp_min, n_iter=n_iter, method=meth)
        for meth in method
    ]
    results_exp = results_exp[np.argmin([i.aic for i in results_exp])]

    results_pow_exp_min = lmfit.Minimizer(
        model_res,
        p_pow_exp,
        fcn_args=(distances, sig, fit, pow_exp_decay),
        nan_policy="omit",
    )
    results_pow_exp = [
        fit_model_iter(results_pow_exp_min, n_iter=n_iter, method=meth)
        for meth in method
    ]
    results_pow_exp = results_pow_exp[np.argmin(
        [i.aic for i in results_pow_exp])]

    best_fit_model = np.array(["pow", "exp", "pow_exp", "concat"])[np.argmin(
        [results_power.aic, results_exp.aic, results_pow_exp.aic])]
    return results_power, results_exp, results_pow_exp, best_fit_model
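
The selection step reduces to fitting each candidate model and keeping the lowest AIC; a self-contained sketch of that pattern with two toy decay models (all names and data here are hypothetical):

import lmfit
import numpy as np

x = np.linspace(0.1, 5, 60)
y = 2.0 * np.exp(-1.3 * x) + np.random.normal(scale=0.02, size=x.size)

def res_exp(p):
    return p["a"] * np.exp(-p["k"] * x) - y

def res_pow(p):
    return p["a"] * x ** p["k"] - y

fits = []
for fn, k0 in ((res_exp, 1.0), (res_pow, -1.0)):
    p = lmfit.Parameters()
    p.add("a", value=1.0)
    p.add("k", value=k0)
    fits.append(lmfit.Minimizer(fn, p, nan_policy="omit").minimize())

labels = np.array(["exp", "pow"])
print("best model:", labels[np.argmin([f.aic for f in fits])])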
Code example #4
def run_fit(fit_filename, params, data, cl_fitmethod):
    """Perform the fit."""
    util.header1("Fit")

    fit_config = util.read_cfg_file(fit_filename)

    if not fit_config.sections():
        fit_config.add_section("Standard Calculation")

    for section in fit_config.sections():
        util.header2(section)
        items = fit_config.items(section)
        parameters.set_param_status(params, items)
        clusters = find_independent_clusters(data, params)
        fitmethod = fit_config.get(section, "fitmethod", fallback=cl_fitmethod)

        if fitmethod not in ALLOWED_FITMETHODS:
            exit("The fitting method '{}', as specified in section ['{}'], "
                 "is invalid! Please choose from:\n  {}".format(
                     fitmethod, section,
                     list(sorted(ALLOWED_FITMETHODS.keys()))))

        print("Fitting method: {}\n".format(ALLOWED_FITMETHODS[fitmethod]))

        for c_name, c_data, c_params in clusters:
            if len(clusters) > 1:
                print(f"[{c_name}]")

            print("Chi2 / Reduced Chi2:")

            c_func = c_data.calculate_residuals
            c_minimizer = lmfit.Minimizer(c_func, c_params)

            try:
                if fitmethod == "brute":
                    c_result = c_minimizer.minimize(method=fitmethod,
                                                    keep="all")
                else:
                    c_result = c_minimizer.minimize(method=fitmethod)

            except KeyboardInterrupt:
                sys.stderr.write(
                    "\n -- Keyboard Interrupt: minimization stopped\n")
                c_result = c_minimizer.minimize(
                    params=c_minimizer.result.params, maxfev=1)

            for name, param in c_result.params.items():
                params[name] = param

            print("")

        if len(clusters) > 1:
            minimizer = lmfit.Minimizer(data.calculate_residuals, params)
            result = minimizer.prepare_fit()
            result.residual = data.calculate_residuals(params, verbose=False)
            result.params = params
            result._calculate_statistics()
            result.method = fitmethod
        else:
            result = c_result

        print(f"Final Chi2        : {result.chisqr:.3e}")
        print(f"Final Reduced Chi2: {result.redchi:.3e}")

    if result.method != "leastsq":
        print(
            "\nWarning: uncertainties and covariance of fitting parameters are only"
        )
        print("         calculated when using the 'leastsq' fitting method!")

    return result
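
The KeyboardInterrupt branch works because re-running the minimizer for a single function evaluation still produces a complete MinimizerResult, with chisqr and redchi populated from the last parameter values. A minimal sketch of that trick on a toy residual:

import lmfit
import numpy as np

def residual(params):
    return np.array([params["a"] - 3.0, params["a"] - 3.1])

params = lmfit.Parameters()
params.add("a", value=0.0)
minimizer = lmfit.Minimizer(residual, params)

try:
    result = minimizer.minimize()
except KeyboardInterrupt:
    # one evaluation from the current values still yields fit statistics
    result = minimizer.minimize(params=minimizer.result.params, maxfev=1)

print(result.chisqr, result.redchi)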
Code example #5
File: fit_basic.py, project: reiserm/Xana
def fit_basic(
    x,
    y,
    dy=None,
    model="line",
    init={},
    fix=None,
    method="leastsq",
    emcee=False,
    plot_corner=False,
    **kwargs,
):

    if model in "linear":
        func = linear
    elif model in "power":
        func = power
    elif model in "quadratic":
        func = quadratic
    elif model in "exponential":
        func = exponential
    else:
        raise ValueError("Model {} not defined.".format(model))

    model = Model(func, nan_policy="omit")

    pars = init_pars(model, init, x, y)

    if fix is not None:
        for vn in fix:
            pars[vn].set(value=fix[vn], vary=False)

    if dy is not None:
        dy = np.abs(dy)
        wgt = np.array(
            [1.0 / dy[i] if dy[i] > 0 else 0 for i in range(len(y))])
        is_weighted = True
    else:
        wgt = None
        is_weighted = False

    if emcee:
        mi = lmfit.minimize(
            residual,
            pars,
            args=(x, model),
            kws={
                "data": y,
                "eps": wgt
            },
            method="nelder",
            nan_policy="omit",
        )
        # mi.params.add('f', value=1, min=0.001, max=2)
        mini = lmfit.Minimizer(residual,
                               mi.params,
                               fcn_args=(x, model),
                               fcn_kws={
                                   "data": y,
                                   "eps": wgt
                               })
        out = mini.emcee(burn=300,
                         steps=1000,
                         thin=20,
                         params=mi.params,
                         is_weighted=is_weighted)
        out, fit_report = get_ml_solution(out, fix)
        print(list(out.params.valuesdict().values()))
        if plot_corner:
            corner.corner(
                out.flatchain,
                labels=out.var_names,
                truths=list(out.params.valuesdict().values()),
            )

    else:
        out = lmfit.minimize(
            residual,
            pars,
            args=(x, model),
            method=method,
            kws={
                "data": y,
                "eps": wgt
            },
            nan_policy="omit",
            **kwargs,
        )
        fit_report = lmfit.fit_report(out)

    pars_arr = np.zeros((len(pars), 2))
    for i, vn in enumerate(pars):
        pars_arr[i, 0] = out.params[vn].value
        pars_arr[i, 1] = out.params[vn].stderr if out.params[vn].stderr else 0

    if not emcee:
        gof = np.array([out.chisqr, out.redchi, out.bic, out.aic])
    else:
        gof = 0
    return pars_arr, gof, out, fit_report, model
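
fit_basic depends on helpers defined elsewhere in the project (init_pars, residual, linear, get_ml_solution), so it is not runnable on its own; the lmfit.Model workflow it wraps can be shown self-contained:

import numpy as np
from lmfit import Model

def linear(x, a, b):
    return a * x + b

x = np.linspace(0, 10, 40)
y = 1.5 * x - 2.0 + np.random.normal(scale=0.3, size=x.size)

model = Model(linear, nan_policy="omit")
pars = model.make_params(a=1.0, b=0.0)  # initial guesses
out = model.fit(y, pars, x=x)
print(out.fit_report())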
Code example #6
    parameters.add('ptsrcy', value=ptsrcy, min=ptsrcy - 5, max=ptsrcy + 5)
    parameters.add('ptsrcamp', value=0.004, min=0.001, max=0.6)
    parhist[band][len(parameters)] = []
    result3 = lmfit.minimize(residual, parameters, epsfcn=epsfcn)
    print("Smoothed linear fit parameters with point source:")
    result3.params.pretty_print()
    #print("red Chi^2: {0:0.3g}".format(result3.chisqr / (ndata - result3.nvarys)))
    print("red Chi^2: {0:0.3g}".format(result3.redchi))
    print(result3.message)
    print()

    bestdiskplussourcemod = model(**result3.params)

    parameters.add('ptsrcwid', value=0.04, min=0.01, max=0.1)
    parhist[band][len(parameters)] = []
    minimizer = lmfit.Minimizer(residual, parameters, epsfcn=epsfcn)
    result4 = minimizer.minimize()
    print(
        "Smoothed linear fit parameters with horizontally smeared point source:"
    )
    result4.params.pretty_print()
    #print("red Chi^2: {0:0.3g}".format(result4.chisqr / (ndata - result4.nvarys)))
    print("red Chi^2: {0:0.3g}".format(result4.redchi))
    print(result4.message)
    print()

    bestdiskplussmearedsourcemod = model(**result4.params)

    # remove the source
    sourceless_pars = dict(**result4.params)
    sourceless_pars['ptsrcamp'] = 0
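
This fragment builds the model incrementally: new parameters are added to the same Parameters object between fits, so each stage starts from the previous best-fit values. A self-contained miniature of that workflow on toy Gaussian data:

import lmfit
import numpy as np

x = np.linspace(-5, 5, 80)
y = np.exp(-0.5 * x**2) + 0.1 + np.random.normal(scale=0.01, size=x.size)

def residual(p):
    model = p["amp"] * np.exp(-0.5 * (x / p["sig"]) ** 2)
    if "offset" in p:  # parameter only exists in the second stage
        model = model + p["offset"]
    return model - y

params = lmfit.Parameters()
params.add("amp", value=1.0, min=0)
params.add("sig", value=1.0, min=0.1)
result1 = lmfit.minimize(residual, params)

# stage 2: add a parameter and refit, starting from the stage-1 solution
params = result1.params
params.add("offset", value=0.0)
result2 = lmfit.minimize(residual, params)
result2.params.pretty_print()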
Code example #7
    def run(self, p0, fix=None):
        """ do phase-determination fitting

        Args:
            p0 (list of floats): background, c1, phase difference in radians

        """
        self.initpars = lmfit.Parameters()

        self.initpars.add("c1", value=p0[1], min=0)

        if fix == "both":
            self.initpars.add("bg", value=p0[0], min=1e-4, vary=False)
            self.initpars.add(
                "delphi", value=p0[2], min=1e-4, max=np.pi, vary=False
            )
        elif fix == "background":
            self.initpars.add("bg", value=p0[0], min=1e-4, vary=False)
            self.initpars.add("delphi", value=p0[2], min=1e-4, max=np.pi)
        elif fix == "phase":
            self.initpars.add("bg", value=p0[0], min=1e-4)
            self.initpars.add(
                "delphi", value=p0[2], min=1e-4, max=np.pi, vary=False
            )
        else:
            self.initpars.add("bg", value=p0[0], min=0)
            self.initpars.add("delphi", value=p0[2], min=1e-4, max=np.pi)

        self.fitter = lmfit.Minimizer(
            self.resid, self.initpars, fcn_args=(self.x, self.y)
        )
        self.optres = self.fitter.minimize()
        # extract fitted parameters
        _popt = self.optres.params
        self.bg, self.bg_stderr = _popt["bg"].value, _popt["bg"].stderr
        self.c1, self.c1_stderr = _popt["c1"].value, _popt["c1"].stderr
        self.delphi = _popt["delphi"].value
        self.delphi_err = _popt["delphi"].stderr
        self.phase = rad2deg(_popt["delphi"].value)
        self.phase_stderr = rad2deg(_popt["delphi"].stderr)

        _str = (
            "SHG_B = {:.2f} +/- {:<.2f}\n"
            "C1 = {:.2e} +/- {:<.2e}\n"
            "Δφ = {:.2f}˚ +/- {:<.2f}˚\n"
            "in radians, {:.3f}  +/- {:.3f}"
        )

        fmt = PartialFormatter()

        self.res_str = fmt.format(
            _str,
            *[
                self.bg,
                self.bg_stderr,
                self.c1,
                self.c1_stderr,
                self.phase,
                self.phase_stderr,
                self.delphi,
                self.delphi_err,
            ],
        )

        self.model = lambda x: self.f([self.bg, self.c1, self.delphi], x)
        self.set_fit()
Code example #8
def test_brute():
    # The tests below are to make sure that the implementation of the brute
    # method in lmfit works as intended.

    # restore original settings for parameters 'x' and 'y'
    params_lmfit.add_many(('x', -4.0, True, -4.0, 4.0, None, None),
                          ('y', -2.0, True, -2.0, 2.0, None, None))

    # TEST 1: only upper bound and brute_step specified, using default Ns=20
    Ns = 20
    params_lmfit['x'].set(min=-np.inf)
    params_lmfit['x'].set(brute_step=0.25)
    fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
    resbrute_lmfit = fitter.minimize(method='brute')
    grid_x_expected = np.linspace(
        params_lmfit['x'].max - Ns * params_lmfit['x'].brute_step,
        params_lmfit['x'].max, Ns, False)
    grid_x = np.unique([par.ravel() for par in resbrute_lmfit.brute_grid][0])
    assert_almost_equal(grid_x_expected, grid_x, verbose=True)
    grid_y = np.unique([par.ravel() for par in resbrute_lmfit.brute_grid][1])
    grid_y_expected = np.linspace(params_lmfit['y'].min, params_lmfit['y'].max,
                                  Ns)
    assert_almost_equal(grid_y_expected, grid_y, verbose=True)

    # TEST 2: only lower bound and brute_step specified, using Ns=15
    Ns = 15
    params_lmfit['y'].set(max=np.inf)
    params_lmfit['y'].set(brute_step=0.1)
    fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
    resbrute_lmfit = fitter.minimize(method='brute', Ns=15)
    grid_x_expected = np.linspace(
        params_lmfit['x'].max - Ns * params_lmfit['x'].brute_step,
        params_lmfit['x'].max, Ns, False)
    grid_x = np.unique([par.ravel() for par in resbrute_lmfit.brute_grid][0])
    assert_almost_equal(grid_x_expected, grid_x, verbose=True)
    grid_y = np.unique([par.ravel() for par in resbrute_lmfit.brute_grid][1])
    grid_y_expected = np.linspace(
        params_lmfit['y'].min,
        params_lmfit['y'].min + Ns * params_lmfit['y'].brute_step, Ns, False)
    assert_almost_equal(grid_y_expected, grid_y, verbose=True)

    # TEST 3: only value and brute_step specified, using Ns=15
    Ns = 15
    params_lmfit['x'].set(max=np.inf)
    params_lmfit['x'].set(min=-np.inf)
    params_lmfit['x'].set(brute_step=0.1)
    fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
    resbrute_lmfit = fitter.minimize(method='brute', Ns=15)
    grid_x_expected = np.linspace(
        params_lmfit['x'].value - (Ns // 2) * params_lmfit['x'].brute_step,
        params_lmfit['x'].value + (Ns // 2) * params_lmfit['x'].brute_step, Ns)
    grid_x = np.unique([par.ravel() for par in resbrute_lmfit.brute_grid][0])
    assert_almost_equal(grid_x_expected, grid_x, verbose=True)
    grid_y = np.unique([par.ravel() for par in resbrute_lmfit.brute_grid][1])
    grid_y_expected = np.linspace(
        params_lmfit['y'].min,
        params_lmfit['y'].min + Ns * params_lmfit['y'].brute_step, Ns, False)
    assert_almost_equal(grid_y_expected, grid_y, verbose=True)

    # TEST 3 (repeat): same settings as above, re-run to confirm the centred grid
    fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
    resbrute_lmfit = fitter.minimize(method='brute', Ns=15)
    grid_x_expected = np.linspace(
        params_lmfit['x'].value - (Ns // 2) * params_lmfit['x'].brute_step,
        params_lmfit['x'].value + (Ns // 2) * params_lmfit['x'].brute_step, Ns)
    grid_x = np.unique([par.ravel() for par in resbrute_lmfit.brute_grid][0])
    assert_almost_equal(grid_x_expected, grid_x, verbose=True)
    grid_y = np.unique([par.ravel() for par in resbrute_lmfit.brute_grid][1])
    grid_y_expected = np.linspace(
        params_lmfit['y'].min,
        params_lmfit['y'].min + Ns * params_lmfit['y'].brute_step, Ns, False)
    assert_almost_equal(grid_y_expected, grid_y, verbose=True)

    # TEST 4: check for correct functioning of keep argument and candidates attribute
    params_lmfit.add_many(  # restore original settings for parameters 'x' and 'y'
        ('x', -4.0, True, -4.0, 4.0, None, None),
        ('y', -2.0, True, -2.0, 2.0, None, None))

    fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
    resbrute_lmfit = fitter.minimize(method='brute')
    assert len(resbrute_lmfit.candidates) == 50  # default number of stored candidates

    resbrute_lmfit = fitter.minimize(method='brute', keep=10)
    assert len(resbrute_lmfit.candidates) == 10

    assert isinstance(resbrute_lmfit.candidates[0].params, lmfit.Parameters)
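
The expected grids follow lmfit's rules for the brute method: with only a value and a brute_step, the grid is centred on the value and extends Ns//2 steps to either side. A tiny self-contained check of the centred case:

import lmfit
import numpy as np

def f(params):
    return (params["x"] - 1.0) ** 2

params = lmfit.Parameters()
params.add("x", value=0.0, brute_step=0.5)  # no finite bounds

res = lmfit.Minimizer(f, params).minimize(method="brute", Ns=5)
# grid is centred on the value: value +/- (Ns//2) * brute_step
print(np.unique(res.brute_grid))  # [-1.  -0.5  0.   0.5  1. ]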
Code example #9
def test_brute_lmfit_vs_scipy():
    # The tests below are to make sure that the implementation of the brute
    # method in lmfit gives identical results to scipy.optimize.brute, when
    # using finite bounds for all varying parameters.

    # TEST 1: using bounds, with (default) Ns=20 and no stepsize specified
    assert (not params_lmfit['x'].brute_step)  # brute_step for x == None
    assert (not params_lmfit['y'].brute_step)  # brute_step for y == None

    rranges = ((-4, 4), (-2, 2))
    resbrute = optimize.brute(f,
                              rranges,
                              args=params,
                              full_output=True,
                              Ns=20,
                              finish=None)
    fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
    resbrute_lmfit = fitter.minimize(method='brute', Ns=20)

    assert_equal(resbrute[2], resbrute_lmfit.brute_grid,
                 verbose=True)  # grid identical
    assert_equal(resbrute[3], resbrute_lmfit.brute_Jout,
                 verbose=True)  # function values on grid identical
    assert_equal(resbrute[0][0], resbrute_lmfit.brute_x0[0],
                 verbose=True)  # best fit x value identical
    assert_equal(resbrute[0][0],
                 resbrute_lmfit.params['x'].value,
                 verbose=True)  # best fit x value stored correctly
    assert_equal(resbrute[0][1], resbrute_lmfit.brute_x0[1],
                 verbose=True)  # best fit y value identical
    assert_equal(resbrute[0][1],
                 resbrute_lmfit.params['y'].value,
                 verbose=True)  # best fit y value stored correctly
    assert_equal(resbrute[1], resbrute_lmfit.brute_fval,
                 verbose=True)  # best fit function value identical
    assert_equal(resbrute[1], resbrute_lmfit.chisqr,
                 verbose=True)  # best fit function value stored correctly

    # TEST 2: using bounds, setting Ns=40 and no stepsize specified
    assert (not params_lmfit['x'].brute_step)  # brute_step for x == None
    assert (not params_lmfit['y'].brute_step)  # brute_step for y == None

    rranges = ((-4, 4), (-2, 2))
    resbrute = optimize.brute(f,
                              rranges,
                              args=params,
                              full_output=True,
                              Ns=40,
                              finish=None)
    fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
    resbrute_lmfit = fitter.minimize(method='brute', Ns=40)

    assert_equal(resbrute[2], resbrute_lmfit.brute_grid,
                 verbose=True)  # grid identical
    assert_equal(resbrute[3], resbrute_lmfit.brute_Jout,
                 verbose=True)  # function values on grid identical
    assert_equal(resbrute[0][0],
                 resbrute_lmfit.params['x'].value,
                 verbose=True)  # best fit x value identical
    assert_equal(resbrute[0][1],
                 resbrute_lmfit.params['y'].value,
                 verbose=True)  # best fit y value identical
    assert_equal(resbrute[1], resbrute_lmfit.chisqr,
                 verbose=True)  # best fit function value identical

    # TEST 3: using bounds and specifying stepsize for both parameters
    params_lmfit['x'].set(brute_step=0.25)
    params_lmfit['y'].set(brute_step=0.25)
    assert_equal(params_lmfit['x'].brute_step, 0.25, verbose=True)
    assert_equal(params_lmfit['y'].brute_step, 0.25, verbose=True)

    rranges = (slice(-4, 4, 0.25), slice(-2, 2, 0.25))
    resbrute = optimize.brute(f,
                              rranges,
                              args=params,
                              full_output=True,
                              Ns=20,
                              finish=None)
    fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
    resbrute_lmfit = fitter.minimize(method='brute')

    assert_equal(resbrute[2], resbrute_lmfit.brute_grid,
                 verbose=True)  # grid identical
    assert_equal(resbrute[3], resbrute_lmfit.brute_Jout,
                 verbose=True)  # function values on grid identical
    assert_equal(resbrute[0][0],
                 resbrute_lmfit.params['x'].value,
                 verbose=True)  # best fit x value identical
    assert_equal(resbrute[0][1],
                 resbrute_lmfit.params['y'].value,
                 verbose=True)  # best fit y value identical
    assert_equal(resbrute[1], resbrute_lmfit.chisqr,
                 verbose=True)  # best fit function value identical

    # TEST 4: using bounds, Ns=10, and specifying stepsize for parameter 'x'
    params_lmfit['x'].set(brute_step=0.15)
    params_lmfit['y'].set(brute_step=0)  # a zero brute_step is treated as unset
    assert_equal(params_lmfit['x'].brute_step, 0.15, verbose=True)
    assert (not params_lmfit['y'].brute_step)

    rranges = (slice(-4, 4, 0.15), (-2, 2))
    resbrute = optimize.brute(f,
                              rranges,
                              args=params,
                              full_output=True,
                              Ns=10,
                              finish=None)
    fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
    resbrute_lmfit = fitter.minimize(method='brute', Ns=10, keep='all')

    assert_equal(resbrute[2], resbrute_lmfit.brute_grid,
                 verbose=True)  # grid identical
    assert_equal(resbrute[3], resbrute_lmfit.brute_Jout,
                 verbose=True)  # function values on grid identical
    assert_equal(resbrute[0][0],
                 resbrute_lmfit.params['x'].value,
                 verbose=True)  # best fit x value identical
    assert_equal(resbrute[0][1],
                 resbrute_lmfit.params['y'].value,
                 verbose=True)  # best fit y value identical
    assert_equal(resbrute[1], resbrute_lmfit.chisqr,
                 verbose=True)  # best fit function value identical
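
The equivalence being tested here can be reproduced in a few lines: with finite bounds on every varying parameter and the same Ns, lmfit's brute grid matches scipy.optimize.brute exactly (toy objective below):

import lmfit
import numpy as np
from scipy import optimize

def f_scipy(z):
    x, y = z
    return (x - 1) ** 2 + (y + 2) ** 2

def f_lmfit(p):
    return (p["x"] - 1) ** 2 + (p["y"] + 2) ** 2

params = lmfit.Parameters()
params.add("x", value=0, min=-4, max=4)
params.add("y", value=0, min=-2, max=2)

resbrute = optimize.brute(f_scipy, ((-4, 4), (-2, 2)), Ns=20,
                          full_output=True, finish=None)
res_lmfit = lmfit.Minimizer(f_lmfit, params).minimize(method="brute", Ns=20)

np.testing.assert_equal(resbrute[2], res_lmfit.brute_grid)  # same grid
print(res_lmfit.params["x"].value, res_lmfit.params["y"].value)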
Code example #10
def sequence_preparing(config_file,
                       scanned_parameter, 
                       sequence_type, 
                       data_label,
                       redo_prepare, 
                       redo_h5, 
                       plot_data, 
                       gaussian_reconstruction,
                       plot_rois, 
                       bin_image, 
                       n_bins, 
                       bin_time, 
                       n_bins_t, 
                       undo_shear, 
                       w_fit):


    h5_file_name = 'processed_data/' + sequence_type + '.h5'
    
    if not redo_h5:
        try:
            print('Loading data from h5 file...')
            keys = ['t', 'sorted_od', 'rois_array']
            data_dict = raf.h5_to_dict(h5_file_name, keys)
            t = data_dict['t']
            sorted_od = data_dict['sorted_od']
            rois_array = data_dict['rois_array']
            
        except Exception as e:
            print('Data not found')
            print(e)
            t = np.nan
            sorted_od = np.nan
            rois_array = np.nan
    
    
    else:
        print('Preparing data again')
        parser = ConfigParser()
        parser.read(config_file)
#            sections = parser.sections()
        dates = np.array(parser.get(data_label, 'date').split(' , '),
                         dtype=int)
        
        sequence_indices = []
        for sequence in parser.get(data_label, 'sequence_indices').split(' , '):
            sequence_indices.append(np.array(sequence.split(' '), dtype=int))
        
        x_rois = np.array(parser.get(data_label, 'x_rois').split(' '), dtype=int)
        y_rois = np.array(parser.get(data_label, 'y_rois').split(' '), dtype=int)
        x_offset = int(parser.get(data_label, 'x_offset'))
        y_offset = int(parser.get(data_label, 'y_offset'))
        
            
        n_rois = len(x_rois)

        sorted_od = []
        camera = 'XY_Mako'
        for i, date in enumerate(dates):
            for sequence in sequence_indices[i]:
          
                df, ods = data_processing(camera, date, sequence, sequence_type, 
                                                        scanned_parameter=scanned_parameter,
                                                        long_image=False, redo_prepare=redo_prepare)
                sorted_od.append(ods)

        sorted_od = np.nanmean(np.array(sorted_od), axis=0)
#        t = df[scanned_parameter].values
#        print(t.shape)
#        idx = np.argmax(np.diff(t))
#        print(idx)
#        print(t[idx])
#        print(t[idx+1])
#        print(t)
        
        if bin_time:
            sorted_od = bin_trace(sorted_od, n_bins_t)
            t = bin_trace(df[scanned_parameter].values, n_bins_t)
        else:
            t = df[scanned_parameter].values
        
        nan_idx = np.isfinite(sorted_od[:,0,0])
        t = t[nan_idx]
        sorted_od = sorted_od[nan_idx]
            

        idx = 0
        od = sorted_od[idx]
        n_rois = len(x_rois)
        weights = np.array([1, 0.9245, 1.11739])
        weights = np.array([1, 1, 1])  # overrides the calibrated weights above
        yvals = range(od.shape[0])
        xvals = range(od.shape[1])
        xy_grid = np.meshgrid(xvals, yvals)

        wx = w_fit
        wy = w_fit
        mask = np.zeros(od.shape)
        
        if plot_data:
            od = sorted_od[idx]
            plt.imshow(od, vmin=-0.05, vmax=0.9)
            plt.colorbar(label='OD')
            plt.xlabel('x pixel')
            plt.ylabel('y pixel')
            plt.show()
        for x0, y0 in zip(x_rois, y_rois):
            mask += puck(xy_grid, x0, wx, y0, wy)
        
        if plot_data:
        
            plt.imshow(od*mask, vmin=-0.05, vmax=1)
            plt.show()
            
        rois_array = make_rois_array(sorted_od, x_rois, y_rois, n_rois, wx, wy, 
                                           only_one=False, weights=weights)
        if plot_data:
            plt.imshow(rois_array.sum(axis=1)[idx])
            plt.show()

        x_rois += x_offset
        y_rois += y_offset
        
        if plot_rois:
            
            mask = np.zeros(od.shape)
            for x0, y0 in zip(x_rois, y_rois):
                mask += puck(grid(od), x0, wx, y0, wy)
            #mask = mask.clip(0.9)
            plt.title('Rois')
            plt.imshow(sorted_od[0]*mask, vmin=-0.05)
            plt.show()
            
            plt.imshow(sorted_od[sorted_od.shape[0]-10] * mask, vmin=-0.05)
            plt.show()
        
        if undo_shear:
        
            shear_m = 0.051  # 0.065
            shear_p = -0.051
            n_cut = 5
            shear_size = w_fit + n_cut
            x0 = x_offset
            y0 = y_offset
            print(x0)
            print(y0)
            shears = [shear_m, 0, shear_p]
            for i in tqdm(range(sorted_od.shape[0])):
                for j in range(0,3):
                    x_i = x_rois[j]-shear_size
                    x_f = x_rois[j]+shear_size
                    y_i = y_rois[j]-shear_size
                    y_f = y_rois[j]+shear_size
#                    plt.imshow(sorted_od[i, y_i:y_f, x_i:x_f])
#                    plt.show()
                    sorted_od[i, y_i:y_f, x_i:x_f] = shear_array(sorted_od[i, y_i:y_f, x_i:x_f],
                                                                 x0, 
                                                                 y0, 
                                                                 shears[j], 
                                                                 plotme=False)


        if gaussian_reconstruction:
            print(x_offset)
            print(y_offset)
            print('initial rois:')
            print(x_rois)
            print(y_rois)
            od = sorted_od[0]
            xy_vals = make_xy_grid(od)
            w = 60
            delta_x = 10
        
            
            if n_rois == 3:
                x1, x2, x3 = x_rois + x_offset
                y1, y2, y3 = y_rois + y_offset
                
            else:
                x1, x2 = x_rois
                y1, y2 = y_rois
                x3,y3 = [80, 80]
                
            rois_before_opt = make_rois_array(od, x_rois, y_rois, 
                                             n_rois, w, w, only_one=True,
                                             weights=weights)[0]
            
            for xx in range(2):
                params = lmfit.Parameters()
                params.add('x1', value=x1, vary=True, min=x1-delta_x, max=x1+delta_x)
                params.add('x2', value=x2, vary=False)  # , min=x2-delta_x, max=x2+delta_x)
                params.add('x3', value=x3, vary=True, min=x3-delta_x, max=x3+delta_x)
                params.add('y1', value=y1, vary=True, min=y1-delta_x, max=y1+delta_x)
                params.add('y2', value=y2, vary=False)  # , min=y2-delta_x, max=y2+delta_x)
                params.add('y3', value=y3, vary=True, min=y3-delta_x, max=y3+delta_x)
                params.add('w', value=w, vary=False)
                params.add('bkg', value=0.019, vary=False)  # , min=-0.1, max=0.1)
                params.add('amp', value=1.034, vary=True, min=0.8, max=1.4)
                params.add('x0', value=w, vary=True, min=w-10, max=w+10)
                params.add('sigmax', value=52, vary=True, min=40, max=60)
                params.add('y0', value=w, vary=True)  # , min=w-10, max=w+10)
                params.add('sigmay', value=56, vary=True)  # , min=40, max=60)
                params.add('s1', value=1, vary=False)  # , min=0.5, max=1.5)
                params.add('s2', value=1, vary=True)  # , min=0.5, max=1.5)
                params.add('s3', value=1, vary=True)  # , min=0.5, max=1.5)
                minner = lmfit.Minimizer(residuals, params, fcn_args=(xy_vals, od))
                result = minner.minimize(method='powell')
                
                params_dict = result.params.valuesdict()
                x1 = params_dict['x1']
                x2 = params_dict['x2']
                x3 = params_dict['x3']
                y1 = params_dict['y1']
                y2 = params_dict['y2']
                y3 = params_dict['y3']
                w = params_dict['w']
                wx = w
                wy = w
                n_rois=3
                x_rois = np.array([x1, x2, x3], dtype=int)
                y_rois = np.array([y1, y2, y3], dtype=int)
            
            rois_array = make_rois_array(od, x_rois, y_rois, 
                                             n_rois, wx, wy, only_one=True,
                                             weights=weights)[0]
            
            
            
            rois_sum = np.zeros_like(rois_array)
            for i in range(3):
                rois_sum[i] = rois_array[i]*result.params['s%s'%(i+1)].value
            rois_sum = rois_sum.sum(axis=0)
            
            plt.figure(figsize=(3.5*5,3))
            gs = GridSpec(1,5)
            plt.subplot(gs[0])
            plt.imshow(rois_before_opt.sum(axis=0))
            plt.title('Initial rois recombined')
            plt.subplot(gs[2])
            gauss_fit = residuals(result.params, 
                                        make_xy_grid(od)).reshape((2*w,2*w), 
                                        order='F')
            plt.imshow(gauss_fit)
                                
            plt.title('Gaussian fit')
            plt.subplot(gs[3])
            res = residuals( result.params, 
                                 make_xy_grid(od), od).reshape((2*w,2*w), order='F')
            plt.imshow(res, cmap='RdBu',vmin=-0.4, vmax=0.4)
            plt.title('Optimized residuals')
            
            plt.subplot(gs[1])
            plt.imshow(rois_sum)
            plt.title('Optimal rois recombined')
            plt.subplot(gs[4])
            mask = np.zeros(od.shape)
            for x0, y0 in zip(x_rois, y_rois):
                mask += puck(grid(od), x0, wx+10, y0, wy+10)
            #mask = mask.clip(0.9)
            plt.title('Rois')
            plt.imshow(od*mask, vmin=-0.05, vmax=0.75)
            plt.show()
            print(result.params['x0'].value)
            print(result.params['y0'].value)
            print('optimized rois:')
            print(x_rois)
            print(y_rois)
        
        w = w_fit
        wx = w + 20 * 0  # the extra +20 margin is currently disabled
        wy = w
    
        rois_array_start = make_rois_array(sorted_od, x_rois, y_rois, 
                                         n_rois, wx, wy, only_one=False) 
         
        if bin_image:
        
            rois_array = []
            for rois in rois_array_start:
                rois_array.append(bin_image_arr(rois, n_rois, n_bins))
        
            rois_array = np.array(rois_array)
            x_rois = x_rois / n_bins
            y_rois = y_rois / n_bins
            w = int(w / n_bins)
            wx = int(wx / n_bins)
            wy = int(wy / n_bins)
            
        else:
            rois_array = rois_array_start    
        
        with h5py.File(h5_file_name, 'a') as h5f:
            
            print('Saving new processed data')
            
            try:
                del h5f['t']
                del h5f['sorted_od']
                del h5f['rois_array']
            except KeyError:
                pass  # datasets not present yet

            raf.h5_save(h5f, 'sorted_od', sorted_od)
            raf.h5_save(h5f, 'rois_array', rois_array)
            raf.h5_save(h5f, 't', t)
                    
    return t, sorted_od, rois_array
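
The save block at the end rewrites existing datasets by deleting and recreating them, because h5py cannot overwrite a dataset in place. A minimal sketch of that overwrite pattern (file name is a placeholder):

import h5py
import numpy as np

def h5_overwrite(h5f, key, value):
    # delete an existing dataset first; h5py cannot replace it in place
    if key in h5f:
        del h5f[key]
    h5f.create_dataset(key, data=value)

with h5py.File('example.h5', 'a') as h5f:
    h5_overwrite(h5f, 't', np.arange(10))
    h5_overwrite(h5f, 'sorted_od', np.zeros((10, 4, 4)))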
Code example #11
File: PNLF.py, project: tspriggs/MUSE_PNe_fitting
def PNLF_analysis(galaxy_name, loc, data, data_err, obs_comp, M_5007, m_5007, dM_in=31.5, c2_in=0.307,
                 vary_dict={"dM":True, "M_star":False, "c1":False, "c2":False, "c3":False}, mcmc=False, min_stat="KS_1samp", comp_lim=False):  
    """This is the execution function to fit the PNLF to a given data set of m_5007 of PNe.
    First, setup the paramters class from LMfit, then make a lmfit.Minimizer function.
    Second, use the .minimize method to execute the fitting.
    There is an if statement to check if mcmc is to be used.
    Otherwise, returns an LMfit dict object that contains the best-fit results from the LMfit package.

    Parameters
    ----------
    galaxy_name : [str]
        Name of the galaxy.
    loc : [str]
        pointing location: center, halo or middle.
    data : [list / array]
        PNe apparent magnitudes, in [OIII].
    data_err : [list / array]
        Errors on the PNe apparent magnitudes.
    obs_comp : [list / array]
        Observed completeness profile / ratio for the galaxy.
    M_5007 : [list / array]
        Absolute magnitude, in [OIII], array (-4.53 to 0.53).
    m_5007 : [list / array]
        Apparent magnitude, in [OIII], array (26.0 to 31.0).
    dM_in : float, optional
        Starting guess for the value of distance modulus, by default 31.5.
    c2_in : float, optional
        Starting guess for the c2 parameter, by default 0.307.
    vary_dict : dict, optional
        A dictionary with a series of boolean switches, deciding which parameters will be fitted.
        By default {"dM":True, "M_star":False, "c1":False, "c2":False, "c3":False}.
    mcmc : bool, optional
        A check of whether an mcmc minimisation should also be run on the PNLF fitting function.
        By default False.

    Returns
    -------
    [dict]
        LMfit object containing the fit results from the PNLF and PNe CDF minimisation.
    """
    PNLF_params = Parameters()

    if loc == "center":
        PNLF_params.add("dM", value=dM_in, min=dM_in-2, max=dM_in+2, vary=vary_dict["dM"], brute_step=0.001)
    elif loc in ["middle", "halo"]:
        gal_df = pd.read_csv("exported_data/galaxy_dataframe.csv", index_col=("Galaxy", "loc"))
        center_dM = gal_df.loc[(galaxy_name, "center"), "PNLF dM"]
        PNLF_params.add("dM", value=center_dM, min=29.5, max=33.0, vary=False)

    PNLF_params.add("c1", value=1, min=0.00, vary=vary_dict["c1"], brute_step=0.001)
    PNLF_params.add("c2", value=c2_in, min=c2_in-1.5, max=c2_in+1.5, vary=vary_dict["c2"], brute_step=0.001)
    PNLF_params.add("c3", value=3., min=0.0001, max=10, vary=vary_dict["c3"], brute_step=0.001)
    PNLF_params.add("M_star", value=-4.53, min=-4.7, max=-4.3, vary=vary_dict["M_star"], brute_step=0.001)

    PNLF_minimizer = lmfit.Minimizer(PNLF_fitter, PNLF_params, fcn_args=(data, data_err, obs_comp, M_5007, m_5007, min_stat, comp_lim), nan_policy="propagate")
    if min_stat == "chi2":
        PNLF_results = PNLF_minimizer.minimize()
    elif min_stat == "KS_1samp":
        PNLF_results = PNLF_minimizer.scalar_minimize(method="slsqp", tol=1e-6)#, options={"initial_simplex":np.array([[c2_in-0.3, dM_in-0.2] ,  [c2_in+0.3, dM_in] , [c2_in-0.3, dM_in+0.2] ])})

    ## testing
    if mcmc:
        PNLF_results.params.add('__lnsigma', value=np.log(0.1), min=np.log(0.001), max=np.log(20))
        res = lmfit.minimize(PNLF_fitter, args=(data, data_err, obs_comp, M_5007, m_5007, min_stat, comp_lim), method='emcee', nan_policy='omit', nwalkers=200, burn=700, steps=3000, thin=20,
                     params=PNLF_results.params, is_weighted=False, progress=True)
        
        return res

    else:
        return PNLF_results
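
With is_weighted=False, lmfit's emcee interface samples an extra __lnsigma parameter for the unknown noise scale, which is why it is added to the parameters before the emcee run above. A minimal self-contained illustration of that convention (requires the emcee package):

import lmfit
import numpy as np

x = np.linspace(0, 1, 40)
y = 2.0 * x + np.random.normal(scale=0.1, size=x.size)

def residual(p):
    return p["a"] * x - y  # unweighted residuals

params = lmfit.Parameters()
params.add("a", value=1.0, min=0, max=5)
params.add("__lnsigma", value=np.log(0.1), min=np.log(1e-3), max=np.log(2))

res = lmfit.minimize(residual, params, method="emcee", nwalkers=50,
                     steps=500, burn=100, is_weighted=False, progress=False)
print(np.exp(res.params["__lnsigma"].value))  # recovered noise level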
Code example #12
def thindiskcurve_fitter(xsep,
                         velo,
                         error=None,
                         mguess=20 * u.M_sun,
                         rinner=20 * u.au,
                         router=50 * u.au,
                         fixedmass=False,
                         conf_int=False,
                         **kwargs):

    parameters = lmfit.Parameters()
    parameters.add(
        'mass',
        value=u.Quantity(mguess, u.M_sun).value,
        min=min([10, mguess.value]),
        max=25,
        vary=not fixedmass,
    )
    parameters.add('rinner',
                   value=u.Quantity(rinner, u.au).value,
                   min=3,
                   max=50)
    parameters.add('delta', value=20, min=10, max=50)
    parameters.add('router',
                   value=u.Quantity(router, u.au).value,
                   min=20,
                   max=100,
                   expr='rinner+delta')
    parameters.add('vcen', value=vcen.value, min=3.5, max=7.5)

    fcn_kws = kwargs
    fcn_kws.update({
        'xsep': u.Quantity(xsep, u.au),
        'velo': u.Quantity(velo, u.km / u.s),
        'error': error
    })

    minimizer = lmfit.Minimizer(thindiskcurve_residual,
                                parameters,
                                epsfcn=0.005,
                                fcn_kws=fcn_kws)

    result = minimizer.minimize()

    result.params.pretty_print()

    if fixedmass:
        assert parameters['mass'].value == mguess.value

    if conf_int:
        lmfit.report_fit(result.params, min_correl=0.5)

        ci, trace = lmfit.conf_interval(minimizer,
                                        result,
                                        sigmas=[1, 2],
                                        trace=True,
                                        verbose=False)
        lmfit.printfuncs.report_ci(ci)
        return result, ci, trace, minimizer

    return result
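
lmfit.conf_interval needs both the Minimizer and its result, which is why the function above returns the minimizer alongside the confidence intervals. A self-contained sketch of that usage:

import lmfit
import numpy as np

x = np.linspace(0, 10, 50)
y = 3.0 * x + 1.0 + np.random.normal(scale=0.5, size=x.size)

def residual(p):
    return p["a"] * x + p["b"] - y

params = lmfit.Parameters()
params.add("a", value=1.0)
params.add("b", value=0.0)

mini = lmfit.Minimizer(residual, params)
result = mini.minimize()  # leastsq, provides the estimates to profile

ci, trace = lmfit.conf_interval(mini, result, sigmas=[1, 2],
                                trace=True, verbose=False)
lmfit.printfuncs.report_ci(ci)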
Code example #13
File: fit.py, project: john-livingston/transitfit
    def fit_map(self, lm_prefit=True, verbose=True, guess_t0=False):

        init_params = self.init_params
        par = get_par(get_theta(init_params), self.sm)
        if guess_t0:
            par['t0'].value = self.args[0].mean()
        args = self.args
        lm_logprob = -np.inf

        if lm_prefit:

            par['k'].vary = False
            par['r'].vary = False
            par['ls'].vary = False
            par['q1'].vary = False
            par['q2'].vary = False
            par['p'].vary = False

            res = lmfit.minimize(residual_lm, par, args=args[:4])
            if res.success:
                self.map_par = res.params.copy()
                print("Initial L-M least squares fit successful")
                if verbose:
                    lmfit.report_fit(res.params, show_correl=False)
                print("Transit depth: {0:.0f} [ppm]".format(
                    res.params['k'].value**2 * 1e6))
                print("Transit duration: {0:.2f} [h]".format(self.t14 * 24))
                par = res.params.copy()
                lm_logprob = logprob_lm(par, *args)
                print("Log-probability: {}".format(lm_logprob))
            else:
                print("Initial L-M least squares fit failed")
            par['t0'].vary = False
            par['ls'].vary = True

        mini = lmfit.Minimizer(lambda *x: -logprob_lm(*x),
                               par,
                               fcn_args=args,
                               nan_policy='propagate')
        try:
            res = mini.minimize(method='nelder-mead')
        except Exception:
            print("Nelder-Mead failed, attempting Powell minimization")
            res = mini.minimize(method='powell')

        if verbose:
            print(res.success)
            lmfit.report_fit(res.params)
            print("Transit depth: {0:.0f} [ppm]".format(
                res.params['k'].value**2 * 1e6))
            print("Transit duration: {0:.2f} [h]".format(self.t14 * 24))
            map_logprob = logprob_lm(res.params, *args)
            print("Log-probability: {}".format(lm_logprob))
        if not res.success:
            print("WARNING: fit unsuccessful")
            self.map_par = par
        else:
            self.map_par = res.params.copy()

        self.map_par['t0'].vary = True
        self.map_par['k'].vary = True
        self.map_par['r'].vary = True
        self.map_par['q1'].vary = True
        self.map_par['q2'].vary = True
        self.map_par['p'].vary = True
Code example #14
File: fit.py, project: john-livingston/transitfit
    def fit_mcmc(self,
                 steps=1000,
                 nwalkers=100,
                 two_stage=False,
                 nproc=1,
                 use_priors='all',
                 verbose=True,
                 vary_per=True):

        if self.map_par is not None:
            par = self.map_par.copy()
        else:
            par = get_par(get_theta(self.init_params), self.sm)

        if not vary_per:
            par['p'].vary = False

        theta = [v for k, v in par.items() if par[k].vary]
        ndim = len(theta)

        pos0 = sample_ball(theta, [1e-4] * ndim, nwalkers)

        args = copy.deepcopy(self.args)
        if use_priors == 'none':
            args[-1] = {}
        elif type(use_priors) is list or type(use_priors) is tuple:
            for prior in 'ld per t0 rho'.split():
                if prior not in use_priors:
                    args[-1].pop(prior)
        elif type(use_priors) == dict:
            args[-1] = use_priors

        mini = lmfit.Minimizer(logprob_lm, par, fcn_args=args)
        if two_stage:
            print("Running stage 1 MCMC (250 steps)...")
            res = mini.emcee(burn=0,
                             steps=250,
                             thin=1,
                             pos=pos0,
                             workers=nproc)
            highest_prob = np.argmax(res.lnprob)
            hp_loc = np.unravel_index(highest_prob, res.lnprob.shape)
            theta = res.chain[hp_loc]
            pos0 = sample_ball(theta, [1e-5] * ndim, nwalkers)

        print("Running production MCMC for {} steps...".format(steps))
        res = mini.emcee(burn=0, steps=steps, thin=1, pos=pos0, workers=nproc)
        self.res_mcmc = res

        highest_prob = np.argmax(res.lnprob)
        hp_loc = np.unravel_index(highest_prob, res.lnprob.shape)
        self.map_soln = res.chain[hp_loc]

        par = res.params.copy()
        par_vary = [k for k, v in par.items() if par[k].vary]
        for k, v in zip(par_vary, self.map_soln):
            par[k].set(v)

        self.map_par = par
        nbv = self.sm.nbv
        self.sm.parameter_vector = self.map_soln[-nbv:]

        if not self.map_par['p'].vary:
            par = self.map_par.copy()
            par_names = list(par.valuesdict().keys())
            idx = par_names.index('p')
            self.best = list(self.map_soln[:idx]) + [par['p'].value] + list(
                self.map_soln[idx:])
        else:
            self.best = self.map_soln

        if verbose:
            lmfit.report_fit(res.params, show_correl=False)
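
Extracting the maximum a posteriori sample from the chain via argmax over lnprob, as done twice above, works on any lmfit emcee result. A compact self-contained version (toy data; requires the emcee package):

import lmfit
import numpy as np

x = np.linspace(0, 1, 30)
y = 2.0 * x + np.random.normal(scale=0.1, size=x.size)

def residual(p):
    return (p["a"] * x - y) / 0.1

params = lmfit.Parameters()
params.add("a", value=1.0, min=0, max=5)

mini = lmfit.Minimizer(residual, params)
res = mini.emcee(steps=400, nwalkers=40, burn=100, progress=False)

# index of the highest log-probability sample in the (step, walker) grid
hp_loc = np.unravel_index(np.argmax(res.lnprob), res.lnprob.shape)
map_theta = res.chain[hp_loc]  # parameter vector at the MAP sample
par = res.params.copy()
for name, value in zip([k for k in par if par[k].vary], map_theta):
    par[name].set(value=value)
print(par["a"].value)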
Code example #15
        p['p4'] * phase_hst**3. + p['p5'] * phase_hst**4. + 1.0
    )  #Simple transit model is baseline flux X transit model
    return model


def model_fine(p):
    params.rp = p['rprs'].value  # Set Batman rprs to new fit rprs
    params.t0 = p['t0'].value  # Set Batman rprs to new fit rprs
    m = batman.TransitModel(params, t_fine)  #initializes model
    transit_mjd = m.light_curve(params)  #Calculates Fine-Grid Tranist model
    model_fine = transit_mjd * p['f0']
    return model_fine


# create Minimizer
mini = lmfit.Minimizer(residual, p, nan_policy='omit')
# first solve with Nelder-Mead
#out1 = mini.minimize(method='Nelder')
# then solve with Levenberg-Marquardt using the
# Nelder-Mead solution as a starting point
# https://lmfit.github.io/lmfit-py/fitting.html
result = mini.minimize(method='leastsq')
#result = mini.minimize(method='leastsq', params=out1.params)
print(dir(result))  # view all attributes of the result object
print("redchi", result.redchi)
print("chi2", result.chisqr)
print("nfree", result.nfree)
print("bic", result.bic)
print("aic", result.aic)
print("L-M FIT Variable")
print(lmfit.fit_report(result.params))
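
The commented-out two-stage approach (Nelder-Mead first, then Levenberg-Marquardt starting from that solution) is the chaining described on the linked lmfit page; spelled out on toy data:

import lmfit
import numpy as np

t = np.linspace(0, 1, 50)
flux = 0.3 + 2.0 * t + np.random.normal(scale=0.05, size=t.size)

def residual(p):
    return p["f0"] * (1 + p["slope"] * t) - flux

p = lmfit.Parameters()
p.add("f0", value=1.0)
p.add("slope", value=0.0)

mini = lmfit.Minimizer(residual, p, nan_policy='omit')
out1 = mini.minimize(method='nelder')  # robust first pass
result = mini.minimize(method='leastsq', params=out1.params)  # refine, get errors
print(lmfit.fit_report(result))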
Code example #16
def main():

    breaks = np.array(
        [14.3076, 14.5415, 14.5760, 14.5889, 14.6051, 14.4015, 14.5930])
    breaks_e = np.array(
        [0.0002, 0.0005, 0.0004, 0.0003, 0.0005, 0.0001, 0.0003])

    slope1s = np.array([0.0006, 0.0317, 0.0475, 0.0116, 0.0297, 0.0458])
    slope1s_e = np.array([0.0001, 0.0002, 0.0001, 0.0001, 0.0001, 0.0001])

    slope2s = np.array(
        [0.2105, 0.1538, 0.2799, 0.2245, 0.1853, 0.2244, 0.2043])
    slope2s_e = np.array(
        [0.0001, 0.0003, 0.0005, 0.0004, 0.0005, 0.0001, 0.0004])

    print(np.mean(breaks), np.sqrt(np.std(breaks)**2 + np.sum(breaks_e**2)))
    print(np.mean(slope1s), np.sqrt(np.std(slope1s)**2 + np.sum(slope1s_e**2)))
    print(np.mean(slope2s), np.sqrt(np.std(slope2s)**2 + np.sum(slope2s_e**2)))

    # slope1_e = np.array([0.001398, 0.001097, 0.001361, 0.000996, 0.001060, 0.001053, 0.001044])
    # slope1s = np.array([0.273295, 0.118976, 0.135172, 0.152629, 0.078017, 0.249216, 0.141438])
    # print(np.mean(slopes), np.sqrt(np.std(slopes)**2 + np.sum(slope_e**2)))
    # print(np.std(slopes))
    # exit()

    root_dir = "../data/"
    # OBs = ["OB1_stitched", "OB3_stitched", "OB4_stitched", "OB5_stitched", "OB6_stitched", "OB7_stitched", "OB8_stitched"]
    # OBs = ["OB1_stitched", "OB3_stitched"]
    OBs = ["weighted_spectrum"]
    fig, (ax1, ax2) = pl.subplots(nrows=2,
                                  ncols=1,
                                  sharex=True,
                                  gridspec_kw={
                                      'height_ratios': [3, 1],
                                      'hspace': 0
                                  })

    amp = []
    slope = []
    for ii in range(len(OBs)):

        data = np.genfromtxt("../data/%s.dat" % OBs[ii])
        wl, flux, error = data[:, 0], data[:, 1], data[:, 2]
        mask = ((wl > 11600) & (wl < 22500)) | ((wl > 5650) &
                                                (wl < 9800)) | ((wl > 3200) &
                                                                (wl < 5550))
        flux[((wl > 13300) & (wl < 15100)) | ((wl > 17400) &
                                              (wl < 20500))] = None
        mask = (mask & ~np.isnan(flux))  # & (flux/error > 30)
        # tell_mask = ((wl > 13300) & (wl < 15100)) | ((wl > 17400) & (wl < 20000))
        wl, flux, error = wl[mask], flux[mask], error[mask]

        nu = (wl * u.angstrom).to(u.Hz, equivalencies=u.spectral())
        f_nu = (flux * u.erg * u.cm**-2 * u.s**-1 * u.angstrom**-1).to(
            u.erg * u.cm**-2 * u.s**-1 * u.Hz**-1,
            equivalencies=u.spectral_density(wl * u.angstrom))
        # f_nu_e = (error * u.erg / u.cm**2 / u.s / u.angstrom).to(u.erg / u.cm**2 / u.s / u.Hz, equivalencies=u.spectral_density(wl * u.angstrom))
        f_nu_el = ((flux - error) * u.erg * u.cm**-2 * u.s**-1 *
                   u.angstrom**-1).to(u.erg * u.cm**-2 * u.s**-1 * u.Hz**-1,
                                      equivalencies=u.spectral_density(
                                          wl * u.angstrom))
        f_nu_eh = ((flux + error) * u.erg * u.cm**-2 * u.s**-1 *
                   u.angstrom**-1).to(u.erg * u.cm**-2 * u.s**-1 * u.Hz**-1,
                                      equivalencies=u.spectral_density(
                                          wl * u.angstrom))
        f_nu_e = np.mean(
            [f_nu.value - f_nu_el.value, f_nu_eh.value - f_nu.value], axis=0)

        ax1.errorbar(np.log10(nu.value),
                     np.log10(f_nu.value),
                     yerr=[
                         np.log10(f_nu.value) - np.log10(f_nu_el.value),
                         np.log10(f_nu_eh.value) - np.log10(f_nu.value)
                     ],
                     fmt=".",
                     color="black",
                     alpha=0.2,
                     rasterized=True)
        # pl.plot(np.log10(nu.value), medfilt(np.log10(f_nu_e.value), 21))
        # pl.show()
        # exit()
        NUV = np.genfromtxt("../data/crabNUV.txt")
        NUV_wl, NUV_flux = NUV[50:-20, 0], medfilt(NUV[50:-20, 1], 1)
        dust_ext = correct_for_dust(NUV_wl, 0.52)

        NUV_nu = (NUV_wl * u.angstrom).to(u.Hz, equivalencies=u.spectral())
        NUV_f_nu = (dust_ext * NUV_flux * u.erg * u.cm**-2 * u.s**-1 *
                    u.angstrom**-1).to(u.erg * u.cm**-2 * u.s**-1 * u.Hz**-1,
                                       equivalencies=u.spectral_density(
                                           NUV_wl * u.angstrom))

        ax1.plot(np.log10(NUV_nu.value),
                 np.log10(NUV_f_nu.value),
                 zorder=1,
                 rasterized=True)


        mask = (flux <= 1e-16)  #| (flux/error < 10) #| (wl > 10000)
        x, y, yerr = nu.value[~mask], f_nu.value[~mask], f_nu_e[~mask]
        logx = np.log10(x)
        logx_n = np.arange(12, 16, 0.001)[::-1]
        # print(logx, logx_n)
        # exit()
        logy = np.log10(y)
        logyerr = np.log10(np.array(y) + np.array(yerr)) - logy

        # print(logx)
        # exit()

        p = lmfit.Parameters()
        #           (Name,  Value,  Vary,   Min,  Max,  Expr)
        p.add_many(('amp_pow', -40, True, -np.inf, np.inf),
                   ('break_pos', 14.5, True, 14, 15),
                   ('slope1_pow', 0.27, True, 0, 1),
                   ('slope2_pow', 0.27, True, 0, 1))

        mi = lmfit.minimize(residual,
                            p,
                            method='leastsq',
                            args=(logx, logy, logyerr))
        lmfit.report_fit(mi)

        # print(lmfit.report_fit(mi.params))
        # ax1.plot(logx, residual(mi.params, logx), lw = 3, zorder=2)
        # pl.ylim((-1e-18, 1e-16))
        # pl.show()
        # exit()

        def lnprob(pars):
            """
            This is the log-likelihood probability for the sampling.
            """
            model = residual(pars, logx)
            return -0.5 * np.sum((
                (model - logy) / logyerr)**2 + np.log(2 * np.pi * logyerr**2))

        mini = lmfit.Minimizer(lnprob, mi.params)

        nwalkers = 100
        v = mi.params.valuesdict()
        res = mini.emcee(nwalkers=nwalkers,
                         burn=100,
                         steps=1000,
                         thin=1,
                         ntemps=20,
                         is_weighted=True,
                         params=mi.params,
                         seed=12345)
        print(mini.sampler.thermodynamic_integration_log_evidence())

        for i in ["amp_pow", "break_pos", "slope1_pow", "slope2_pow"]:
            mcmc = np.percentile(res.flatchain[i], [16, 50, 84])
            q = np.diff(mcmc)
            print(mcmc[1], q[0], q[1])

        p_lo, p_hi = 50 - 99.9999999 / 2, 50 + 99.9999999 / 2

        out_arr = add_powerlaws(res.flatchain, logx_n)
        # out_arr = out_arr/np.median(out_arr, axis=0)
        ax1.plot(logx_n,
                 np.percentile(out_arr, 50, axis=0),
                 lw=2,
                 zorder=10,
                 color="firebrick",
                 linestyle="dashed",
                 rasterized=True)
        ax1.fill_between(logx_n,
                         np.percentile(out_arr, p_lo, axis=0),
                         np.percentile(out_arr, p_hi, axis=0),
                         alpha=0.5,
                         zorder=9,
                         color="firebrick",
                         rasterized=True)

        bf = np.percentile(out_arr, 50, axis=0)
        f = interpolate.interp1d(logx_n,
                                 bf,
                                 bounds_error=False,
                                 fill_value=np.nan)

        ax2.errorbar(np.log10(nu.value),
                     np.log10(f_nu.value) - f(np.log10(nu.value)),
                     yerr=[
                         np.log10(f_nu.value) - np.log10(f_nu_el.value),
                         np.log10(f_nu_eh.value) - np.log10(f_nu.value)
                     ],
                     fmt=".",
                     color="black",
                     alpha=0.2,
                     rasterized=True)
        ax2.plot(np.log10(NUV_nu.value),
                 medfilt(
                     np.log10(NUV_f_nu.value) - f(np.log10(NUV_nu.value)), 11),
                 zorder=1,
                 rasterized=True)

        ax2.plot(logx_n,
                 bf - bf,
                 lw=2,
                 zorder=10,
                 color="firebrick",
                 linestyle="dashed",
                 rasterized=True)
        ax2.fill_between(logx_n,
                         np.percentile(out_arr, p_lo, axis=0) - bf,
                         np.percentile(out_arr, p_hi, axis=0) - bf,
                         alpha=0.5,
                         zorder=9,
                         color="firebrick",
                         rasterized=True)

    # print(out_arr.shape)
    ax1.set_xlim(14, 15.3)
    ax1.set_ylim(-25.99, -25.01)
    ax2.set_ylim(-0.05, 0.10)

    ax2.set_xlabel(r"$\log (\nu/\mathrm{Hz})$")
    ax1.set_ylabel(
        r'$\log (F_\nu / \mathrm{erg}~\mathrm{s}^{-1}~\mathrm{cm}^{-2}~\mathrm{Hz}^{-1})$'
    )

    ax2.set_ylabel(r'$\delta \log F_\nu$')

    # Add wavelength axis

    ax3 = ax1.twiny()

    # get axis limits
    xmin, xmax = ax1.get_xlim()
    ax3.set_xlim((xmin, xmax))

    def angstrom_to_lognu(angs):
        # convert wavelength in Angstrom to log10(frequency in Hz)
        return np.log10(3e18 / angs)

    wl_arr = np.array([2000, 3000, 5000, 9000, 15000, 25000])
    ax3.set_xticks(angstrom_to_lognu(wl_arr))
    ax3.set_xticklabels(wl_arr)

    ax3.set_xlabel(r"$ \lambda_{\mathrm{obs}}/\mathrm{\AA}$")

    pl.tight_layout()
    pl.savefig("../figures/power_broken_law.pdf")

    # pl.show()
    pl.clf()

    import corner
    corner.corner(res.flatchain,
                  labels=[
                      "k", r"$\nu_\mathrm{break}$", r"$\alpha_{\nu,1}$",
                      r"$\alpha_{\nu,2}$"
                  ],
                  quantiles=[0.16, 0.5, 0.84],
                  show_titles=True)
    pl.savefig("../figures/Cornerplot_broken_powerlaw.pdf", clobber=True)
Code example #17
import lmfit
import matplotlib.pyplot as plt
import numpy as np

x = np.linspace(1, 10, 250)
np.random.seed(0)
y = 3.0 * np.exp(-x / 2) - 5.0 * np.exp(
    -(x - 0.1) / 10.) + 0.1 * np.random.randn(x.size)

p = lmfit.Parameters()
p.add_many(('a1', 4.), ('a2', 4.), ('t1', 3.), ('t2', 3.))


def residual(p):
    return p['a1'] * np.exp(-x / p['t1']) + p['a2'] * np.exp(
        -(x - 0.1) / p['t2']) - y


# create Minimizer
mini = lmfit.Minimizer(residual, p, nan_policy='propagate')

# first solve with Nelder-Mead algorithm
out1 = mini.minimize(method='Nelder')

# then solve with Levenberg-Marquardt using the
# Nelder-Mead solution as a starting point
out2 = mini.minimize(method='leastsq', params=out1.params)

lmfit.report_fit(out2.params, min_correl=0.5)

ci, trace = lmfit.conf_interval(mini, out2, sigmas=[1, 2], trace=True)
lmfit.printfuncs.report_ci(ci)

# plot data and best fit
plt.figure()
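
The snippet ends just as the figure is created. A plausible continuation, assuming the names defined above (x, y, residual, out2) and following the same pattern as Code example #22; the original ending is not shown here.

plt.plot(x, y, 'b', label='data')
# residual(p) returns model - y, so adding y back recovers the model
plt.plot(x, residual(out2.params) + y, 'r', label='best fit')
plt.legend()
plt.show()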
Code example #18
def GaussianFit(nxs_filename,
                recording_dir,
                xLabel='',
                yLabel='',
                verbose=False):
    '''
    Fit sensor_x vs sensor_y with a Gaussian.

    Parameters
    ----------
    nxs_filename : str
        nexus filename
    recording_dir : str
        directory where the nexus file is stored
    xLabel : str
        exact name of the x sensor, as it appears in the data stamps
    yLabel : str
        exact name of the y sensor, as it appears in the data stamps        
    verbose : bool, optional
        verbose mode

    Returns
    -------
    array_like
        xData, an array containing the list of x values
    array_like
        yData, an array containing the list of y values
    array_like
        yFit, an array containing the list of fitted y values

    Raises
    ------
    SystemExit('Nexus not found')
        when Nexus file not found
    SystemExit('Sensor not found')
        when sensor not found
    '''

    xData, yData = Extract(nxs_filename,
                           recording_dir,
                           xLabel,
                           yLabel,
                           show_data_stamps=False,
                           verbose=verbose)

    # Create the graph
    fig = plt.figure(1, figsize=(12, 5))
    ax = fig.add_subplot(111)

    # Fit the data using LMFIT
    # Define the parameters and first guess
    fitparams = lm.Parameters()
    nbpts = xData.shape[0]
    B = (yData[nbpts - 1] - yData[0]) / (xData[nbpts - 1] - xData[0])
    A = yData[nbpts - 1] - B * xData[nbpts - 1]
    fitparams.add_many(
        ('Linear_Cste', A, True, -np.inf, yData.max() * 1.0, None),
        ('Linear_Coeff', B, True, -10 * B, 10 * B, None),
        ('Amplitude', yData.max(), True, 0.0, yData.max() * 1.1, None),
        ('sigma', np.abs(xData.max() - xData.min()) / 3., True, 0.0,
         xData.max() - xData.min(), None),
        ('mu', (xData.min() + xData.max()) / 2., True, xData.min(),
         xData.max(), None),
    )

    # Fit initialisation and fit
    fitter = lm.Minimizer(ResidualsNormFunction,
                          fitparams,
                          fcn_args=(xData, yData))
    result = fitter.minimize()

    # Print result if asked via verbose
    if verbose:
        print(lm.fit_report(result))

    yFit = NormalFunction(xData, result.params['mu'], result.params['sigma'],
                          result.params['Linear_Cste'],
                          result.params['Linear_Coeff'],
                          result.params['Amplitude'])

    # Plot first guess
    #ax.plot(xData,NormalFunction(xData,fitparams['mu'], fitparams['sigma'], fitparams['Linear_Cste'], fitparams['Linear_Coeff'], fitparams['Amplitude']), 'k--', lw=1)

    # plot the fitted data
    ax.plot(xData,
            yData,
            'o-',
            label=nxs_filename[nxs_filename.rfind('_') +
                               1:nxs_filename.rfind('.')])
    # plot the fit result
    ax.plot(xData, yFit, 'r-', lw=2)
    ax.legend(fontsize=16)
    ax.set_xlabel(xLabel, fontsize=16)
    ax.set_ylabel(yLabel, fontsize=16)
    ax.tick_params(labelsize=16)
    ax.yaxis.offsetText.set_fontsize(16)
    ax.text(xData.min() * 1.05,
            yData.max() * 0.75,
            'Center %3.4g' % (result.params['mu']),
            fontsize=14)
    ax.text(xData.min() * 1.05,
            yData.max() * 0.65,
            'FWHM %3.4g' %
            (2.0 * np.sqrt(2.0 * np.log(2.)) * result.params['sigma']),
            fontsize=14)
    ax.set_title('Gaussian Fit', fontsize=14)

    plt.show()

    return xData, yData, yFit
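
GaussianFit calls two helpers, NormalFunction and ResidualsNormFunction, that live elsewhere in the module. A minimal sketch consistent with how they are called above (a Gaussian on a linear background); the real implementations may differ.

import numpy as np


def NormalFunction(x, mu, sigma, cste, coeff, amplitude):
    # Gaussian of given amplitude and width on a linear background (sketch)
    gauss = amplitude * np.exp(-(x - mu)**2 / (2.0 * sigma**2))
    return cste + coeff * x + gauss


def ResidualsNormFunction(params, x, y):
    # Residuals between the data and NormalFunction (sketch)
    v = params.valuesdict()
    return y - NormalFunction(x, v['mu'], v['sigma'], v['Linear_Cste'],
                              v['Linear_Coeff'], v['Amplitude'])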
Code example #19
    def run(
        self, fit_method="leastsq", fixpars=None, use_previous=True, **fit_kws
    ):

        # initial parameters; if a previous fit doesn't exist, use the defaults
        if self.optres is None:
            init_pars = self.params.copy()
        else:
            # Otherwise, use previous fits. Useful for MCMC
            init_pars = self.optres.params.copy()

        # do "reasonable" initial guesses
        y_at_xmax = self.y[np.argmax(self.x)].mean()
        y_at_xmin = self.y[np.argmin(self.x)].mean()
        init_pars["Kd"].value = geometric_mean(self.x)
        init_pars["offset"].value = y_at_xmin

        if "alpha" in init_pars.keys():
            if self.modality == "TPF":
                init_pars["alpha"].value = (y_at_xmax - y_at_xmin) / y_at_xmin
            elif self.modality == "SHG":
                init_pars["alpha"].value = (
                    np.sqrt((y_at_xmax - y_at_xmin) / 100.0 + 1) - 1.0
                )

        if "Amplitude" in init_pars.keys():
            init_pars["Amplitude"].value = y_at_xmax - y_at_xmin

        # if 'fixpars' is passed, the named parameters are fixed at the given values
        if fixpars is not None:
            if isinstance(fixpars, dict):
                for par_name, par_value in fixpars.items():
                    if par_name in init_pars.keys():
                        init_pars[par_name].value = par_value
                        init_pars[par_name].vary = False
                    else:
                        raise KeyError(
                            "Parameter {:s} is not recognized.".format(par_name)
                        )
            elif fixpars == "all":
                for par_name in init_pars.keys():
                    init_pars[par_name].vary = False
            else:
                raise ValueError("fixpars argument is not recognized.")

        else:
            # if no parameters are fixed, check if using previous result
            if use_previous and self.optres is not None:
                init_pars = self.optres.params.copy()

        if self.valid:
            # if the model is valid, try to do the fitting
            try:
                if fit_method == "emcee":
                    # if using emcee, check if residual is weighted by known
                    # sigmas (passed to object as 'sy')
                    emceepars = {"is_weighted": self.weighted}
                    minpars = {**fit_kws, **emceepars}
                else:
                    minpars = fit_kws

                self.minimizer = lmfit.Minimizer(
                    userfcn=self.fcn,
                    params=init_pars,
                    fcn_args=(self.x,),
                    fcn_kws={
                        "data": self.y,
                        "sigma": self.sy,
                        "modality": self.modality,
                    },
                )

                self.optres = self.minimizer.minimize(
                    method=fit_method, **minpars
                )

            except ValueError:
                print("####### Oops something happened here. DEBUG #######")
                print("Initial parameters")
                for par_name, par in init_pars.items():
                    print(par_name, par.value)
                raise

            self.model = self._form_model(
                self.optres.params, modality=self.modality
            )

            # use self.model to form ideal/fitted curve
            self._form_ideal()

        else:
            # an invalid model is not fitted, so all parameters are fixed,
            # but we still need to run the minimizer to get a result
            init_pars = self.params.copy()
            for par_name in init_pars.keys():
                init_pars[par_name].vary = False

            self.minimizer = lmfit.Minimizer(
                userfcn=self.fcn,
                params=init_pars,
                fcn_args=(self.x,),
                fcn_kws={
                    "data": self.y,
                    "sigma": self.sy,
                    "modality": self.modality,
                },
            )

            self.optres = self.minimizer.minimize(method=fit_method, **fit_kws)

            for param_key in self.optres.params.keys():
                self.optres.params[param_key].value = np.nan
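
A hypothetical usage sketch for run(); the object construction and the exact parameter names (Kd, offset) are taken from the code above, but obj itself is assumed.

# least-squares fit first, then refine the posterior with emcee while
# pinning the offset (fixpars fixes a parameter at the given value)
obj.run(fit_method="leastsq")
obj.run(fit_method="emcee", fixpars={"offset": 0.0}, steps=2000)
print(obj.optres.params["Kd"].value)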
Code example #20
def GaussianRepartitionFit(nxs_filename,
                           recording_dir,
                           xLabel='',
                           yLabel='',
                           verbose=False):
    '''
    Fit sensor_x vs sensor_y with an Erf function.

    Parameters
    ----------
    nxs_filename : str
        nexus filename
    recording_dir : str
        directory where the nexus file is stored
    xLabel : str
        exact name of the x sensor, as it appears in the data stamps
    yLabel : str
        exact name of the y sensor, as it appears in the data stamps        
    verbose : bool, optional
        verbose mode

    Returns
    -------
    array_like
        xData, an array containing the list of x values
    array_like
        yData, an array containing the list of y values
    array_like
        yFit, an array containing the list of fitted y values

    Raises
    ------
    SystemExit('Nexus not found')
        when Nexus file not found
    SystemExit('Sensor not found')
        when sensor not found
    '''

    xData, yData = Extract(nxs_filename,
                           recording_dir,
                           xLabel,
                           yLabel,
                           show_data_stamps=False,
                           verbose=verbose)

    # Find the decrease direction
    if xData[0] < xData[xData.shape[0] - 1]:
        if yData[0] < yData[xData.shape[0] - 1]:
            sens = 1.
        else:
            sens = -1.
    else:
        if yData[0] > yData[xData.shape[0] - 1]:
            sens = -1.
        else:
            sens = 1.

    # Fit the data using LMFIT
    # Define the parameters and first guess
    fitparams = lm.Parameters()
    fitparams.add_many(
        ('Constant_Coeff', yData.min(), True, yData.min() * 0.9,
         yData.max() * 1.1, None),
        ('Amplitude', yData.max() - yData.min(), True, 0.0, yData.max() * 1.1,
         None),
        ('sigma', np.abs(xData.max() - xData.min()) / 3., True, 0.0,
         xData.max() - xData.min(), None),
        ('mu', (xData.min() + xData.max()) / 2., True, xData.min(),
         xData.max(), None),
    )

    # Fit initialisation and fit
    fitter = lm.Minimizer(ResidualsNormRepFunction,
                          fitparams,
                          fcn_args=(xData, yData, sens))
    result = fitter.minimize()
    # Print result if asked via verbose
    if verbose:
        print(lm.fit_report(result))

    yFit = NormRepFunction(xData, result.params['mu'], result.params['sigma'],
                           result.params['Amplitude'],
                           result.params['Constant_Coeff'], sens)

    # Create the graph
    fig = plt.figure(1, figsize=(12, 5))
    ax = fig.add_subplot(111)

    # Plot first guess
    #ax.plot(xData,NormRepFunction(xData,fitparams['mu'], fitparams['sigma'], fitparams['Amplitude'], fitparams['Constant_Coeff'], sens), 'k--', lw=1)

    # Plot the fitted data
    ax.plot(xData,
            yData,
            'o-',
            label=nxs_filename[nxs_filename.rfind('_') +
                               1:nxs_filename.rfind('.')])
    # Plot the fit result
    ax.plot(xData, yFit, 'r-', lw=2)

    # Plot the associated gaussian function
    ax2 = ax.twinx()
    ax2.plot(xData,
             NormalFunction(xData, result.params['mu'], result.params['sigma'],
                            0.0, 0.0, result.params['Amplitude']),
             'b-',
             lw=1)
    ax.legend(fontsize=16)
    ax.set_xlabel(xLabel, fontsize=16)
    ax.set_ylabel(yLabel, fontsize=16)
    ax.tick_params(labelsize=16)
    ax.yaxis.offsetText.set_fontsize(16)
    ax2.yaxis.offsetText.set_fontsize(16)
    ax2.tick_params(labelsize=16)
    if sens == 1:
        fig.text(0.2,
                 0.65,
                 'Center %3.4g' % (result.params['mu']),
                 fontsize=14)
        fig.text(0.2,
                 0.60,
                 'FWHM %3.4g' %
                 (2.0 * np.sqrt(2.0 * np.log(2.)) * result.params['sigma']),
                 fontsize=14)
    else:
        fig.text(0.7,
                 0.60,
                 'Center %3.4g' % (result.params['mu']),
                 fontsize=14)
        fig.text(0.7,
                 0.55,
                 'FWHM %3.4g' %
                 (2.0 * np.sqrt(2.0 * np.log(2.)) * result.params['sigma']),
                 fontsize=14)
    ax.set_title('Normal Repartition Function Fit', fontsize=14)

    plt.show()

    return xData, yData, yFit
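
GaussianRepartitionFit similarly relies on NormRepFunction (and reuses NormalFunction from above). A minimal sketch assuming an Erf step of the stated amplitude and direction; the real implementation may differ.

import numpy as np
from scipy.special import erf


def NormRepFunction(x, mu, sigma, amplitude, cste, sens):
    # Cumulative Gaussian (Erf) step on a constant background (sketch);
    # sens = +1 for a rising edge, -1 for a falling edge
    return cste + 0.5 * amplitude * (1.0 + sens * erf((x - mu) /
                                                      (sigma * np.sqrt(2.0))))


def ResidualsNormRepFunction(params, x, y, sens):
    # Residuals between the data and NormRepFunction (sketch)
    v = params.valuesdict()
    return y - NormRepFunction(x, v['mu'], v['sigma'], v['Amplitude'],
                               v['Constant_Coeff'], sens)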
Code example #21
def bayes_fit(xdata, ydata, distribution, burn=100, steps=1000, thin=20):
    """Identify and fit an arbitrary number of peaks in a 1-d spectrum array.

    Parameters
    ----------
    xdata : 1-d array
        X data.

    ydata : 1-d array
        Y data.

    distribution : peak-shape model
        Passed through to the residual function.

    burn, steps, thin : int, optional
        MCMC burn-in length, number of steps per walker, and thinning factor.

    Returns
    -------
    results : lmfit.MinimizerResults.
        results of the fit. To get parameters, use `results.params`.
    """
    # Identify peaks
    index = find_peaks_cwt(ydata, widths=np.arange(1, 100))

    # Number of peaks
    n_peaks = len(index)

    # Construct initial guesses
    parameters = lmfit.Parameters()

    for peak_i in range(n_peaks):
        idx = index[peak_i]

        # Add center parameter
        parameters.add(name='peak_{}_center'.format(peak_i), value=xdata[idx])

        # Add height parameter
        parameters.add(name='peak_{}_height'.format(peak_i), value=ydata[idx])

        # Add width parameter
        parameters.add(
            name='peak_{}_width'.format(peak_i),
            value=.1,
        )

    # Minimize the above residual function.
    ML_results = lmfit.minimize(residual,
                                parameters,
                                args=[distribution, xdata],
                                kws={'ydata': ydata})

    # Add a noise term for the Bayesian fit
    ML_results.params.add('noise', value=1, min=0.001, max=2)

    # Define the log probability expression for the emcee fitter
    def lnprob(params=ML_results.params):
        noise = params['noise']
        return -0.5 * np.sum((residual(params, distribution, xdata, ydata) /
                              noise)**2 + np.log(2 * np.pi * noise**2))

    # Build a minizer object for the emcee search
    mini = lmfit.Minimizer(lnprob, ML_results.params)

    # Use the emcee version of minimizer class to perform MCMC sampling
    bayes_results = mini.emcee(burn=burn,
                               steps=steps,
                               thin=thin,
                               params=ML_results.params)

    return bayes_results, parameters
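
bayes_fit assumes a module-level residual function. A minimal sketch consistent with the calls above, treating distribution as a callable peak profile distribution(x, center, height, width); both the signature and the peak parametrisation are assumptions.

import numpy as np


def residual(params, distribution, xdata, ydata=None):
    # Sum one peak profile per peak_i parameter triplet (sketch)
    n_peaks = sum(1 for name in params if name.endswith('_center'))
    model = np.zeros_like(xdata, dtype=float)
    for i in range(n_peaks):
        model += distribution(xdata,
                              params['peak_{}_center'.format(i)].value,
                              params['peak_{}_height'.format(i)].value,
                              params['peak_{}_width'.format(i)].value)
    if ydata is None:
        return model
    return model - ydata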
Code example #22
File: doc_fitting_emcee.py  Project: yxs33/lmfit-py
    plt.plot(x, y, 'b')
    plt.plot(x, residual(mi.params) + y, 'r')
    # plt.savefig('../doc/_images/emcee_dbl_exp2.png')
    plt.show()

# add a noise parameter
mi.params.add('noise', value=1, min=0.001, max=2)


def lnprob(p):
    noise = p['noise']
    return -0.5 * np.sum((residual(p) / noise)**2 +
                         np.log(2 * np.pi * noise**2))


mini = lmfit.Minimizer(lnprob, mi.params)
res = mini.emcee(burn=300, steps=1000, thin=20, params=mi.params)

if HASPYLAB and HASCORNER:
    emcee_corner = corner.corner(res.flatchain,
                                 labels=res.var_names,
                                 truths=list(res.params.valuesdict().values()))
    # emcee_corner.savefig('../doc/_images/emcee_corner.png')
    plt.show()

print("\nmedian of posterior probability distribution")
print('--------------------------------------------')
lmfit.report_fit(res.params)

# find the maximum likelihood solution
highest_prob = np.argmax(res.lnprob)
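
The snippet stops right after locating the index of the highest-probability sample. A likely continuation, following the pattern of the lmfit documentation example this file is based on; treat it as a sketch.

# map the flat index back into the chain and read off the MLE parameters
hp_loc = np.unravel_index(highest_prob, res.lnprob.shape)
mle_soln = res.chain[hp_loc]
print("\nMaximum Likelihood Estimation")
print('-----------------------------')
for ix, name in enumerate(res.var_names):
    print('{}: {}'.format(name, mle_soln[ix]))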
Code example #23
def LMminimizer(guess_dict: dict, data_dict: dict, method: str = 'leastsq', hops: int = 10,
                steps: int = 1000, walkers: int = 100, burn: int = 100, thin: int = 20,
                as_weight: float = None,
                lock_g: bool = None, lock_q: bool = None):
    """
    Minimizes the provided data to a binary star model, with initial provided guesses and a search
    radius
    :param as_weight: weight to give to the astrometric data, optional.
    :param hops: int designating the number of hops if basinhopping is selected
    :param method: string to indicate what method to be used, 'leastsq', 'basinhopping' or 'emcee'
    :param guess_dict: dictionary containing guesses and 'to-vary' flags for the 11 parameters
    :param data_dict: dictionary containing observational data of RV and/or separations
    :param steps: integer giving the number of steps each walker in the MCMC should perform
    :param walkers: integer giving the number of independent walkers to be running
    :param burn: integer giving the number of samples to be discarded ("burned") at the start
    :param thin: integer indicating to accept only one in every thin samples
    :param lock_g: boolean to indicate whether to lock gamma1 to gamma2
    :param lock_q: boolean to indicate whether to lock k2 to k1/q, and that q is supplied rather
    than k2 in that field
    :return: result from the lmfit minimization routine. It is a MinimizerResult object.
    """

    # setup data for the solver
    rv1s = None
    rv2s = None
    aas = None
    # we need to store this on module level so the function to minimize knows quickly which data is
    # included or not
    global RV1, RV2, AS
    RV1 = RV2 = AS = False
    global LAS, LRV
    LAS = LRV = 0
    if 'RV1' in data_dict:
        rv1s = data_dict['RV1']
        RV1 = True
        LRV = len(data_dict['RV1'])
    if 'RV2' in data_dict:
        rv2s = data_dict['RV2']
        RV2 = True
        LRV += len(data_dict['RV2'])
    if 'AS' in data_dict:
        aas = data_dict['AS']
        AS = True
        LAS = 2 * len(data_dict['AS'])
    # setup Parameters object for the solver
    params = lm.Parameters()
    # populate with parameter data
    params.add_many(
        ('e', guess_dict['e'][0], guess_dict['e'][1], 0, 1 - 1e-5),
        ('i', guess_dict['i'][0], guess_dict['i'][1]),
        ('omega', guess_dict['omega'][0], guess_dict['omega'][1]),
        ('Omega', guess_dict['Omega'][0], guess_dict['Omega'][1]),
        ('t0', guess_dict['t0'][0], guess_dict['t0'][1]),
        ('p', guess_dict['p'][0], guess_dict['p'][1], 0),
        ('mt', guess_dict['mt'][0], guess_dict['mt'][1], 0),
        ('d', guess_dict['d'][0], guess_dict['d'][1], 0),
        ('k1', guess_dict['k1'][0], guess_dict['k1'][1], 0),
        ('gamma1', guess_dict['gamma1'][0], guess_dict['gamma1'][1]),
        ('k2', guess_dict['k2'][0], guess_dict['k2'][1], 0),
        ('gamma2', guess_dict['gamma2'][0], guess_dict['gamma2'][1])
    )

    if lock_g:
        params['gamma2'].set(expr='gamma1')
    if lock_q:
        params.add('q', value=params['k1'] / params['k2'], vary=False)
        params['k2'].set(expr='k1/q')

    # set e to a non-zero value to avoid conditioning problems in the MCMC
    if params['e'].value < 1e-8:
        print('Warning: eccentricity set to 1e-8 to avoid conditioning issues!')
        params['e'].set(value=1e-8)

    if RV1 and RV2:
        if not AS:
            for key in 'd', 'i', 'Omega', 'mt':
                params[key].set(vary=False)
    elif RV1:
        for key in 'k2', 'gamma2', 'd':
            params[key].set(vary=False)
        if not AS:
            for key in 'i', 'Omega', 'mt':
                params[key].set(vary=False)
    elif AS:
        for key in 'k1', 'gamma1', 'k2', 'gamma2':
            params[key].set(vary=False)
    else:
        raise ValueError('No data supplied! Cannot minimize.')

    # build a minimizer object
    minimizer = lm.Minimizer(fcn2min, params, fcn_args=(rv1s, rv2s, aas, as_weight))
    print('Starting Minimization with {}{}{}...'.format('primary RV data, ' if RV1 else '',
                                                        'secondary RV data, ' if RV2 else '',
                                                        'astrometric data' if AS else ''))
    tic = time.time()
    if method == 'leastsq':
        result = minimizer.minimize()
    elif method == 'basinhopping':
        result = minimizer.minimize(method=method, disp=True, niter=hops, T=5,
                                    minimizer_kwargs={'method': 'Nelder-Mead'})
    elif method == 'emcee':
        localresult = minimizer.minimize()
        mcminimizer = lm.Minimizer(fcn2min, params=localresult.params,
                                   fcn_args=(rv1s, rv2s, aas))
        print('Starting MCMC sampling using the minimized parameters...')
        result = mcminimizer.emcee(steps=steps, nwalkers=walkers, burn=burn, thin=thin)
    else:
        print('method not implemented')
        return
    toc = time.time()
    print('Minimization Complete in {} s!\n'.format(toc - tic))
    lm.report_fit(result.params)
    print('\n')
    rms_rv1, rms_rv2, rms_as = 0, 0, 0
    system = System(result.params.valuesdict())
    if RV1:
        # weigh with number of points for RV1 data
        rms_rv1 = np.sqrt(
            np.sum((system.primary.radial_velocity_of_hjds(rv1s[:, 0]) - rv1s[:, 1]) ** 2) / len(
                rv1s[:, 1]))
    if RV2:
        # Same for RV2
        rms_rv2 = np.sqrt(
            np.sum(
                (system.secondary.radial_velocity_of_hjds(rv2s[:, 0]) - rv2s[:, 1]) ** 2) / len(
                rv2s[:, 1]))
    if AS:
        # same for AS
        omc2E = np.sum((system.relative.east_of_hjds(aas[:, 0]) - aas[:, 1]) ** 2)
        omc2N = np.sum((system.relative.north_of_hjds(aas[:, 0]) - aas[:, 2]) ** 2)
        rms_as = np.sqrt((omc2E + omc2N) / LAS)
    return result, rms_rv1, rms_rv2, rms_as
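
A hypothetical call sketch for LMminimizer; the guesses and file names are made up. Each guess is a (value, vary-flag) pair, and RV data arrays are assumed to carry HJD in column 0 and velocity in column 1, as used above.

import numpy as np

guess_dict = {
    'e': (0.3, True), 'i': (85., True), 'omega': (90., True),
    'Omega': (0., False), 't0': (2455000., True), 'p': (100., True),
    'mt': (10., False), 'd': (1000., False), 'k1': (30., True),
    'gamma1': (0., True), 'k2': (45., True), 'gamma2': (0., True),
}
data_dict = {'RV1': np.loadtxt('rv1.txt'), 'RV2': np.loadtxt('rv2.txt')}

result, rms_rv1, rms_rv2, rms_as = LMminimizer(guess_dict, data_dict,
                                               method='leastsq', lock_g=True)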
Code example #24
    def do_emcee(self, fit_keys, models_list, params_list=None, model_kwargs=None, param_kwargs=None, emcee_kwargs=None, **kwargs):
        r"""Run simulatneous MCMC sampling on the temp/pwr data for several
        parameters. Results are stored in either the ``emcee_results`` or
        ``emcee_joint_results`` attribute depending on whether one or multiple
        keys are passed to `fit_keys`.

        Parameters
        ----------
        fit_keys : list-like
            A list of keys that correspond to existing data. Any combination of
            keys from `self.keys()`` is acceptable, but duplicates are not
            permitted.

        models_list : list-like
            A list of fit functions, one per key in `fit_keys`. Function must
            return a residual of the form: ``residual = (model-data)/sigma``
            where ``residual``, ``model``, and ``data`` are all ``numpy``
            arrays. Function signature is ``model_func(params, temps, powers,
            data=None, sigmas=None)``. If ``data==None`` the functions must
            return the model calculated at ``temps`` and ``powers``. The model
            functions should also gracefully handle ``np.NaN`` or ``None``
            values.

        params_list : list-like
            A list of ``lmfit.Parameters`` objects, one for each key in
            `fit_keys`. Parameters sharing the same name will be merged so that
            the fit is truly joint. Alternately, a list of functions that return
            ``lmfit.Parameters`` objects may be passed. In this case, one should
            use `param_kwargs` to pass any needed options to the functions.
            Default is ``None`` and is equivalent to setting ``use_lmfit_params =
            True``.

        model_kwargs : list-like (optional)
            A list of ``dict`` objects to pass to the individual model functions
            as kwargs. ``None`` is also an acceptable entry  if there are no
            kwargs to pass to a model function. Default is ``None.``

        param_kwargs : list-like (optional)
            A list of ``dict`` objects to pass to the individual params
            functions as kwargs. ``None`` is also an acceptable entry  if
            there are no kwargs to pass to a model function. Default is
            ``None.``

        emcee_kwargs : dict (optional)
            Keyword arguments to pass options to the fitter

        Keyword Arguments
        -----------------
        min_temp : numeric
            Lower limit of temperature to fit. Default is the minimum temperature in the data.

        max_temp : numeric
            Upper limit of temperature to fit. Default is the maximum temperature in the data.

        min_pwr : numeric
            Lower limit of power to fit. Default is the minimum power in the data.

        max_pwr : numeric
            Upper limit of power to fit. Default is the maximum power in the data.

        use_lmfit_params : bool
            Whether or not to use the resulting best-fit ``lmfit.Parameters``
            object that resulted from calling ``ResonatorSweep.do_lmfit()`` as
            the starting value for the MCMC sampler. Default is True.

        raw_data : string {'lmfit', 'emcee', 'mle'}
            Whether to use the values returned by lmfit, or the values returned
            by the emcee fitter (either the 50th percentile or the maximum
            likelihood). This also chooses which set of errorbars to use: either
            those from the lmfit covariance matrix, or those from the 16th and
            84th percentiles of the posterior probability distribution. Default
            is 'lmfit'.

        Note
        ----
        If the fits are successful, the resulting fit data (ie the best fit
        surface) will be added to the self dict in the form of a
        ``pandas.DataFrame`` under the following keys:

        For a joint fit (``len(fit_keys) > 1``)::

            'emcee_joint_'+joint_key+'_'+key for each key in fit_keys

        For a single fit (``len(fit_keys) == 1``)::

            'emcee_'+key

        """

        #Figure out which data to fit
        raw_data = kwargs.pop('raw_data', 'lmfit')
        assert raw_data in ['lmfit', 'emcee', 'mle'], "raw_data must be 'lmfit', 'emcee', or 'mle'."



        #Set some limits
        min_temp = kwargs.pop('min_temp', min(self.tvec))
        max_temp = kwargs.pop('max_temp', max(self.tvec))
        t_filter = (self.tvec >= min_temp) * (self.tvec <= max_temp)

        min_pwr = kwargs.pop('min_pwr', min(self.pvec))
        max_pwr = kwargs.pop('max_pwr', max(self.pvec))
        p_filter = (self.pvec >= min_pwr) * (self.pvec <= max_pwr)



        if params_list is not None:
            assert len(fit_keys) == len(models_list) == len(params_list), "Make sure argument lists match in number."
        else:
            assert len(fit_keys) == len(models_list), "Make sure argument lists match in number."

        #Make some empty dictionaries just in case so we don't break functions
        #by passing None as a kwargs
        if model_kwargs is None:
            model_kwargs = [{}]*len(fit_keys)

        if param_kwargs is None:
            param_kwargs = [{}]*len(fit_keys)

        if emcee_kwargs is None:
            emcee_kwargs = {}


        #Check to see if this should go in the joint_fits dict, and build a key if needed.
        if len(fit_keys) > 1:
            joint_key = '+'.join(fit_keys)
        else:
            joint_key = None


        #If possible (and desired) then we should use the existing best fit as a starting point
        #For the MCMC sampling. If not, build params from whatever is passed in.
        use_lmfit_params = kwargs.pop('use_lmfit_params', True)

        if (params_list is not None) and (use_lmfit_params == False):

            #Check if params looks like a lmfit.Parameters object.
            #If not, assume is function and try to set params by calling it
            for px, p in enumerate(params_list):
                if not hasattr(p, 'valuesdict'):
                    assert param_kwargs[px] is not None, "If passing functions to params, must specify param_kwargs."
                    params_list[px] = p(**param_kwargs[px])

            #Combine the different params objects into one large list
            #Only the first of any duplicates will be transferred
            merged_params = lf.Parameters()
            if len(params_list) > 1:
                for p in params_list:
                    for key in p.keys():
                        if key not in merged_params.keys():
                            merged_params[key] = p[key]
            else:
                merged_params = params_list[0]

        else:
            if joint_key is not None:
                assert joint_key in self.lmfit_joint_results.keys(), "Can't use lmfit params. They don't exist."
                merged_params = self.lmfit_joint_results[joint_key].params
            else:
                assert fit_keys[0] in self.lmfit_results.keys(), "Can't use lmfit params. They don't exist."
                merged_params = self.lmfit_results[fit_keys[0]].params


        #Get all the possible temperature/power combos into two grids
        ts, ps = np.meshgrid(self.tvec[t_filter], self.pvec[p_filter])

        #Create grids to hold the fit data and the sigmas
        fit_data_list = []
        fit_sigmas_list = []

        #Get the data that corresponds to each temperature power combo and
        #flatten it to match the ts/ps combinations
        #Transposing is important because numpy matrices are transposed from
        #Pandas DataFrames
        for key in fit_keys:

            if raw_data == 'emcee':
                key = key + '_mc'
            elif raw_data == 'mle':
                key = key + '_mle'

            if raw_data in ['emcee', 'mle']:
                err_bars = (self[key+'_sigma_plus_mc'].loc[t_filter, p_filter].values.T+
                            self[key+'_sigma_minus_mc'].loc[t_filter, p_filter].values.T)
            else:
                err_bars = self[key+'_sigma'].loc[t_filter, p_filter].values.T

            fit_data_list.append(self[key].loc[t_filter, p_filter].values.T)
            fit_sigmas_list.append(err_bars)

        #Create a new model function that will be passed to the minimizer.
        #Basically this runs each fit and passes all the residuals back out
        def model_func(params, models, ts, ps, data, sigmas, kwargs):
            residuals = []
            for ix in range(len(fit_keys)):
                residuals.append(models[ix](params, ts, ps, data[ix], sigmas[ix], **kwargs[ix]))

            return np.asarray(residuals).flatten()


        #Create a lmfit minimizer object
        minObj = lf.Minimizer(model_func, merged_params, fcn_args=(models_list, ts, ps, fit_data_list, fit_sigmas_list, model_kwargs))

        #Call the lmfit minimizer method and minimize the residual
        emcee_result = minObj.emcee(**emcee_kwargs)

        #Put the result in the appropriate dictionary
        if joint_key is not None:
            self.emcee_joint_results[joint_key] = emcee_result
        else:
            self.emcee_results[fit_keys[0]] = emcee_result

        #Calculate the best-fit model from the params returned
        #And put it into a pandas DF with the appropriate key.
        #The appropriate key format is: 'lmfit_joint_'+joint_key+'_'+key
        #or, for a single fit: 'lmfit_'+key
        for ix, key in enumerate(fit_keys):
            #Call the fit model without data to have it return the model
            returned_model = models_list[ix](emcee_result.params, ts, ps)

            #Build the appropriate key
            if joint_key is not None:
                new_key = 'emcee_joint_'+joint_key+'_'+key
            else:
                new_key = 'emcee_'+key

            #Make a new dict entry in the self dictionary with the right key.
            #Have to transpose the matrix to turn it back into a DF
            self[new_key] = pd.DataFrame(np.nan, index=self.tvec, columns=self.pvec)
            self[new_key].loc[self.tvec[t_filter], self.pvec[p_filter]] = returned_model.T
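
A hypothetical usage sketch for do_emcee, with a toy model that obeys the documented signature (returns the model when data is None, otherwise the weighted residual); the sweep instance, the 'f0' key and the parameters are all assumptions.

import lmfit as lf


def f0_model(params, temps, powers, data=None, sigmas=None):
    # toy temperature dependence of a resonance frequency (assumption)
    model = params['f0'].value * (1 - params['alpha'].value * temps**2)
    if data is None:
        return model
    return (model - data) / sigmas


pars = lf.Parameters()
pars.add('f0', value=5e9)
pars.add('alpha', value=1e-6, min=0)

# sweep is assumed to be an existing ResonatorSweep instance with an 'f0' key
sweep.do_emcee(['f0'], [f0_model], params_list=[pars],
               use_lmfit_params=False, emcee_kwargs={'steps': 500})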
Code example #25
import lmfit
import numpy as np

x = np.linspace(1, 10, 250)  # assumed grid; the original snippet omits it
y = 3.0 * np.exp(-x / 2) - 5.0 * np.exp(
    -(x - 0.1) / 10.) + 0.1 * np.random.randn(len(x))

p = lmfit.Parameters()
p.add_many(('a1', 4.), ('a2', 4.), ('t1', 3.), ('t2', 3.))


def residual(p):
    return p['a1'] * np.exp(-x / p['t1']) + p['a2'] * np.exp(
        -(x - 0.1) / p['t2']) - y


print(y)

# create Minimizer
mini = lmfit.Minimizer(residual, p)

# first solve with Nelder-Mead
out1 = mini.minimize(method='Nelder')

# then solve with Levenberg-Marquardt using the
# Nelder-Mead solution as a starting point

out2 = mini.minimize(method='leastsq', params=out1.params)

lmfit.report_fit(out2.params, min_correl=0.5)
ci, trace = lmfit.conf_interval(mini,
                                out2,
                                sigmas=[1, 2],
                                trace=True,
                                verbose=False)
Code example #26
def geom_min(params):
    import pandas
    launcher = ensemble_refine_launcher.RefineLauncher(params)
    df = pandas.read_pickle(params.geometry.input_pkl)
    if params.geometry.first_n is not None:
        df = df.iloc[:params.geometry.first_n]
    if COMM.rank==0:
        print("Will optimize using %d experiments" %len(df))
    launcher.load_inputs(df, refls_key=params.geometry.refls_key)

    # same on every rank:
    det_params = DetectorParameters(params, launcher.panel_groups_refined, launcher.n_panel_groups)

    # different on each rank
    crystal_params = CrystalParameters(launcher.Modelers)
    crystal_params.parameters = COMM.bcast(COMM.reduce(crystal_params.parameters))

    LMP = lmfit.Parameters()
    LMP.add_many(*(crystal_params.parameters + det_params.parameters))
    LMP_index_mapping = {name: i for i, name in enumerate(LMP.keys())}

    for i_shot in launcher.Modelers:
        Modeler = launcher.Modelers[i_shot]
        set_group_id_slices(Modeler, launcher.panel_group_from_id)

    # attached some objects to SIM for convenience
    launcher.SIM.panel_reference_from_id = launcher.panel_reference_from_id
    launcher.SIM.panel_group_from_id = launcher.panel_group_from_id
    launcher.SIM.panel_groups_refined = launcher.panel_groups_refined

    # compute gradients, depending on the refinement method
    do_grads = params.geometry.optimize_method == "lbfgsb"
    if not do_grads:
        assert params.geometry.optimize_method == "nelder"

    # set the GPU device
    launcher.SIM.D.device_Id = COMM.rank % params.refiner.num_devices
    if COMM.rank == 0:
        print("Allocating %d pixels on rank %d" % (launcher.NPIX_TO_ALLOC, COMM.rank))
    npx_str = "(rnk%d, dev%d): %d pix" %(COMM.rank, launcher.SIM.D.device_Id, launcher.NPIX_TO_ALLOC)
    npx_str = COMM.gather(npx_str)
    if COMM.rank==0:
        print("How many pixels each rank will allocate for on its device:")
        print("; ".join(npx_str))
    launcher.SIM.D.Npix_to_allocate = launcher.NPIX_TO_ALLOC

    # configure diffBragg instance for gradient computation
    # TODO: fix flags currently unsupported in lmfit with gradients? One can always "fix" a parameter by
    #       setting the range in DetectorParameters/CrystalParameters to be infinitesimal, e.g. +-1e-10
    if do_grads:
        #if not params.fix.RotXYZ:
        for i_rot in range(3):
            launcher.SIM.D.refine(ROTXYZ_ID[i_rot])
        #if not params.fix.Nabc:
        launcher.SIM.D.refine(hopper_utils.NCELLS_ID)
        #if not params.fix.ucell:
        for i_ucell in range(launcher.SIM.num_ucell_param):
            launcher.SIM.D.refine(hopper_utils.UCELL_ID_OFFSET + i_ucell)
        for i, diffbragg_id in enumerate(PAN_OFS_IDS):
            #if not params.geometry.fix.panel_rotations[i]:
            launcher.SIM.D.refine(diffbragg_id)

        for i, diffbragg_id in enumerate(PAN_XYZ_IDS):
            #if not params.geometry.fix.panel_translations[i]:
            launcher.SIM.D.refine(diffbragg_id)

    # do a barrel roll!
    target = Target()
    fcn_args = [LMP_index_mapping, launcher.Modelers, launcher.SIM, params, do_grads]
    fcn_kws = {}
    lbfgs_kws = {}
    if do_grads:
        lbfgs_kws = {"jac": target.jac,
                    "options":  {"ftol": params.ftol, "gtol": 1e-10, "maxfun":1e5, "maxiter":params.lbfgs_maxiter}}

    minzer = lmfit.Minimizer(userfcn=target, params=LMP, fcn_args=fcn_args, fcn_kws=fcn_kws, iter_cb=target.callbk,
                             scale_covar=False, calc_covar=False)
    result = minzer.minimize(method=params.geometry.optimize_method, params=LMP, **lbfgs_kws)

    if COMM.rank == 0:
        save_opt_det(params, result.params, launcher.SIM)
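
geom_min drives the minimizer through a Target object that is defined elsewhere. A rough sketch of the interface it would need, given how target, target.jac and target.callbk are used above; compute_resid_and_grad is a hypothetical helper standing in for the real diffBragg evaluation.

import numpy as np


class Target:
    # hypothetical callable bundling residuals, gradients and an
    # iteration callback for lmfit.Minimizer (sketch only)

    def __init__(self):
        self.last_grad = None

    def __call__(self, params, idx_map, modelers, sim, phil_params, do_grads):
        # evaluate residuals, caching the gradient so jac() can return
        # it without recomputing (compute_resid_and_grad is hypothetical)
        resid, grad = compute_resid_and_grad(params, idx_map, modelers,
                                             sim, phil_params, do_grads)
        self.last_grad = grad
        return resid

    def jac(self, params, *args, **kwargs):
        # gradient callback handed to L-BFGS-B via lbfgs_kws above
        return self.last_grad

    def callbk(self, params, niter, resid, *args, **kwargs):
        # lmfit iteration callback: report progress every 10 iterations
        if niter % 10 == 0:
            print("iteration %d, |resid| = %f" % (niter, np.linalg.norm(resid)))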
Code example #27
import emcee
import lmfit
import numpy as np

x = np.linspace(0, 50, 200)  # assumed grid; the original snippet omits it
a, b, t1, t2 = 2, 3, 2, 10  # Real values
y_true = a * np.exp(-x / t1) + b * np.exp(-x / t2)
sigma = 0.02
y = y_true + np.random.randn(x.size) * sigma


def residuals(paras):
    a = paras['a'].value
    b = paras['b'].value
    t1 = paras['t1'].value
    t2 = paras['t2'].value
    return a * np.exp(-x / t1) + b * np.exp(-x / t2) - y


# fit the data with lmfit (params construction assumed; the original
# snippet omits it)
params = lmfit.Parameters()
params.add_many(('a', 4.), ('b', 4.), ('t1', 3.), ('t2', 3.))
mini = lmfit.Minimizer(residuals, params)
result = mini.leastsq()
lmfit.report_errors(result.params)

# create lnfunc and starting distribution.
lnfunc, guess = create_all(result)
nwalkers, ndim = 30, len(guess)
p0 = emcee.utils.sample_ball(guess, 0.1 * np.array(guess), nwalkers)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnfunc)
steps = 500
sampler.run_mcmc(p0, steps)

if HASPYLAB:
    fig, axes = plt.subplots(5, 1, sharex=True, figsize=(8, 9))
    for (i, name, rv) in zip(range(5),
                             list(params.keys()) + ['sigma'],
Code example #28
File: call_catalog.py  Project: AtomyChan/autophot
def match(image, headinfo, target_coords, syntax, catalog_syntax, filter_,
          data, fwhm):
    """
    Match positions from the catalog with locations on the image to check for
    sources above the threshold level given by 'bkg_level' in syntax.


    Input:

        - image: Numpy 2D array
        - headinfo: astropy.io.fits.header.Header
        - target_coords: astropy.coordinates.sky_coordinate.SkyCoord
        - syntax: dict
        - catalog_syntax: dict
        - filter_: str
        - data: pandas DataFrame
        - fwhm: float

    Output:

        - data_new_frame:  pandas DataFrame

    """

    import warnings
    if not syntax['catalog_warnings'] or syntax['master_warnings']:
        warnings.filterwarnings("ignore")

    import logging

    import numpy as np
    import matplotlib.pyplot as plt
    import pandas as pd
    import lmfit

    from astropy.stats import sigma_clipped_stats
    from photutils import DAOStarFinder

    from autophot.packages.functions import (gauss_sigma2fwhm, gauss_2d,
                                             gauss_fwhm2sigma, moffat_2d,
                                             moffat_fwhm, pix_dist)

    logger = logging.getLogger(__name__)

    x_new_source = []
    y_new_source = []
    x_new_cen = []
    y_new_cen = []
    cp_dist = []
    dist2target_list = []
    cat_idx = []
    non_detections = []
    detections = []
    fwhm_list = []

    # Remove rows that have no value in the selected filter column
    data_update = data[~np.isnan(data[catalog_syntax[filter_]])]

    # Look at the most accurate measurements first, based on errors
    try:
        data_update.sort_values(by=[catalog_syntax[filter_ + '_err']],
                                inplace=True,
                                na_position='last')
    except:
        data_update[filter_ + '_err'] = np.nan * len(data_update)
        pass

    # Grid for the close-up, matching the scale of the image
    x = np.arange(0, 2 * syntax['scale'])
    xx, yy = np.meshgrid(x, x)

    k = 0

    # Wiggle room for catalog matching
    dx = syntax['catalog_matching_dx']
    dy = syntax['catalog_matching_dy']

    useable_sources = 0

    if syntax['use_moffat']:
        logging.info('Using Moffat Profile for fitting')

        fitting_model = moffat_2d
        fitting_model_fwhm = moffat_fwhm

    else:
        logging.info('Using Gaussian Profile for fitting')

        fitting_model = gauss_2d
        fitting_model_fwhm = gauss_sigma2fwhm

    try:
        logger.debug('Matching sources to catalog')

        for i in range(len(data_update.index.values)):

            idx = np.array(data_update.index.values)[i]

            print('\rMatching catalog source to image: %d / %d ' %
                  (float(i), len(data_update.index)),
                  end='')

            if useable_sources >= syntax['max_catalog_sources']:
                break

            try:

                # Skip if source location is off the image
                if data_update.x_pix[idx] <= 0 or data_update.y_pix[idx] <= 0:
                    x_new_cen.append(np.nan)
                    y_new_cen.append(np.nan)
                    cp_dist.append(np.nan)
                    dist2target_list.append(np.nan)
                    continue

                # catalog pixel coordinates of the source, taken as an approximate location
                y = data_update.y_pix[idx]
                x = data_update.x_pix[idx]

                # Add index key for comparison and matching with the original catalog file
                cat_idx.append(int(idx))

                # Add x and y pixel location
                x_new_source.append(x)
                y_new_source.append(y)

                # Create cutout image of size (2*syntax['scale'],2*syntax['scale'])
                close_up = image[int(y - syntax['scale']):int(y +
                                                              syntax['scale']),
                                 int(x - syntax['scale']):int(x +
                                                              syntax['scale'])]

                # Cutout not possible - too close to edge or invalid pixel data, i.e. NaNs or infs
                if close_up.shape != (2 * syntax['scale'],
                                      2 * syntax['scale']):
                    x_new_cen.append(np.nan)
                    y_new_cen.append(np.nan)
                    cp_dist.append(np.nan)
                    dist2target_list.append(np.nan)
                    fwhm_list.append(np.nan)

                    continue

                # Preset bad-pixel value (1e-5) or NaNs present - skip this source
                if 1e-5 in close_up or np.isnan(np.min(close_up)):
                    x_new_cen.append(np.nan)
                    y_new_cen.append(np.nan)
                    cp_dist.append(np.nan)
                    dist2target_list.append(np.nan)
                    fwhm_list.append(np.nan)

                    continue

                # Get close up image properties
                mean, median, std = sigma_clipped_stats(
                    close_up,
                    sigma=syntax['source_sigma_close_up'],
                    maxiters=syntax['iters'])

                # Perform source detection with the threshold set in input.yml
                daofind = DAOStarFinder(fwhm=fwhm,
                                        threshold=syntax['bkg_level'] * std,
                                        roundlo=-1.0,
                                        roundhi=1.0,
                                        sharplo=0.2,
                                        sharphi=1.0)

                sources = daofind(close_up - median)

                if sources is None:
                    sources = []

                if len(sources) == 0:

                    non_detections.append(
                        data_update[catalog_syntax[filter_]].loc[[idx
                                                                  ]].values[0])

                    x_new_cen.append(np.nan)
                    y_new_cen.append(np.nan)
                    cp_dist.append(np.nan)
                    dist2target_list.append(np.nan)
                    fwhm_list.append(np.nan)

                    continue

                # Approximate location of source
                xc_guess = np.array(sources['xcentroid'])[0]
                yc_guess = np.array(sources['ycentroid'])[0]

                # If more than one source detected in close up
                # assume source closest to center is desired source
                if len(sources) >= 2:
                    r_vals = pix_dist(syntax['scale'],
                                      np.array(sources['xcentroid']),
                                      syntax['scale'],
                                      np.array(sources['ycentroid']))

                    r_idx = np.argmin(r_vals)

                    # if the closest source is too far from the predicted location - ignore
                    if r_vals[r_idx] > syntax['match_dist']:
                        x_new_cen.append(xc_guess)
                        y_new_cen.append(yc_guess)
                        cp_dist.append(np.nan)
                        dist2target_list.append(np.nan)
                        fwhm_list.append(np.nan)
                        continue

                    xc_guess = np.array(sources['xcentroid'])[r_idx]
                    yc_guess = np.array(sources['ycentroid'])[r_idx]
                try:

                    pars = lmfit.Parameters()
                    pars.add('A', value=np.nanmax(close_up), min=0)
                    pars.add('x0',
                             value=close_up.shape[1] / 2,
                             min=0.5 * close_up.shape[1] - dx,
                             max=0.5 * close_up.shape[1] + dx)
                    pars.add('y0',
                             value=close_up.shape[0] / 2,
                             min=0.5 * close_up.shape[0] - dy,
                             max=0.5 * close_up.shape[0] + dy)
                    pars.add('sky', value=np.nanmedian(close_up))

                    def residual(p):
                        p = p.valuesdict()
                        return (close_up - fitting_model(
                            (xx, yy), p['x0'], p['y0'], p['sky'], p['A'],
                            syntax['image_params']).reshape(
                                close_up.shape)).flatten()

                    mini = lmfit.Minimizer(residual, pars, nan_policy='omit')
                    result = mini.minimize(method='least_squares')

                    xcen = result.params['x0'].value
                    ycen = result.params['y0'].value

                    S = result.params['sky'].value
                    H = result.params['A'].value

                    sigma = fitting_model_fwhm(syntax['image_params'])

                except Exception as e:

                    x_new_cen.append(np.nan)
                    y_new_cen.append(np.nan)
                    cp_dist.append(np.nan)
                    dist2target_list.append(np.nan)
                    fwhm_list.append(np.nan)

                    logger.exception(e)

                    continue

                k += 1

                # Add new source location accounting for difference in fitted location / expected location

                centroid_x = xcen - syntax['scale'] + x
                centroid_y = ycen - syntax['scale'] + y

                x_new_cen.append(centroid_x)
                y_new_cen.append(centroid_y)

                dist2target = pix_dist(syntax['target_x_pix'], centroid_x,
                                       syntax['target_y_pix'], centroid_y)

                cp_dist.append(
                    np.sqrt((xcen - syntax['scale'])**2 +
                            (ycen - syntax['scale'])**2))
                dist2target_list.append(dist2target)
                detections.append(
                    data_update[catalog_syntax[filter_]].loc[[idx]].values[0])

                fwhm_list.append(sigma * 2 * np.sqrt(2 * np.log(2)))

                useable_sources += 1

                if syntax['source_plot']:
                    if len(sources) == 1:

                        fig = plt.figure(figsize=(6, 6))
                        ax = fig.add_subplot(111)
                        ax.imshow(close_up)

                        ax.set_title(
                            'Source @ x = ' +
                            '{0:.3f}'.format(xcen + x - syntax['scale']) +
                            ' : y = ' +
                            '{0:.3f}'.format(ycen + y - syntax['scale']))

                        small_ap = plt.Circle(
                            (xcen, ycen),
                            syntax['ap_size'] * headinfo['FWHM'],
                            color='r',
                            fill=False,
                            label='Aperture')
                        big_ap = plt.Circle(
                            (xcen, ycen),
                            syntax['inf_ap_size'] * headinfo['FWHM'],
                            color='b',
                            fill=False,
                            label='Aperture Correction')

                        ax.add_artist(small_ap)
                        ax.add_artist(big_ap)

                        ax.plot([], [],
                                ' ',
                                label='Sky =' + '{0:.3f}'.format(S) +
                                'Height =' + '{0:.3f}'.format(H))
                        ax.scatter(syntax['scale'],
                                   syntax['scale'],
                                   marker='+',
                                   s=100,
                                   color='r',
                                   linewidths=0.01,
                                   label='Catalog')
                        ax.scatter(
                            xc_guess,
                            yc_guess,
                            marker='+',
                            s=100,
                            color='b',
                            linewidths=0.01,
                            label='Source detection [closest object to catalog]'
                        )
                        ax.scatter(xcen,
                                   ycen,
                                   marker='+',
                                   s=100,
                                   color='green',
                                   linewidths=0.01,
                                   label='Least square fit')

                        ax.legend(loc='upper right')
                        plt.tight_layout()
                        plt.close()

            except Exception as e:

                logger.exception(e)

                x_new_cen.append(xc_guess)
                y_new_cen.append(yc_guess)
                cp_dist.append(np.nan)
                dist2target_list.append(np.nan)
                fwhm_list.append(np.nan)

                continue

        if syntax['show_nondetect_plot']:

            non_detections = np.array(non_detections)[np.isfinite(
                non_detections)]
            detections = np.array(detections)[np.isfinite(detections)]

            if len(non_detections) == 0:
                logger.debug('All sources detected')

            fig = plt.figure(figsize=(6, 8))
            ax = fig.add_subplot(111)
            ax.hist(non_detections,
                    bins='auto',
                    align='mid',
                    color='green',
                    histtype='step',
                    label='Non-Detection')

            ax.hist(detections,
                    bins='auto',
                    align='mid',
                    color='red',
                    histtype='step',
                    label='Detection')

            ax.set_title('Non-Detections')
            ax.set_xlabel('Magnitude')
            ax.set_ylabel('Binned Occurrence')
            ax.legend(loc='best')
            plt.show()

        # print(' ... done')

        frame_data = [
            np.array(cat_idx).astype(int),
            np.array(data_update[catalog_syntax['RA']]),
            # np.array(data_update[catalog_syntax['RA_err']]),
            np.array(data_update[catalog_syntax['DEC']]),
            # np.array(data_update[catalog_syntax['DEC_err']]),
            np.array(x_new_source),
            np.array(y_new_source),
            np.array(x_new_cen),
            np.array(y_new_cen),
            np.array(cp_dist),
            np.array(dist2target_list),
            np.array(fwhm_list)
        ]

        frame_cols = [
            'cat_idx',
            'ra',
            # 'ra_err',
            'dec',
            # 'dec_err',
            'x_pix_source',
            'y_pix_source',
            'x_pix',
            'y_pix',
            'cp_dist',
            'dist2target',
            'fwhm'
        ]

        data_new_frame = pd.DataFrame(frame_data).T
        data_new_frame.columns = frame_cols
        data_new_frame.set_index('cat_idx', inplace=True)

        data_new_frame['cat_' + filter_] = data_update[catalog_syntax[filter_]]
        data_new_frame['cat_' + filter_ +
                       '_err'] = data_update[catalog_syntax[filter_ + '_err']]

        # For building colour terms, include the corresponding colour
        # standards so they are used consistently throughout AutoPhoT
        dmag = {
            'U': ['U', 'B'],
            'B': ['B', 'V'],
            'V': ['B', 'V'],
            'R': ['V', 'R'],
            'I': ['R', 'I'],
            'u': ['u', 'g'],
            'g': ['g', 'r'],
            'r': ['g', 'r'],
            'i': ['r', 'i'],
            'z': ['i', 'z']
        }
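        # e.g. for filter_ == 'g', dmag['g'] is ['g', 'r'], so the colour term
        # is built from (g - r); the partner filter is the entry that is not
        # filter_ itself, as the commented-out selector below illustrates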

        # ct = [i for i in dmag[filter_] if i != filter_][0]

        for ct in list(dmag.keys()):
            try:

                if ct not in catalog_syntax:
                    continue

                data_new_frame['cat_' + ct] = data_update[catalog_syntax[ct]]
                data_new_frame['cat_' + ct +
                               '_err'] = data_update[catalog_syntax[ct +
                                                                    '_err']]
            except Exception:
                # logger.exception(e)
                continue

        data_new_frame = data_new_frame[~np.isnan(data_new_frame['x_pix'])]
        data_new_frame = data_new_frame[~np.isnan(data_new_frame['y_pix'])]

        warnings.filterwarnings("default")

    except Exception as e:
        logger.exception(e)
        cp_dist.append(np.nan)

    return data_new_frame, syntax
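# `pix_dist` is called above but not defined in this snippet; a minimal
# sketch, assuming it returns the Euclidean separation of two pixel
# positions (the signature matches the call sites above):
import numpy as np

def pix_dist(x1, x2, y1, y2):
    # separation in pixels between (x1, y1) and (x2, y2)
    return np.sqrt((x1 - x2)**2 + (y1 - y2)**2)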
Code example #29
0
File: paramsfit.py Project: thusser/spexxy
    def __call__(self, filename: str) -> List[float]:
        """Start the fitting procedure on the given file.

        Args:
            filename: Name of file to fit.

        Returns:
            List of final values of parameters, ordered in the same way as the return value of parameters()
        """

        # fix any parameters?
        for cmp_name, cmp in self.objects['components'].items():
            # loop all parameters of this component
            for param_name in cmp.param_names:
                # do we have parameters to fix and is this one of them?
                if self._fixparams and cmp_name in self._fixparams and param_name in self._fixparams[
                        cmp_name]:
                    self.log.info(
                        'Fixing "%s" of component "%s" to its initial value of %f.',
                        param_name, cmp_name, cmp[param_name])
                    cmp.set(param_name, vary=False)
                else:
                    # otherwise make it a free parameter
                    cmp.set(param_name, vary=True)

        # Load spectrum
        self._load_spectrum(filename)

        # create weight array
        self.log.info('Creating weights array...')
        self._weight = np.ones((len(self._spec)))
        if self._weights is not None:
            # loop all weights
            for w in self._weights:
                # multiply weights array with new weights
                self._weight *= w(self._spec, filename)

        # adjusting valid mask for weights
        self._valid &= ~np.isnan(self._weight)

        # fewer than the minimum fraction of pixels valid?
        if np.sum(self._valid) < self._min_valid_pixels * len(self._valid):
            self.log.warning(
                'Less than %d percent of pixels valid, skipping...',
                self._min_valid_pixels * 100)
            # pad the per-parameter columns with None, then success=False, redchi=0
            return [None] * (len(self.columns()) - 2) + [False, 0]

        # initialize multiplicative polynomial with ones
        self._mult_poly = Legendre(self._spec, self._poly_degree)

        # get parameters
        params = Parameters()
        for cmp in self.components:
            params += cmp.make_params()

        # open PDF
        if self._plot_iterations:
            self._iterations_pdf = PdfPages(filename.replace('.fits', '.pdf'))

        # start minimization
        self.log.info('Starting fit...')
        minimizer = lmfit.Minimizer(self._fit_func,
                                    params,
                                    iter_cb=self._callback,
                                    max_nfev=self._max_fev,
                                    nan_policy='raise',
                                    xtol=self._xtol,
                                    ftol=self._ftol,
                                    epsfcn=self._epsfcn,
                                    factor=self._factor)
        result = minimizer.leastsq()
        self.log.info('Finished fit.')

        # close PDF file
        if self._plot_iterations:
            self._iterations_pdf.close()

        # get best fit
        best_fit = self._get_model(result.params)

        # estimate SNR
        snr = None if best_fit is None else self._spec.estimate_snr(best_fit)
        if snr is not None:
            self.log.info('Estimated S/N of %.2f.', snr)

        # successful, if minimization was a success
        success = result.success

        # get message
        message = "" if result.lmdif_message is None else result.lmdif_message.replace(
            "\n", " ")

        # if any of the parameters was fitted close to their edge, fit failed
        for pn in result.params:
            # ignore all sigma and tellurics parameters
            if pn.lower().find("sig") != -1 or pn.lower().find(
                    "tellurics") != -1:
                continue

            # get param
            p = result.params[pn]

            # get position of value within range for parameter
            pos = (p.value - p.min) / (p.max - p.min)

            # if pos < 0.01 or pos > 0.99, i.e. closer than 1% to either edge, the fit fails
            if p.vary and (pos < 0.01 or pos > 0.99):
                success = False
                message = "Parameter %s out of range: %.2f" % (p.name, p.value)
                break

        # fill statistics dict
        stats = {
            'success': success,
            'errorbars': result.errorbars,
            'nfev': result.nfev,
            'chisqr': result.chisqr,
            'redchi': result.redchi,
            'nvarys': result.nvarys,
            'ndata': result.ndata,
            'nfree': result.nfree,
            'msg': message,
            'snr': snr
        }

        # write results back to file
        self._write_results_to_file(filename, result, best_fit, stats)

        # all components (copied so appending tellurics does not mutate self._cmps)
        components = list(self._cmps)
        if self._tellurics is not None:
            components.append(self._tellurics)

        # build list of results and return them
        results = []
        for cmp in components:
            # parse parameters
            cmp.parse_params(result.params)

            # loop params
            for n in cmp.param_names:
                p = '%s%s' % (cmp.prefix, n)
                results += [
                    cmp.parameters[n]['value'], cmp.parameters[n]['stderr']
                ]

        # success?
        results += [success, result.redchi]
        return results
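# A minimal, self-contained sketch of the near-bounds check used above; the
# parameter name and values here are illustrative only, not part of spexxy.
# A varying parameter whose best-fit value lands within 1% of either bound
# marks the fit as failed.
import lmfit

params = lmfit.Parameters()
params.add('teff', value=3510.0, min=3500.0, max=7000.0, vary=True)

p = params['teff']
pos = (p.value - p.min) / (p.max - p.min)  # normalised position in [0, 1]
if p.vary and (pos < 0.01 or pos > 0.99):
    print('Parameter %s out of range: %.2f' % (p.name, p.value))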
Code example #30
0
import numpy as np
import lmfit

# popt, freq, the resonator model `a`, the residual function `resid`, and the
# output arrays (qr, tau0n, cn, aarray, norms, slopeIs, slopeQs) are assumed
# to be defined earlier in the source file this snippet was taken from
interceptQs = np.zeros_like(popt)

for i in range(popt.shape[0]):
    params = lmfit.Parameters()
    params.add('Qr', value=a.Qr(freq, popt[i]))
    params.add('Qc', value=2e-1)
    params.add('C', value=a.C)
    params.add('A', value=0.)
    params.add('normI', value=a.S21(freq, P_opt=popt[i]).real.max())
    # initial Q-channel normalisation taken from the imaginary part
    params.add('normQ', value=a.S21(freq, P_opt=popt[i]).imag.max())
    params.add('slopeI', value=0.)
    params.add('slopeQ', value=0.)
    params.add('interceptI', value=0.)
    params.add('interceptQ', value=0.)
    minner = lmfit.Minimizer(resid,
                             params,
                             fcn_args=(freq, a.S21(freq, P_opt=popt[i]).real,
                                       a.S21(freq, P_opt=popt[i]).imag))
    r = minner.minimize()
    p = np.array([
        r.params['Qr'].value, r.params['Qc'].value, r.params['C'].value,
        r.params['A'].value, r.params['normI'].value, r.params['normQ'].value,
        r.params['slopeI'].value, r.params['slopeQ'].value,
        r.params['interceptI'].value, r.params['interceptQ'].value
    ])
    # p is ordered as the params were added: Qr, Qc, C, A, normI, normQ,
    # slopeI, slopeQ, interceptI, interceptQ
    qr[i] = p[0]
    tau0n[i] = p[1]
    cn[i] = p[2]
    aarray[i] = p[3]
    norms[i] = p[4]
    slopeIs[i] = p[6]
    slopeQs[i] = p[7]
    interceptQs[i] = p[9]
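# The residual function `resid` passed to lmfit.Minimizer above is not shown
# in this snippet. A plausible sketch of its calling convention only (an
# assumption that ignores the resonance parameters Qr/Qc/C/A and models just
# a linear baseline per quadrature channel):
import numpy as np

def resid(params, freq, I_data, Q_data):
    v = params.valuesdict()
    # hypothetical baseline-only model for each channel
    model_I = v['slopeI'] * freq + v['interceptI']
    model_Q = v['slopeQ'] * freq + v['interceptQ']
    # lmfit minimises the stacked I/Q residual vector
    return np.concatenate([model_I - I_data, model_Q - Q_data])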