Example #1
def test_saveload_modelresult_roundtrip(method):
    """Test for modelresult.loads()/dumps() and repeating that"""
    def mfunc(x, a, b):
        return a * (x - b)

    model = Model(mfunc)
    params = model.make_params(a=0.1, b=3.0)
    params['a'].set(min=.01, max=1, brute_step=0.01)
    params['b'].set(min=.01, max=3.1, brute_step=0.01)

    np.random.seed(2020)
    xx = np.linspace(-5, 5, 201)
    yy = 0.5 * (xx - 0.22) + np.random.normal(scale=0.01, size=len(xx))

    result1 = model.fit(yy, params=params, x=xx, method=method)

    result2 = ModelResult(model, Parameters())
    result2.loads(result1.dumps(), funcdefs={'mfunc': mfunc})

    result3 = ModelResult(model, Parameters())
    result3.loads(result2.dumps(), funcdefs={'mfunc': mfunc})

    assert result3 is not None
    assert_allclose(result2.params['a'], 0.5, rtol=1.0e-2)
    assert_allclose(result2.params['b'], 0.22, rtol=1.0e-2)
    assert_allclose(result3.params['a'], 0.50, rtol=1.0e-2)
    assert_allclose(result3.params['b'], 0.22, rtol=1.0e-2)
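
For comparison, the same roundtrip can also go through a file using lmfit's save_modelresult/load_modelresult helpers; a minimal sketch (the filename is illustrative), reusing model, mfunc, and result1 from above:

from lmfit.model import save_modelresult, load_modelresult

save_modelresult(result1, 'mfunc_modelresult.sav')
restored = load_modelresult('mfunc_modelresult.sav', funcdefs={'mfunc': mfunc})
assert_allclose(restored.params['a'], 0.5, rtol=1.0e-2)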
Example #2
def fit_sine(x_data: np.ndarray,
             y_data: np.ndarray,
             initial_parameters=None,
             positive_amplitude=True) -> Tuple[Dict[str, Any], Dict[str, Any]]:
    """ Fit a sine wave for the inputted data; see sine function in functions.py for model

    Args:
        x_data: x data points
        y_data: data to be fitted
        initial_parameters: list of 4 floats with initial guesses for: amplitude, frequency, phase and offset
        positive_amplitude: If True, then enforce the amplitude to be positive
    Returns:
        Tuple of the fitted parameters dictionary and the full result dictionary
    """
    if initial_parameters is None:
        initial_parameters = _estimate_initial_parameters_sine(x_data, y_data)

    lmfit_model = Model(sine)
    if positive_amplitude:
        lmfit_model.set_param_hint('amplitude', min=0)
    lmfit_result = lmfit_model.fit(y_data,
                                   x=x_data,
                                   **dict(
                                       zip(lmfit_model.param_names,
                                           initial_parameters)),
                                   method='least_squares')
    result_dict = extract_lmfit_parameters(lmfit_model, lmfit_result)

    return result_dict['fitted_parameters'], result_dict
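
A hedged usage sketch (assumes sine, _estimate_initial_parameters_sine, and extract_lmfit_parameters are importable from the surrounding package, and that sine takes amplitude, frequency, phase, and offset, as the initial-parameter list implies):

import numpy as np

x = np.linspace(0, 1, 200)
y = 2.0 * np.sin(2 * np.pi * 5.0 * x + 0.3) + 0.5  # synthetic sine data
fitted_parameters, full_result = fit_sine(x, y)
print(fitted_parameters)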
Example #3
    def test_lrtest(self):
        rng = np.random.RandomState(RANDOM_SEED)
        a, b = 1, 1
        a_init, b_init = 2, 1

        alfa = 0.05
        noise = 0.03
        t = np.linspace(0, 12)

        def f(t, a, b):
            return b + np.exp(-a * t)

        y = f(t, a, b) + rng.normal(0, noise, len(t))
        model = Model(f)
        params = model.make_params(a=a_init, b=b_init)

        two_var_fit = model.fit(y, t=t, params=params)

        params['a'].set(vary=False)
        params['b'].set(vary=True)
        one_var_fit = model.fit(y, t=t, params=params)

        prefer_m1, pval, D, ddf = curveball.models.lrtest(
            one_var_fit, two_var_fit, alfa)
        self.assertTrue(prefer_m1)
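
For reference, a minimal sketch of the statistic such a nested-model likelihood-ratio test typically computes for lmfit results (assuming i.i.d. Gaussian residuals; curveball's actual implementation may differ):

import numpy as np
from scipy.stats import chi2

def lrtest_sketch(m0, m1, alfa=0.05):
    # m0: restricted fit (fewer free parameters), m1: full fit.
    # Under Gaussian errors, -2*log(L0/L1) = n * log(RSS0/RSS1).
    n = m0.ndata
    D = n * np.log(m0.chisqr / m1.chisqr)
    ddf = m1.nvarys - m0.nvarys
    pval = chi2.sf(D, ddf)
    return pval < alfa, pval, D, ddf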
Example #4
def _init_model(self):
    #@todo make a separate class that subclasses Model.
    # potentially allow users to change it.
    self._model = Model(self._modelfunc)
    for p, q in self._params.items():
        self._model.set_param_hint(p,
                                   value=q.value,
                                   min=q.min,
                                   max=q.max,
                                   vary=q.vary)
        self._model.make_params()
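        # NOTE: make_params() rebuilds the parameter set from the accumulated
        # hints on every iteration; a single call after the loop would suffice.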
Example #5
def test_saveload_modelresult_roundtrip():
    """Test for modelresult.loads()/dumps() and repeating that"""
    def mfunc(x, a, b):
        return a * (x - b)

    model = Model(mfunc)
    params = model.make_params(a=0.0, b=3.0)

    xx = np.linspace(-5, 5, 201)
    yy = 0.5 * (xx - 0.22) + np.random.normal(scale=0.01, size=len(xx))

    result1 = model.fit(yy, params, x=xx)

    result2 = ModelResult(model, Parameters())
    result2.loads(result1.dumps(), funcdefs={'mfunc': mfunc})

    result3 = ModelResult(model, Parameters())
    result3.loads(result2.dumps(), funcdefs={'mfunc': mfunc})

    assert result3 is not None
    assert_param_between(result2.params['a'], 0.48, 0.52)
    assert_param_between(result2.params['b'], 0.20, 0.25)
    assert_param_between(result3.params['a'], 0.48, 0.52)
    assert_param_between(result3.params['b'], 0.20, 0.25)
Example #6
def fit_semiclassical(field,
                      conductance_data,
                      phase_relaxation_length_guess=100,
                      cubic_dresselhaus_guess=0.0,
                      linear_dresselhaus_guess=0.0,
                      rashba_guess=np.pi / 4):

    print(r"Calculating best fit at zero in-plane field...")
    start = time.time()
    model = Model(magnetoconductivity_semiclassical)

    # set parameter hints
    model.set_param_hint('l_phi',
                         min=10,
                         max=1000,
                         value=phase_relaxation_length_guess,
                         vary=True)
    model.set_param_hint('theta_alpha',
                         min=-2 * np.pi,
                         max=2 * np.pi,
                         value=rashba_guess,
                         vary=True)
    model.set_param_hint('theta_beta1',
                         min=0,
                         max=2 * np.pi,
                         value=linear_dresselhaus_guess,
                         vary=False)
    model.set_param_hint('theta_beta3',
                         value=cubic_dresselhaus_guess,
                         vary=False)
    model.set_param_hint('B_magnitude', value=0.0, vary=False)
    model.set_param_hint('B_angle', value=0.0, vary=False)

    params = model.make_params()

    # perform the fit
    res = model.fit(conductance_data, params, field=field, method='nelder')
    print(res.fit_report())
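    # refine: re-run with the default least-squares method, starting from the
    # Nelder-Mead solution, so parameter uncertainties (stderr) are estimated
    # from the covariance matrix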
    res = model.fit(conductance_data, res.params, field=field)
    print(res.fit_report())

    phi = res.best_values['l_phi']
    phi_std = res.params['l_phi'].stderr
    alpha = res.best_values['theta_alpha']
    alpha_std = res.params['theta_alpha'].stderr
    beta = res.best_values['theta_beta1']
    beta_std = res.params['theta_beta1'].stderr
    gamma = res.best_values['theta_beta3']
    gamma_std = res.params['theta_beta3'].stderr

    print(r"done in {0:.1f} seconds.".format(time.time() - start))

    return phi, phi_std, alpha, alpha_std, beta, beta_std, gamma, gamma_std, res.best_fit
Example #7
def fit_semiclassical_inplane(field,
                              conductance_data,
                              phase_relaxation_length_fixed=680,
                              cubic_dresselhaus_fixed=0.0,
                              linear_dresselhaus_fixed=0.03,
                              rashba_fixed=1.17,
                              B_magnitude=0.0,
                              B_angle=0.0):

    print(r"Calculating best fit in the presence of in-plane field...")
    start = time.time()
    model = Model(magnetoconductivity_semiclassical)

    # set parameter hints
    model.set_param_hint('l_phi',
                         value=phase_relaxation_length_fixed,
                         vary=False)
    model.set_param_hint('theta_alpha', value=rashba_fixed, vary=False)
    model.set_param_hint('theta_beta1',
                         value=linear_dresselhaus_fixed,
                         vary=False)
    model.set_param_hint('theta_beta3',
                         value=cubic_dresselhaus_fixed,
                         vary=False)
    model.set_param_hint('B_magnitude',
                         min=0.0,
                         max=0.2,
                         value=B_magnitude,
                         vary=True)
    model.set_param_hint('B_angle', value=B_angle, vary=False)

    params = model.make_params()

    # perform the fit
    res = model.fit(conductance_data, params, field=field, method='nelder')
    print(res.fit_report())
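    # refine with the default least-squares method from the Nelder-Mead
    # solution so that stderr estimates become available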
    res = model.fit(conductance_data, res.params, field=field)
    print(res.fit_report())

    r = res.best_values['B_magnitude']
    theta = res.best_values['B_angle']
    r_stderr = res.params['B_magnitude'].stderr
    theta_stderr = res.params['B_angle'].stderr

    print(r"done in {0:.1f} seconds.".format(time.time() - start))

    return r, theta, r_stderr, theta_stderr, res.best_fit
Example #8
# <examples/doc_model_savemodel.py>
import numpy as np

from lmfit.model import Model, save_model


def mysine(x, amp, freq, shift):
    return amp * np.sin(x * freq + shift)


sinemodel = Model(mysine)
pars = sinemodel.make_params(amp=1, freq=0.25, shift=0)

save_model(sinemodel, 'sinemodel.sav')
# <end examples/doc_model_savemodel.py>
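
The saved model can be restored later; a minimal counterpart sketch (because the model wraps a user-defined function, funcdefs must map the saved function name back to a callable):

from lmfit.model import load_model

sinemodel2 = load_model('sinemodel.sav', funcdefs={'mysine': mysine})
pars2 = sinemodel2.make_params(amp=1, freq=0.25, shift=0)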
Example #9
class H2ExcitationFit(ExcitationFit):
    r"""Tool for fitting temperatures, column densities, and ortho-to-para ratio(`OPR`) from an :math:`H_2` excitation diagram. It takes as input a set of :math:`H_2` rovibrational line observations with errors represented as :class:`~pdrtpy.measurement.Measurement`.   

Often, excitation diagrams show evidence of both "hot" and "cold" gas components, where the cold gas dominates the intensity in the low `J` transitions and the hot gas dominates in the high `J` transitions. Given data over several transitions, one can fit for :math:`T_{cold}, T_{hot}, N_{total} = N_{cold} + N_{hot}`, and optionally `OPR`. One needs at least 5 points to fit the temperatures and column densities (slope and intercept :math:`\times 2`), though one could compute (not fit) them with only 4 points. To additionally fit `OPR`, one should have 6 points (5 degrees of freedom).

Once the fit is done, :class:`~pdrtpy.plot.ExcitationPlot` can be used to view the results.

:param measurements: Input :math:`H_2` measurements to be fit.  
:type measurements: list of :class:`~pdrtpy.measurement.Measurement`. 
    """
    def __init__(self,
                 measurements=None,
                 constantsfile="atomic_constants.tab"):
        super().__init__(measurements, constantsfile)
        self._canonical_opr = 3.0
        self._opr = Measurement(data=[self._canonical_opr], uncertainty=None)
        self._init_params()
        self._init_model()
        self._fitresult = None
        self._temperature = None
        self._total_colden = None
        # position and size that was used for averaging/fit
        self._position = None
        self._size = None

    def _init_params(self):
        #fit input parameters
        self._params = Parameters()
        # we have to have opr max be greater than 3 so that fitting will work.
        # the fit algorithm does not like when the initial value is pinned at one
        # of the limits
        self._params.add('opr', value=3.0, min=1.0, max=3.5, vary=False)
        self._params.add('m1', value=0, min=-1, max=0)
        self._params.add('n1', value=15, min=10, max=30)
        self._params.add('m2', value=0, min=-1, max=0)
        self._params.add('n2', value=15, min=10, max=30)

    def _residual(self, params, x, data, error, idx):
        # We assume that the column densities passed in have been normalized
        # using the canonical OPR=3. Therefore what we are actually fitting is
        # the ratio of the actual OPR to the canonical OPR.
        # For odd J, input x = Nu/(3*(2J+1)), where 3 = canonical OPR.
        #
        # We want the model-data residual to be small, but if the opr
        # is different from the  canonical value of 3, then data[idx] will
        # be low by a factor of 3/opr.
        # So we must LOWER model[idx] artificially by dividing it by
        # 3/opr, i.e. multiplying by opr/3.  This is equivalent to addition in log-space.
        p = params.valuesdict()
        y1 = 10**(x * p['m1'] + p['n1'])
        y2 = 10**(x * p['m2'] + p['n2'])
        model = np.log10(y1 + y2)
        if params['opr'].vary:
            model += np.log10(p['opr'] / self._canonical_opr)
        return (model - data) / error

    def _modelfunc(self, x, m1, n1, m2, n2, opr, idx=[], fit_opr=False):
        '''Function for fitting the excitation curve as sum of two linear functions 
           and allowing ortho-to-para ratio to vary.  Para is even J, ortho is odd J.
           :param x: independent axis array
           :param m1: slope of first line
           :type m1: float
           :param n1: intercept of first line
           :type n1: float
           :param m2: slope of second line
           :type m2: float
           :param n2: intercept of second line
           :type n2: float
           :param opr: ortho-to-para ratio
           :type opr: float
           :type idx: np.ndarray
           :param idx: list of indices that may have variable opr (odd J transitions)
           :param fit_opr: indicate whether opr will be fit, default False (opr fixed)
           :type fit_opr: bool
           :return: Sum of lines in log space: log10(10**(x*m1+n1) + 10**(x*m2+n2)), plus log10(opr/3.0) for odd-J points when fit_opr is True
           :rtype: :class:`numpy.ndarray`
        '''
        y1 = 10**(x * m1 + n1)
        y2 = 10**(x * m2 + n2)

        model = np.log10(y1 + y2)
        # We assume that the column densities passed in have been normalized
        # using the canonical OPR=3. Therefore what we are actually fitting is
        # the ratio of the actual OPR to the canonical OPR.
        # For odd J, input x = Nu/(3*(2J+1)), where 3 = canonical OPR.
        #
        # We want the model-data residual to be small, but if the opr
        # is different from the  canonical value of 3, then data[idx] will
        # be low by a factor of 3/opr.
        # So we must LOWER model[idx] artificially by dividing it by
        # 3/opr, i.e. multiplying by opr/3.  This is equivalent to addition in log-space.
        if fit_opr:
            model[idx] += np.log10(opr / self._canonical_opr)
        return model

    def _init_model(self):
        #@todo make a separate class that subclasses Model.
        # potentially allow users to change it.
        self._model = Model(self._modelfunc)
        for p, q in self._params.items():
            self._model.set_param_hint(p,
                                       value=q.value,
                                       min=q.min,
                                       max=q.max,
                                       vary=q.vary)
            self._model.make_params()

    def _compute_quantities(self, fitmap):
        """Compute the temperatures and column densities for the hot and cold gas components.  This method will set class variables `_temperature` and `_colden`.
        
        :param params: The fit parameters returned from fit_excitation.
        :type params: :class:`lmfit.Parameters`
        """
        self._temperature = dict()
        # N(J=0) column density = intercept on y axis
        self._j0_colden = dict()
        # total column density = N(J=0)*Z(T) where Z(T) is partition function
        self._total_colden = dict()
        size = fitmap.data.size
        # create default arrays in which calculated values will be stored.
        # Use nan as fill value because there may be nans in fitmapdata, in which
        # case nothing need be done to arrays.
        # tc, th = cold and hot temperatures
        # utc, uth = uncertainties in cold and hot temperatures
        # nc, nh = cold and hot column densities
        # unc, unh = uncertainties in cold and hot column densities
        # opr = ortho to para ratio
        # uopr = uncertainty in OPR
        tc = np.full(shape=size, fill_value=np.nan, dtype=float)
        th = np.full(shape=size, fill_value=np.nan, dtype=float)
        utc = np.full(shape=size, fill_value=np.nan, dtype=float)
        uth = np.full(shape=size, fill_value=np.nan, dtype=float)
        nc = np.full(shape=size, fill_value=np.nan, dtype=float)
        nh = np.full(shape=size, fill_value=np.nan, dtype=float)
        unh = np.full(shape=size, fill_value=np.nan, dtype=float)
        unc = np.full(shape=size, fill_value=np.nan, dtype=float)
        opr = np.full(shape=size, fill_value=np.nan, dtype=float)
        uopr = np.full(shape=size, fill_value=np.nan, dtype=float)
        ff = fitmap.data.flatten()
        ffmask = fitmap.mask.flatten()
        for i in range(size):
            if ffmask[i]:
                continue
            params = ff[i].params
            for p in params:
                if params[p].stderr is None:
                    print("AT pixel i [mask]", i, ffmask[i])
                    params.pretty_print()
                    raise Exception(
                        "Something went wrong with the fit and it was unable to calculate errors on the fitted parameters. It's likely that a two-temperature model is not appropriate for your data. Check the fit_result report and plot."
                    )

            if params['m2'] < params['m1']:
                cold = '2'
                hot = '1'
            else:
                cold = '1'
                hot = '2'
            mcold = 'm' + cold
            mhot = 'm' + hot
            ncold = 'n' + cold
            nhot = 'n' + hot
            # cold and hot temperatures
            utc[i] = params[mcold].stderr / params[mcold]
            tc[i] = -utils.LOGE / params[mcold]
            uth[i] = params[mhot].stderr / params[mhot]
            th[i] = -utils.LOGE / params[mhot]
            nc[i] = 10**params[ncold]
            unc[i] = utils.LN10 * params[ncold].stderr * nc[i]
            nh[i] = 10**params[nhot]
            unh[i] = utils.LN10 * params[nhot].stderr * nh[i]
            opr[i] = params['opr'].value
            uopr[i] = params['opr'].stderr

        # now reshape them all back to map shape
        tc = tc.reshape(fitmap.data.shape)
        th = th.reshape(fitmap.data.shape)
        utc = utc.reshape(fitmap.data.shape)
        uth = uth.reshape(fitmap.data.shape)
        nc = nc.reshape(fitmap.data.shape)
        nh = nh.reshape(fitmap.data.shape)
        unh = unh.reshape(fitmap.data.shape)
        unc = unc.reshape(fitmap.data.shape)
        opr = opr.reshape(fitmap.data.shape)
        uopr = uopr.reshape(fitmap.data.shape)

        mask = fitmap.mask | np.logical_not(np.isfinite(tc))
        ucc = StdDevUncertainty(np.abs(tc * utc))
        self._temperature["cold"] = Measurement(data=tc,
                                                unit=self._t_units,
                                                uncertainty=ucc,
                                                wcs=fitmap.wcs,
                                                mask=mask)
        mask = fitmap.mask | np.logical_not(np.isfinite(th))
        uch = StdDevUncertainty(np.abs(th * uth))
        self._temperature["hot"] = Measurement(data=th,
                                               unit=self._t_units,
                                               uncertainty=uch,
                                               wcs=fitmap.wcs,
                                               mask=mask)
        # cold and hot total column density
        ucn = StdDevUncertainty(np.abs(unc))
        mask = fitmap.mask | np.logical_not(np.isfinite(nc))
        self._j0_colden["cold"] = Measurement(nc,
                                              unit=self._cd_units,
                                              uncertainty=ucn,
                                              wcs=fitmap.wcs,
                                              mask=mask)
        mask = fitmap.mask | np.logical_not(np.isfinite(nh))
        uhn = StdDevUncertainty(np.abs(unh))
        self._j0_colden["hot"] = Measurement(nh,
                                             unit=self._cd_units,
                                             uncertainty=uhn,
                                             wcs=fitmap.wcs,
                                             mask=mask)
        #
        self._total_colden["cold"] = self._j0_colden[
            "cold"] * self._partition_function(self.tcold)
        self._total_colden["hot"] = self._j0_colden[
            "hot"] * self._partition_function(self.thot)
        mask = fitmap.mask | np.logical_not(np.isfinite(opr))
        self._opr = Measurement(opr,
                                unit=u.dimensionless_unscaled,
                                uncertainty=StdDevUncertainty(uopr),
                                wcs=fitmap.wcs,
                                mask=mask)

    @property
    def fit_result(self):
        '''The result of the fitting procedure which includes fit statistics, variable values and uncertainties, and correlations between variables.
        
        :rtype:  :class:`lmfit.model.ModelResult`      
        '''
        return self._fitresult

    @property
    def opr_fitted(self):
        '''Was the ortho-to-para ratio fitted?
        
        :returns: True if OPR was fitted, False if canonical LTE value was used
        :rtype: bool
        '''
        if self._fitresult is None:
            return False
        return self._params['opr'].vary

    @property
    def opr(self):
        '''The ortho-to-para ratio (OPR)
        
        :returns: The fitted OPR if it was determined in the fit, otherwise the canonical LTE OPR
        :rtype: :class:`~pdrtpy.measurement.Measurement`
        '''
        return self._opr

    @property
    def intensities(self):
        '''The stored intensities. See :meth:`add_measurement`
         
           :rtype: list of :class:`~pdrtpy.measurement.Measurement`
        '''
        return self._measurements

    def colden(self, component):  #,log=False):
        '''The column density of hot or cold gas component, or total column density.
        
        :param component: 'hot', 'cold', or 'total'
        :type component: str
        
        :rtype: :class:`~pdrtpy.measurement.Measurement`
        '''
        #:param log: take the log10 of the column density
        cl = component.lower()
        if cl not in self._valid_components:
            raise KeyError(
                f"{cl} not a valid component. Must be one of {self._valid_components}"
            )
        #print(f'returning {cl}')
        if cl == 'total':
            return self.total_colden
        else:
            return self._total_colden[cl]

    @property
    def total_colden(self):
        '''The fitted total column density
        
        :rtype: :class:`~pdrtpy.measurement.Measurement` 
        '''
        return self._total_colden["hot"] + self._total_colden["cold"]

    @property
    def hot_colden(self):
        '''The fitted hot gas total column density
        
        :rtype: :class:`~pdrtpy.measurement.Measurement`         
        '''
        return self._total_colden["hot"]

    @property
    def cold_colden(self):
        '''The fitted cold gas total column density
        
        :rtype: :class:`~pdrtpy.measurement.Measurement`         
        '''
        return self._total_colden["cold"]

    @property
    def tcold(self):
        '''The fitted cold gas excitation temperature
        
        :rtype: :class:`~pdrtpy.measurement.Measurement` 
        '''
        return self._temperature['cold']  #self._fitparams.tcold

    @property
    def thot(self):
        '''The fitted hot gas excitation temperature
        
        :rtype: :class:`~pdrtpy.measurement.Measurement` 
        '''
        return self._temperature['hot']  #self._fitparams.thot

    @property
    def temperature(self):
        '''The fitted gas temperatures, returned in a dictionary with keys 'hot' and 'cold'.
        :rtype: dict
        '''
        return self._temperature

    def column_densities(self, norm=False, unit=utils._CM2, line=False):
        r'''The computed upper state column densities of stored intensities

           :param norm: if True, normalize the column densities by the 
                       statistical weight of the upper state, :math:`g_u`.  
                       Default: False
           :type norm: bool
           :param unit: The units in which to return the column density. Default: :math:`{\rm cm}^{-2}`
           :type unit: str or :class:`astropy.units.Unit`
           :param line: if True, the dictionary index is the Line name, 
                     otherwise it is the upper state :math:`J` number.  Default: False
           :type line: bool

           :returns: dictionary of column densities indexed by upper state :math:`J` number or Line name. Default: False means return indexed by :math:`J`.
           :rtype: dict
        '''
        # Compute column densities if needed.
        # Note: this has a gotcha - if user changes an existing intensity
        # Measurement in place, rather than replaceMeasurement(), the colden
        # won't get recomputed. But we warned them!
        #if not self._column_density or (len(self._column_density) != len(self._measurements)):

        # screw it. just always compute them.  Note to self: change this if it becomes computationally intensive
        self._compute_column_densities(unit=unit, line=line)
        if norm:
            cdnorm = dict()
            for cd in self._column_density:
                if line:
                    denom = self._ac.loc[cd]["g_u"]
                else:
                    denom = self._ac.loc['J_u', cd]["g_u"]
                # This fails with complaints about units:
                #self._column_density[cd] /= self._ac.loc[cd]["g_u"]
                #gu = Measurement(self._ac.loc[cd]["g_u"],unit=u.dimensionless_unscaled)
                cdnorm[cd] = self._column_density[cd] / denom
            #return #self._column_density
            return cdnorm
        else:
            return self._column_density

    def energies(self, line=False):
        '''Upper state energies of stored intensities, in K. 

           :param line: if True, the dictionary index is the Line name, 
                     otherwise it is the upper state :math:`J` number.  Default: False
           :type line: bool
           :returns: dictionary indexed by upper state :math:`J` number or Line name. Default: False means return indexed by :math:`J`.
           :rtype: dict
        '''
        t = dict()
        if line:
            for m in self._measurements:
                t[m] = self._ac.loc[m]["E_upper/k"]
        else:
            for m in self._measurements:
                t[self._ac.loc[m]["J_u"]] = self._ac.loc[m]["E_upper/k"]
        return t

    def run(self, position=None, size=None, fit_opr=False, **kwargs):
        r'''Fit the :math:`log N_u-E` diagram with two excitation temperatures,
        a ``hot`` :math:`T_{ex}` and a ``cold`` :math:`T_{ex}`. 
        
        If ``position`` and ``size`` are given, the data will be averaged over a spatial box before fitting.  The box is created using :class:`astropy.nddata.utils.Cutout2D`.  If position or size is None, the data are averaged over all pixels.  If the Measurements are single values, these arguments are ignored.

        :param position: The position of the cutout array's center with respect to the data array. The position can be specified either as a `(x, y)` tuple of pixel coordinates.
        :type position: tuple 
        :param size: The size of the cutout array along each axis in pixels. If size is a scalar number or a scalar :class:`~astropy.units.Quantity`, then a square cutout of size will be created. If `size` has two elements, they should be in `(nx, ny)` order [*this is the opposite of Cutout2D signature*]. Scalar numbers in size are assumed to be in units of pixels.  Default value of None means use all pixels (position is ignored)
        :type size: int, array_like`
        :param fit_opr: Whether to fit the ortho-to-para ratio or not. If True, the OPR will be varied to determine the best value. If False, the OPR is fixed at the canonical LTE value of 3.
        :type fit_opr: bool
        '''
        kwargs_opts = {
            'mask': None,
            'method': 'leastsq',
            'nan_policy': 'raise',
            'test': False,
            'profile': False,
        }
        kwargs_opts.update(kwargs)
        return self._fit_excitation(position, size, fit_opr, **kwargs_opts)
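
    # A hedged usage sketch (variable names illustrative):
    #   tool = H2ExcitationFit(measurements=my_measurements)
    #   tool.run(fit_opr=True)
    #   print(tool.tcold, tool.thot, tool.total_colden, tool.opr)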

    def intensity(self, colden):
        r'''Given an upper state column density :math:`N_u`, compute the intensity :math:`I`.

           .. math::
                 I = {A \Delta E~N_u \over 4\pi}     
              
        where :math:`A` is the Einstein A coefficient and :math:`\Delta E` is the energy of the transition.     
        
        :param colden: upper state column density
        :type colden: :class:`~pdrtpy.measurement.Measurement`
        :returns: optically thin intensity 
        :rtype: :class:`~pdrtpy.measurement.Measurement`
        '''
        # colden is N_upper
        dE = self._ac.loc[
            colden.id]["dE/k"] * constants.k_B.cgs * self._ac["dE/k"].unit
        A = self._ac.loc[colden.id]["A"] * self._ac["A"].unit
        v = A * dE / (4.0 * math.pi * u.sr)
        val = Measurement(data=v.value, unit=v.unit, identifier=colden.id)
        intensity = val * colden  # error will get propagated
        i = intensity.convert_unit_to(self._intensity_units)
        i._identifier = val.id
        return i

    def upper_colden(self, intensity, unit):
        '''Compute the column density in upper state :math:`N_u`, given an 
           intensity :math:`I` and assuming optically thin emission.  
           Units of :math:`I` need to be equivalent to 
           :math:`{\\rm erg~cm^{-2}~s^{-1}~sr^{-1}}`.

           .. math::
                 I &= {A \Delta E~N_u \over 4\pi}

                 N_u &= 4\pi {I\over A\Delta E}

           where :math:`A` is the Einstein A coefficient and :math:`\Delta E` is the energy of the transition.

           :param intensity: A :class:`~pdrtpy.measurement.Measurement` instance containing intensity in units equivalent to :math:`{\\rm erg~cm^{-2}~s^{-1}~sr^{-1}}`
           :type intensity: :class:`~pdrtpy.measurement.Measurement`
           :param unit: The units in which to return the column density. Default: :math:`{\\rm }cm^{-2}`
           :type unit: str or :class:`astropy.units.Unit`
           :returns: a :class:`~pdrtpy.measurement.Measurement` of the column density.
           :rtype: :class:`~pdrtpy.measurement.Measurement` 
        '''

        dE = self._ac.loc[
            intensity.id]["dE/k"] * constants.k_B.cgs * self._ac["dE/k"].unit
        A = self._ac.loc[intensity.id]["A"] * self._ac["A"].unit
        v = 4.0 * math.pi * u.sr / (A * dE)
        val = Measurement(data=v.value, unit=v.unit)
        N_upper = intensity * val  # error will get propagated
        return N_upper.convert_unit_to(unit)

    def _compute_column_densities(self, unit=utils._CM2, line=False):
        r'''Compute all upper level column densities for stored intensity measurements and put them in a dictionary.

           :param unit: The units in which to return the column density. Default: :math:`{\rm cm}^{-2}`
           :type unit: str or :class:`astropy.units.Unit`
           :param line: if True, the dictionary index is the Line name,
                     otherwise it is the upper state :math:`J` number.  Default: False
           :type line: bool

           # should we return something here, or just compute them and never store?
           # I'm beginning to think there is no reason to store them.
           #:returns: dictionary of column densities as :class:`~pdrtpy.measurement.Measurement` indexed by upper state :math:`J` number or Line name. Default: False means return indexed by :math:`J`.
           #:returns: a :class:`~pdrtpy.measurement.Measurement` of the column density.
        '''
        self._column_density = dict()
        for m in self._measurements:
            if line:
                index = m
            else:
                index = self._ac.loc[m]["J_u"]
            self._column_density[index] = self.upper_colden(
                self._measurements[m], unit)

    def gu(self, id, opr):
        r'''Get the upper state statistical weight $g_u$ for the given transition identifier, and, if the transition is odd-$J$, scale the result by the given ortho-to-para ratio.  If the transition is even-$J$, the LTE value is returned.
        
           :param id: the measurement identifier
           :type id: str
           :param opr:
           :type opr: float
           :raises KeyError: if id not in existing Measurements 
           :rtype: float
        '''
        if utils.is_even(self._ac.loc[id]["J_u"]):
            return self._ac.loc[id]["g_u"]
        else:
            #print("Ju=%d scaling by [%.2f/%.2f]=%.2f"%(self._ac.loc[id]["J_u"],opr,self._canonical_opr,opr/self._canonical_opr))
            return self._ac.loc[id]["g_u"] * opr / self._canonical_opr

    def average_column_density(self,
                               position=None,
                               size=None,
                               norm=True,
                               unit=utils._CM2,
                               line=False,
                               clip=-1E40 * u.Unit("cm-2")):
        r'''Compute the average column density over a spatial box.  The box is created using :class:`astropy.nddata.utils.Cutout2D`.

        :param position: The position of the cutout array's center with respect to the data array. The position can be specified either as a `(x, y)` tuple of pixel coordinates.
        :type position: tuple 
        :param size: The size of the cutout array along each axis. If size is a scalar number or a scalar :class:`~astropy.units.Quantity`, then a square cutout of size will be created. If `size` has two elements, they should be in `(nx,ny)` order [*this is the opposite of Cutout2D signature*]. Scalar numbers in size are assumed to be in units of pixels.  Default value of None means use all pixels (position is ignored)
        :type size: int, array_like`
        :param norm: if True, normalize the column densities by the 
                       statistical weight of the upper state, :math:`g_u`.  For ortho-$H_2$ $g_u = OPR \times (2J+1)$, for para-$H_2$ $g_u=2J+1$. In LTE, $OPR = 3$.
        :type norm: bool
        :param unit: The units in which to return the column density. Default: :math:`{\rm cm}^{-2}` 
        :type unit: str or :class:`astropy.units.Unit`
        :param line: if True, the returned dictionary index is the Line name, otherwise it is the upper state :math:`J` number.  
        :type line: bool
        :param clip: Column density value at which to clip pixels. Pixels with column densities below this value will not be used in the average. Default: a large negative number, which translates to no clipping.
        :type clip: :class:`astropy.units.Quantity`
        :returns: dictionary of column density Measurements, with keys as :math:`J` number or Line name
        :rtype:  dict
        '''
        #@todo
        # - should default clip = None?

        # Set norm=False because we normalize below if necessary.
        if position is not None and size is None:
            print("WARNING: ignoring position keyword since no size given")
        if position is None and size is not None:
            raise Exception(
                "You must supply a position in addition to size for cutout")
        if size is not None:
            if np.isscalar(size):
                size = np.array([size, size])
            else:
                #Cutout2D wants (ny,nx)
                size = np.array([size[1], size[0]])

        clip = clip.to("cm-2")
        cdnorm = self.column_densities(norm=norm, unit=unit, line=line)
        cdmeas = dict()
        for cd in cdnorm:
            ca = cdnorm[cd]
            if size is not None:
                if len(size) != len(ca.shape):
                    raise Exception(
                        f"Size dimensions [{len(size)}] don't match measurements [{len(ca.shape)}]"
                    )

                #if size[0] > ca.shape[0] or size[1] > ca.shape[1]:
                #    raise Exception(f"Requested cutout size {size} exceeds measurement size {ca.shape}")
                cutout = Cutout2D(ca.data,
                                  position,
                                  size,
                                  ca.wcs,
                                  mode='trim',
                                  fill_value=np.nan)
                w = Cutout2D(ca.uncertainty.array,
                             position,
                             size,
                             ca.wcs,
                             mode='trim',
                             fill_value=np.nan)
                cddata = np.ma.masked_array(cutout.data,
                                            mask=np.ma.mask_or(
                                                np.isnan(cutout.data),
                                                cutout.data < clip.value))
                weights = np.ma.masked_array(w.data, np.isnan(w.data))
                if False:
                    # save cutout as a test that we have the x,y correct in size param
                    t = Measurement(cddata,
                                    unit=ca.unit,
                                    uncertainty=StdDevUncertainty(weights),
                                    identifier=ca.id)
                    #t.write("cutout.fits",overwrite=True)

            else:
                cddata = ca.data
                # handle corner case of measurement.data with shape = (1,)
                # and StdDevUncertainty.array is shape = ().
                # They both have only one value but StdDevUncertainty stores
                # its data in a peculiar way.
                # alternative: check that type(ca.uncertainty.array) == np.ndarray would also work.
                if np.shape(ca.data) == (1, ) and np.shape(
                        ca.uncertainty.array) == ():
                    weights = np.array([ca.uncertainty.array])
                else:
                    weights = ca.uncertainty.array
            cdavg = np.average(cddata, weights=weights)
            error = np.nanmean(ca.error) / np.sqrt(ca.error.size)  #-1
            cdmeas[cd] = Measurement(data=cdavg,
                                     uncertainty=StdDevUncertainty(error),
                                     unit=ca.unit,
                                     identifier=cd)
        return cdmeas

    def _get_ortho_indices(self, ids):
        """Given a list of J values, return the indices of those that are ortho
        transitions (odd J)
        
        :param ids:
        :type ids: list of str 
        :returns: The array indices of the odd J values.
        :rtype: list of int
        """
        return np.where(self._ac.loc[ids]["J_u"] % 2 != 0)[0]

    def _get_para_indices(self, ids):
        """Given a list of J values, return the indices of those that are para
        transitions (even J)
        
        :param ids:
        :type ids: list of str 
        :returns: The array indices of the even J values.
        :rtype: list of int
        """
        return np.where(self._ac.loc[ids]["J_u"] % 2 == 0)[0]

    # Currently unused. In future, may allow users to give first guesses at the temperatures, though it is not clear these would be better than _first_guess(). Also, this does nothing for the intercepts.
    def _slopesfromguess(self, guess):
        """given a guess of two temperatures, compute slopes from them"""
        if guess[0] < guess[1]:
            tcold, thot = guess[0], guess[1]
        else:
            tcold, thot = guess[1], guess[0]
        slope = [-utils.LOGE / tcold, -utils.LOGE / thot]
        return slope

    def _first_guess(self, x, y):
        r"""The first guess at the fit parameters is done by finding the line between the first two (lowest energy) points to determine $T_{cold}and between the last two (highest energy) points to determine $T_{hot}. The first guess is needed to ensure the final fit converges.  The guess doesn't need to be perfect, just in the ballpark.
        
        :param x: array of energies, $E/k$
        :type x: numpy array
        :param y: array of normalized column densities $N_u/g_u$
        :type y: numpy array
        """
        slopecold = (y[1] - y[0]) / (x[1] - x[0])
        slopehot = (y[-1] - y[-2]) / (x[-1] - x[-2])
        intcold = y[1] - slopecold * x[1]
        inthot = y[-1] - slopehot * x[-1]
        #print("FG ",type(slopecold),type(slopehot),type(intcold),type(inthot))
        return np.array([slopecold, intcold, slopehot, inthot])

    def _fit_excitation(self, position, size, fit_opr=False, **kwargs):
        """Fit the :math:`log N_u-E` diagram with two excitation temperatures,
        a ``hot`` :math:`T_{ex}` and a ``cold`` :math:`T_{ex}`.  A first
        pass guess is initially made using data partitioning and two
        linear fits. 

        If ``position`` and ``size`` are given, the data will be averaged over a spatial box before fitting.  The box is created using :class:`astropy.nddata.utils.Cutout2D`.  If position or size is None, the data are averaged over all pixels.  If the Measurements are single values, these arguments are ignored.

        :param position: The position of the cutout array's center with respect to the data array. The position can be specified either as a `(x, y)` tuple of pixel coordinates or a :class:`~astropy.coordinates.SkyCoord`, which will use the :class:`~astropy.wcs.WCS` of the :class:`~pdrtpy.measurement.Measurement`s added to this tool. See :class:`~astropy.nddata.utils.Cutout2D`.
        :type position: tuple or :class:`astropy.coordinates.SkyCoord` 
        :param size: The size of the cutout array along each axis. If size is a scalar number or a scalar :class:`~astropy.units.Quantity`, then a square cutout of size will be created. If `size` has two elements, they should be in `(ny, nx)` order. Scalar numbers in size are assumed to be in units of pixels. `size` can also be a :class:`~astropy.units.Quantity` object or contain :class:`~astropy.units.Quantity` objects. Such :class:`~astropy.units.Quantity` objects must be in pixel or angular units. For all cases, size will be converted to an integer number of pixels, rounding to the nearest integer. See the mode keyword for additional details on the final cutout size. Default value of None means use all pixels (position is ignored)
        :type size: int, array_like, or :class:`astropy.units.Quantity`
        :param fit_opr: Whether to fit the ortho-to-para ratio or not. If True, the OPR will be varied to determine the best value. If False, the OPR is fixed at the canonical LTE value of 3.
        :type fit_opr: bool
        :returns: The fit result, which contains slopes, intercepts, the ortho-to-para ratio (OPR), and fit statistics
        :rtype:  :class:`lmfit.model.ModelResult`  
        """
        profile = kwargs.pop('profile')
        self._stats = None
        if profile:
            pr = cProfile.Profile()
            pr.enable()
        if fit_opr:
            min_points = 5
        else:
            min_points = 4
            self._opr = Measurement(data=[self._canonical_opr],
                                    uncertainty=None)

        self._params['opr'].vary = fit_opr
        energy = self.energies(line=True)
        _ee = np.array([c for c in energy.values()])
        #@ todo: allow fitting of one-temperature model
        if len(_ee) < min_points:
            raise Exception(
                f"You need at least {min_points:d} data points to determine a two-temperature model"
            )
        if len(_ee) == min_points:
            warnings.warn(
                f"Number of data points equals the number of free parameters ({min_points:d}); the fit will have zero degrees of freedom"
            )
        _energy = Measurement(_ee, unit="K")
        _ids = list(energy.keys())
        idx = self._get_ortho_indices(_ids)
        # Get Nu/gu.  Canonical opr will be used.
        if position is None or size is None:
            colden = self.column_densities(norm=True, line=True)
        else:

            colden = self.average_column_density(norm=True,
                                                 position=position,
                                                 size=size,
                                                 line=True)

        # Need to stuff the data into a single vector
        _cd = np.squeeze(np.array([c.data for c in colden.values()]))
        _er = np.squeeze(np.array([c.error for c in colden.values()]))
        _colden = Measurement(_cd,
                              uncertainty=StdDevUncertainty(_er),
                              unit="cm-2")
        fk = utils.firstkey(colden)
        x = _energy.data
        y = np.log10(_colden.data)
        #print("SHAPE Y LEN(SHAPE(Y) ",y.shape,len(y.shape))
        #kwargs_opts = {"guess": self._first_guess(x,y)}
        #kwargs_opts.update(kwargs)
        sigma = utils.LOGE * _colden.error / _colden.data
        slopecold, intcold, slopehot, inthot = self._first_guess(x, y)
        #print(slopecold,slopehot,intcold,inthot)
        tcold = (-utils.LOGE / slopecold)
        thot = (-utils.LOGE / slopehot)
        if np.shape(tcold) == ():
            tcold = np.array([tcold])
            thot = np.array([thot])
        saveshape = tcold.shape
        #print("TYPE COLD SIT",type(slopecold),type(intcold),type(tcold))
        #print("SHAPES: colden/sigma/slope/int/temp/cd: ",np.shape(_colden),np.shape(sigma),np.shape(slopecold),np.shape(intcold),np.shape(tcold),np.shape(_cd))
        #print("First guess at excitation temperatures:\n T_cold = %.1f K\n T_hot = %.1f K"%(tcold,thot))
        fmdata = np.empty(tcold.shape, dtype=object).flatten()
        #fm = FitMap(data=fmdata,wcs=colden[fk].wcs,uncertainty=None,unit=None)
        tcold = tcold.flatten()
        thot = thot.flatten()
        slopecold = slopecold.flatten()
        slopehot = slopehot.flatten()
        inthot = inthot.flatten()
        intcold = intcold.flatten()
        #sigma = sigma.flatten()
        # flatten any dimensions past 0
        shp = y.shape
        #print("NS ",shp[0],shp[1:])
        if len(shp) == 1:
            #print("adding new axis")
            y = y[:, np.newaxis]
            shp = y.shape
        yr = y.reshape((shp[0], np.prod(shp[1:])))
        sig = sigma.reshape((shp[0], np.prod(shp[1:])))
        #print("YR, SIG SHAPE",yr.shape,sig.shape)
        count = 0
        #print("LEN(TCOLD)",len(tcold))
        total = len(tcold)
        fm_mask = np.full(shape=tcold.shape, fill_value=False)
        # Suppress the incorrect warning about model parameters
        warnings.simplefilter('ignore', category=UserWarning)
        excount = 0
        badfit = 0
        # update whether opr is allowed to vary or not.
        self._model.set_param_hint('opr', vary=fit_opr)
        # use progress bar if more than one pixel
        if total > 1:
            progress = kwargs.pop("progress", True)
        else:
            progress = False
        with get_progress_bar(progress, total, leave=True, position=0) as pbar:
            for i in range(total):
                if np.isfinite(yr[:, i]).all() and np.isfinite(sig[:, i]).all():
                    # update Parameter hints based on first guess.
                    self._model.set_param_hint('m1',
                                               value=slopecold[i],
                                               vary=True)
                    self._model.set_param_hint('n1',
                                               value=intcold[i],
                                               vary=True)
                    self._model.set_param_hint('m2',
                                               value=slopehot[i],
                                               vary=True)
                    self._model.set_param_hint('n2',
                                               value=inthot[i],
                                               vary=True)
                    p = self._model.make_params()
                    wts = 1.0 / (sig[:, i] * sig[:, i])
                    try:
                        fmdata[i] = self._model.fit(
                            data=yr[:, i],
                            weights=wts,
                            x=x,
                            idx=idx,
                            fit_opr=fit_opr,
                            method=kwargs['method'],
                            nan_policy=kwargs['nan_policy'])
                        if fmdata[i].success and fmdata[i].errorbars:
                            count = count + 1
                        else:
                            fmdata[i] = None
                            fm_mask[i] = True
                            badfit = badfit + 1
                    except ValueError:
                        fmdata[i] = None
                        fm_mask[i] = True
                        excount = excount + 1
                else:
                    fmdata[i] = None
                    fm_mask[i] = True
                pbar.update(1)
        warnings.resetwarnings()
        fmdata = fmdata.reshape(saveshape)
        fm_mask = fm_mask.reshape(saveshape)
        self._fitresult = FitMap(fmdata,
                                 wcs=colden[fk].wcs,
                                 mask=fm_mask,
                                 name="result")
        # this will raise an exception if the fit was bad (fit errors == None)
        self._compute_quantities(self._fitresult)
        print(f"fitted {count} of {slopecold.size} pixels")
        print(f'got {excount} exceptions and {badfit} bad fits')
        #if successful, set the used position and size
        self._position = position
        self._size = size
        if profile:
            pr.disable()
            s = io.StringIO()
            sortby = SortKey.CUMULATIVE
            ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
            ps.print_stats()
            self._stats = s

    def _two_lines(self, x, m1, n1, m2, n2):
        '''This function is used to partition a fit to data using two lines and 
           an inflection point.  Second slope is steeper because slopes are 
           negative in excitation diagram.

           :param x: array of x values
           :type x: :class:`numpy.ndarray` 
           :param m1: slope of first line
           :type m1: float
           :param n1: intercept of first line
           :type n1: float
           :param m2: slope of second line
           :type m2: float
           :param n2: intercept of second line
           :type n2: float

            See https://stackoverflow.com/questions/48674558/how-to-implement-automatic-model-determination-and-two-state-model-fitting-in-py
        '''
        return np.max([m1 * x + n1, m2 * x + n2], axis=0)

    def _one_line(self, x, m1, n1):
        '''Return a line.

           :param x: array of x values
           :type x: :class:`numpy.ndarray` 
           :param m1: slope of first line
           :type m1: float
           :param n1: intercept of first line
           :type n1: float
        '''
        return m1 * x + n1

    def _partition_function(self, tex):
        '''Calculate the H2 partition function given an excitation temperature
        
        :param tex: the excitation temperature
        :type tex: :class:`~pdrtpy.measurement.Measurement` or :class:`astropy.units.quantity.Quantity`
        :returns: the partition function value
        :rtype: numpy.ndarray
        '''
        # See Herbst et al 1996
        # http://articles.adsabs.harvard.edu/pdf/1996AJ....111.2403H
        # Z(T) = 0.0247*T * [1 - exp(-6000/T)]^-1

        # This is just being defensive.  I know the temperatures used internally are in K.
        t = np.ma.masked_invalid((tex.value * u.Unit(tex.unit)).to(
            "K", equivalencies=u.temperature()).value)
        t.mask = np.logical_or(t.mask, np.logical_not(np.isfinite(t)))
        z = 0.0247 * t / (1.0 - np.exp(-6000.0 / t))
        return z
Example #10
def test_water(io_fix):
    # Load data
    res = load_nexus(io_fix['irs_res_f'])
    dat = load_nexus(io_fix['irs_red_f'])
    q_vals = io_fix['q_values']

    # Define the fitting range
    e_min = -0.4
    e_max = 0.4
    # Find indexes of dat['x'] with values in (e_min, e_max)
    mask = np.intersect1d(np.where(dat['x'] > e_min),
                          np.where(dat['x'] < e_max))
    # Drop data outside the fitting range
    fr = dict()  # fitting range. Use in place of 'dat'
    fr['x'] = dat['x'][mask]
    fr['y'] = np.asarray([y[mask] for y in dat['y']])
    fr['e'] = np.asarray([e[mask] for e in dat['e']])

    # Create the model
    def generate_model_and_params(spectrum_index=None):
        r"""Produce an LMFIT model and related set of fitting parameters"""

        sp = '' if spectrum_index is None else '{}_'.format(
            spectrum_index)  # prefix if spectrum_index passed

        # Model components
        intensity = ConstantModel(prefix='I_' + sp)  # I_amplitude
        elastic = DeltaDiracModel(prefix='e_' + sp)  # e_amplitude, e_center
        # l_amplitude, l_center, l_sigma (also l_fwhm, l_height)
        inelastic = LorentzianModel(prefix='l_' + sp)
        # r_amplitude, r_center (both fixed)
        resolution = TabulatedResolutionModel(res['x'],
                                              res['y'],
                                              prefix='r_' + sp)
        background = LinearModel(prefix='b_' + sp)  # b_slope, b_intercept

        # Putting it all together
        model = intensity * Convolve(resolution,
                                     elastic + inelastic) + background
        parameters = model.make_params()  # model params are a separate entity

        # Ties and constraints
        parameters['e_' + sp + 'amplitude'].set(min=0.0, max=1.0)
        parameters['l_' + sp + 'center'].set(expr='e_' + sp +
                                             'center')  # centers tied
        parameters['l_' + sp + 'amplitude'].set(expr='1 - e_' + sp +
                                                'amplitude')

        # Some initial sensible values
        init_vals = {
            'I_' + sp + 'c': 1.0,
            'e_' + sp + 'amplitude': 0.5,
            'l_' + sp + 'sigma': 0.01,
            'b_' + sp + 'slope': 0,
            'b_' + sp + 'intercept': 0
        }
        for p, v in init_vals.items():
            parameters[p].set(value=v)

        return model, parameters

    # Call the function
    model, params = generate_model_and_params()

    # Initial guess for first spectrum. Only set free parameters
    for name, value in dict(I_c=4.0,
                            e_center=0,
                            e_amplitude=0.1,
                            l_sigma=0.03,
                            b_slope=0,
                            b_intercept=0).items():
        params[name].set(value=value)
    # Carry out the fit
    fit = model.fit(fr['y'][0],
                    x=fr['x'],
                    params=params,
                    weights=1.0 / fr['e'][0])
    assert_almost_equal(fit.redchi, 1.72, decimal=2)

    # Carry out sequential fit
    n_spectra = len(fr['y'])
    fits = [
        None,
    ] * n_spectra  # store fits for all the tried spectra
    fits[0] = fit  # store previous fit
    for i in range(1, n_spectra):
        y_exp = fr['y'][i]
        e_exp = fr['e'][i]
        fit = model.fit(y_exp, x=fr['x'], params=params, weights=1.0 / e_exp)
        fits[i] = fit  # store fit results
    assert_almost_equal(
        [f.redchi for f in fits],
        [1.72, 1.15, 0.81, 0.73, 0.73, 0.75, 0.81, 0.86, 0.75, 0.91],
        decimal=2)

    # Fit HWHM(Q^2) with Teixeira model
    hwhms = 0.5 * np.asarray([fit.params['l_fwhm'].value for fit in fits])
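    # (for lmfit's LorentzianModel, fwhm = 2*sigma, so these half-widths
    # correspond directly to the l_sigma parameters tied below)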

    def teixeira(q2s, difcoef, tau):
        dq2 = difcoef * q2s
        return hbar * dq2 / (1 + dq2 * tau)

    teixeira_model = Model(teixeira)  # create LMFIT Model instance
    teixeira_model.set_param_hint('difcoef', min=0)
    teixeira_model.set_param_hint('tau', min=0)
    # Carry out the fit from an initial guess
    teixeira_params = teixeira_model.make_params(difcoef=1.0, tau=1.0)
    teixeira_fit = teixeira_model.fit(hwhms,
                                      q2s=np.square(q_vals),
                                      params=teixeira_params)
    assert_almost_equal(
        [teixeira_fit.best_values['difcoef'], teixeira_fit.best_values['tau']],
        [0.16, 1.11],
        decimal=2)

    # Model for Simultaneous Fit of All Spectra with Teixeira Water Model
    #
    # create one model for each spectrum, but collect all parameters under
    # a single instance of the Parameters class.
    l_model = list()
    g_params = lmfit.Parameters()
    for i in range(n_spectra):
        # model and parameters for one of the spectra
        m, ps = generate_model_and_params(spectrum_index=i)
        l_model.append(m)
        for p in ps.values():
            g_params.add(p)

    # Initialize parameter set with optimized parameters from sequential fit
    for i in range(n_spectra):
        optimized_params = fits[i].params  # these are I_c, e_amplitude,...
        for name in optimized_params:
            # for instance, 'e_amplitude' is split into 'e' and 'amplitude'
            prefix, base = name.split('_')
            # i_name is 'e_3_amplitude' for i=3
            i_name = prefix + '_{}_'.format(i) + base
            g_params[i_name].set(value=optimized_params[name].value)

    # Introduce global parameters difcoef and tau.
    # Use previous optimized values as initial guess
    o_p = teixeira_fit.params
    g_params.add('difcoef', value=o_p['difcoef'].value, min=0)
    g_params.add('tau', value=o_p['tau'].value, min=0)

    # Tie each lorentzian l_i_sigma to the teixeira expression
    for i in range(n_spectra):
        q2 = q_vals[i] * q_vals[i]
        fmt = '{hbar}*difcoef*{q2}/(1+difcoef*{q2}*tau)'
        teixeira_expression = fmt.format(hbar=hbar, q2=q2)
        g_params['l_{}_sigma'.format(i)].set(expr=teixeira_expression)

    # Carry out the Simultaneous Fit
    def residuals(params):
        l_residuals = list()
        for i in range(n_spectra):
            x = fr['x']  # fitting range of energies
            y = fr['y'][i]  # associated experimental intensities
            e = fr['e'][i]  # associated experimental errors
            model_evaluation = l_model[i].eval(x=x, params=params)
            l_residuals.append((model_evaluation - y) / e)
        return np.concatenate(l_residuals)

    # Minimizer object using the parameter set for all models and the
    # function to calculate all the residuals.
    minimizer = lmfit.Minimizer(residuals, g_params)
    g_fit = minimizer.minimize()
    assert_almost_equal(g_fit.redchi, 0.93, decimal=2)
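
    # A minimal sketch of inspecting the global fit: report the optimized
    # Teixeira parameters with their estimated uncertainties (stderr may be
    # None if the covariance could not be estimated).
    for pname in ('difcoef', 'tau'):
        par = g_fit.params[pname]
        print('{}: {:.4f} +/- {}'.format(pname, par.value, par.stderr))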
Example no. 12
0
def extract_soi_from_wal(field, r, reference_field, max_field,
                         model='full', truncation=1000,
                         guess_from_previous=True, guesses=None,
                         plot_fit=False, method='least_squares',
                         weight_method='gauss', weight_stiffness=1.0,
                         htr=None, cubic_soi=None, density=None,
                         plot_path=None):
    """Extract the SOI parameters from fitting the wal conductance.

    This algorithm assumes that the data are properly centered.

    The fitted values are expressed in the unit of the input field (the guesses
    should use the same convention).

    Parameters
    ----------
    field : np.ndarray
        Magnetic field values for which the resistance was measured.
        This can be a multidimensional array in which case the last
        dimension will be considered as the swept dimension.
    r : np.ndarray
        Resistance values in Ω which were measured.
        This can be a multidimensional array in which case the last dimension
        will be considered as the swept dimension.
    reference_field : float
        Field used as a reference to eliminate possible experimental offsets.
    max_field : float
        Maximum field to consider when fitting the data since we know that the
        theory breaks down at high field.
    model : {'full', 'simplified'}, optional
        Model used to describe the WAL. 'simplified' corresponds to the
        situation in which either the Rashba term or the linear Dresselhaus
        term can be neglected. 'full' corresponds to a more complete model;
        however, each evaluation of the fitting function requires finding the
        eigenvalues of a large matrix and may consequently be slow.
    truncation : int, optional
        Both models imply a truncation of the number of Landau levels
        considered: in the 'simplified' case this enters the evaluation of a
        series, in the 'full' case the size of the matrix whose eigenvalues
        need to be computed.
    guess_from_previous : bool, optional
        When fitting multiple sweeps, use the fitted values of the previous
        sweep as the initial guess for the next one.
    guesses : np.ndarray, optional
        Guessed fit parameters to use. These should include the dephasing
        field, both linear fields (for 'simplified' the second one is
        ignored) and the cubic term field.
    plot_fit : bool, optional
        Should each fit be plotted to allow manual verification.
    method : str, optional
        Algorithm to use to perform the fit. See lmfit.minimize documentation
        for acceptable values.
    weight_method : str, optional
        Weighting profile, passed as the `mask` argument of weight_wal_data
        ('gauss' by default).
    weight_stiffness : float, optional
        Stiffness of the weighting profile, passed to weight_wal_data.
    htr : np.ndarray, optional
        Transport field of each sweep; when provided it is marked on the
        plots.
    cubic_soi : float, optional
        If provided, the cubic Dresselhaus field is fixed to this value
        instead of being fitted.
    density : np.ndarray, optional
        Carrier density of each sweep, used only in the plot titles.
    plot_path : str, optional
        Directory in which to save the generated plots (currently unused).

    Returns
    -------
    dephasing_field : np.ndarray
        Dephasing field and standard deviation.
    linear_soi : np.ndarray
        Fields and standard deviations corresponding to the linear SOI terms.
        This is returned as a 2x2x... array in which the first row is the
        Rashba term and the second the Dresselhaus term.
    cubic_soi_field : np.ndarray
        Field and standard deviation corresponding to the cubic Dresselhaus
        term.

    """
    # Identify the shape of the data and make them suitable for the following
    # treatment.
    if len(field.shape) >= 2:
        original_shape = field.shape[:-1]
        trace_number = np.prod(original_shape)
        field = field.reshape((trace_number, -1))
        r = r.reshape((trace_number, -1))
        if guesses is not None and len(guesses) == 4:
            g = np.empty(original_shape + (4,))
            for i in range(4):
                g[..., i] = guesses[i]
            guesses = g
        if guesses is not None:
            guesses = guesses.reshape((trace_number, -1))
        else:
            guesses = np.array([None]*trace_number)
    else:
        original_shape = ()
        trace_number = 1
        field = np.array((field,))
        r = np.array((r,))
        guesses = np.array((guesses,))

    results = np.zeros((4, 2, trace_number))

    # Express the conductance in usual WAL normalization. (e^2/(2πh))
    # W. Knap et al.,
    # Weak Antilocalization and Spin Precession in Quantum Wells.
    # Physical Review B. 53, 3912–3924 (1996).
    sigma = (1/r) / (cs.e**2/(2*np.pi*cs.Planck))

    # Create the fitting model
    if model == 'full':
        # The 'full' model is not currently supported
        raise ValueError("Unsupported model: 'full'")
    else:
        model_obj = Model(simple_wal)
        model_obj.set_param_hint('series_truncation',
                                 value=truncation,
                                 vary=False)
    model_obj.set_param_hint('low_field_reference',
                             value=reference_field,
                             vary=False)

    names = (('dephasing_field', 'linear_soi_rashba', 'linear_soi_dressel',
              'cubic_soi') if model == 'full' else
             ('dephasing_field', 'linear_soi', '', 'cubic_soi'))
    for name in [n for n in names if n]:
        if name == 'cubic_soi' and cubic_soi is not None:
            # the cubic term is fixed to the user-provided value
            model_obj.set_param_hint(name, value=cubic_soi, vary=False)
        elif name == 'dephasing_field':
            model_obj.set_param_hint(name, min=0, value=0.0003)
        else:
            model_obj.set_param_hint(name, min=0, value=0.01)

    if cubic_soi is None:
        model_obj.set_param_hint('soi', min=0, value=0.01)
        model_obj.set_param_hint('rashba_fraction', min=0, max=1, value=1)
        model_obj.set_param_hint('linear_soi', expr='soi*rashba_fraction')
        model_obj.set_param_hint('cubic_soi', expr='soi*(1-rashba_fraction)')

    params = model_obj.make_params()

    # Perform a fit for each magnetic field sweep
    for i in range(trace_number):

        print(f'Treating WAL trace {i+1}/{trace_number}')

        # Keep only the data at positive field, since the data were
        # symmetrized
        mask = np.where(np.logical_and(np.greater(field[i], 0.0002),
                                       np.less(field[i], max_field)))
        f, s = field[i][mask], sigma[i][mask]

        # Find the conductance at the reference field and compute Δσ
        ref_ind = np.argmin(np.abs(f - reference_field))
        reference_field = f[ref_ind]
        dsigma = s - s[ref_ind]

        # Build the weights
        weights = weight_wal_data(f, dsigma, mask=weight_method,
                                  stiffness=weight_stiffness)

        # Set the initial values for the parameters
        if i != 0 and guess_from_previous:
            params = res.params
        elif guesses[i] is not None:
            for n, v in zip(names, guesses[i]):
                if n and (n != 'cubic_soi' or cubic_soi is None):
                    params[n].value = v
        params['low_field_reference'].value = reference_field

        # Perform the fit
        res = model_obj.fit(dsigma, params, field=f, method='nelder',
                            weights=weights)
        res = model_obj.fit(dsigma, res.params, field=f, method=method,
                            weights=weights)
        for j, n in enumerate(names):
            if not n:
                continue
            results[j, 0, i] = res.best_values[n]
            # stderr can be None when no uncertainty could be estimated
            stderr = res.params[n].stderr
            results[j, 1, i] = stderr if stderr is not None else 0.0

        # If requested, plot the result.
        if plot_fit:
            fig, ax = plt.subplots(constrained_layout=True)
            if density is not None:
                fig.suptitle(f'Density {density[i]/1e4:.1e} (cm$^2$)')
            ax.plot(field[i]*1e3, sigma[i] - s[ref_ind], '+')
            ax.plot(np.concatenate((-f[::-1], f))*1e3,
                    np.concatenate((res.best_fit[::-1], res.best_fit)))
            ax.set_xlabel('Magnetic field B (mT)')
            ax.set_ylabel(r'Δσ(B) - Δσ(0) ($\frac{e^2}{2\pi h}$)')
            amp = abs(np.max(s - s[ref_ind]) - np.min(s - s[ref_ind]))
            ax.set_ylim((None, np.max(s - s[ref_ind]) + 0.1*amp))
            if htr is None:
                ax.set_xlim((-max_field*1e3, max_field*1e3))
            else:
                # ax.set_xlim((-5*htr[i]*1e3, 5*htr[i]*1e3))
                ax.set_xlim((-50, 50))
            if htr is not None:
                ax.axvline(htr[i]*1e3, color='k', label='H$_{tr}$')
            ax.legend()
            # if plot_path:
            #     path = os.path.join(plot_path,
            #                         f'fit_{i}_n_{density[i]}.pickle')
            #     with open(path, 'wb') as fig_pickle:
            #         pickle.dump(fig, fig_pickle)
            # ax2 = ax.twinx()
            # ax2.plot(f*1e3, weights, color='C2')
            # plt.show()

    results = results.reshape((4, 2) + original_shape)
    return results[0], results[1:3], results[3]
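
# A minimal usage sketch, assuming `field_2d` and `r_2d` hold measured
# sweeps shaped (n_sweeps, n_points); the numeric values below are
# illustrative placeholders, not recommended settings.
#
#     dephasing, linear_soi, cubic = extract_soi_from_wal(
#         field_2d, r_2d,
#         reference_field=5e-4,
#         max_field=0.05,
#         model='simplified',
#         guesses=np.array([3e-4, 1e-2, 0.0, 1e-2]),
#     )
#     print('Dephasing field:', dephasing[0], '+/-', dephasing[1])
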
Example no. 13
0
def fit_double_gaussian(x_data,
                        y_data,
                        maxiter=None,
                        maxfun=None,
                        verbose=1,
                        initial_params=None):
    """ Fitting of double gaussian

    Fits the two Gaussians and finds the split between the up and the down
    state; the separation between the maxima of the two Gaussians is measured
    in units of the sum of their standard deviations.

    Args:
        x_data (array): x values of the data
        y_data (array): y values of the data
        maxiter (int): Legacy argument, not used any more
        maxfun (int): Legacy argument, not used any more
        verbose (int): set to >0 to print convergence messages
        initial_params (None or array): optional, initial guess for the fit parameters:
            [A_dn, A_up, sigma_dn, sigma_up, mean_dn, mean_up]

    Returns:
        par_fit (array): fit parameters of the double gaussian: [A_dn, A_up, sigma_dn, sigma_up, mean_dn, mean_up]
        result_dict (dict): dictionary with results of the fit. Fields guaranteed in the dictionary:
            parameters (array): Fitted parameters
            parameters initial guess (array): initial guess for the fit parameters, either the ones given to the
                function or generated by the function: [A_dn, A_up, sigma_dn, sigma_up, mean_dn, mean_up]
            reduced_chi_squared (float): Reduced chi squared value of the fit
            separation (float): separation between the max of the two gaussians measured in the sum of the std
            split (float): value that separates the up and the down level
            left (array), right (array): Parameters of the left and right fitted Gaussian

    """
    if maxiter is not None:
        warnings.warn('argument maxiter is not used any more')
    if maxfun is not None:
        warnings.warn('argument maxfun is not used any more')

    if initial_params is None:
        initial_params = _estimate_double_gaussian_parameters(x_data, y_data)

    def _double_gaussian(x, A_dn, A_up, sigma_dn, sigma_up, mean_dn, mean_up):
        """ Double Gaussian helper function for lmfit """
        gauss_dn = gaussian(x, mean_dn, sigma_dn, A_dn)
        gauss_up = gaussian(x, mean_up, sigma_up, A_up)
        double_gauss = gauss_dn + gauss_up
        return double_gauss

    lmfit_method = 'least_squares'
    double_gaussian_model = Model(_double_gaussian)
    delta_x = x_data.max() - x_data.min()
    bounds = [x_data.min() - .1 * delta_x, x_data.max() + .1 * delta_x]
    double_gaussian_model.set_param_hint('mean_up',
                                         min=bounds[0],
                                         max=bounds[1])
    double_gaussian_model.set_param_hint('mean_dn',
                                         min=bounds[0],
                                         max=bounds[1])
    double_gaussian_model.set_param_hint('A_up', min=0)
    double_gaussian_model.set_param_hint('A_dn', min=0)

    param_names = double_gaussian_model.param_names
    result = double_gaussian_model.fit(y_data,
                                       x=x_data,
                                       **dict(zip(param_names,
                                                  initial_params)),
                                       verbose=False,
                                       method=lmfit_method)

    par_fit = np.array([result.best_values[p] for p in param_names])

    if par_fit[4] > par_fit[5]:
        par_fit = np.take(par_fit, [1, 0, 3, 2, 5, 4])
    # separation: difference between the peak positions (means) of the two
    # Gaussians divided by the sum of their standard deviations
    separation = (par_fit[5] - par_fit[4]) / (abs(par_fit[2]) +
                                              abs(par_fit[3]))
    # split: equidistant from both peaks, measured in standard deviations
    # from each peak
    weighted_distance_split = par_fit[4] + separation * abs(par_fit[2])

    result_dict = {
        'parameters': par_fit,
        'parameters initial guess': initial_params,
        'separation': separation,
        'split': weighted_distance_split,
        'reduced_chi_squared': result.redchi,
        'left': np.take(par_fit, [4, 2, 0]),
        'right': np.take(par_fit, [5, 3, 1]),
        'type': 'fitted double gaussian'
    }

    return par_fit, result_dict
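
# A minimal usage sketch with synthetic data, assuming the module-level
# `gaussian(x, mean, sigma, amplitude)` helper used by the fit above and
# its parameter-estimation helper.
dg_x = np.linspace(-1.0, 1.0, 200)
dg_y = gaussian(dg_x, -0.3, 0.1, 1.0) + gaussian(dg_x, 0.4, 0.15, 0.8)
dg_par, dg_info = fit_double_gaussian(dg_x, dg_y)
print('separation: {:.2f}, split: {:.2f}'.format(dg_info['separation'],
                                                 dg_info['split']))
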
Example no. 14
0
def fit_gaussian(x_data,
                 y_data,
                 maxiter=None,
                 maxfun=None,
                 verbose=0,
                 initial_parameters=None,
                 initial_params=None,
                 estimate_offset=True):
    """ Fitting of a gaussian, see function 'gaussian' for the model that is fitted

    The final optimization of the fit is performed with `lmfit <https://lmfit.github.io/lmfit-py/>`_
    using the `least_squares` method.

    Args:
        x_data (array): x values of the data
        y_data (array): y values of the data
        verbose (int): set positive for verbose fit
        initial_parameters (None or array): optional, initial guess for the
            fit parameters: [mean, s, amplitude, offset]
        estimate_offset (bool): If True then include offset in the Gaussian parameters

        maxiter (int): Legacy argument, not used
        maxfun (int): Legacy argument, not used

    Returns:
        par_fit (array): fit parameters of the gaussian: [mean, s, amplitude, offset]
        result_dict (dict): result dictionary containing the fit parameters and the initial guess parameters
    """

    if initial_params is not None:
        warnings.warn('use initial_parameters instead of initial_params')
        initial_parameters = initial_params
    if maxiter is not None:
        warnings.warn('argument maxiter is not used any more')
    if maxfun is not None:
        warnings.warn('argument maxfun is not used any more')

    if initial_parameters is None:
        initial_parameters = _estimate_initial_parameters_gaussian(
            x_data, y_data, include_offset=estimate_offset)

    if estimate_offset:

        def gaussian_model(x, mean, sigma, amplitude, offset):
            """ Gaussian helper function for lmfit """
            y = gaussian(x, mean, sigma, amplitude, offset)
            return y
    else:

        def gaussian_model(x, mean, sigma, amplitude):  # type: ignore
            """ Gaussian helper function for lmfit """
            y = gaussian(x, mean, sigma, amplitude)
            return y

    lmfit_method = 'least_squares'
    lmfit_model = Model(gaussian_model)
    lmfit_model.set_param_hint('amplitude', min=0)
    lmfit_result = lmfit_model.fit(y_data,
                                   x=x_data,
                                   **dict(
                                       zip(lmfit_model.param_names,
                                           initial_parameters)),
                                   verbose=verbose,
                                   method=lmfit_method)
    result_dict = extract_lmfit_parameters(lmfit_model, lmfit_result)

    result_dict['parameters fitted gaussian'] = result_dict[
        'fitted_parameters']
    result_dict['parameters initial guess'] = result_dict['initial_parameters']

    return result_dict['fitted_parameters'], result_dict
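
# A minimal usage sketch with synthetic noisy data, assuming the
# module-level `gaussian` helper accepts an offset argument, as used above.
g_x = np.linspace(0.0, 10.0, 101)
g_y = gaussian(g_x, 5.0, 1.0, 2.0, 0.5) + 0.05 * np.random.randn(g_x.size)
g_par, g_info = fit_gaussian(g_x, g_y)
print('fitted [mean, sigma, amplitude, offset]:', g_par)
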
Example no. 15
0
def fit_basic(
    x,
    y,
    dy=None,
    model="line",
    init=None,
    fix=None,
    method="leastsq",
    emcee=False,
    plot_corner=False,
    **kwargs,
):

    if init is None:
        init = {}  # avoid sharing a mutable default between calls

    if model in ("line", "linear"):
        func = linear
    elif model == "power":
        func = power
    elif model == "quadratic":
        func = quadratic
    elif model == "exponential":
        func = exponential
    else:
        raise ValueError("Model {} not defined.".format(model))

    model = Model(func, nan_policy="omit")

    pars = init_pars(model, init, x, y)

    if fix is not None:
        for vn in fix:
            pars[vn].set(value=fix[vn], vary=False)

    if dy is not None:
        dy = np.abs(dy)
        wgt = np.array(
            [1.0 / dy[i] if dy[i] > 0 else 0 for i in range(len(y))])
        is_weighted = True
    else:
        wgt = None
        is_weighted = False

    if emcee:
        mi = lmfit.minimize(
            residual,
            pars,
            args=(x, model),
            kws={
                "data": y,
                "eps": wgt
            },
            method="nelder",
            nan_policy="omit",
        )
        # mi.params.add('f', value=1, min=0.001, max=2)
        mini = lmfit.Minimizer(residual,
                               mi.params,
                               fcn_args=(x, model),
                               fcn_kws={
                                   "data": y,
                                   "eps": wgt
                               })
        out = mini.emcee(burn=300,
                         steps=1000,
                         thin=20,
                         params=mi.params,
                         is_weighted=is_weighted)
        out, fit_report = get_ml_solution(out, fix)
        print(list(out.params.valuesdict().values()))
        if plot_corner:
            corner.corner(
                out.flatchain,
                labels=out.var_names,
                truths=list(out.params.valuesdict().values()),
            )

    else:
        out = lmfit.minimize(
            residual,
            pars,
            args=(x, model),
            method=method,
            kws={
                "data": y,
                "eps": wgt
            },
            nan_policy="omit",
            **kwargs,
        )
        fit_report = lmfit.fit_report(out)

    pars_arr = np.zeros((len(pars), 2))
    for i, vn in enumerate(pars):
        pars_arr[i, 0] = out.params[vn].value
        pars_arr[i, 1] = out.params[vn].stderr if out.params[vn].stderr else 0

    if not emcee:
        gof = np.array([out.chisqr, out.redchi, out.bic, out.aic])
    else:
        gof = 0
    return pars_arr, gof, out, fit_report, model
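
# A minimal usage sketch, assuming the module-level `linear` model function
# and the `init_pars`/`residual` helpers that fit_basic relies on.
fb_x = np.linspace(0.0, 5.0, 50)
fb_y = 2.0 * fb_x + 1.0 + np.random.normal(scale=0.1, size=fb_x.size)
fb_pars, fb_gof, fb_out, fb_report, fb_model = fit_basic(fb_x, fb_y,
                                                         model='line')
print(fb_report)
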
Example no. 16
0
#!/usr/bin/env python

# <examples/doc_model_savemodel.py>
import numpy as np

from lmfit.model import Model, save_model


def mysine(x, amp, freq, shift):
    return amp * np.sin(x*freq + shift)


sinemodel = Model(mysine)
pars = sinemodel.make_params(amp=1, freq=0.25, shift=0)

save_model(sinemodel, 'sinemodel.sav')
# <end examples/doc_model_savemodel.py>
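
# A short follow-up sketch: the saved model can be restored with lmfit's
# load_model; funcdefs maps the stored function name back to the Python
# callable so the model can be evaluated again.
from lmfit.model import load_model

restored = load_model('sinemodel.sav', funcdefs={'mysine': mysine})
print(restored.param_names)  # ['amp', 'freq', 'shift']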