Code example #1
File: s21.py  Project: zobristnicholas/mkidcalculator
    def guess(cls,
              z,
              f,
              imbalance=None,
              offset=None,
              use_filter=False,
              filter_length=None,
              fit_resonance=True,
              nonlinear_resonance=False,
              fit_gain=True,
              quadratic_gain=True,
              fit_phase=True,
              quadratic_phase=False,
              fit_imbalance=False,
              fit_offset=False,
              **kwargs):
        """
        Guess the model parameters based on the data. Returns a
        lmfit.Parameters() object.
        Args:
            z: numpy.ndarray, dtype=complex, shape=(N,)
                Complex resonator scattering parameter.
            f: numpy.ndarray, dtype=real, shape=(N,)
                Frequency points corresponding to z.
            imbalance: numpy.ndarray, dtype=complex, shape=(M, L) (optional)
                Mixer imbalance calibration data (M data sets of I and Q
                beating). Each of the M data sets is its own calibration,
                potentially taken at different frequencies and frequency
                offsets. The results of the M data sets are averaged together.
                The default is None, which means alpha and beta are taken from
                the keywords. The alpha and beta keywords are ignored if a
                value other than None is given.
            offset: complex, iterable (optional)
                A complex number corresponding to the I + iQ mixer offset. The
                default is 0, corresponding to no offset. If the input is
                iterable, a mean is taken to determine the mixer_offset value.
            use_filter: boolean (optional)
                Filter the phase and magnitude data of z before trying to guess
                the parameters. This can be helpful for noisy data, but can
                also result in poor guesses for clean data. The default is
                False.
            filter_length: int, odd >= 3 (optional)
                If use_filter==True, this is used as the filter length. Only
                odd numbers greater or equal to three are allowed. If None, a
                filter length is computed as roughly 1% of the number of points
                in z. The default is None.
            fit_resonance: boolean (optional)
                Allow the resonance parameters to vary in the fit. The default
                is True.
            nonlinear_resonance: float, boolean (optional)
                Allow the resonance model to fit for nonlinear behavior. The
                default is False. If a float, this value is used for 'a'. If
                True, 'a' is initialized to 0.0025, since the fit has trouble
                when 'a' starts exactly at 0.
            fit_gain: boolean (optional)
                Allow the gain parameters to vary in the fit. The default is
                True.
            quadratic_gain: boolean (optional)
                Allow for a quadratic gain component in the model. The default
                is True.
            fit_phase: boolean (optional)
                Allow the phase parameters to vary in the fit. The default is
                True.
            quadratic_phase: boolean (optional)
                Allow for a quadratic phase component in the model. The default
                is False since there isn't an obvious physical reason why there
                should be a quadratic term.
            fit_imbalance: boolean (optional)
                Allow the IQ mixer amplitude and phase imbalance to vary in the
                fit. The default is False. The imbalance is typically
                calibrated and not fit.
            fit_offset: boolean (optional)
                Allow the IQ mixer offset to vary in the fit. The default is
                False. The offset is highly correlated with the gain parameters
                and typically should not be allowed to vary unless the gain is
                properly calibrated.
            kwargs: (optional)
                Set the options of any of the parameters directly, bypassing
                the calculated guess.
        Returns:
            params: lmfit.Parameters
                An object with guesses and bounds for each parameter.
        """
        # undo the mixer calibration for more accurate guess if known ahead of time
        offset = np.mean(offset) if offset is not None else 0.
        if imbalance is not None:
            # bandpass filter the I and Q signals
            imbalance = np.atleast_2d(imbalance)
            i, q = imbalance.real, imbalance.imag
            n = i.shape[0]
            ip, f_i_ind = bandpass(i)
            qp, f_q_ind = bandpass(q)
            # compute alpha and beta
            amp = np.sqrt(2 * np.mean(ip**2, axis=-1))
            alpha = np.sqrt(2 * np.mean(qp**2, axis=-1)) / amp
            ratio = np.angle(
                np.fft.rfft(ip)[np.arange(n), f_i_ind[:, 0]] /
                np.fft.rfft(qp)[np.arange(n),
                                f_q_ind[:, 0]])  # for arcsine branch
            beta = np.arcsin(
                np.sign(ratio) * 2 * np.mean(qp * ip, axis=-1) /
                (alpha * amp**2)) + np.pi * (ratio < 0)
            alpha = np.mean(alpha)
            beta = np.mean(beta)
        else:
            alpha = 1.
            beta = 0.
        if kwargs.get('alpha', None) is not None:
            alpha = kwargs['alpha']['value'] if isinstance(
                kwargs['alpha'], dict) else kwargs['alpha']
        if kwargs.get('beta', None) is not None:
            beta = kwargs['beta']['value'] if isinstance(
                kwargs['beta'], dict) else kwargs['beta']
        z = cls.mixer_inverse((alpha, beta, offset), z)
        # compute the magnitude and phase of the scattering parameter
        magnitude = np.abs(z)
        phase = np.unwrap(np.angle(z))
        # filter the magnitude and phase if requested
        if use_filter:
            if filter_length is None:
                filter_length = int(np.round(len(magnitude) / 100.0))
            if filter_length % 2 == 0:
                filter_length += 1
            if filter_length < 3:
                filter_length = 3
            magnitude = sps.savgol_filter(magnitude, filter_length, 1)
            phase = sps.savgol_filter(phase, filter_length, 1)

        # calculate useful indices
        f_index_end = len(f) - 1  # last frequency index
        f_index_5pc = max(int(len(f) * 0.05), 2)  # end of first 5% of data
        # set up a unitless, reduced, midpoint frequency for baselines
        f_midpoint = np.median(f)  # frequency at the center of the data

        def xm(fx):
            return (fx - f_midpoint) / f_midpoint

        # get the magnitude and phase data to fit
        mag_ends = np.concatenate(
            (magnitude[:f_index_5pc], magnitude[-f_index_5pc + 1:]))
        phase_ends = np.concatenate(
            (phase[:f_index_5pc], phase[-f_index_5pc + 1:]))
        freq_ends = xm(np.concatenate((f[:f_index_5pc], f[-f_index_5pc + 1:])))
        # calculate the gain polynomials
        gain_poly = np.polyfit(freq_ends, mag_ends, 2 if quadratic_gain else 1)
        if not quadratic_gain:
            gain_poly = np.concatenate(([0], gain_poly))
        phase_poly = np.polyfit(freq_ends, phase_ends,
                                2 if quadratic_phase else 1)
        if not quadratic_phase:
            phase_poly = np.concatenate(([0], phase_poly))

        # guess f0
        f_index_min = np.argmin(magnitude - np.polyval(gain_poly, xm(f)))
        f0_guess = f[f_index_min]
        # set some bounds (resonant frequency should not be within 5% of the data's ends)
        f_min = min(f[f_index_5pc], f[f_index_end - f_index_5pc])
        f_max = max(f[f_index_5pc], f[f_index_end - f_index_5pc])
        if not f_min < f0_guess < f_max:
            f0_guess = f_midpoint

        # guess Q values
        mag_max = np.polyval(gain_poly, xm(f[f_index_min]))
        mag_min = magnitude[f_index_min]
        fwhm = np.sqrt(
            (mag_max**2 + mag_min**2) / 2.)  # fwhm is for power not amplitude
        fwhm_mask = magnitude < fwhm
        regions, _ = label(
            fwhm_mask)  # find the regions where magnitude < fwhm
        region = regions[f_index_min]  # pick the one that includes the minimum
        try:
            f_masked = f[find_objects(
                regions,
                max_label=region)[-1]]  # mask f to only include that region
            bandwidth = f_masked.max() - f_masked.min()  # find the bandwidth
        except IndexError:  # no found region
            bandwidth = 0  # defer calculation
        # Q0 = f0 / fwhm bandwidth
        q0_guess = f0_guess / bandwidth if bandwidth != 0 else 1e4
        # Q0 / Qi = min(mag) / max(mag)
        qi_guess = q0_guess * mag_max / mag_min if mag_min != 0 else 1e5
        if qi_guess == 0:
            qi_guess = 1e5
        if q0_guess == 0:
            q0_guess = 1e4
        # 1 / Q0 = 1 / Qc + 1 / Qi
        qc_guess = 1. / (1. / q0_guess - 1. / qi_guess) if (
            1. / q0_guess - 1. / qi_guess) != 0 else 1e4

        # make the parameters object (coerce all values to float to avoid ints and numpy types)
        params = lm.Parameters()
        # resonance parameters
        params.add('xa', value=float(0), vary=fit_resonance)
        params.add('f0',
                   value=float(f0_guess),
                   min=f_min,
                   max=f_max,
                   vary=fit_resonance)
        params.add('qc',
                   value=float(qc_guess),
                   min=1,
                   max=10**8,
                   vary=fit_resonance)
        params.add('qi',
                   value=float(qi_guess),
                   min=1,
                   max=10**8,
                   vary=fit_resonance)
        # bifurcation occurs at a = 0.7698; a float input specifies 'a'
        # itself (see docstring), so take its square root for a_sqrt
        a_sqrt = (np.sqrt(0.0025) if nonlinear_resonance is True
                  else np.sqrt(nonlinear_resonance))
        params.add('a_sqrt',
                   value=float(a_sqrt),
                   vary=bool(nonlinear_resonance) and fit_resonance)
        # polynomial gain parameters
        params.add('gain0', value=float(gain_poly[2]), min=0, vary=fit_gain)
        params.add('gain1', value=float(gain_poly[1]), vary=fit_gain)
        params.add('gain2',
                   value=float(gain_poly[0]),
                   vary=quadratic_gain and fit_gain)
        # polynomial phase parameters
        params.add('phase0', value=float(phase_poly[2]), vary=fit_phase)
        params.add('phase1', value=float(phase_poly[1]), vary=fit_phase)
        params.add('phase2',
                   value=float(phase_poly[0]),
                   vary=quadratic_phase and fit_phase)
        # IQ mixer parameters
        params.add('gamma', value=float(offset.real), vary=fit_offset)
        params.add('delta', value=float(offset.imag), vary=fit_offset)
        params.add('alpha', value=float(alpha), vary=fit_imbalance)
        params.add('beta',
                   value=float(beta),
                   min=beta - np.pi / 2,
                   max=beta + np.pi / 2,
                   vary=fit_imbalance)
        # add derived parameters
        params.add(
            "a",
            expr="a_sqrt**2")  # nonlinearity parameter (Swenson et al. 2013)
        params.add("q0",
                   expr="1 / (1 / qi + 1 / qc)")  # the total quality factor
        params.add("fm", value=float(f_midpoint),
                   vary=False)  # the frequency midpoint used for fitting
        params.add("tau", expr="-phase1 / (2 * pi * fm)")  # the cable delay
        params.add("fr", expr="f0 * (1 - a / q0)"
                   )  # resonance frequency accounting for nonlinearity

        # override the guess
        for key, options in kwargs.items():
            if options is not None:
                if isinstance(options, dict):
                    params[key].set(**options)
                else:
                    params[key].set(value=options)
        return params
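A minimal usage sketch (assuming the enclosing lmfit model class is `S21` and that it exposes a `residual` function; both names are assumptions, not shown in the snippet): the guessed parameters seed an ordinary least-squares fit.

# hypothetical usage: seed a fit of measured data (f, z) with the guess
params = S21.guess(z, f, nonlinear_resonance=True)
result = lm.minimize(S21.residual, params, args=(z, f))  # 'residual' assumed
print(result.params['f0'].value, result.params['qi'].value)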
Code example #2
File: test_parameters.py  Project: xli2522/lmfit-py
def test_parameters_init():
    """Test for initialization of the Parameters class"""
    ast_int = asteval.Interpreter()
    pars = lmfit.Parameters(asteval=ast_int, usersyms={'test': np.sin})
    assert pars._asteval == ast_int
    assert 'test' in pars._asteval.symtable
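The `usersyms` mapping registers extra names in the Parameters object's asteval interpreter, making them available in constraint expressions. A small sketch of how the registered `test` symbol could be used:

pars.add('x', value=0.5)
pars.add('y', expr='test(x)')  # 'y' is constrained to sin(x) via the user symbol
print(pars['y'].value)         # approximately sin(0.5) = 0.479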
Code example #3

import numpy as np
import lmfit
import multisite_1_to_3  # binding-model helper assumed by this fragment


def _residual(params, protein_conc, x, y, ymin):  # signature inferred from the Minimizer call below
    assert len(x) == len(y), "X and Y are different sizes"

    y_hat = np.zeros(len(y))
    for i in range(len(y_hat)):
        fraction_x_bound = multisite_1_to_3.multisite_1_to_3(
            protein_conc, x[i], params['kd1'].value, params['kd2'].value,
            params['kd3'].value)
        amount_x_bound = fraction_x_bound * x[i]
        fraction_total_possible_bound = amount_x_bound / (3 * protein_conc)
        y_hat[i] = (ymin + ((params['ymax'].value - ymin) / protein_conc)
                    * fraction_total_possible_bound)
    return np.square(y_hat - y)


x=[0.000010,0.1,0.2,0.3,0.4,0.5,0.75,1,1.5,2,2.5,5,10,15,20,30]
y=[0.000001,1.01E+08,63134669,45748112,25162485,1.52E+08,2.9E+08,4.3E+08,4.1E+08,7.21E+08,1.04E+09,1.62E+09,1.6E+09,2.1E+09,2.17E+09,2.28E+09]

protein_conc = 1.0  # placeholder value; defined elsewhere in the original script
params=lmfit.Parameters()
params.add("kd1", value=12.0, min=0, max=np.inf)
params.add("kd2", value=20.0, min=0, max=np.inf)
params.add("kd3", value=30.0, min=0, max=np.inf)
params.add("ymax", value=np.max(y), min=0.8*np.max(y), max=2*np.max(y))

lmmini=lmfit.Minimizer(_residual, params, fcn_args=(protein_conc, x,y,np.min(y)))
print(params)

result=lmmini.minimize()
print(result.params)


print("Here")
x_hat=np.linspace(0.00001, np.max(x),num=100)
y_hats=np.zeros(len(x_hat))
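The fragment ends while preparing a smooth model curve. A plausible continuation (mirroring the residual body above, and assuming the fit result is in `result`) evaluates the model on the dense grid for plotting:

# hypothetical continuation: evaluate the fitted model on x_hat
ymin = np.min(y)
for i in range(len(x_hat)):
    fraction_x_bound = multisite_1_to_3.multisite_1_to_3(
        protein_conc, x_hat[i], result.params['kd1'].value,
        result.params['kd2'].value, result.params['kd3'].value)
    amount_x_bound = fraction_x_bound * x_hat[i]
    fraction_total_possible_bound = amount_x_bound / (3 * protein_conc)
    y_hats[i] = (ymin + ((result.params['ymax'].value - ymin) / protein_conc)
                 * fraction_total_possible_bound)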
Code example #4
    def getParameters(self):
        """Return the lmfit Parameters object for the fit function."""
        params = lmfit.Parameters()
        for p in self.parametersList:
            params[p.name] = p.parameter
        return params
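Note: lmfit.Parameters subclasses OrderedDict, but its constructor does not accept a mapping of parameters (its arguments configure the embedded asteval interpreter), so entries must be added via add()/add_many() or item assignment, as in the loop above.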
Code example #5
    def get_fluxes(self,
                   x_custom,
                   z_custom,
                   optimizer_method="leastsq",
                   verbose=False):
        """Calculates the circuit fluxes for given x/z Pauli schedules.
        Numerically finds fluxes that produce the desired Pauli coefficients
        This optimizes the fluxes simultaneously.

        Arguments
        ---------
        x_custom : array
            target schedule for Pauli x coefficient
        z_custom : array
            target schedule for Pauli z coefficient
        optimizer_method : str
            Method used for optimizer
            default is "leastsq", another good option is "nelder"
        verbose : bool
            Whether to show the progress or not

        Returns
        -------
        ndarray:
            the barrier (x) and tilt (z) biases (phase, NOT flux) that produce
            the x_custom and z_custom schedules.
            dim = (2, p_tot)
        """
        p_tot = len(x_custom)
        phi_x_list = np.zeros(p_tot)
        phi_z_list = np.zeros(p_tot)

        # object that stores optimization parameters:
        params = lmfit.Parameters()
        # It contains dictionary of Parameter() objects,
        # with keys corresponding to parameter name
        # initial value selected at degeneracy point of z-bias, important for
        # limiting the range of solutions found
        params["phi_x"] = lmfit.Parameter(name="phi_x",
                                          value=1.5 * np.pi,
                                          min=1 * np.pi,
                                          max=2 * np.pi)
        params["phi_z"] = lmfit.Parameter(name="phi_z",
                                          value=0,
                                          min=-0.01 * np.pi,
                                          max=0.01 * np.pi)

        for i, (x_trgt, z_trgt) in enumerate(zip(x_custom, z_custom)):
            if verbose:
                print("schedule point", i + 1, "/", p_tot, end="\x1b[1K\r")
            minner = lmfit.Minimizer(self._residuals,
                                     params,
                                     fcn_args=(x_trgt, z_trgt))
            result = minner.minimize(method=optimizer_method)
            # options={"xatol": 1e-6})

            residual_norm = np.linalg.norm(result.residual)
            target_norm = np.linalg.norm([x_trgt, z_trgt])
            rel_error = residual_norm / target_norm
            if rel_error > 1e-2:
                print(
                    "point #{0:d} single qubit residuals: \n".format(i),
                    result.residual,
                    "\n",
                )
                warnings.warn(
                    ("For the point #{0:d}, solver found solutions that" +
                     " are not optimal. The relative error is" +
                     " {1:.2f} % for single qubit residuals").format(
                         i, rel_error * 100))
            phi_x_list[i] = result.params["phi_x"].value
            phi_z_list[i] = result.params["phi_z"].value

            # set the initial value of the next search to this solution;
            # only done for x because finding the small z-bias sometimes
            # gets stuck, and this speeds up the solver
            params["phi_x"].set(value=phi_x_list[i])
            # params["phi_z"].set(value=phi_z_list[i])

        return [phi_x_list, phi_z_list]
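A hedged usage sketch (assuming `circuit` is an instance of the class this method belongs to): linear annealing schedules for the Pauli coefficients are mapped point by point onto flux biases.

# hypothetical usage: linear x/z schedules over p_tot annealing points
p_tot = 50
x_custom = np.linspace(1.0, 0.0, p_tot)  # barrier coefficient ramps down
z_custom = np.linspace(0.0, 0.1, p_tot)  # tilt coefficient ramps up
phi_x, phi_z = circuit.get_fluxes(x_custom, z_custom, verbose=True)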
Code example #6
def test_basinhopping_Alpine02():
    """Test basinhopping on Alpine02 function."""

    global_optimum = [7.91705268, 4.81584232]
    fglob = -6.12950

    # SciPy
    def Alpine02(x):
        x0 = x[0]
        x1 = x[1]
        return np.prod(np.sqrt(x0) * np.sin(x0)) * np.prod(
            np.sqrt(x1) * np.sin(x1))

    def basinhopping_accept(f_new, f_old, x_new, x_old):
        """Does the new candidate vector lie within the bounds?

        Returns
        -------
        accept_test : bool
            Whether the candidate vector lies within the bounds.
        """
        if np.any(x_new < np.array([0.0, 0.0])):
            return False
        if np.any(x_new > np.array([10.0, 10.0])):
            return False
        return True

    minimizer_kwargs = {
        'method': 'L-BFGS-B',
        'bounds': [(0.0, 10.0), (0.0, 10.0)]
    }
    x0 = [1.0, 1.0]

    # FIXME - remove after requirement for scipy >= 0.19
    major, minor, micro = np.array(scipy_version.split('.'), dtype='int')
    if major < 1 and minor < 19:
        ret = basinhopping(Alpine02,
                           x0,
                           minimizer_kwargs=minimizer_kwargs,
                           accept_test=basinhopping_accept)
    else:
        ret = basinhopping(Alpine02,
                           x0,
                           minimizer_kwargs=minimizer_kwargs,
                           accept_test=basinhopping_accept,
                           seed=7)

    # lmfit
    def residual_Alpine02(params):
        x0 = params['x0'].value
        x1 = params['x1'].value
        return np.prod(np.sqrt(x0) * np.sin(x0)) * np.prod(
            np.sqrt(x1) * np.sin(x1))

    pars = lmfit.Parameters()
    pars.add_many(('x0', 1., True, 0.0, 10.0), ('x1', 1., True, 0.0, 10.0))

    mini = lmfit.Minimizer(residual_Alpine02, pars)
    kws = {'minimizer_kwargs': {'method': 'L-BFGS-B'}, 'seed': 7}
    out = mini.minimize(method='basinhopping', **kws)
    out_x = np.array([out.params['x0'].value, out.params['x1'].value])

    assert_allclose(out.residual, fglob, rtol=1e-5)
    assert_allclose(min(out_x), min(global_optimum))
    assert_allclose(max(out_x), max(global_optimum))
Code example #7
File: fit_dla.py  Project: jselsing/GRB160410A
def main():
    data = np.genfromtxt("data/stitched_spectrum_bin10.dat")
    wl, flux, error = data[:, 0], data[:, 1], data[:, 2]
    mask = ~(np.isnan(flux) | np.isinf(flux) | np.isnan(error) |
             np.isinf(error) | np.isnan(1 / error**2) | np.isinf(1 / error**2))
    error[error < 1e-25] = 1e-17
    wl, flux, error = wl[mask], flux[mask], error[mask]
    pl.errorbar(wl,
                flux,
                yerr=error,
                fmt=".k",
                capsize=0,
                elinewidth=0.5,
                ms=3,
                zorder=1)
    pl.plot(wl, flux, lw=1, linestyle="steps-mid", alpha=0.7)
    # pl.ylim((-1e-18, 1e-16))
    # pl.show()
    # exit()

    p = lmfit.Parameters()
    #           (Name,  Value,  Vary,   Min,  Max,  Expr)
    p.add_many(('amp_pow', -8, True, -np.inf, 0),
               ('slope_pow', -2.38, True, -3, -2), ('N', 20, True, 18, 22),
               ('sigma', 200, True, 0), ('z', 1.716, True, 1.70, 1.73),
               ('ebv', 0.0, True, 0), ('f', 0.416, False),
               ('lambda_zero', 1215.6701, False), ('gamma', 6.265e8, False),
               ('resolution_fwhm', 30, False))

    mi = lmfit.minimize(residual, p, method='Nelder', args=(wl, flux, error))
    lmfit.report_fit(mi.params)  # report_fit already prints; it returns None

    # pl.plot(wl, residual(mi.params.valuesdict().values(), wl), lw = 3, color=cmap[2], zorder=2)
    # pl.ylim((-1e-18, 1e-16))
    # pl.show()
    # exit()

    def lnprob(pars):
        """
        This is the log-likelihood probability for the sampling.
        """
        model = residual(pars, wl)
        return -0.5 * np.sum((
            (model - flux) / error)**2 + np.log(2 * np.pi * error**2))

    mini = lmfit.Minimizer(lnprob, mi.params)

    nwalkers = 100
    v = mi.params.valuesdict()
    MLE_vals = np.array(
        [v["amp_pow"], v["slope_pow"], v["N"], v["sigma"], v["z"], v["ebv"]])
    # pos = [MLE_vals + 1e-2*MLE_vals*np.random.randn(len(MLE_vals)) for i in range(nwalkers)]
    res = mini.emcee(nwalkers=nwalkers,
                     burn=100,
                     steps=1000,
                     thin=1,
                     params=mi.params,
                     seed=12345)

    low, mid, high, names = [], [], [], []
    for kk in res.params.valuesdict().keys():
        names.append(str(kk))
        if str(kk) in ["f", "lambda_zero", "gamma", "resolution_fwhm"]:
            low.append(res.params.valuesdict()[str(kk)])
            mid.append(res.params.valuesdict()[str(kk)])
            high.append(res.params.valuesdict()[str(kk)])
        else:
            low.append(np.percentile(res.flatchain[str(kk)], [15.9])[0])
            mid.append(np.percentile(res.flatchain[str(kk)], [50])[0])
            high.append(np.percentile(res.flatchain[str(kk)], [84.2])[0])
    low, mid, high = np.array(low), np.array(mid), np.array(high)

    pl.plot(wl,
            residual(res.params.valuesdict().values(), wl),
            lw=1,
            color=cmap[2],
            zorder=3)
    pl.fill_between(wl,
                    residual(low, wl),
                    residual(high, wl),
                    alpha=0.5,
                    color=cmap[2],
                    zorder=2)
    # pl.plot(wl, residual(high, wl), lw = 3, linestyle="dashed")
    pl.xlim(3200, 6000)
    pl.ylim(-1e-18, 1.5e-16)
    pl.xlabel(r"Wavelength / [$\mathrm{\AA}$]")
    pl.ylabel(r"Flux density [erg s$^{-1}$ cm$^{-2}$ $\AA^{-1}$]")
    pl.savefig("figs/DLA_fit_zoom.pdf")
    pl.xlim(3200, 10000)
    pl.ylim(-1e-18, 1.5e-16)
    pl.savefig("figs/DLA_fit.pdf")
    pl.clf()
    # exit()
    from matplotlib.ticker import MaxNLocator
    fig, axes = pl.subplots(6, 1, sharex=True, figsize=(8, 9))
    ylabels = ["Powerlaw amplitude", "Powerlaw slope", "N", r"$\sigma$",
               "z", "ebv"]
    for k, ax in enumerate(axes):
        ax.plot(res.chain[:, :, k].T, color="k", alpha=0.4)
        ax.yaxis.set_major_locator(MaxNLocator(5))
        ax.axhline(mid[k], color="#888888", lw=1)
        # ax.set_ylim((mid[k] - 3 * low[k], mid[k] + 3 * high[k]))
        ax.set_ylabel(ylabels[k])
    axes[5].set_xlabel("step number")

    fig.tight_layout(h_pad=0.0)
    fig.savefig("figs/line-time.pdf")

    dt = [("names", np.str, 16), ("value", np.float64), ("+error", np.float64),
          ("-error", np.float64)]
    data = np.array(zip(np.array(names), mid, abs(mid - low), abs(high - mid)),
                    dtype=dt)
    np.savetxt("data/dla_fitresults.dat",
               data,
               header="names value +error -error",
               fmt=['%16s', '%1.5e', '%1.5e', '%1.5e'])

    import corner
    corner.corner(res.flatchain,
                  labels=res.var_names,
                  truths=list(res.params.valuesdict().values()))
    pl.savefig("figs/Cornerplot.pdf", clobber=True)
Code example #8
import matplotlib.pyplot as plt
import numpy as np

import lmfit

x = np.linspace(1, 10, 250)
np.random.seed(0)
y = 0.3 + 0.2 * x + np.random.randn(x.size)
plt.plot(x, y, 'b')

p = lmfit.Parameters()
p.add_many(('alpha', 0.5), ('beta', 0.3))


def residual(p):
    v = p.valuesdict()
    return (v['alpha'] + v['beta'] * x) - y


mi = lmfit.minimize(residual, p, method='nelder', nan_policy='omit')
lmfit.printfuncs.report_fit(mi.params, min_correl=0.5)

plt.plot(x, y, 'b')
plt.plot(x, residual(mi.params) + y, 'r')
plt.legend(loc='best')
plt.show()

mi.params.add('__lnsigma', value=np.log(0.1), min=np.log(0.001), max=np.log(2))

res = lmfit.minimize(residual, method='emcee', nan_policy='omit',
                     burn=300, steps=1000, thin=20, params=mi.params,
                     is_weighted=False)  # trailing args assumed per the lmfit docs example
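A natural follow-up, not captured in the snippet, is to report the sampled parameter estimates:

lmfit.report_fit(res.params)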
Code example #9
def plotfit_xye(pfile_path):

    if "-xy" in sys.argv:
        load_xy = True
    else:
        load_xy = False

    if load_xy:
        x, y = load_xyfile(pfile_path)
    else:
        x, y, sy = load_xyefile(pfile_path)

    #Initialize variables
    xvar = r"\mathit{x}"
    yvar = r"\mathit{y}"
    xunit = ""
    yunit = ""
    plot_path = ""
    plotminx = min(x)
    plotmaxx = max(x)

    miny = min(y)
    maxy = max(y)
    minfacy = 1.1 if miny < 0 else 0.9
    maxfacy = 1.1 if maxy > 0 else 0.9

    plotminy = miny * minfacy
    plotmaxy = maxy * maxfacy

    labelname = ""

    if "-vsm" in sys.argv:
        xvar = "\mathit{B}"
        yvar = "\mathit{M}"
        xunit = " \, / \, T"
        yunit = " \, / \, memu"
        head, tail = os.path.split(pfile_path)
        pre, ext = os.path.splitext(tail)
        plot_path = head + "/" + pre + ".png"
        if plot_path.startswith("/"):
            plot_path = "." + plot_path
        labelname = pre

    if "-vars" in sys.argv:
        xvar = "\mathit{" + sys.argv[sys.argv.index("-vars") + 1] + "}"
        yvar = "\mathit{" + sys.argv[sys.argv.index("-vars") + 2] + "}"

    if "-units" in sys.argv:
        xunit = " \, / \, " + sys.argv[sys.argv.index("-units") + 1]
        yunit = " \, / \, " + sys.argv[sys.argv.index("-units") + 2]

    if "-plot_lim" in sys.argv:
        minx = float(sys.argv[sys.argv.index("-plot_lim") + 1])
        maxx = float(sys.argv[sys.argv.index("-plot_lim") + 2])

    if "-label" in sys.argv:
        labelname = sys.argv[sys.argv.index("-label") + 1]

    if "-save" in sys.argv:
        plot_path = sys.argv[sys.argv.index("-save") + 1]

    fig, ax = plt.subplots()  #Initialize plot canvas
    # Restricted fit area defined?
    if "-fit_lim" in sys.argv:
        fit_idx = sys.argv.index("-fit_lim")
        fitminx = float(sys.argv[fit_idx + 1])
        fitmaxx = float(sys.argv[fit_idx + 2])
        fit_slice = get_slice(x, fitminx, fitmaxx)

        x_plot = x[fit_slice]
        y_plot = y[fit_slice]
        #Excluded data (assumes get_slice returns a boolean mask)
        x_exc = x[~fit_slice]
        y_exc = y[~fit_slice]

        if not load_xy:
            sy_plot = sy[fit_slice]
            sy_exc = sy[~fit_slice]

        if len(x_exc) > 0:
            if load_xy:
                ax.plot(x_exc, y_exc, linestyle='None', color='gray')
            else:
                ax.errorbar(x_exc,
                            y_exc,
                            sy_exc,
                            linestyle='None',
                            color='gray')
    else:
        x_plot = x
        y_plot = y
        if not load_xy:
            sy_plot = sy

    # Initialize fit variables
    p = lmfit.Parameters()
    minit = float(sys.argv[sys.argv.index("-m") + 1]) if "-m" in sys.argv else 1
    binit = float(sys.argv[sys.argv.index("-b") + 1]) if "-b" in sys.argv else 0

    if "-fixb" in sys.argv:
        varyb = False
    else:
        varyb = True

    p.add("m", minit)
    p.add("b", binit, vary=varyb)

    if load_xy:
        fit_result = lmfit.minimize(residuum_no_err, p, args=(x, y))
    else:
        fit_result = lmfit.minimize(residuum, p, args=(x, y, sy))
    print(lmfit.fit_report(fit_result))

    m = fit_result.params["m"].value
    sm = fit_result.params["m"].stderr
    b = fit_result.params["b"].value
    sb = fit_result.params["b"].stderr

    prec = "{:.3e}"
    fitresultstr = r"$\mathit{m} \, = \, $" + prec.format(m) + " +/- " + prec.format(sm) + "\n"+\
            "$\mathit{b} \, = \, $" + prec.format(b) + " +/-" + prec.format(sb)

    chi2 = fit_result.redchi
    if load_xy:
        ax.plot(x_plot,
                y_plot,
                linestyle='None',
                color='#2b8cbe',
                label=labelname)
    else:
        ax.errorbar(x_plot,
                    y_plot,
                    sy_plot,
                    linestyle='None',
                    color='#2b8cbe',
                    label=labelname)
    ax.plot(x,
            common_math.linear(x, m, b),
            color='#e34a33',
            marker='None',
            label=fitresultstr)

    if chi2 < 1e-3:
        chi2str = "{:.3e}".format(chi2)
    else:
        chi2str = "{:.4f}".format(chi2)
    ax.plot([], [],
            ls='None',
            marker='None',
            label=r"$\chi^2/dof \, = \, " + chi2str + "$")

    ax.set_xlabel("$" + xvar + xunit + "$")
    ax.set_ylabel("$" + yvar + yunit + "$")
    ax.set_xlim([plotminx, plotmaxx])
    ax.set_ylim([plotminy, plotmaxy])
    plt.legend(loc='best', fontsize=10)

    if plot_path != "":
        fig.savefig(plot_path)
        print("Saved plot to", plot_path)

    plt.show()
Code example #10
melt_data_dict = {}
for melt in melts:
    melt_data_dict[melt] = np.load(os.path.join(PATH, f"{melt}.npy"),
                                   allow_pickle=True)

# Compile fraction folded expressions.
comp_frac_folded_dict = {}
for construct in constructs:
    frac_folded_string = frac_folded_dict[construct + "_frac_folded"]
    comp_frac_folded = compile(frac_folded_string,
                               "{}_comp_ff".format(construct), "eval")
    comp_frac_folded_dict[construct + "_comp_ff"] = comp_frac_folded

# CREATE INITIAL GUESSES
# First, the thermodynamic parameters. These are global.
init_guesses = lmfit.Parameters()
init_guesses.add("dGN", value=5)
init_guesses.add("dGR", value=4)
init_guesses.add("dGX", value=5)
init_guesses.add("dGC", value=6)
init_guesses.add("dGRR", value=-11)
init_guesses.add("dGXX", value=-10)
init_guesses.add("dGRX", value=-10)
init_guesses.add("dGXR", value=-10)
init_guesses.add("mR", value=0.8)
init_guesses.add("mX", value=0.8)

# Next, the baseline parameters. These are local.
for melt in melts:
    init_guesses.add("af_{}".format(melt), value=0.02)
    init_guesses.add("bf_{}".format(melt), value=1)
Code example #11

from collections import OrderedDict

import numpy as np
import lmfit

class FittingModel:
    """
    Base/Meta class for model fitting, with data and parameters scaling.
    """
    name = ""
    params = lmfit.Parameters()
    # optimization method
    fit_method = "lbfgsb"
    # whether the 'ydata' and 'yerr' to be scaled in order to reduce
    # the dynamical range for a more stable fitting
    scale = False
    scale_factor = 1.0

    def __init__(self, fit_method="lbfgsb", params=None, scale=True):
        self.fit_method = fit_method
        if params is not None:
            self.load_params(params)
        self.scale = scale

    @staticmethod
    def model(x, params):
        pass

    def f(self, x):
        return self.model(x, self.params) * self.scale_factor

    def load_data(self, xdata, ydata=None, xerr=None, yerr=None,
                  update_params=False):
        if xdata.ndim == 2 and xdata.shape[1] == 4:
            # 4-column data
            self.xdata = xdata[:, 0].copy()
            self.xerr = xdata[:, 1].copy()
            self.ydata = xdata[:, 2].copy()
            self.yerr = xdata[:, 3].copy()
        else:
            self.xdata = np.array(xdata)
            self.ydata = np.array(ydata)
            self.xerr = np.array(xerr)
            self.yerr = np.array(yerr)
        self.scale_data(update_params=update_params)

    def scale_data(self, update_params=False):
        """
        Scale the ydata and yerr to reduce their dynamical ranges,
        for a more stable model fitting.
        """
        if self.scale:
            y_min = np.min(self.ydata)
            y_max = np.max(self.ydata)
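            # exp(log(y_min*y_max)/2) == sqrt(y_min*y_max), the geometric mean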
            self.scale_factor = np.exp(np.log(y_min*y_max) / 2)
            self.ydata /= self.scale_factor
            self.yerr /= self.scale_factor
            if update_params:
                self.scale_params()

    def scale_params(self):
        """
        Scale the parameters' min/max values accordingly.
        """
        pass

    def f_residual(self, params):
        if self.yerr is None:
            return self.model(self.xdata, params) - self.ydata
        else:
            return (self.model(self.xdata, params) - self.ydata) / self.yerr

    def fit(self, method=None):
        if method is None:
            method = self.fit_method
        self.fitter = lmfit.Minimizer(self.f_residual, self.params)
        self.fitted = self.fitter.minimize(method=method)
        self.load_params(self.fitted.params)

    def get_param(self, name=None):
        """
        Return the requested 'Parameter' object, or the whole
        'Parameters' object if no name is supplied.
        """
        try:
            return self.params[name]
        except KeyError:
            return self.params

    def set_param(self, name, *args, **kwargs):
        """
        Set the properties of the specified parameter.
        """
        param = self.params[name]
        param.set(*args, **kwargs)

    def dump_params(self, serialize=True):
        """
        Dump the current values/settings for all model parameters,
        and these dumped results can be later loaded by 'load_params()'.
        """
        if serialize:
            return self.params.dumps()
        else:
            return self.params.copy()

    def load_params(self, params):
        """
        Load the provided parameters values/settings.
        """
        if isinstance(params, lmfit.parameter.Parameters):
            self.params = params.copy()
        else:
            p = lmfit.parameter.Parameters()
            p.loads(params)
            self.params = p

    def report(self, rtype):
        """
        Report the fitting results, e.g., g.o.f, chisqr, parameters, etc.
        """
        if rtype == "fitting":
            fitted = self.fitted
            results = OrderedDict([
                ("nfev",   fitted.nfev),
                ("ndata",  fitted.ndata),
                ("nvarys", fitted.nvarys),  # number of variable parameters
                ("nfree",  fitted.nfree),  # degree of freedom
                ("chisqr", fitted.chisqr),
                ("redchi", fitted.redchi),
                ("aic",    fitted.aic),
                ("bic",    fitted.bic),
            ])
        elif rtype == "parameters":
            results = OrderedDict([
                (pn, [par.value, par.min, par.max, par.vary])
                for pn, par in self.params.items()
            ])
        else:
            raise ValueError("invalid rtype: %s" % rtype)
        return results
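A minimal subclass sketch (hypothetical; the power-law model and the numbers are illustrative, not part of the original class):

# hypothetical subclass of FittingModel: a two-parameter power law
class PowerLawModel(FittingModel):
    name = "powerlaw"
    params = lmfit.Parameters()
    params.add_many(("norm", 1.0, True, 0.0, np.inf),
                    ("index", -1.0, True, -5.0, 5.0))

    @staticmethod
    def model(x, params):
        v = params.valuesdict()
        return v["norm"] * np.power(x, v["index"])

x = np.linspace(1.0, 10.0, 50)
m = PowerLawModel()
m.load_data(xdata=x, ydata=2.0 * x**-0.5, yerr=np.full(x.size, 0.01))
m.fit()
print(m.report("parameters"))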
Code example #12
def chi2_shift_leastsq(im1, im2, err=None, mode='wrap', maxoff=None,
        return_error=True, guessx=0, guessy=0, use_fft=False,
        ignore_outside=True, verbose=False, **kwargs):
    """
    Determine the best fit offset using `scipy.ndimage.map_coordinates` to
    shift the offset image.
    *OBSOLETE*: it kind of works, but it is sensitive to the input guess and
    does not reliably output errors

    Parameters
    ----------
        im1 : np.ndarray
            First image
        im2 : np.ndarray
            Second image (offset image)
        err : np.ndarray OR float
            Per-pixel error in image 2
        mode : 'wrap','constant','reflect','nearest'
            Option to pass to map_coordinates for determining what to do with
            shifts outside of the boundaries.  
        maxoff : None or int
            If set, crop the data after shifting before determining chi2
            (this is a good thing to use; not using it can result in weirdness
            involving the boundaries)

    """
    #xc = correlate2d(im1,im2, boundary=boundary)
    #ac1peak = (im1**2).sum()
    #ac2peak = (im2**2).sum()
    #chi2 = ac1peak - 2*xc + ac2peak


    if not im1.shape == im2.shape:
        raise ValueError("Images must have same shape.")

    if np.any(np.isnan(im1)):
        im1 = im1.copy()
        im1[im1!=im1] = 0
    if np.any(np.isnan(im2)):
        im2 = im2.copy()
        if hasattr(err,'shape'):
            err[im2!=im2] = np.inf
        im2[im2!=im2] = 0

    im1 = im1 - im1.mean()
    im2 = im2 - im2.mean()
    if not use_fft:
        yy, xx = np.indices(im1.shape)
    ylen, xlen = im1.shape
    xcen = xlen // 2 - (1 - xlen % 2)  # integer division for Python 3 indexing
    ycen = ylen // 2 - (1 - ylen % 2)

    # possible requirements for only this function
    import lmfit
    if not use_fft:
        import scipy.ndimage

    def residuals(p, im1, im2):
        xsh, ysh = p['xsh'].value,p['ysh'].value
        if use_fft:
            shifted_img = shift.shiftnd(im2, (-ysh, -xsh))
        else: # identical to skimage
            shifted_img = scipy.ndimage.map_coordinates(im2, [yy+ysh,xx+xsh],
                    mode=mode)
        if maxoff is not None:
            xslice = slice(xcen-maxoff,xcen+maxoff,None)
            yslice = slice(ycen-maxoff,ycen+maxoff,None)
            # divide by sqrt(number of samples) = sqrt(maxoff**2)
            residuals = np.abs(np.ravel((im1[yslice,xslice]-shifted_img[yslice,xslice])) / maxoff)
        else:
            if ignore_outside:
                outsidex = min([(xlen - 2 * xsh) / 2, xcen])
                outsidey = min([(ylen - 2 * ysh) / 2, ycen])
                xslice = slice(int(xcen - outsidex), int(xcen + outsidex), None)
                yslice = slice(int(ycen - outsidey), int(ycen + outsidey), None)
                residuals = ( np.abs( np.ravel(
                    (im1[yslice,xslice]-shifted_img[yslice,xslice]))) /
                    (2*outsidex*2*outsidey)**0.5 )
            else:
                xslice = slice(None)
                yslice = slice(None)
                residuals = np.abs(np.ravel((im1-shifted_img))) / im1.size**0.5
        if err is None:
            return residuals
        elif hasattr(err,'shape'):
            if use_fft:
                shifted_err = shift.shiftnd(err, (-ysh, -xsh))
            else:
                shifted_err = scipy.ndimage.map_coordinates(err, [yy+ysh,xx+xsh], mode=mode)
            return residuals / shifted_err[yslice,xslice].flat
        else:
            return residuals / err

    fit_params = lmfit.Parameters()
    fit_params['xsh'] = lmfit.Parameter(value=guessx, max=maxoff)
    fit_params['ysh'] = lmfit.Parameter(value=guessy, max=maxoff)
    if maxoff is not None:
        fit_params['xsh'].min = -maxoff
        fit_params['ysh'].min = -maxoff

    iter_cb = per_iteration if verbose else None

    lmfitter = lmfit.minimize(residuals, fit_params, args=(im1,im2), iter_cb=iter_cb, **kwargs)

    px,py = lmfitter.params.values()
    fxsh,fysh = px.value,py.value
    efxsh,efysh = px.stderr,py.stderr
    if return_error:
        return fxsh,fysh,efxsh,efysh
    else:
        return fxsh,fysh
        
Code example #13
def fit_helix(track_data, B_data, m=m_e, q=q_e, rot=True):
    '''
    track_data is a 4 by N np.array and B_data is 3 by N, where the rows are
    track_data: [t, x, y, z] with units [s, m, m, m]
    B_data: [Bx, By, Bz] with units [T, T, T]
    (to work out of the box with emtracks.particle)
    '''
    track_data = track_data.copy() # to not change track_data information
    track_data[0] = track_data[0]*1e9 # convert to ns
    track_data[1:] = track_data[1:]*1e3 # convert to mm
    # x0, y0 = track_data[1:3,0] # for phi0 estimate
    # translations:
    translate_vec = track_data[:,0].reshape(1, 4)
    track_data = track_data - np.repeat(translate_vec, track_data.shape[1], axis=0).T
    # Rotate so mean B vector is on z-axis
    B_mean = np.mean(B_data, axis=1)
    B = np.linalg.norm(B_mean)
    B_mean_T = np.linalg.norm(B_mean[:2]) # average transverse Bfield (original coords)
    # calculate rotation angles
    rot_theta = np.arctan2(B_mean_T, B_mean[2])
    rot_phi = np.arctan2(B_mean[1], B_mean[0])
    # create rotation function (+inverse) to align mean B with z axis
    rot_func = Rotation.from_euler('zy', np.array([-rot_phi, -rot_theta]))
    rot_inv_func = rot_func.inv()
    # rotate track data
    track_data_rot = track_data.copy()
    if rot:
        track_data_rot[1:] = rot_func.apply(track_data_rot[1:].T).T
    # estimate parameters
    # R, C_x, C_y
    cent, R_guess, Ri_fit, R_residual = reco_circle(track_data_rot[1], track_data_rot[2])
    C_x_guess, C_y_guess = cent
    # Lambda
    dists = (np.sum(np.square(track_data_rot[1:3]), axis=0))**(1/2)
    diffs_negative = np.diff(dists) < 0
    if diffs_negative.sum() == 0:
        endpoint = -1
    else:
        endpoint = np.argwhere(np.diff(dists) < 0).flatten()[0] - 1
    xyz_ends = track_data_rot[1:, [0, endpoint]]
    # print(xyz_ends)
    Lambda_guess = Lambda_est(R_guess, xyz_ends)
    # phi0
    # x0, y0 = track_data_rot[1:3,0] # WRONG-->[0,0,0]
    # x0p = -y0
    # y0p = x0
    # phi0_guess = np.arctan2(y0p, x0p)
    phi0_guess = abs(np.arctan2(C_x_guess, C_y_guess))
    # t0 should be 0 by construction (the translation)
    t0_guess = 0.
    # guess dict
    params_guess = {'R':R_guess, 'Lambda':Lambda_guess, 'C_x':C_x_guess, 'C_y':C_y_guess,
                    'phi0':phi0_guess, 't0':t0_guess}
    mom_guess = LHelix_get_momentum(params_guess['R'], params_guess['Lambda'], m, q, B)
    # construct model
    model = lm.Model(LHelix_P_pos, independent_vars=['t'])
    params = lm.Parameters()
    params.add('R', value=R_guess, min=R_guess-10, max=R_guess+10)
    params.add('Lambda', value=Lambda_guess, min=Lambda_guess-10, max=Lambda_guess+10)
    params.add('C_x', value=C_x_guess, min=C_x_guess-10, max=C_x_guess+10)
    params.add('C_y', value=C_y_guess, min=C_y_guess-10, max=C_y_guess+10)
    params.add('phi0', value=phi0_guess, min=phi0_guess-np.pi/10, max=phi0_guess+np.pi/10)
    # params.add('t0', value=t0_guess, min=t0_guess-1, max=t0_guess+1)#vary=False)
    # params.add('R', value=R_guess, )#min=R_guess-25, max=R_guess+25)
    # params.add('Lambda', value=Lambda_guess,) #min=Lambda_guess-25, max=Lambda_guess+25)
    # params.add('C_x', value=C_x_guess,) #min=C_x_guess-10, max=C_x_guess+10)
    # params.add('C_y', value=C_y_guess,) #min=C_y_guess-10, max=C_y_guess+10)
    # params.add('phi0', value=phi0_guess, min=0., max=2*np.pi)
    params.add('t0', value=t0_guess, vary=False)
    params.add('m', value=m, vary=False)
    params.add('q', value=q, vary=False)
    params.add('B', value=B, vary=False)

    result = model.fit(track_data_rot[1:], t=track_data_rot[0],params=params)
    params_fit = {key:val.value for key,val in result.params.items()}
    mom_fit = LHelix_get_momentum(params_fit['R'], params_fit['Lambda'], m, q, B)

    track_fit_xyz, track_fit_mom, track_fit_mom_vec = LHelix_P(track_data[0], **params_fit)
    # rotate track fit and momentum vec
    if rot:
        track_fit_xyz = rot_inv_func.apply(track_fit_xyz.T).T
        track_fit_mom_vec = rot_inv_func.apply(track_fit_mom_vec.T).T
    track_fit_xyz = track_fit_xyz + np.repeat(translate_vec[:,1:], track_data.shape[1], axis=0).T
    track_fit_xyz = 1e-3*track_fit_xyz
    df_fit = pd.DataFrame({'t':(track_data[0]+translate_vec[:,0])*1e-9, 'x':track_fit_xyz[0],
                           'y':track_fit_xyz[1], 'z':track_fit_xyz[2],
                           'px':track_fit_mom_vec[0], 'py':track_fit_mom_vec[1],
                           'pz':track_fit_mom_vec[2]})
    # return track_data_rot, R_guess, Lambda_guess, C_x_guess, C_y_guess, phi0_guess, t0_guess
    return mom_fit, result, df_fit, params_fit, mom_guess, params_guess
Code example #14
    def _init_parameters(self, init, fix):
        """Initialize lmfit parameters dictionary"""

        self._initial_guess(init)

        # initialize parameters
        pars = lmfit.Parameters()
        for vn, vinit in init.items():
            if vn in self.fitqdep:
                pars.add(vn)
            else:
                pars.add(vn, value=vinit[0], min=vinit[1], max=vinit[2], vary=True)
        pars.add("a", value=init["a"][0], min=init["a"][1], max=init["a"][2], vary=True)
        pars.add(
            "beta",
            value=init["beta"][0],
            min=init["beta"][1],
            max=init["beta"][2],
            vary=True,
        )

        self._init_pars_dataframe(init)
        self._make_varnames()

        # setting contrast constraint
        if not "b0" in fix:
            beta_constraint = self._get_beta_constraint(ndat=-1)
            pars["b0"].set(expr=beta_constraint)

        # setting parameters fixed
        fixed_values = {}
        for vn in fix.keys():
            if vn in pars.keys():
                val = fix[vn]
                if isinstance(val, Iterable):
                    fixed_values[vn] = val
                else:
                    pars[vn].set(value=val, min=-np.inf, max=np.inf, vary=False)

        # modifying pars for global fitting if not globalfit then ndat=1
        lpars = list(pars.items())
        newpars = []
        for j in range(self.ndat - 1):
            for pk, pv in reversed(sorted(lpars)):
                if (
                    (pk not in self.fitglobal)
                    or (pk in self.fitqdep)
                    or (pk in fixed_values)
                ):
                    if pk in fixed_values and j == 0:
                        pv.set(value=fixed_values[pk][j], vary=False)
                    pt = copy(pv)
                    pt.name += f"_{j}"
                    if pk == "b0" and pk not in fixed_values:
                        beta_constraint = self._get_beta_constraint(ndat=j)
                        pt.set(expr=beta_constraint)
                    if pk in fixed_values:
                        pt.set(value=fixed_values[pk][j + 1], vary=False)
                    # print(pars)
                    # print(pt)
                    pars.add(pt)

        re_par = re.compile(r"([tgb]\d)|(beta)")
        for p in self.fitqdep:
            for new_par, par_kw in self.fitqdep[p]["pars"].items():
                pars.add(new_par, **par_kw)
            for j in range(self.ndat):
                counter_str = f"_{j-1}" * bool(j)
                vn = p + counter_str
                q = self.qv[self.nq[j]]
                expr = copy(self.fitqdep[p]["expr"])
                cpars_groups = re_par.findall(expr)
                for cpars in set(cpars_groups):
                    for c in cpars:
                        if not len(c) or c in self.fitglobal:
                            continue
                        expr = expr.replace(
                            f"{c}", str(c) + counter_str
                        )  # self._varnames[c[0]][j][int(c[1])])

                expr = expr.replace("q", f"{q:.5f}")
                pars[vn].set(expr=expr)

        if (
            sum([x.startswith("t") for x in self.fitglobal if not isinstance(x, int)])
            == 0
        ):
            for j in range(self.ndat):
                for i in range(1, self.nmodes):
                    vc = f"t{i}" + f"_{j-1}" * bool(j)
                    vs = f"t{i-1}" + f"_{j-1}" * bool(j)
                    pars.add(vs + "_dtmp", value=pars[vc].value - pars[vs].value, min=0)
                    pars[vc].set(expr=f"{vs} + {vs}" + "_dtmp")

        pars._asteval.symtable["gamma"] = gamma
        self._lmpars = pars
Code example #15
File: resonator_sweep.py  Project: vapor36/scraps
    def do_emcee(self,
                 fit_keys,
                 models_list,
                 params_list=None,
                 model_kwargs=None,
                 param_kwargs=None,
                 emcee_kwargs=None,
                 **kwargs):
        r"""Run simulatneous MCMC sampling on the temp/pwr data for several
        parameters. Results are stored in either the ``emcee_results`` or
        ``emcee_joint_results`` attribute depending on whether one or multiple
        keys are passed to `fit_keys`.

        Parameters
        ----------
        fit_keys : list-like
            A list of keys that correspond to existing data. Any combination of
            keys from ``self.keys()`` is acceptable, but duplicates are not
            permitted.

        models_list : list-like
            A list of fit functions, one per key in `fit_keys`. Function must
            return a residual of the form: ``residual = (model-data)/sigma``
            where ``residual``, ``model``, and ``data`` are all ``numpy``
            arrays. Function signature is ``model_func(params, temps, powers,
            data=None, sigmas=None)``. If ``data==None`` the functions must
            return the model calculated at ``temps`` and ``powers``. The model
            functions should also gracefully handle ``np.NaN`` or ``None``
            values.

        params_list : list-like
            A list of ``lmfit.Parameters`` objects, one for each key in
            `fit_keys`. Parameters sharing the same name will be merged so that
            the fit is truly joint. Alternately, a list of functions that return
            ``lmfit.Parameters`` objects may be passed. In this case, one should
            use `param_kwargs` to pass any needed options to the functions.
            Default is ``None`` and is equivalent to setting ``use_lmfit_params =
            True``.

        model_kwargs : list-like (optional)
            A list of ``dict`` objects to pass to the individual model functions
            as kwargs. ``None`` is also an acceptable entry  if there are no
            kwargs to pass to a model function. Default is ``None.``

        param_kwargs : list-like (optional)
            A list of ``dict`` objects to pass to the individual params
            functions as kwargs. ``None`` is also an acceptable entry  if
            there are no kwargs to pass to a model function. Default is
            ``None.``

        emcee_kwargs : dict (optional)
            Keyword arguments to pass options to the fitter

        Keyword Arguments
        -----------------
        min_temp : numeric
            Lower limit of temperature to fit. Default is the minimum
            temperature in the data.

        max_temp : numeric
            Upper limit of temperature to fit. Default is the maximum
            temperature in the data.

        min_pwr : numeric
            Lower limit of power to fit. Default is the minimum power in the
            data.

        max_pwr : numeric
            Upper limit of power to fit. Default is the maximum power in the
            data.

        use_lmfit_params : bool
            Whether or not to use the best-fit ``lmfit.Parameters`` object
            that resulted from calling ``ResonatorSweep.do_lmfit()`` as the
            starting value for the MCMC sampler. Default is True.

        raw_data : string {'lmfit', 'emcee', 'mle'}
            Whether to use the values returned by lmfit, or the values returned
            by the emcee fitter (either the 50th percentile or the maximum
            likelihood). This also chooses which set of errorbars to use: either
            those from the lmfit covariance matrix, or those from the 16th and
            84th percentiles of the posterior probability distribution. Default
            is 'lmfit'.

        Note
        ----
        If the fits are successful, the resulting fit data (i.e. the best-fit
        surface) will be added to the self dict in the form of a
        ``pandas.DataFrame`` under the following keys:

        For a joint fit (``len(fit_keys) > 1``)::

            'emcee_joint_'+joint_key+'_'+key for each key in fit_keys

        For a single fit (``len(fit_keys) == 1``)::

            'emcee_'+key

        """

        #Figure out which data to fit
        raw_data = kwargs.pop('raw_data', 'lmfit')
        assert raw_data in ['lmfit', 'emcee', 'mle'], \
            "raw_data must be 'lmfit', 'emcee', or 'mle'."

        #Set some limits
        min_temp = kwargs.pop('min_temp', min(self.tvec))
        max_temp = kwargs.pop('max_temp', max(self.tvec))
        t_filter = (self.tvec >= min_temp) * (self.tvec <= max_temp)

        min_pwr = kwargs.pop('min_pwr', min(self.pvec))
        max_pwr = kwargs.pop('max_pwr', max(self.pvec))
        p_filter = (self.pvec >= min_pwr) * (self.pvec <= max_pwr)

        if params_list is not None:
            assert len(fit_keys) == len(models_list) == len(
                params_list), "Make sure argument lists match in number."
        else:
            assert len(fit_keys) == len(
                models_list), "Make sure argument lists match in number."

        #Make some empty dictionaries just in case so we don't break functions
        #by passing None as kwargs
        if model_kwargs is None:
            model_kwargs = [{}] * len(fit_keys)

        if param_kwargs is None:
            param_kwargs = [{}] * len(fit_keys)

        if emcee_kwargs is None:
            emcee_kwargs = {}

        #Check to see if this should go in the joint_fits dict, and build a key if needed.
        if len(fit_keys) > 1:
            joint_key = '+'.join(fit_keys)
        else:
            joint_key = None

        #If possible (and desired) then we should use the existing best fit as a starting point
        #For the MCMC sampling. If not, build params from whatever is passed in.
        use_lmfit_params = kwargs.pop('use_lmfit_params', True)

        if (params_list is not None) and (use_lmfit_params == False):

            #Check if params looks like a lmfit.Parameters object.
            #If not, assume is function and try to set params by calling it
            for px, p in enumerate(params_list):
                if not hasattr(p, 'valuesdict'):
                    assert param_kwargs[px] is not None, \
                        "If passing functions to params, must specify param_kwargs."
                    params_list[px] = p(**param_kwargs[px])

            #Combine the different params objects into one large list
            #Only the first of any duplicates will be transferred
            merged_params = lf.Parameters()
            if len(params_list) > 1:
                for p in params_list:
                    for key in p.keys():
                        if key not in merged_params.keys():
                            merged_params[key] = p[key]
            else:
                merged_params = params_list[0]

        else:
            if joint_key is not None:
                assert joint_key in self.lmfit_joint_results.keys(
                ), "Can't use lmfit params. They don't exist."
                merged_params = self.lmfit_joint_results[joint_key].params
            else:
                assert fit_keys[0] in self.lmfit_results.keys(
                ), "Can't use lmfit params. They don't exist."
                merged_params = self.lmfit_results[fit_keys[0]].params

        #Get all the possible temperature/power combos into two grids
        ts, ps = np.meshgrid(self.tvec[t_filter], self.pvec[p_filter])

        #Create grids to hold the fit data and the sigmas
        fit_data_list = []
        fit_sigmas_list = []

        #Get the data that corresponds to each temperature power combo and
        #flatten it to match the ts/ps combinations
        #Transposing is important because numpy matrices are transposed from
        #Pandas DataFrames
        for key in fit_keys:

            if raw_data == 'emcee':
                key = key + '_mc'
            elif raw_data == 'mle':
                key = key + '_mle'

            if raw_data in ['emcee', 'mle']:
                err_bars = (
                    self[key + '_sigma_plus_mc'].loc[t_filter,
                                                     p_filter].values.T +
                    self[key + '_sigma_minus_mc'].loc[t_filter,
                                                      p_filter].values.T)
            else:
                err_bars = self[key + '_sigma'].loc[t_filter,
                                                    p_filter].values.T

            fit_data_list.append(self[key].loc[t_filter, p_filter].values.T)
            fit_sigmas_list.append(err_bars)

        #Create a new model function that will be passed to the minimizer.
        #Basically this runs each fit and passes all the residuals back out
        def model_func(params, models, ts, ps, data, sigmas, kwargs):
            residuals = []
            for ix in range(len(fit_keys)):
                residuals.append(models[ix](params, ts, ps, data[ix],
                                            sigmas[ix], **kwargs[ix]))

            return np.asarray(residuals).flatten()

        #Create a lmfit minimizer object
        minObj = lf.Minimizer(model_func,
                              merged_params,
                              fcn_args=(models_list, ts, ps, fit_data_list,
                                        fit_sigmas_list, model_kwargs))

        #Call the lmfit minimizer method and minimize the residual
        emcee_result = minObj.emcee(**emcee_kwargs)

        #Put the result in the appropriate dictionary
        if joint_key is not None:
            self.emcee_joint_results[joint_key] = emcee_result
        else:
            self.emcee_results[fit_keys[0]] = emcee_result

        #Calculate the best-fit model from the params returned
        #And put it into a pandas DF with the appropriate key.
        #The appropriate key format is: 'emcee_joint_'+joint_key+'_'+key
        #or, for a single fit: 'emcee_'+key
        for ix, key in enumerate(fit_keys):
            #Call the fit model without data to have it return the model
            returned_model = models_list[ix](emcee_result.params, ts, ps)

            #Build the appropriate key
            if joint_key is not None:
                new_key = 'emcee_joint_' + joint_key + '_' + key
            else:
                new_key = 'emcee_' + key

            #Make a new dict entry to the self dictionary with the right key.
            #Have to transpose the matrix to turn it back into a DF
            self[new_key] = pd.DataFrame(np.nan,
                                         index=self.tvec,
                                         columns=self.pvec)
            self[new_key].loc[self.tvec[t_filter],
                              self.pvec[p_filter]] = returned_model.T
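
The joint-fit machinery above reduces to a simple pattern: one merged
lmfit.Parameters object feeds several residual functions, and the residuals
are concatenated before sampling. A minimal self-contained sketch of that
pattern (illustrative names only, not part of the class above; requires the
emcee package):

import lmfit as lf
import numpy as np

x = np.linspace(0, 10, 100)
y1 = 3.0 * x + 1.0 + np.random.normal(0, 0.5, x.size)
y2 = 3.0 * x**2 + np.random.normal(0, 0.5, x.size)

def joint_residual(params, x, y1, y2):
    # both datasets share the 'slope' parameter
    slope = params['slope'].value
    resid1 = y1 - (slope * x + params['intercept'].value)
    resid2 = y2 - slope * x**2
    return np.concatenate([resid1, resid2])

params = lf.Parameters()
params.add('slope', value=1.0)
params.add('intercept', value=0.0)

mini = lf.Minimizer(joint_residual, params, fcn_args=(x, y1, y2))
emcee_result = mini.emcee(steps=500, burn=100)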
Code example #16
File: indexer.py Project: Taimin/problematic
    def refine(self,
               img,
               result,
               projector=None,
               verbose=True,
               method="least-squares",
               fit_tol=0.1,
               vary_center=True,
               vary_scale=True,
               vary_alphabeta=True,
               vary_gamma=True,
               **kwargs):
        """
        Refine the orientation of the given solution against the given image

        img: ndarray
            Image array
        result: IndexingResult object
            Specifications of the solution to be refined
        projector: Projector object, optional
            This keyword should be specified if projector is not already an attribute on Indexer,
            or if a different one should be used
        method: str, optional
            Minimization method to use, should be one of 'nelder', 'powell', 'cobyla', 'least-squares'
        fit_tol: float
            Tolerance for termination. For detailed control, use solver-specific options.
        """
        if not projector:
            projector = self.projector

        f_kws = kwargs.get("kws", None)

        def objfunc(params, img):
            cx = params["center_x"].value
            cy = params["center_y"].value
            al = params["alpha"].value
            be = params["beta"].value
            ga = params["gamma"].value
            sc = params["scale"].value

            proj = projector.get_projection(al, be, ga)
            pks = proj[:, 3:6]
            score = get_score_shape(img, pks, sc, cx, cy)

            return 1e3 / (1 + score)

        params = lmfit.Parameters()
        params.add("center_x",
                   value=result.center_x,
                   vary=vary_center,
                   min=result.center_x - 2.0,
                   max=result.center_x + 2.0)
        params.add("center_y",
                   value=result.center_y,
                   vary=vary_center,
                   min=result.center_y - 2.0,
                   max=result.center_y + 2.0)
        params.add("alpha", value=result.alpha, vary=vary_alphabeta)
        params.add("beta", value=result.beta, vary=vary_alphabeta)
        params.add("gamma", value=result.gamma, vary=vary_gamma)
        params.add("scale",
                   value=result.scale,
                   vary=vary_scale,
                   min=result.scale * 0.8,
                   max=result.scale * 1.2)

        args = (img, )

        res = lmfit.minimize(objfunc,
                             params,
                             args=args,
                             method=method,
                             tol=fit_tol,
                             kws=f_kws)

        if verbose:
            lmfit.report_fit(res)

        p = res.params

        alpha, beta, gamma = [
            round(p[key].value, 4) for key in ("alpha", "beta", "gamma")
        ]
        scale, center_x, center_y = [
            round(p[key].value, 2) for key in ("scale", "center_x", "center_y")
        ]

        proj = projector.get_projection(alpha, beta, gamma)
        pks = proj[:, 3:6]

        score = round(get_score_shape(img, pks, scale, center_x, center_y), 2)

        refined = IndexingResult(score=score,
                                 number=result.number,
                                 alpha=alpha,
                                 beta=beta,
                                 gamma=gamma,
                                 center_x=center_x,
                                 center_y=center_y,
                                 scale=scale,
                                 phase=result.phase)

        return np.array(refined, dtype=IndexingResultDType).view(np.recarray)
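
The 1e3 / (1 + score) construction above turns a score to be maximized into a
bounded positive quantity that lmfit can minimize. A self-contained toy
version of the same pattern (illustrative names only):

import lmfit
import numpy as np

def toy_score(x, y):
    # peaks at (1, 2); larger is better
    return np.exp(-((x - 1.0)**2 + (y - 2.0)**2))

def objfunc(params):
    score = toy_score(params['x'].value, params['y'].value)
    return 1e3 / (1 + score)  # maximizing score == minimizing this

params = lmfit.Parameters()
params.add('x', value=0.0)
params.add('y', value=0.0)

res = lmfit.minimize(objfunc, params, method='nelder')
lmfit.report_fit(res)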
Code example #17
    w6 = params['w6'].value
    m0 = params['m0'].value
    a0 = params['a0'].value
    a7 = params['a7'].value
    w7 = params['w7'].value

    model = \
        gauss(a1, w1, xc1, x) + gauss(a2, w2, xc2, x) + \
        gauss(a3, w3, xc3, x) + gauss(a4, w4, xc4, x) + \
        gauss(a5, w5, xc5, x) + gauss(a6, w6, xc6, x) + \
        gauss(a7, w7, xc7, x) + background(m0, a0, x)

    return model - data


params = lm.Parameters()
#         (Name ,      Value     ,  Vary, Min, Max, Expr)
params.add('a1', initial_guess[0], False, 0, 1E5, None)
params.add('a2', initial_guess[1], False, 0, 1E5, None)
params.add('a3', initial_guess[2], False, 0, 1E5, None)
params.add('a4', initial_guess[3], False, 0, 1E5, None)
params.add('a5', initial_guess[4], True, 0, 1E5, None)
params.add('a6', initial_guess[5], False, 0, 1E5, None)
params.add('w1', initial_guess[6], False, 0, 1E5, None)
params.add('w2', initial_guess[7], False, 0, 1E5, None)
params.add('w3', initial_guess[8], False, 0, 1E5, None)
params.add('w4', initial_guess[9], False, 0, 1E5, None)
params.add('w5', initial_guess[10], True, 0, 40, None)
params.add('w6', initial_guess[11], False, 0, 1E5, None)
params.add('m0', initial_guess[12], False, 0, 1E5, None)
params.add('a0', initial_guess[13], False, 0, 1E5, None)
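
The gauss and background helpers referenced above are defined elsewhere in
the source file. A plausible minimal form, assuming an area-normalized
Gaussian and a linear background (hypothetical reconstruction, not the
original code):

import numpy as np

def gauss(a, w, xc, x):
    # hypothetical: Gaussian with area a, width w, center xc
    return a / (w * np.sqrt(2 * np.pi)) * np.exp(-0.5 * ((x - xc) / w)**2)

def background(m0, a0, x):
    # hypothetical: linear background with slope m0 and offset a0
    return m0 * x + a0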
Code example #18
File: indexer.py Project: Taimin/problematic
    def probability_distribution(self,
                                 img,
                                 result,
                                 projector=None,
                                 verbose=True,
                                 vary_center=False,
                                 vary_scale=True):
        """https://lmfit.github.io/lmfit-py/fitting.html#lmfit.minimizer.Minimizer.emcee

        Calculate posterior probability distribution of parameters"""
        import corner
        import emcee

        if not projector:
            projector = self.projector

        def objfunc(params, pks, img):
            cx = params["center_x"].value
            cy = params["center_y"].value
            al = params["alpha"].value
            be = params["beta"].value
            ga = params["gamma"].value
            sc = params["scale"].value

            proj = projector.get_projection(al, be, ga)
            pks = proj[:, 3:6]
            score = get_score_shape(img, pks, sc, cx, cy)

            resid = 1e3 / (1 + score)

            # Log-likelihood probability for the sampling.
            # Estimate size of the uncertainties on the data
            s = params['f']
            resid *= 1 / s
            resid *= resid
            resid += np.log(2 * np.pi * s**2)
            return -0.5 * np.sum(resid)

        params = lmfit.Parameters()
        params.add("center_x",
                   value=result.center_x,
                   vary=vary_center,
                   min=result.center_x - 2.0,
                   max=result.center_x + 2.0)
        params.add("center_y",
                   value=result.center_y,
                   vary=vary_center,
                   min=result.center_y - 2.0,
                   max=result.center_y + 2.0)
        params.add("alpha",
                   value=result.alpha + 0.01,
                   vary=True,
                   min=result.alpha - 0.1,
                   max=result.alpha + 0.1)
        params.add("beta",
                   value=result.beta + 0.01,
                   vary=True,
                   min=result.beta - 0.1,
                   max=result.beta + 0.1)
        params.add("gamma",
                   value=result.gamma + 0.01,
                   vary=True,
                   min=result.gamma - 0.1,
                   max=result.gamma + 0.1)
        params.add("scale",
                   value=result.scale,
                   vary=vary_scale,
                   min=result.scale * 0.8,
                   max=result.scale * 1.2)

        # Noise parameter
        params.add('f', value=1, min=0.001, max=2)

        pks_current = projector.get_projection(result.alpha, result.beta,
                                               result.gamma)[:, 3:5]

        args = pks_current, img

        mini = lmfit.Minimizer(objfunc, params, fcn_args=args)
        res = mini.emcee(params=params)

        if verbose:
            print("\nMedian of posterior probability distribution")
            print("--------------------------------------------")
            lmfit.report_fit(res)

        # find the maximum likelihood solution
        highest_prob = np.argmax(res.lnprob)
        hp_loc = np.unravel_index(highest_prob, res.lnprob.shape)
        mle_soln = res.chain[hp_loc]

        if verbose:
            for i, par in enumerate(res.var_names):
                params[par].value = mle_soln[i]
            print("\nMaximum likelihood Estimation")
            print("-----------------------------")
            print(params)

        corner.corner(res.flatchain,
                      labels=res.var_names,
                      truths=[
                          res.params[par].value for par in res.params
                          if res.params[par].vary
                      ])
        plt.show()
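
Beyond the maximum-likelihood lookup above, percentile-based uncertainties
can be read straight off the flattened chain (res.flatchain is a
pandas.DataFrame of samples, one column per varying parameter); a short
sketch using the result object above:

import numpy as np

for name in res.var_names:
    lo, med, hi = np.percentile(res.flatchain[name], [16, 50, 84])
    print(f"{name}: {med:.4f} +{hi - med:.4f} / -{med - lo:.4f}")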
Code example #19
File: ffopt-sigplot.py Project: DCM-UPB/RPMD_Mainz
def sigres(vpres_ffm_obj, fitparams, respower, sigmin, sigmax, npoints):
    sqvals = np.empty((npoints, 2))
    dsig = (sigmax - sigmin) / (npoints - 1)
    for it in range(npoints):
        sqvals[it, 0] = sigmin + it * dsig
        fitparams["oo_sig"].value = sqvals[it, 0]
        resid = vpres_ffm_obj(fitparams, respower)
        sqvals[it, 1] = np.dot(resid, resid)
    return sqvals


ndata = 1500
linpars0 = np.array([1.21, 0.00025, 0.135, 0.062])
nolpars0 = np.array([0.7, 5.8, 1.3, 1.84, 107.0])
fitparams = lmf.Parameters()
fitparams.add('alpha', value=nolpars0[0], min=0.500, max=1.000)
fitparams.add('oo_sig', value=nolpars0[1], min=0.010, max=10.000)
fitparams.add('alp', value=nolpars0[2], min=0.010, max=100.0)
fitparams.add('reoh', value=nolpars0[3], min=1.500, max=2.000)
fitparams.add('thetad', value=nolpars0[4], min=90.00, max=120.0)

natom = 375
ndata = 1500
linp0 = linpars0
traj_fname = "all_tray.xyz"
aifrc_fname = "FORCES-PBE-ALL.frc"
parout_fname = "param_PBE-testsig"
respower = 0.5
fitftol = 1.e-6
lskwords = {'ftol': fitftol}
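
sigres returns an (npoints, 2) array of (oo_sig, sum-of-squared-residuals)
pairs, so the scan can be plotted directly. A minimal sketch, assuming
vpres_ffm_obj is the residual callable constructed elsewhere in this script:

import matplotlib.pyplot as plt

sqvals = sigres(vpres_ffm_obj, fitparams, respower,
                sigmin=2.0, sigmax=8.0, npoints=50)
plt.plot(sqvals[:, 0], sqvals[:, 1])
plt.xlabel('oo_sig')
plt.ylabel('sum of squared residuals')
plt.show()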
Code example #20
        Neg_Binom=[]
        

        Act_ploidies = list(np.loadtxt("../Data/Third_Analysis/%s/Genome_ploidy_%s.txt" %(MEANDEPTH,i)))   
        Num_1.append(Act_ploidies.count(1))
        Num_2.append(Act_ploidies.count(2))
        Num_3.append(Act_ploidies.count(3))
        Num_4.append(Act_ploidies.count(4))
        Num_5.append(Act_ploidies.count(5))
        Expected.append(len(set(Act_ploidies)))


            
        ##### take relevant part of each genome for analysis and add appropriate coefficient parameters

        Poiss_Params_1=lmfit.Parameters()
        Poiss_Params_1.add('mu1',value=np.percentile(data,50),min=1)

        Poiss_Params_2=lmfit.Parameters()
        Poiss_Params_2.add('mu1',value=np.percentile(data,33),min=1)
        Poiss_Params_2.add('mu2',value=np.percentile(data,66),min=1)
        Poiss_Params_2.add('coef1',value=0.5,max=1,min=0.001)

        Poiss_Params_3=lmfit.Parameters()
        Poiss_Params_3.add('mu1',value=np.percentile(data,25),min=1)
        Poiss_Params_3.add('mu2',value=np.percentile(data,50),min=1)
        Poiss_Params_3.add('mu3',value=np.percentile(data,75),min=1)
        Poiss_Params_3.add('coef1',value=0.3333,max=1,min=0.001)
        Poiss_Params_3.add('coef2',value=0.3333,max=1,min=0.001)

        Poiss_Params_4=lmfit.Parameters()
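        # Hypothetical continuation -- the excerpt is truncated here. Following
        # the pattern above, the four-component set would presumably be:
        Poiss_Params_4.add('mu1',value=np.percentile(data,20),min=1)
        Poiss_Params_4.add('mu2',value=np.percentile(data,40),min=1)
        Poiss_Params_4.add('mu3',value=np.percentile(data,60),min=1)
        Poiss_Params_4.add('mu4',value=np.percentile(data,80),min=1)
        Poiss_Params_4.add('coef1',value=0.25,max=1,min=0.001)
        Poiss_Params_4.add('coef2',value=0.25,max=1,min=0.001)
        Poiss_Params_4.add('coef3',value=0.25,max=1,min=0.001)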
Code example #21
    def _calc_focus_model(self):
        """Calculate new focus model from saved entries."""
        import lmfit

        # no coefficients? no model...
        if not self._coefficients or self._table_storage is None:
            return

        # only take clear filter images for now
        data = self._table_storage.copy()

        # enough measurements?
        if len(data) < self._min_measurements:
            log.warning(
                'Not enough measurements found for re-calculating model (%d<%d).',
                len(data), self._min_measurements)
            return

        # build parameters
        params = lmfit.Parameters()
        for c in self._coefficients.keys():
            params.add(c, 0.)

        # if we want to fit filter offsets, add them to params
        if self._filter_offsets is not None:
            # get unique list of filters and add them
            for f in data['filter'].unique():
                params.add('off_' + f, 0.)

        # fit
        log.info('Fitting coefficients...')
        out = lmfit.minimize(self._residuals, params, args=(data, ))

        # print results
        log.info('Found best coefficients:')
        for p in out.params:
            if not p.startswith('off_'):
                if out.params[p].stderr is not None:
                    log.info('  %-5s = %10.5f +- %8.5f', p,
                             out.params[p].value, out.params[p].stderr)
                else:
                    log.info('  %-5s = %10.5f', p, out.params[p].value)
        if self._filter_offsets is not None:
            log.info('Found filter offsets:')
            for p in out.params:
                if p.startswith('off_'):
                    if out.params[p].stderr is not None:
                        log.info('  %-10s = %10.5f +- %8.5f', p[4:],
                                 out.params[p].value, out.params[p].stderr)
                    else:
                        log.info('  %-10s = %10.5f', p[4:],
                                 out.params[p].value)

        log.info('Reduced chi squared: %.3f', out.redchi)

        # store new coefficients and filter offsets
        if self._update_model:
            # just copy all?
            d = dict(out.params.valuesdict())
            if self._filter_offsets is None:
                self._coefficients = d
            else:
                # need to separate
                self._coefficients = {
                    k: v
                    for k, v in d.items() if not k.startswith('off_')
                }
                self._filter_offsets = {
                    k[4:]: v
                    for k, v in d.items() if k.startswith('off_')
                }
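
A plausible shape for the self._residuals callback used above, assuming the
focus model is linear in the named coefficient terms with one additive offset
per filter (hypothetical sketch; the column names 'temp', 'alt', 'filter',
and 'focus' are illustrative, not the module's actual schema):

import numpy as np

def residuals(params, data, coefficients=('temp', 'alt')):
    # hypothetical: focus modeled as a linear combination of named columns
    model = sum(params[c].value * data[c].values for c in coefficients)
    # add one offset per filter when those parameters exist
    offsets = np.array([params['off_' + f].value if 'off_' + f in params
                        else 0.0 for f in data['filter']])
    return data['focus'].values - (model + offsets)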
Code example #22
File: aperturecorrector.py Project: rag9704/PTS
    def calculate_aperture_correction(self):
        """
        # Define function that uses provided beam profile to aperture-correct photometry

        INPUT_DICT:
        ----------

        psf:    the PSF, or a False boolean
        cutout: Array upon which photometry is being performed.

        CONFIG:
        -------

        pix_arcsec: The width, in arcsec, of the pixels in the map upon which photometry is being performed
                    (this is needed in case there is a pixel size mismatch with PSF).
        semimaj_pix: Semi-major axis of photometric aperture, in pixels.
        axial_ratio: Axial ratio of photometric aperture.
        angle: Position angle of photometric aperture, in degrees.

        centre_i: Zero-indexed, 0th-axis coordinate (equivalent to y-axis one-indexed coordinates in FITS terms)
                  of centre position of photometric aperture.
        centre_j: Zero-indexed, 1st-axis coordinate (equivalent to x-axis one-indexed coordinates in FITS terms) of
                  centre position of photometric aperture.

        ... and other ... (see configuration definition)

        """

        ### INPUT

        psf = self.psf.data  # PSF MUST BE PREPARED

        # Cutout
        cutout = self.cutout.data

        #####

        # Produce mask for pixels we care about for fitting (i.e., are inside photometric aperture and background annulus)
        mask = chrisfuncs.EllipseMask(
            cutout, self.config.semimaj_pix, self.config.axial_ratio,
            self.config.angle, self.config.centre_i,
            self.config.centre_j)  # *band_dict['annulus_outer']

        ##
        # from pts.magic.tools import plotting
        # plotting.plot_mask(mask)
        ##

        # Produce guess values
        initial_sersic_amplitude = cutout[int(round(self.config.centre_i)),
                                          int(round(self.config.centre_j))]
        initial_sersic_r_eff = self.config.semimaj_pix / 10.0
        initial_sersic_n = 1.0
        initial_sersic_x_0 = self.config.centre_j
        initial_sersic_y_0 = self.config.centre_i
        initial_sersic_ellip = (self.config.axial_ratio -
                                1.0) / self.config.axial_ratio
        initial_sersic_theta = np.deg2rad(self.config.angle)

        # Produce sersic model from guess parameters, for time trials
        sersic_x, sersic_y = np.meshgrid(np.arange(cutout.shape[1]),
                                         np.arange(cutout.shape[0]))
        sersic_model = astropy.modeling.models.Sersic2D(
            amplitude=initial_sersic_amplitude,
            r_eff=initial_sersic_r_eff,
            n=initial_sersic_n,
            x_0=initial_sersic_x_0,
            y_0=initial_sersic_y_0,
            ellip=initial_sersic_ellip,
            theta=initial_sersic_theta)
        sersic_map = sersic_model(sersic_x, sersic_y)

        # Make sure that PSF array is smaller than sersic model array (as required for convolution); if not, remove its edges such that it is
        if psf.shape[0] > sersic_map.shape[0] or psf.shape[
                1] > sersic_map.shape[1]:
            excess = max(psf.shape[0] - sersic_map.shape[0],
                         psf.shape[1] - sersic_map.shape[1])
            border = max(2, int(np.round(np.ceil(float(excess) / 2.0) - 1.0)))
            psf = psf[border:, border:]
            psf = psf[:-border, :-border]

        # Determine whether FFT convolution or direct convolution is faster for this kernel,
        # using sersic model produced with guess parameters
        time_fft = time.time()
        conv_map = astropy.convolution.convolve_fft(sersic_map,
                                                    psf,
                                                    normalize_kernel=True)
        time_fft = time.time() - time_fft
        time_direct = time.time()
        conv_map = astropy.convolution.convolve(sersic_map,
                                                psf,
                                                normalize_kernel=True)
        time_direct = time.time() - time_direct

        # Pick the faster convolution method for the fitting loop
        use_fft = time_fft < time_direct

        # Set up parameters to fit galaxy with 2-dimensional sersic profile
        params = lmfit.Parameters()
        params.add('sersic_amplitude',
                   value=initial_sersic_amplitude,
                   vary=True)
        params.add('sersic_r_eff',
                   value=initial_sersic_r_eff,
                   vary=True,
                   min=0.0,
                   max=self.config.semimaj_pix)
        params.add('sersic_n',
                   value=initial_sersic_n,
                   vary=True,
                   min=0.1,
                   max=10)
        params.add('sersic_x_0', value=initial_sersic_x_0, vary=False)
        params.add('sersic_y_0', value=initial_sersic_y_0, vary=False)
        params.add('sersic_ellip',
                   value=initial_sersic_ellip,
                   vary=True,
                   min=0.5 * initial_sersic_ellip,
                   max=0.5 * (1.0 - initial_sersic_ellip) +
                   initial_sersic_ellip)
        params.add('sersic_theta', value=initial_sersic_theta, vary=False)

        # Solve with LMfit to find parameters of best-fit sersic profile
        result = lmfit.minimize(chi_squared_sersic,
                                params,
                                args=(cutout, psf, mask, use_fft),
                                method='leastsq',
                                ftol=1E-5,
                                xtol=1E-5,
                                maxfev=200)

        # Extract best-fit results
        sersic_amplitude = result.params['sersic_amplitude'].value
        sersic_r_eff = result.params['sersic_r_eff'].value
        sersic_n = result.params['sersic_n'].value
        sersic_x_0 = result.params['sersic_x_0'].value
        sersic_y_0 = result.params['sersic_y_0'].value
        sersic_ellip = result.params['sersic_ellip'].value
        sersic_theta = result.params['sersic_theta'].value

        # Construct underlying sersic map and convolved sersic map, using best-fit parameters
        sersic_model = astropy.modeling.models.Sersic2D(
            amplitude=sersic_amplitude,
            r_eff=sersic_r_eff,
            n=sersic_n,
            x_0=sersic_x_0,
            y_0=sersic_y_0,
            ellip=sersic_ellip,
            theta=sersic_theta)
        sersic_map = sersic_model(sersic_x, sersic_y)

        if use_fft:
            conv_map = astropy.convolution.convolve_fft(sersic_map,
                                                        psf,
                                                        normalize_kernel=True)
        else:
            conv_map = astropy.convolution.convolve(sersic_map,
                                                    psf,
                                                    normalize_kernel=True)

        # Determine annulus properties before proceeding with photometry
        # bg_inner_semimaj_pix = input_dict['semimaj_pix'] * input_dict['annulus_inner'] # number of pixels of semimajor axis of inner annulus ellipse
        # bg_width = (input_dict['semimaj_pix'] * input_dict['annulus_outer']) - bg_inner_semimaj_pix # number of pixels of difference between outer major axis and minor major axis

        bg_inner_semimaj_pix = self.config.semimaj_pix_annulus_inner
        bg_width = self.config.semimaj_pix_annulus_outer - bg_inner_semimaj_pix
        axial_ratio_annulus = self.config.axial_ratio_annulus
        angle_annulus = self.config.annulus_angle
        centre_i_annulus = self.config.annulus_centre_i
        centre_j_annulus = self.config.annulus_centre_j

        # Evaluate pixels in source aperture and background annulus in UNCONVOLVED sersic map
        if self.config.subpixel_factor == 1.0:
            sersic_ap_calc = chrisfuncs.EllipseSum(
                sersic_map, self.config.semimaj_pix, self.config.axial_ratio,
                self.config.angle, self.config.centre_i, self.config.centre_j)
            sersic_bg_calc = chrisfuncs.AnnulusSum(
                sersic_map, bg_inner_semimaj_pix, bg_width,
                axial_ratio_annulus, angle_annulus, centre_i_annulus,
                centre_j_annulus)
        elif self.config.subpixel_factor > 1.0:
            sersic_ap_calc = chrisfuncs.EllipseSumUpscale(
                sersic_map,
                self.config.semimaj_pix,
                self.config.axial_ratio,
                self.config.angle,
                self.config.centre_i,
                self.config.centre_j,
                upscale=self.config.subpixel_factor)
            sersic_bg_calc = chrisfuncs.AnnulusSumUpscale(
                sersic_map,
                bg_inner_semimaj_pix,
                bg_width,
                axial_ratio_annulus,
                angle_annulus,
                centre_i_annulus,
                centre_j_annulus,
                upscale=self.config.subpixel_factor)
        else:
            raise ValueError("Invalid subpixel factor: " +
                             str(self.config.subpixel_factor))

        # Background-subtract and measure UNCONVOLVED sersic source flux
        sersic_bg_clip = chrisfuncs.SigmaClip(sersic_bg_calc[2],
                                              median=False,
                                              sigma_thresh=3.0)
        sersic_bg_avg = sersic_bg_clip[1] * self.config.subpixel_factor**2.0
        sersic_ap_sum = sersic_ap_calc[0] - (
            sersic_ap_calc[1] * sersic_bg_avg
        )  # sersic_ap_calc[1] = number of pixels counted for calculating sum (total flux in ellipse)

        # Evaluate pixels in source aperture and background annulus in CONVOLVED sersic map
        if self.config.subpixel_factor == 1.0:
            conv_ap_calc = chrisfuncs.EllipseSum(
                conv_map, self.config.semimaj_pix, self.config.axial_ratio,
                self.config.angle, self.config.centre_i, self.config.centre_j)
            conv_bg_calc = chrisfuncs.AnnulusSum(
                conv_map, bg_inner_semimaj_pix, bg_width, axial_ratio_annulus,
                angle_annulus, centre_i_annulus, centre_j_annulus)
        elif self.config.subpixel_factor > 1.0:
            conv_ap_calc = chrisfuncs.EllipseSumUpscale(
                conv_map,
                self.config.semimaj_pix,
                self.config.axial_ratio,
                self.config.angle,
                self.config.centre_i,
                self.config.centre_j,
                upscale=self.config.subpixel_factor)
            conv_bg_calc = chrisfuncs.AnnulusSumUpscale(
                conv_map,
                bg_inner_semimaj_pix,
                bg_width,
                axial_ratio_annulus,
                angle_annulus,
                centre_i_annulus,
                centre_j_annulus,
                upscale=self.config.subpixel_factor)
        else:
            raise ValueError("Invalid subpixel factor: " +
                             str(self.config.subpixel_factor))

        # Background-subtract and measure CONVOLVED sersic source flux
        conv_bg_clip = chrisfuncs.SigmaClip(conv_bg_calc[2],
                                            median=False,
                                            sigma_thresh=3.0)
        conv_bg_avg = conv_bg_clip[1] * self.config.subpixel_factor**2.0
        conv_ap_sum = conv_ap_calc[0] - (
            conv_ap_calc[1] * conv_bg_avg
        )  # conv_ap_calc[1] = number of pixels counted for calculating sum (total flux in ellipse)

        # Find difference between flux measured on convolved and unconvolved sersic maps
        ap_correction = np.nanmax([1.0, (sersic_ap_sum / conv_ap_sum)])

        # Return aperture correction
        #return ap_correction

        # Set the factor
        self.factor = ap_correction
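
chi_squared_sersic is defined elsewhere in the module. A plausible minimal
form: rebuild the Sersic map from the current parameters, convolve with the
PSF, and return the masked residuals (hypothetical reconstruction, not the
original code):

import numpy as np
from astropy.convolution import convolve, convolve_fft
from astropy.modeling import models

def chi_squared_sersic(params, cutout, psf, mask, use_fft):
    # hypothetical reconstruction of the objective minimized above
    model = models.Sersic2D(
        amplitude=params['sersic_amplitude'].value,
        r_eff=params['sersic_r_eff'].value,
        n=params['sersic_n'].value,
        x_0=params['sersic_x_0'].value,
        y_0=params['sersic_y_0'].value,
        ellip=params['sersic_ellip'].value,
        theta=params['sersic_theta'].value)
    x, y = np.meshgrid(np.arange(cutout.shape[1]), np.arange(cutout.shape[0]))
    conv = (convolve_fft if use_fft else convolve)(model(x, y), psf,
                                                   normalize_kernel=True)
    return ((cutout - conv)[mask.astype(bool)]).ravel()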
Code example #23
"""
Tests for simulation utility functions.
To run: python test_util.py
"""

import util

import lmfit
import numpy as np
import unittest

ka = 0.4
v0 = 10
kb = 0.32
kc = 0.4
PARAMETERS = lmfit.Parameters()
NAMES = ["ka", "v0", "kb", "kc"]
for name in NAMES:
    if name[0] == "v":
        maxval = 20
    else:
        maxval = 2
    PARAMETERS.add(name, value=eval(name), min=0, max=maxval)
PARAMETERS_COLLECTION = [PARAMETERS for _ in range(10)]
IGNORE_TEST = True


class TestFunctions(unittest.TestCase):
    def testFoldGenerator(self):
        NUM_FOLDS = 4
        generator = util.foldGenerator(10, NUM_FOLDS)
Code example #24
def cmplxIQ_params(res, **kwargs):
    """Initialize fitting parameters used by the cmplxIQ_fit function.

    Parameters
    ----------
    res : ``scraps.Resonator`` object
        The object you want to calculate parameter guesses for.

    Keyword Arguments
    -----------------
    fit_quadratic_phase : bool
        This determines whether the phase baseline is fit by a line or a
        quadratic function. Default is False for fitting only a line.

    hardware : string {'VNA', 'mixer'}
        This determines whether or not the Ioffset and Qoffset parameters are
        allowed to vary by default.

    use_filter : bool
        Whether or not to use a smoothing filter on the data before calculating
        parameter guesses. This is especially useful for very noisy data where
        the noise spikes might be lower than the resonance minimum.

    filter_win_length : int
        The length of the window used in the Savitzky-Golay filter that smooths
        the data when ``use_filter == True``. Default is ``0.01 * len(data)`` or
        3, whichever is larger.

    Returns
    -------
    params : ``lmfit.Parameters`` object

    """

    #Check if some other type of hardware is supplied
    hardware = kwargs.pop('hardware', 'VNA')
    assert hardware in ['VNA', 'mixer'
                        ], "Unknown hardware type! Choose 'mixer' or 'VNA'."

    #Whether or not to use a smoothing filter on the data before making guesses
    use_filter = kwargs.pop('use_filter', False)
    assert use_filter in [True, False], "Must pass boolean to 'use_filter'."

    #Window length of Savitzky-Golay filter (must be odd and >= 3)
    filter_win_length = kwargs.pop('filter_win_length', 0)

    #If no window length is supplied, default to 1% of the data vector or 3
    if filter_win_length == 0:
        filter_win_length = int(np.round(len(res.mag) / 100.0))
        if filter_win_length % 2 == 0:
            filter_win_length += 1
        if filter_win_length < 3:
            filter_win_length = 3

    assert (filter_win_length % 2 == 1) and (
        filter_win_length >=
        3), "Filter window length must be odd and at least 3."

    #Whether to fit a line or a parabola to the phase:
    fit_quadratic_phase = kwargs.pop('fit_quadratic_phase', False)

    #There shouldn't be any more kwargs left
    if kwargs:
        raise Exception("Unknown keyword argument supplied")

    if use_filter:
        resMag = sps.savgol_filter(res.mag, filter_win_length, 1)
        resPhase = sps.savgol_filter(res.phase, filter_win_length, 1)
        resUPhase = np.unwrap(resPhase)
    else:
        resMag = res.mag
        resPhase = res.phase
        resUPhase = res.uphase

    #Get index of last datapoint
    findex_end = len(res.freq) - 1

    #Set up lmfit parameters object for fitting later

    #Detrend the mag and phase using first and last 5% of data
    findex_5pc = int(len(res.freq) * 0.05)

    findex_center = int(np.round(findex_end / 2))
    f_midpoint = res.freq[findex_center]

    #Set up a unitless, reduced, midpoint frequency for baselines
    ffm = lambda fx: (fx - f_midpoint) / f_midpoint

    magEnds = np.concatenate((resMag[0:findex_5pc], resMag[-findex_5pc:-1]))
    freqEnds = ffm(
        np.concatenate((res.freq[0:findex_5pc], res.freq[-findex_5pc:-1])))

    #This fits a second order polynomial
    magBaseCoefs = np.polyfit(freqEnds, magEnds, 2)

    magBase = np.poly1d(magBaseCoefs)

    #Put this back in the resonator object because it is super useful!
    res.magBaseline = magBase(ffm(res.freq))

    #Store the frequency at the magnitude minimum for future use.
    #Pull out the baseline variation first

    findex_min = np.argmin(resMag - magBase(ffm(res.freq)))

    f_at_mag_min = res.freq[findex_min]

    #These points are useful for later code, so add them to the resonator object
    res.fmin = f_at_mag_min
    res.argfmin = findex_min

    #Update best guess with minimum
    f0_guess = f_at_mag_min

    #Update: now calculating against file midpoint
    #This makes sense because you don't want the baseline changing
    #as f0 shifts around with temperature and power

    #Remove any linear variation from the phase (caused by electrical delay)
    phaseEnds = np.concatenate(
        (resUPhase[0:findex_5pc], resUPhase[-findex_5pc:-1]))

    if fit_quadratic_phase:
        phase_poly_order = 2
    else:
        phase_poly_order = 1

    #This fits a polynomial of order phase_poly_order (1 or 2)
    phaseBaseCoefs = np.polyfit(freqEnds, phaseEnds, phase_poly_order)
    phaseBase = np.poly1d(phaseBaseCoefs)

    #Add to resonator object
    res.phaseBaseline = phaseBase(ffm(res.freq))

    #Set some bounds (resonant frequency should not be within 5% of file end)
    f_min = res.freq[findex_5pc]
    f_max = res.freq[findex_end - findex_5pc]

    if f_min < f0_guess < f_max:
        pass
    else:
        f0_guess = res.freq[findex_center]

    #Guess the Q values:
    #1/Q0 = 1/Qc + 1/Qi
    #Q0 = f0/fwhm bandwidth
    #Q0/Qi = min(mag)/max(mag)
    magMax = res.magBaseline[findex_min]
    magMin = resMag[findex_min]

    fwhm = np.sqrt((magMax**2 + magMin**2) / 2.)
    fwhm_mask = resMag < fwhm
    bandwidth = res.freq[fwhm_mask][-1] - res.freq[fwhm_mask][0]
    q0_guess = f0_guess / bandwidth

    qi_guess = q0_guess * magMax / magMin

    qc_guess = 1. / (1. / q0_guess - 1. / qi_guess)

    #Create a lmfit parameters dictionary for later fitting
    #Set up asymmetric lorentzian parameters (Name, starting value, range, vary, etc):
    params = lf.Parameters()
    params.add('df', value=0, vary=True)
    params.add('f0', value=f0_guess, min=f_min, max=f_max, vary=True)
    params.add('qc', value=qc_guess, min=1, max=10**8, vary=True)
    params.add('qi', value=qi_guess, min=1, max=10**8, vary=True)

    #Allow for quadratic gain variation
    params.add('gain0', value=magBaseCoefs[2], min=0, vary=True)
    params.add('gain1', value=magBaseCoefs[1], vary=True)
    params.add('gain2', value=magBaseCoefs[0], vary=True)

    #Allow for linear phase variation
    params.add('pgain0', value=phaseBaseCoefs[phase_poly_order], vary=True)
    params.add('pgain1', value=phaseBaseCoefs[phase_poly_order - 1], vary=True)

    if fit_quadratic_phase:
        params.add('pgain2', value=phaseBaseCoefs[0], vary=True)
    else:
        params.add('pgain2', value=0, vary=False)

    #Add in complex offset (should not be necessary on a VNA, but might be needed for a mixer)
    if hardware == 'VNA':
        params.add('Ioffset', value=0, vary=False)
        params.add('Qoffset', value=0, vary=False)
    elif hardware == 'mixer':
        params.add('Ioffset', value=0, vary=True)
        params.add('Qoffset', value=0, vary=True)

    return params
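
A short usage sketch, assuming res is a scraps.Resonator-like object with
freq, mag, phase, and uphase attributes already populated:

params = cmplxIQ_params(res, hardware='mixer', use_filter=True)
params.pretty_print()
# the guesses would then seed a fit against the cmplxIQ_fit residual
# (defined elsewhere in the module, not shown here)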
Code example #25
def _refine_orientation(
    solution,
    k_xy,
    structure_library,
    accelarating_voltage,
    camera_length,
    index_error_tol=0.2,
    method="leastsq",
    vary_angles=True,
    vary_center=False,
    vary_scale=False,
    verbose=False,
):
    """
    Refine a single orientation against the given cartesian vector coordinates.

    Parameters
    ----------
    solution : OrientationResult
        Namedtuple containing the starting orientation
    k_xy : DiffractionVectors
        DiffractionVectors (x,y pixel format) to be indexed.
    structure_library : :obj:`diffsims:StructureLibrary` Object
        Dictionary of structures and associated orientations for which
        electron diffraction is to be simulated.
    index_error_tol : float
        Max allowed error in peak indexation for classifying it as indexed,
        calculated as :math:`|hkl_calculated - round(hkl_calculated)|`.
    method : str
        Minimization algorithm to use, choose from:
        'leastsq', 'nelder', 'powell', 'cobyla', 'least-squares'.
        See `lmfit` documentation (https://lmfit.github.io/lmfit-py/fitting.html)
        for more information.
    vary_angles : bool,
        Free the euler angles (rotation matrix) during the refinement.
    vary_center : bool
        Free the center of the diffraction pattern (beam center) during the refinement.
    vary_scale : bool
        Free the scale (i.e. pixel size) of the diffraction vectors during refinement.
    verbose : bool
        Be more verbose

    Returns
    -------
    result : OrientationResult
        Container for the orientation refinement results
    """

    # prepare reciprocal_lattice
    structure = structure_library.structures[solution.phase_index]
    lattice_recip = structure.lattice.reciprocal()

    def objfunc(params, k_xy, lattice_recip, wavelength, camera_length):
        cx = params["center_x"].value
        cy = params["center_y"].value
        ai = params["ai"].value
        aj = params["aj"].value
        ak = params["ak"].value
        scale = params["scale"].value

        rotmat = euler2mat(ai, aj, ak)

        k_xy = k_xy + np.array((cx, cy)) * scale
        cart = detector_to_fourier(k_xy, wavelength, camera_length)

        intermediate = cart.dot(rotmat.T)  # Must use the transpose here
        hklss = lattice_recip.fractional(intermediate) * scale

        rhklss = np.rint(hklss)
        ehklss = np.abs(hklss - rhklss)

        return ehklss

    ai, aj, ak = mat2euler(solution.rotation_matrix)

    params = lmfit.Parameters()
    params.add("center_x", value=solution.center_x, vary=vary_center)
    params.add("center_y", value=solution.center_y, vary=vary_center)
    params.add("ai", value=ai, vary=vary_angles)
    params.add("aj", value=aj, vary=vary_angles)
    params.add("ak", value=ak, vary=vary_angles)
    params.add("scale",
               value=solution.scale,
               vary=vary_scale,
               min=0.8,
               max=1.2)

    wavelength = get_electron_wavelength(accelarating_voltage)
    camera_length = camera_length * 1e10
    args = k_xy, lattice_recip, wavelength, camera_length

    res = lmfit.minimize(objfunc, params, args=args, method=method)

    if verbose:  # pragma: no cover
        lmfit.report_fit(res)

    p = res.params

    ai, aj, ak = p["ai"].value, p["aj"].value, p["ak"].value
    scale = p["scale"].value
    # read the refined values from the fit result, not the initial params
    center_x = p["center_x"].value
    center_y = p["center_y"].value

    rotation_matrix = euler2mat(ai, aj, ak)

    k_xy = k_xy + np.array((center_x, center_y)) * scale
    cart = detector_to_fourier(k_xy,
                               wavelength=wavelength,
                               camera_length=camera_length)

    intermediate = cart.dot(rotation_matrix.T)  # Must use the transpose here
    hklss = lattice_recip.fractional(intermediate)

    rhklss = np.rint(hklss)

    error_hkls = res.residual.reshape(-1, 3)
    error_mean = np.mean(error_hkls)

    valid_peak_mask = np.max(error_hkls, axis=-1) < index_error_tol
    valid_peak_count = np.count_nonzero(valid_peak_mask, axis=-1)

    num_peaks = len(k_xy)

    match_rate = (valid_peak_count * (1 / num_peaks)) if num_peaks else 0

    orientation = OrientationResult(
        phase_index=solution.phase_index,
        rotation_matrix=rotation_matrix,
        match_rate=match_rate,
        error_hkls=error_hkls,
        total_error=error_mean,
        scale=scale,
        center_x=center_x,
        center_y=center_y,
    )

    res = np.empty(2, dtype=object)
    res[0] = orientation
    res[1] = rhklss

    return res
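
The refinement above scores peaks by how close their fractional Miller
indices fall to integers. A self-contained illustration of that criterion:

import numpy as np

hklss = np.array([[1.02, 0.01, 2.97],
                  [0.40, 1.55, 0.10]])   # fractional indices from a fit
ehklss = np.abs(hklss - np.rint(hklss))  # per-component indexation error

index_error_tol = 0.2
valid = np.max(ehklss, axis=-1) < index_error_tol
match_rate = np.count_nonzero(valid) / len(hklss)
print(valid, match_rate)  # [ True False] 0.5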
Code example #26
def pars():
    """Create and initialize parameter set."""
    parameters = lmfit.Parameters()
    parameters.add_many(('a', 0.1), ('b', 1))
    return parameters
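
For reference, add_many takes tuples in (name, value, vary, min, max, expr,
brute_step) order, so the fixture above is equivalent to:

parameters = lmfit.Parameters()
parameters.add('a', value=0.1)
parameters.add('b', value=1)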
Code example #27
def fit_B_vs_I(ndeg, df_meas, name='NMR', ycol='B_reg', yerr='sigma_B_reg',
               method='POLYFIT', I_min=-1000, fitcolor='red', datacolor='blue',
               fig=None, axs=None, plotfile=None, pklfile=None):
    # copy dataframes and limit current
    df_ = df_meas.copy()
    df_ = df_.query(f'I >= {I_min}')
    # remove run 11! FIXME!
    m = df_.index == 11
    df_ = df_[~m].copy()
    Is_fine = np.linspace(df_.I.min(), df_.I.max(), 200)
    # set up noise for least-squares fit
    ystd = df_[yerr].values
    # TESTING ONLY
    #ystd = None
    # run modeling (polyfit or interpolation)
    # check method
    if method == 'POLYFIT':
        print(f"Running {ndeg} Degree POLYFIT for {name}")
        # setup lmfit model
        model = lm.Model(ndeg_poly1d, independent_vars=['x'])
        params = lm.Parameters()
        for i in range(ndeg+1):
            params.add(f'C_{i}', value=0, vary=True)
        # fit
        result = model.fit(df_[ycol].values, x=df_.I.values,
                           params=params, weights=1/ystd, scale_covar=False)
        # calculate B for full dataset
        B_full = ndeg_poly1d(Is_fine, **result.params)
        # calculate residual (data - fit)
        res = df_[ycol].values - result.best_fit
        # other formatting
        fit_name = 'Polynomial Fit'
        ylab = 'Fit'
        datalab = ylab
        # label for fit
        label = '\n'
        label += (rf'$\underline{{\mathrm{{Degree\ {ndeg}\ Polynomial}}}}$'+
                 '\n')
        label_coeffs = []
        for i in range(ndeg+1):
            pv = result.params[f'C_{i}'].value
            label_coeffs.append(rf'$C_{{{i}}} = {pv:0.3E}$'+'\n')
        label += (''.join(label_coeffs)+'\n'+
              rf'$\chi^2_\mathrm{{red.}} = {result.redchi:0.2f}$'+'\n')

    elif method == 'INTERP_LIN':
        print(f"Running INTERP_LIN for {name}")
        # set up interpolation
        interp_func = interp1d(df_.I.values, df_[ycol].values,
                               kind='linear', fill_value='extrapolate')
        # calculate B for meas and full dfs
        B_full = interp_func(Is_fine)
        B_meas = interp_func(df_.I.values)
        # residuals
        res = df_[ycol].values - B_meas
        # other formatting
        fit_name = 'Linear Interpolation'
        ylab = 'Interpolation'
        datalab = 'Interp.'
        # label for fit
        label = f'Linear Interpolation'
        # return "result"
        result = interp_func

    elif method == 'INTERP_QUAD':
        print(f"Running INTERP_QUAD for {name}")
        # set up interpolation
        interp_func = interp1d(df_.I.values, df_[ycol].values,
                               kind='quadratic', fill_value='extrapolate')
        # calculate B for meas and full dfs
        B_full = interp_func(Is_fine)
        B_meas = interp_func(df_.I.values)
        # residuals
        res = df_[ycol].values - B_meas
        # other formatting
        fit_name = 'Quadratic Interpolation'
        ylab = 'Interpolation'
        datalab = 'Interp.'
        # label for fit
        label = f'Quadratic Interpolation'
        # return "result"
        result = interp_func

    elif method == 'INTERP_CUBIC':
        print(f"Running INTERP_CUBIC for {name}")
        # set up interpolation
        interp_func = interp1d(df_.I.values, df_[ycol].values, kind='cubic',
                               fill_value='extrapolate')
        # calculate B for meas and full dfs
        B_full = interp_func(Is_fine)
        B_meas = interp_func(df_.I.values)
        # residuals
        res = df_[ycol].values - B_meas
        # other formatting
        fit_name = 'Cubic Interpolation'
        ylab = 'Interpolation'
        datalab = 'Interp.'
        # label for fit
        label = f'Cubic Interpolation'
        # return "result"
        result = interp_func

    else:
        raise NotImplementedError
    # saving fit result function
    if pklfile is not None:
        pkl.dump(result, open(pklfile, 'wb'))
    # plot
    # set up figure with two axes
    if fig is None:
        fig = plt.figure()
        ax1 = fig.add_axes((0.1, 0.31, 0.8, 0.6))
        ax2 = fig.add_axes((0.1, 0.08, 0.8, 0.2))#, sharex=ax1)
    else:
        ax1, ax2 = axs
    # plot data and fit
    # data
    label_data = f'Regressed\nData ({datalab})'
    ax1.errorbar(df_.I.values, df_[ycol].values, yerr=ystd, c=datacolor,
                 fmt='x', ls='none', ms=6, zorder=100, capsize=3,
                 label=label_data)
    # fit
    ax1.plot(Is_fine, B_full, linewidth=1, color=fitcolor,
             zorder=99, label=label)
    # calculate ylimit for ax2
    yl = 1.2*(np.max(np.abs(res)) + np.max(ystd))
    # plot residual
    # zero-line
    xmin = np.min(df_.I.values)
    xmax = np.max(df_.I.values)
    ax2.plot([xmin, xmax], [0, 0], '--', color='black', linewidth=1,
             zorder=98)
    # residual
    ax2.errorbar(df_.I.values, res, yerr=ystd, fmt='x', ls='none', ms=6,
                 c=datacolor, capsize=3, zorder=100)
    # formatting
    # set ylimits
    ax2.set_ylim([-yl, yl])
    # remove ticklabels for ax1 xaxis
    ax1.set_xticklabels([])
    # axis labels
    ax2.set_xlabel('Magnet Current [A]')
    ax2.set_ylabel(f'(Data - {ylab}) [T]')
    ax1.set_ylabel(r'$|B|$')
    # force consistent x axis range for ax1 and ax2
    tmin = np.min(df_.I.values) - 10
    tmax = np.max(df_.I.values) + 10
    ax1.set_xlim([tmin, tmax])
    ax2.set_xlim([tmin, tmax])
    # turn on legend
    ax1.legend(fontsize=13).set_zorder(101)
    # add title
    fig.suptitle(f'Regressed Data -- B vs. I Modeling:'
                 f' {fit_name} for {name} Probe')
    # minor ticks
    ax1.xaxis.set_minor_locator(AutoMinorLocator())
    ax2.xaxis.set_minor_locator(AutoMinorLocator())
    ax1.yaxis.set_minor_locator(AutoMinorLocator())
    ax2.yaxis.set_minor_locator(AutoMinorLocator())
    # inward ticks and ticks on right and top
    ax1.tick_params(which='both', direction='in', top=True, right=True,
                    bottom=True)
    ax2.tick_params(which='both', direction='in', top=True, right=True)
    ax2.ticklabel_format(axis='y', style='sci', scilimits=(0,0))
    # save file
    if plotfile is not None:
        fig.savefig(plotfile+'.pdf')
        fig.savefig(plotfile+'.png')

    return result, fig, ax1, ax2
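
ndeg_poly1d is referenced but not shown. A plausible minimal form consistent
with the C_0 ... C_ndeg parameter names used above (hypothetical
reconstruction, not the original code):

import numpy as np

def ndeg_poly1d(x, **coeffs):
    # hypothetical: polynomial with coefficients named C_0, C_1, ...,
    # evaluated as sum_i C_i * x**i; accepts floats or lmfit Parameters
    x = np.asarray(x, dtype=float)
    total = np.zeros_like(x)
    for name, c in coeffs.items():
        i = int(name.split('_')[1])
        total += float(getattr(c, 'value', c)) * x**i
    return total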
Code example #28
File: pyRes.py Project: qingyangtang/pyRes
    def __init__(self, name, temp, pwr, freq, I, Q, sigmaI=None, sigmaQ=None):
        self.name = name
        self.temp = temp
        self.pwr = pwr
        self.freq = np.asarray(freq)
        self.I = np.asarray(I)
        self.Q = np.asarray(Q)
        self.sigmaI = np.asarray(sigmaI) if sigmaI is not None else None
        self.sigmaQ = np.asarray(sigmaQ) if sigmaQ is not None else None
        self.S21 = I + 1j * Q
        self.phase = np.arctan2(Q,
                                I)  #use arctan2 because it is quadrant-aware
        self.uphase = np.unwrap(self.phase)  #Unwrap the 2pi phase jumps
        self.mag = np.abs(self.S21)

        #If errorbars are not supplied for I and Q, then estimate them based on
        #the tail of the power-spectral densities

        if sigmaI is None:
            f, psdI = sps.welch(self.I)
            epsI = np.mean(np.sqrt(psdI[-150:]))
            self.sigmaI = np.full_like(I, epsI)

        if sigmaQ is None:
            f, psdQ = sps.welch(self.Q)
            epsQ = np.mean(np.sqrt(psdQ[-60:]))
            self.sigmaQ = np.full_like(Q, epsQ)

        #Get index of last datapoint
        findex_end = len(freq) - 1

        #Set up lmfit parameters object for fitting later

        #Detrend the mag and phase using first and last 5% of data
        findex_5pc = int(len(freq) * 0.05)

        findex_center = int(np.round(findex_end / 2))
        f_midpoint = freq[findex_center]

        magEnds = np.concatenate(
            (self.mag[0:findex_5pc], self.mag[-findex_5pc:-1]))
        freqEnds = np.concatenate(
            (self.freq[0:findex_5pc], self.freq[-findex_5pc:-1]))

        #This fits a second order polynomial
        magBaseCoefs = np.polyfit(freqEnds - f_midpoint, magEnds, 2)

        magBase = np.poly1d(magBaseCoefs)

        #Store the frequency at the magnitude minimum for future use.
        #Pull out the baseline variation first

        findex_min = np.argmin(self.mag - magBase(self.freq - f_midpoint))

        f_at_mag_min = freq[findex_min]
        self.fmin = f_at_mag_min
        self.argfmin = findex_min

        #Update best guess with minimum
        f0_guess = f_at_mag_min

        #Recalculate the baseline relative to the new f0_guess
        magBaseCoefs = np.polyfit(freqEnds - f0_guess, magEnds, 2)

        #Remove any linear variation from the phase (caused by electrical delay)
        phaseRot = self.uphase[findex_min] - self.phase[findex_min] + np.pi

        phaseBaseCoefs = np.polyfit(self.freq[0:findex_5pc] - f0_guess,
                                    self.uphase[0:findex_5pc] + phaseRot, 1)

        #Set some bounds (resonant frequency should not be within 5% of file end)
        f_min = freq[findex_5pc]
        f_max = freq[findex_end - findex_5pc]

        if f_min < f0_guess < f_max:
            pass
        else:
            f0_guess = freq[findex_center]

        #Design Qc for coupler is 50k
        #To-do: make a smarter way to guess qc and qi programmatically
        qc_guess = 50000

        #Pick some big number for Qi - should probably be smarter about this...
        qi_guess = 500000

        #Create a lmfit parameters dictionary for later fitting
        #Set up asymmetric lorentzian parameters (Name, starting value, range, vary, etc):
        self.params = lf.Parameters()
        self.params.add('df', value=0, vary=True)
        self.params.add('f0', value=f0_guess, min=f_min, max=f_max, vary=True)
        self.params.add('qc', value=qc_guess, min=1, max=10**8, vary=True)
        self.params.add('qi', value=qi_guess, min=1, max=10**8, vary=True)

        #Allow for quadratic gain variation
        self.params.add('gain0',
                        value=magBaseCoefs[2],
                        min=0,
                        max=1,
                        vary=True)
        self.params.add('gain1', value=magBaseCoefs[1], vary=True)
        self.params.add('gain2', value=magBaseCoefs[0], vary=True)

        #Allow for linear phase variation
        self.params.add('pgain0', value=phaseBaseCoefs[1], vary=True)
        self.params.add('pgain1', value=phaseBaseCoefs[0], vary=True)

        #Add in complex offset (should not be necessary on a VNA, but might be needed for a mixer)
        self.params.add('Ioffset', value=0, vary=False)
        self.params.add('Qoffset', value=0, vary=False)
Code example #29

def basis(baseline):
    return baseline * numpy.ones(len(x))


def fitfunc(x, height, center, width):
    return height / (width *
                     (2 * numpy.pi)**0.5) * numpy.exp(-1. / 2 * (
                         (x - center) / width)**2)  #Gaussian density
    # ~ return -height/2*(1+scipy.special.erf((x-center)/(width*2**0.5)))				#Gaussian cumulative
    # ~ return height*(width**2/((x-center)**2+width**2))								#Lorentzian density


Fehler = 0.9995  #Adjust as needed
Rquadrat, i, params = 0, 0, lmfit.Parameters()
params.add('baseline', 0, min=0)
while Rquadrat < Fehler:
    i += 1
    for n in range(i):
        params.add('height' + str(n), (max(y) + min(y)) / 2, min=min(y))
        params.add('center' + str(n), (max(x) + min(x)) / 2,
                   min=min(x),
                   max=max(x))
        params.add('width' + str(n), 1, min=0)

    def multi_fitfunc(params):
        prm = params.valuesdict()
        global func
        func = basis(prm['baseline'])
        for n in range(i):
Code example #30
def bayes_fit(xdata, ydata, distribution, burn=100, steps=1000, thin=20):
    """Identify and fit an arbitrary number of peaks in a 1-d spectrum array.

    Parameters
    ----------
    xdata : 1-d array
        X data.

    ydata : 1-d array
        Y data.

    Returns
    -------
    results : lmfit.MinimizerResults.
        results of the fit. To get parameters, use `results.params`.
    """
    # Identify peaks
    index = find_peaks_cwt(ydata, widths=np.arange(1,100))

    # Number of peaks
    n_peaks = len(index)

    # Construct initial guesses
    parameters = lmfit.Parameters()

    for peak_i in range(n_peaks):
        idx = index[peak_i]

        # Add center parameter
        parameters.add(
            name='peak_{}_center'.format(peak_i),
            value=xdata[idx]
        )

        # Add height parameter
        parameters.add(
            name='peak_{}_height'.format(peak_i),
            value=ydata[idx]
        )

        # Add width parameter
        parameters.add(
            name='peak_{}_width'.format(peak_i),
            value=.1,
        )


    # Minimize the above residual function.
    ML_results = lmfit.minimize(residual, parameters,
                            args=[distribution, xdata],
                            kws={'ydata': ydata})

    # Add a noise term for the Bayesian fit
    ML_results.params.add('noise', value=1, min=0.001, max=2)

    # Define the log probability expression for the emcee fitter
    def lnprob(params):
        noise = params['noise']
        return -0.5 * np.sum((residual(params, distribution, xdata, ydata) / noise)**2 + np.log(2 * np.pi * noise**2))

    # Build a minizer object for the emcee search
    mini = lmfit.Minimizer(lnprob, ML_results.params)

    # Use the emcee version of minimizer class to perform MCMC sampling
    bayes_results = mini.emcee(burn=burn, steps=steps, thin=thin, params=ML_results.params)

    return bayes_results
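
residual is defined elsewhere in the module. A plausible minimal form
consistent with the parameter names created above, assuming
distribution(x, center, height, width) evaluates a single peak (hypothetical
reconstruction, not the original code):

import numpy as np

def residual(params, distribution, xdata, ydata=None):
    # hypothetical: sum one peak per 'peak_i_center' parameter
    n_peaks = sum(1 for name in params if name.endswith('_center'))
    model = np.zeros_like(xdata, dtype=float)
    for i in range(n_peaks):
        model += distribution(xdata,
                              params['peak_{}_center'.format(i)].value,
                              params['peak_{}_height'.format(i)].value,
                              params['peak_{}_width'.format(i)].value)
    if ydata is None:
        return model
    return model - ydata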