Example no. 1
def test_get_new_df():
    np.random.seed(150)

    amplitude_0 = 200.0
    amplitude_1 = 100.0
    amplitude_2 = 50.0

    x_0_0 = 0.5
    x_0_1 = 2.0
    x_0_2 = 7.5

    fwhm_0 = 0.1
    fwhm_1 = 1.0
    fwhm_2 = 0.5

    whitenoise = 100.0

    model = models.Lorentz1D(amplitude_0, x_0_0, fwhm_0) + \
        models.Lorentz1D(amplitude_1, x_0_1, fwhm_1) + \
        models.Lorentz1D(amplitude_2, x_0_2, fwhm_2) + \
        models.Const1D(whitenoise)

    freq = np.linspace(0.01, 10.0, 1000)
    p = model(freq)
    noise = np.random.exponential(size=len(freq))

    power = p * noise
    cs = Crossspectrum()
    cs.freq = freq
    cs.power = power
    cs.df = cs.freq[1] - cs.freq[0]
    cs.n = len(freq)
    cs.m = 1

    assert np.isclose(cs.df, spec.get_new_df(cs, cs.n), rtol=0.001)
Example no. 2
    def setup_class(cls):
        np.random.seed(150)
        cls.nlor = 3

        cls.x_0_0 = 0.5
        cls.x_0_1 = 2.0
        cls.x_0_2 = 7.5

        cls.amplitude_0 = 200.0
        cls.amplitude_1 = 100.0
        cls.amplitude_2 = 50.0

        cls.fwhm_0 = 0.1
        cls.fwhm_1 = 1.0
        cls.fwhm_2 = 0.5

        cls.whitenoise = 2.0

        cls.priors = {
            'x_0_0': cls.x_0_0,
            'x_0_1': cls.x_0_1,
            'x_0_2': cls.x_0_2,
            'amplitude_0': cls.amplitude_0,
            'amplitude_1': cls.amplitude_1,
            'amplitude_2': cls.amplitude_2,
            'fwhm_0': cls.fwhm_0,
            'fwhm_1': cls.fwhm_1,
            'fwhm_2': cls.fwhm_2,
            'whitenoise': cls.whitenoise
        }

        cls.model = models.Lorentz1D(cls.amplitude_0, cls.x_0_0, cls.fwhm_0) +\
            models.Lorentz1D(cls.amplitude_1, cls.x_0_1, cls.fwhm_1) + \
            models.Lorentz1D(cls.amplitude_2, cls.x_0_2, cls.fwhm_2) + \
            models.Const1D(cls.whitenoise)

        freq = np.linspace(0.01, 10.0, 1000)
        p = cls.model(freq)
        noise = np.random.exponential(size=len(freq))

        power = p * noise
        cls.ps = Powerspectrum()
        cls.ps.freq = freq
        cls.ps.power = power
        cls.ps.power_err = np.array([0.] * len(power))
        cls.ps.df = cls.ps.freq[1] - cls.ps.freq[0]
        cls.ps.m = 1

        cls.cs = Crossspectrum()
        cls.cs.freq = freq
        cls.cs.power = power
        cls.cs.power_err = np.array([0.] * len(power))
        cls.cs.df = cls.cs.freq[1] - cls.cs.freq[0]
        cls.cs.m = 1

        cls.t0 = np.asarray(
            [200.0, 0.5, 0.1, 100.0, 2.0, 1.0, 50.0, 7.5, 0.5, 2.0])

        cls.parest, cls.res = fit_lorentzians(cls.ps, cls.nlor, cls.t0)
Example no. 3
def test_compute_rms():
    np.random.seed(150)

    amplitude_0 = 200.0
    amplitude_1 = 100.0
    amplitude_2 = 50.0

    x_0_0 = 0.5
    x_0_1 = 2.0
    x_0_2 = 7.5

    fwhm_0 = 0.1
    fwhm_1 = 1.0
    fwhm_2 = 0.5

    whitenoise = 100.0

    model = models.Lorentz1D(amplitude_0, x_0_0, fwhm_0) + \
        models.Lorentz1D(amplitude_1, x_0_1, fwhm_1) + \
        models.Lorentz1D(amplitude_2, x_0_2, fwhm_2) + \
        models.Const1D(whitenoise)

    freq = np.linspace(-10.0, 10.0, 1000)
    p = model(freq)
    noise = np.random.exponential(size=len(freq))

    power = p * noise
    cs = Crossspectrum()
    cs.freq = freq
    cs.power = power
    cs.df = cs.freq[1] - cs.freq[0]
    cs.n = len(freq)
    cs.m = 1

    rms = np.sqrt(np.sum(model(cs.freq) * cs.df)).mean()

    assert rms == spec.compute_rms(cs, model, criteria="all")

    rms_pos = np.sqrt(np.sum(model(cs.freq[cs.freq > 0]) * cs.df)).mean()

    assert rms_pos == spec.compute_rms(cs, model, criteria="posfreq")

    optimal_filter = Window1D(model)
    optimal_filter_freq = optimal_filter(cs.freq)
    filtered_cs_power = optimal_filter_freq * np.abs(model(cs.freq))

    rms = np.sqrt(np.sum(filtered_cs_power * cs.df)).mean()
    assert rms == spec.compute_rms(cs, model, criteria="window")

    with pytest.raises(ValueError):
        spec.compute_rms(cs, model, criteria="filter")
Example no. 4
    def setup_class(cls):
        total_length = 10000
        f_qpo = 1.5
        cls.dt = 1 / f_qpo / 40
        approx_Q = 10
        q_len = approx_Q / f_qpo
        sigma = 0.1
        astep = 0.01
        phstep = 1
        real_dphi = 0.4 * np.pi
        cls.n_seconds = 500
        cls.n_seg = int(total_length / cls.n_seconds)
        cls.n_bins = cls.n_seconds / cls.dt

        times = np.arange(0, total_length, cls.dt)
        _, cls.ref_counts = fake_qpo(times,
                                     f0=f_qpo,
                                     astep=astep,
                                     rms=sigma,
                                     waveform=waveform_simple,
                                     phstep=phstep,
                                     timescale=q_len,
                                     waveform_opts={'dph': real_dphi})
        _, ci_counts = fake_qpo(times,
                                f0=f_qpo,
                                astep=astep,
                                rms=sigma,
                                waveform=waveform_simple,
                                phstep=phstep,
                                timescale=q_len,
                                waveform_opts={'dph': real_dphi})
        cls.ci_counts = np.array([ci_counts])

        cls.ref_times = np.arange(0, cls.n_seconds * cls.n_seg, cls.dt)
        cls.ref_lc = Lightcurve(cls.ref_times, cls.ref_counts, dt=cls.dt)
        ref_aps = AveragedPowerspectrum(cls.ref_lc,
                                        segment_size=cls.n_seconds,
                                        norm='abs')
        df = ref_aps.freq[1] - ref_aps.freq[0]
        amplitude_0 = np.max(ref_aps.power)
        x_0_0 = ref_aps.freq[np.argmax(ref_aps.power)]
        amplitude_1 = amplitude_0 / 2
        x_0_1 = x_0_0 * 2
        fwhm = df

        cls.model = models.Lorentz1D(amplitude=amplitude_0, x_0=x_0_0,
                                     fwhm=fwhm) + \
            models.Lorentz1D(amplitude=amplitude_1, x_0=x_0_1,
                             fwhm=fwhm)
        cls.ref_aps = ref_aps
Example no. 5
def fit_lines_lorentz(
    Wave, Flux, Flux_range, emission
):  # Flux_range delimits the region over which the line is fitted; emission = 'Em' (emission) or 'Abs' (absorption)
    fit_g = fitting.LevMarLSQFitter()
    fit_pol = fitting.LinearLSQFitter()
    l_init_continuum = fit_continuum(Wave, Flux, 2)[0]

    if emission == 'Em':
        ampli = np.nanmax(Flux_range)
        mean = float(Wave[np.where(Flux == np.nanmax(Flux_range))])
    if emission == 'Abs':
        ampli = -abs(np.nanmin(Flux_range))
        mean = float(Wave[np.where(Flux == np.nanmin(Flux_range))])

    g_init = models.Lorentz1D(
        amplitude=ampli,
        x_0=mean,
        fwhm=0.01,
        bounds={"mean": (mean - 0.01 * mean, mean + 0.01 * mean)})
    g_line = g_init + l_init_continuum

    fit_line = fit_g(g_line, Wave, Flux)
    y_line = fit_line(Wave)

    return y_line, g_init, fit_line
Example no. 6
 def evaluate(x, shift_l1, width_l1, i_r_l1, shift_n3, width_n3, i_r_n3):
     res = 0.0
     for i in range(0, len(FeII_template_obs.center_l1)):
         f = models.Lorentz1D(
             i_r_l1 * FeII_template_obs.i_l1[i],
             FeII_template_obs.center_l1[i] + shift_l1,
             width_l1 * np.sqrt(3 / 2) * FeII_template_obs.center_l1[i] /
             299792.458)
         res = res + f(x)
     for i in range(0, len(FeII_template_obs.center_n3)):
         f = models.Lorentz1D(
             i_r_n3 * FeII_template_obs.i_n3[i],
             FeII_template_obs.center_n3[i] + shift_n3,
             width_n3 * np.sqrt(3 / 2) * FeII_template_obs.center_n3[i] /
             299792.458)
         res = res + f(x)
     return res
Example no. 7
def fit_baseline_plus_bell(x, y, ye=None, kind='gauss'):
    """Fit a function composed of a linear baseline plus a bell function.

    Parameters
    ----------
    x : array-like
        the sample time/number/position
    y : array-like
        the data series corresponding to x

    Other parameters
    ----------------
    ye : array-like
        the errors on the data series
    kind: str
        Can be 'gauss' or 'lorentz'

    Returns
    -------
    mod_out : ``Astropy.modeling.model`` object
        The fitted model
    fit_info : dict
        Fit info from the Astropy fitting routine.
    """
    if kind not in ['gauss', 'lorentz']:
        raise ValueError('kind has to be one of: gauss, lorentz')
    from astropy.modeling import models, fitting

    base = models.Linear1D(slope=0, intercept=np.min(y), name='Baseline')

    xrange = np.max(x) - np.min(x)
    yrange = np.max(y) - np.min(y)

    if kind == 'gauss':
        bell = models.Gaussian1D(mean=np.mean(x), stddev=xrange / 20,
                                 amplitude=yrange, name='Bell')
        bell.amplitude.bounds = (0, None)
        bell.mean.bounds = (None, None)
        bell.stddev.bounds = (0, None)
        # max_name = 'mean'
    elif kind == 'lorentz':
        bell = models.Lorentz1D(x_0=np.mean(x), fwhm=xrange / 20,
                                amplitude=yrange, name='Bell')
        bell.amplitude.bounds = (0, None)
        bell.x_0.bounds = (None, None)
        bell.fwhm.bounds = (0, None)
        # max_name = 'x_0'

    mod_init = base + bell

    fit = fitting.LevMarLSQFitter()

    mod_out = fit(mod_init, x, y)

    return mod_out, fit.fit_info
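A minimal usage sketch for `fit_baseline_plus_bell` above, on synthetic data; the data, noise level, and parameter values here are illustrative assumptions, not taken from the original project:

import numpy as np

# Synthetic series: a gentle linear baseline plus a Lorentzian bump with noise.
rng = np.random.default_rng(0)
x = np.linspace(0, 100, 500)
y = 0.01 * x + 2.0 + 5.0 / (1.0 + ((x - 40.0) / 3.0) ** 2)
y = y + rng.normal(scale=0.1, size=x.size)

mod_out, fit_info = fit_baseline_plus_bell(x, y, kind='lorentz')

# The bell component can be retrieved by the name assigned in the function.
print(mod_out['Bell'].x_0.value, mod_out['Bell'].fwhm.value)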
Example no. 8
 def fit(self,
         peaks,
         q_factors,
         amplitudes,
         fitter=fitting.LevMarLSQFitter(),
         **kwargs):
     # start model
     first = True
     schumann_model = None
     for p, q, a in zip(peaks, q_factors, amplitudes):
         if first:
             schumann_model = models.Lorentz1D(a, p, q)
             first = False
         else:
             schumann_model += models.Lorentz1D(a, p, q)
     # schumann_model += models.Linear1D(-1e-4, 1e-3)
     fit = fitter(schumann_model, self.frequencies.value, self.value,
                  **kwargs)
     self.lorentzian_parameters = fit.parameters
     return fit, schumann_model
Example no. 9
def get_fwhm_from_image_fit(image, pix_size=0.020, arcsec=arcsec):
    disp_dir_counts = np.sum(image, axis=1)
    fit_levmar = fitting.SimplexLSQFitter()
    lmodel = models.Lorentz1D()
    lmodel.amplitude, lmodel.x_0, lmodel.fwhm = max(disp_dir_counts), float(
        len(disp_dir_counts)) / 2, 2.35
    l = fit_levmar(lmodel,
                   range(len(disp_dir_counts)),
                   disp_dir_counts,
                   acc=1e-6)
    return l.fwhm * pix_size / arcsec, disp_dir_counts, l(
        range(len(disp_dir_counts)))
Example no. 10
    def setup_class(self):
        self.x = np.linspace(0, 10, 100)

        self.amplitude_0 = 5.
        self.x_0_0 = 5.
        self.fwhm_0 = 1.
        self.amplitude_1 = -5
        self.lorentz = models.Lorentz1D(amplitude=self.amplitude_0,
                                        x_0=self.x_0_0, fwhm=self.fwhm_0)
        self.const = models.Const1D(amplitude=self.amplitude_1)
        self.model = self.lorentz + self.const
        self.y = self.model(self.x)
Example no. 11
def test1():
    # optional initial model is a list of component instances:
    components = [
        models.Gaussian1D(2.0, 2.0, 2.0),
        models.Lorentz1D(3.0, 3.0, 3.0),
        models.GaussianAbsorption1D(0.1, 0.1, 0.1)
    ]

    # start manager and interact with the GUI
    manager = SpectralModelManagerApp(components)

    debug_print(manager)
Example no. 12
    def setup_class(cls):

        np.random.seed(150)
        cls.nlor = 3

        cls.x_0_0 = 0.5
        cls.x_0_1 = 2.0
        cls.x_0_2 = 7.5

        cls.amplitude_0 = 200.0
        cls.amplitude_1 = 100.0
        cls.amplitude_2 = 50.0

        cls.fwhm_0 = 0.1
        cls.fwhm_1 = 1.0
        cls.fwhm_2 = 0.5

        cls.whitenoise = 2.0

        cls.model = models.Lorentz1D(cls.amplitude_0, cls.x_0_0, cls.fwhm_0) + \
                    models.Lorentz1D(cls.amplitude_1, cls.x_0_1, cls.fwhm_1) + \
                    models.Lorentz1D(cls.amplitude_2, cls.x_0_2, cls.fwhm_2) + \
                    models.Const1D(cls.whitenoise)

        freq = np.linspace(0.01, 10.0, int(10.0 / 0.01))
        p = cls.model(freq)
        noise = np.random.exponential(size=len(freq))

        power = p * noise
        cls.ps = Powerspectrum()
        cls.ps.freq = freq
        cls.ps.power = power
        cls.ps.df = cls.ps.freq[1] - cls.ps.freq[0]
        cls.ps.m = 1

        cls.t0 = [200.0, 0.5, 0.1, 100.0, 2.0, 1.0, 50.0, 7.5, 0.5, 2.0]

        cls.parest, cls.res = fit_lorentzians(cls.ps, cls.nlor, cls.t0)
Example no. 13
    def test_fitting_with_ties_and_bounds(self, capsys):
        double_f = lambda model: model.x_0_0 * 2
        model = self.model.copy()
        model = self.model + models.Lorentz1D(amplitude=model.amplitude_0,
                                              x_0=model.x_0_0 * 2,
                                              fwhm=model.fwhm_0)
        model.x_0_0 = self.model.x_0_0
        model.amplitude_0 = self.model.amplitude_0
        model.amplitude_1 = self.model.amplitude_1
        model.fwhm_0 = self.model.fwhm_0
        model.x_0_2.tied = double_f
        model.fwhm_0.bounds = [0, 10]
        model.amplitude_0.fixed = True

        p = model(self.ps.freq)

        noise = np.random.exponential(size=len(p))
        power = noise * p

        ps = Powerspectrum()
        ps.freq = self.ps.freq
        ps.power = power
        ps.m = self.ps.m
        ps.df = self.ps.df
        ps.norm = "leahy"

        pe = PSDParEst(ps)
        llike = PSDLogLikelihood(ps.freq, ps.power, model)

        true_pars = [
            self.amplitude_0, self.x_0_0, self.fwhm_0, self.amplitude_1,
            model.amplitude_2.value, model.x_0_2.value, model.fwhm_2.value
        ]
        res = pe.fit(llike, true_pars)

        res.print_summary(llike)
        out, err = capsys.readouterr()
        assert "100.00000            (Fixed)" in out
        pattern = \
            re.compile(r"5\) Parameter x_0_2\s+: [0-9]\.[0-9]{5}\s+\(Tied\)")
        assert pattern.search(out)

        compare_pars = [
            self.x_0_0, self.fwhm_0, self.amplitude_1, model.amplitude_2.value,
            model.fwhm_2.value
        ]

        assert np.all(np.isclose(compare_pars, res.p_opt, rtol=0.5))
Example no. 14
    def test_fitting_with_ties_and_bounds(self, capsys, rebin):
        double_f = lambda model: model.x_0_0 * 2
        model = self.model.copy()
        model += models.Lorentz1D(amplitude=model.amplitude_0,
                                  x_0=model.x_0_0 * 2,
                                  fwhm=model.fwhm_0)
        model.x_0_0 = self.model.x_0_0
        model.amplitude_0 = self.model.amplitude_0
        model.amplitude_1 = self.model.amplitude_1
        model.fwhm_0 = self.model.fwhm_0
        model.x_0_2.tied = double_f
        model.fwhm_0.bounds = [0, 10]
        model.amplitude_0.fixed = True

        p = model(self.ps.freq)

        noise = np.random.exponential(size=len(p))
        power = noise*p

        ps = Powerspectrum()
        ps.freq = self.ps.freq
        ps.power = power
        ps.m = self.ps.m
        ps.df = self.ps.df
        ps.norm = "leahy"

        if rebin != 0:
            ps = ps.rebin_log(rebin)

        pe = PSDParEst(ps, fitmethod="TNC")
        llike = PSDLogLikelihood(ps.freq, ps.power, model)

        true_pars = [self.x_0_0, self.fwhm_0,
                     self.amplitude_1,
                     model.amplitude_2.value,
                     model.fwhm_2.value]

        res = pe.fit(llike, true_pars, neg=True)

        compare_pars = [self.x_0_0, self.fwhm_0,
                        self.amplitude_1,
                        model.amplitude_2.value,
                        model.fwhm_2.value]

        assert np.allclose(compare_pars, res.p_opt, rtol=0.5)
Example no. 15
# this should (perhaps?) be replaced by introspection from
# the astropy.modeling.Model registry. For now, we directly
# store hard-coded instances.

import astropy.modeling.models as models
from astropy.modeling import Parameter, Fittable1DModel
from astropy.modeling.polynomial import PolynomialModel

registry = {
    'Gaussian1D':
    models.Gaussian1D(1.0, 1.0, 1.0),
    'GaussianAbsorption1D':
    models.GaussianAbsorption1D(1.0, 1.0, 1.0),
    'Lorentz1D':
    models.Lorentz1D(1.0, 1.0, 1.0),
    'MexicanHat1D':
    models.MexicanHat1D(1.0, 1.0, 1.0),
    'Trapezoid1D':
    models.Trapezoid1D(1.0, 1.0, 1.0, 1.0),
    'Moffat1D':
    models.Moffat1D(1.0, 1.0, 1.0, 1.0),
    'ExponentialCutoffPowerLaw1D':
    models.ExponentialCutoffPowerLaw1D(1.0, 1.0, 1.0, 1.0),
    'BrokenPowerLaw1D':
    models.BrokenPowerLaw1D(1.0, 1.0, 1.0, 1.0),
    'LogParabola1D':
    models.LogParabola1D(1.0, 1.0, 1.0, 1.0),
    'PowerLaw1D':
    models.PowerLaw1D(1.0, 1.0, 1.0),
    'Linear1D':
    models.Linear1D(1.0, 0.0),
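The header comment of this snippet suggests that the hard-coded registry could instead be built by introspection of `astropy.modeling`; a minimal sketch of that idea, assuming default parameter values are acceptable (the names below are illustrative, not part of the original module):

import inspect

import astropy.modeling.models as models
from astropy.modeling import Fittable1DModel

introspected_registry = {}
for name, cls in inspect.getmembers(models, inspect.isclass):
    # Keep only concrete 1D fittable models that can be built with defaults.
    if issubclass(cls, Fittable1DModel) and cls is not Fittable1DModel:
        try:
            introspected_registry[name] = cls()
        except TypeError:
            # Models requiring mandatory arguments (e.g. a polynomial degree)
            # would still need hard-coded instances, as in the registry above.
            pass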
Example no. 16
    def _autofocus(self, seconds, focus_range, focus_step, thumbnail_size,
                   merit_function, merit_function_kwargs, coarse, plots,
                   start_event, finished_event, *args, **kwargs):
        # If passed a start_event wait until Event is set before proceeding (e.g. wait for coarse focus
        # to finish before starting fine focus).
        if start_event:
            start_event.wait()

        initial_focus = self.position
        if coarse:
            self.logger.debug(
                "Beginning coarse autofocus of {} - initial focus position: {}"
                .format(self._camera, initial_focus))
        else:
            self.logger.debug(
                "Beginning autofocus of {} - initial focus position: {}".
                format(self._camera, initial_focus))

        # Set up paths for temporary focus files, and plots if requested.
        image_dir = self.config['directories']['images']
        start_time = current_time(flatten=True)
        file_path = "{}/{}/{}/{}.{}".format(image_dir, 'focus',
                                            self._camera.uid, start_time,
                                            self._camera.file_extension)

        if plots:
            # Take an image before focusing, grab a thumbnail from the centre and add it to the plot
            thumbnail = self._camera.get_thumbnail(seconds, file_path,
                                                   thumbnail_size)
            fig = plt.figure(figsize=(9, 18), tight_layout=True)
            ax1 = fig.add_subplot(3, 1, 1)
            im1 = ax1.imshow(thumbnail, interpolation='none', cmap='cubehelix')
            fig.colorbar(im1)
            ax1.set_title('Initial focus position: {}'.format(initial_focus))

        # Set up encoder positions for autofocus sweep, truncating at focus travel limits if required.
        if coarse:
            focus_range = focus_range[1]
            focus_step = focus_step[1]
        else:
            focus_range = focus_range[0]
            focus_step = focus_step[0]

        focus_positions = np.arange(
            max(initial_focus - focus_range / 2, self.min_position),
            min(initial_focus + focus_range / 2, self.max_position) + 1,
            focus_step,
            dtype=int)
        n_positions = len(focus_positions)

        metric = np.empty((n_positions))

        for i, position in enumerate(focus_positions):
            # Move focus, updating focus_positions with actual encoder position after move.
            focus_positions[i] = self.move_to(position)

            # Take exposure
            thumbnail = self._camera.get_thumbnail(seconds, file_path,
                                                   thumbnail_size)

            # Calculate Vollath F4 focus metric
            metric[i] = images.focus_metric(thumbnail, merit_function,
                                            **merit_function_kwargs)
            self.logger.debug("Focus metric at position {}: {}".format(
                position, metric[i]))

        # Find maximum values
        imax = metric.argmax()

        if imax == 0 or imax == (n_positions - 1):
            # TODO: have this automatically switch to coarse focus mode if this happens
            self.logger.warning(
                "Best focus outside sweep range, aborting autofocus on {}!".
                format(self._camera))
            best_focus = focus_positions[imax]

        elif not coarse:
            # Fit to data around the max value to determine best focus position. Lorentz function seems to fit OK
            # provided you only fit in the immediate vicinity of the max value.

            # Initialise models
            fit = models.Lorentz1D(x_0=focus_positions[imax],
                                   amplitude=metric.max())

            # Initialise fitter
            fitter = fitting.LevMarLSQFitter()

            # Select data range for fitting. Tries to use 2 points either side of max, if in range.
            fitting_indices = (max(imax - 2, 0), min(imax + 2,
                                                     n_positions - 1))

            # Fit models to data
            fit = fitter(
                fit,
                focus_positions[fitting_indices[0]:fitting_indices[1] + 1],
                metric[fitting_indices[0]:fitting_indices[1] + 1])

            best_focus = fit.x_0.value

            # Guard against fitting failures, force best focus to stay within sweep range
            if best_focus < focus_positions[0]:
                self.logger.warning(
                    "Fitting failure: best focus {} below sweep limit {}".
                    format(best_focus, focus_positions[0]))
                best_focus = focus_positions[0]

            if best_focus > focus_positions[-1]:
                self.logger.warning(
                    "Fitting failure: best focus {} above sweep limit {}".
                    format(best_focus, focus_positions[-1]))
                best_focus = focus_positions[-1]

        else:
            # Coarse focus, just use max value.
            best_focus = focus_positions[imax]

        if plots:
            ax2 = fig.add_subplot(3, 1, 2)
            ax2.plot(focus_positions,
                     metric,
                     'bo',
                     label='{}'.format(merit_function))
            if not (imax == 0 or imax == (n_positions - 1)) and not coarse:
                fs = np.arange(focus_positions[fitting_indices[0]],
                               focus_positions[fitting_indices[1]] + 1)
                ax2.plot(fs, fit(fs), 'b-', label='Lorentzian fit')

            ax2.set_xlim(focus_positions[0] - focus_step / 2,
                         focus_positions[-1] + focus_step / 2)
            u_limit = 1.10 * metric.max()
            l_limit = min(0.95 * metric.min(), 1.05 * metric.min())
            ax2.set_ylim(l_limit, u_limit)
            ax2.vlines(initial_focus,
                       l_limit,
                       u_limit,
                       colors='k',
                       linestyles=':',
                       label='Initial focus')
            ax2.vlines(best_focus,
                       l_limit,
                       u_limit,
                       colors='k',
                       linestyles='--',
                       label='Best focus')
            ax2.set_xlabel('Focus position')
            ax2.set_ylabel('Focus metric')
            if coarse:
                ax2.set_title('{} coarse focus at {}'.format(
                    self._camera, start_time))
            else:
                ax2.set_title('{} fine focus at {}'.format(
                    self._camera, start_time))
            ax2.legend(loc='best')

        final_focus = self.move_to(best_focus)

        if plots:
            thumbnail = self._camera.get_thumbnail(seconds, file_path,
                                                   thumbnail_size)
            ax3 = fig.add_subplot(3, 1, 3)
            im3 = ax3.imshow(thumbnail, interpolation='none', cmap='cubehelix')
            fig.colorbar(im3)
            ax3.set_title('Final focus position: {}'.format(final_focus))
            plot_path = os.path.splitext(file_path)[0] + '.png'
            fig.savefig(plot_path)
            plt.close(fig)
            if coarse:
                self.logger.info(
                    'Coarse focus plot for camera {} written to {}'.format(
                        self._camera, plot_path))
            else:
                self.logger.info(
                    'Fine focus plot for camera {} written to {}'.format(
                        self._camera, plot_path))

        self.logger.debug(
            'Autofocus of {} complete - final focus position: {}'.format(
                self._camera, final_focus))

        if finished_event:
            finished_event.set()

        return initial_focus, final_focus
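The fine-focus branch above fits a Lorentzian only to the points immediately around the metric maximum; a self-contained sketch of just that step, with a synthetic focus-metric curve standing in for real measurements (positions, widths and amplitudes are made up):

import numpy as np
from astropy.modeling import models, fitting

# Synthetic focus sweep: focus metric peaking near encoder position 1052.
focus_positions = np.arange(1000, 1101, 10)
metric = 50.0 / (1.0 + ((focus_positions - 1052.0) / 15.0) ** 2)

imax = metric.argmax()
lo, hi = max(imax - 2, 0), min(imax + 2, len(focus_positions) - 1)

# The method above leaves fwhm at its default; seeding it with a rough width
# makes this toy fit more robust.
init = models.Lorentz1D(x_0=focus_positions[imax], amplitude=metric.max(),
                        fwhm=np.ptp(focus_positions[lo:hi + 1]) / 2)
fit = fitting.LevMarLSQFitter()(init,
                                focus_positions[lo:hi + 1],
                                metric[lo:hi + 1])
best_focus = fit.x_0.value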
Example no. 17
def fit_lorentzians(ps,
                    nlor,
                    starting_pars,
                    fit_whitenoise=True,
                    max_post=False,
                    priors=None,
                    fitmethod="L-BFGS-B"):
    """
    Fit a number of Lorentzians to a power spectrum, possibly including white
    noise. Each Lorentzian has three parameters (amplitude, centroid position,
    full-width at half maximum), plus one extra parameter if the white noise
    level should be fit as well. Priors for each parameter can be included in
    case `max_post = True`, in which case the function will attempt a
    Maximum-A-Posteriori fit. Priors must be specified as a dictionary with one
    entry for each parameter.
    The parameter names are `(amplitude_i, x_0_i, fwhm_i)` for each `i` out of
    a total of `N` Lorentzians. The white noise level has a parameter
    `amplitude_(N+1)`. For example, a model with two Lorentzians and a
    white noise level would have parameters:
    [amplitude_0, x_0_0, fwhm_0, amplitude_1, x_0_1, fwhm_1, amplitude_2].

    Parameters
    ----------
    ps : Powerspectrum
        A Powerspectrum object with the data to be fit

    nlor : int
        The number of Lorentzians to fit

    starting_pars : iterable
        The list of starting guesses for the optimizer. See explanation above
        for ordering of parameters in this list.

    fit_whitenoise : bool, optional, default True
        If True, the code will attempt to fit a white noise level along with
        the Lorentzians. Be sure to include a starting parameter for the
        optimizer in `starting_pars`!

    max_post : bool, optional, default False
        If True, perform a Maximum-A-Posteriori fit of the data rather than a
        Maximum Likelihood fit. Note that this requires priors to be specified,
        otherwise this will cause an exception!

    priors : {dict | None}, optional, default None
        Dictionary with priors for the MAP fit. This should be of the form
        {"parameter name": probability distribution, ...}

    fitmethod : string, optional, default "L-BFGS-B"
        Specifies an optimization algorithm to use. Supply any valid option for
        `scipy.optimize.minimize`.

    Returns
    -------
    parest : PSDParEst object
        A PSDParEst object for further analysis

    res : OptimizationResults object
        The OptimizationResults object storing useful results and quantities
        relating to the fit

    Example
    -------

    We start by making an example power spectrum with three Lorentzians
    >>> np.random.seed(400)
    >>> nlor = 3

    >>> x_0_0 = 0.5
    >>> x_0_1 = 2.0
    >>> x_0_2 = 7.5

    >>> amplitude_0 = 150.0
    >>> amplitude_1 = 50.0
    >>> amplitude_2 = 15.0

    >>> fwhm_0 = 0.1
    >>> fwhm_1 = 1.0
    >>> fwhm_2 = 0.5

    We will also include a white noise level:
    >>> whitenoise = 2.0

    >>> model = models.Lorentz1D(amplitude_0, x_0_0, fwhm_0) + \\
    ...         models.Lorentz1D(amplitude_1, x_0_1, fwhm_1) + \\
    ...         models.Lorentz1D(amplitude_2, x_0_2, fwhm_2) + \\
    ...         models.Const1D(whitenoise)

    >>> freq = np.linspace(0.01, 10.0, int(10.0 / 0.01))
    >>> p = model(freq)
    >>> noise = np.random.exponential(size=len(freq))

    >>> power = p*noise
    >>> ps = Powerspectrum()
    >>> ps.freq = freq
    >>> ps.power = power
    >>> ps.df = ps.freq[1] - ps.freq[0]
    >>> ps.m = 1

    Now we have to guess starting parameters. For each Lorentzian, we have
    amplitude, centroid position and fwhm, and this pattern repeats for each
    Lorentzian in the fit. The white noise level is the last parameter.
    >>> t0 = [150, 0.4, 0.2, 50, 2.3, 0.6, 20, 8.0, 0.4, 2.1]

    We're ready for doing the fit:
    >>> parest, res = fit_lorentzians(ps, nlor, t0)

    `res` contains a whole array of useful information about the fit, for
    example the parameters at the optimum:
    >>> p_opt = res.p_opt

    """

    model = models.Lorentz1D()

    if nlor > 1:
        for i in range(nlor - 1):
            model += models.Lorentz1D()

    if fit_whitenoise:
        model += models.Const1D()

    return fit_powerspectrum(ps,
                             model,
                             starting_pars,
                             max_post=max_post,
                             priors=priors,
                             fitmethod=fitmethod)
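The parameter ordering described in the docstring follows directly from how astropy names parameters on compound models; a quick check with an illustrative two-Lorentzian model:

from astropy.modeling import models

# Two Lorentzians plus a constant white-noise level, as fit_lorentzians builds them.
model = models.Lorentz1D() + models.Lorentz1D() + models.Const1D()
print(model.param_names)
# ('amplitude_0', 'x_0_0', 'fwhm_0', 'amplitude_1', 'x_0_1', 'fwhm_1', 'amplitude_2')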
Example no. 18
def test_single_peak_estimate():
    """
    Single Peak fit.
    """

    # Create the spectrum
    x_single, y_single = single_peak()
    s_single = Spectrum1D(flux=y_single*u.Jy, spectral_axis=x_single*u.um)

    #
    # Estimate parameter Gaussian1D
    # we give the true values for the Gaussian because it actually *should*
    # be pretty close to the true values, because it's a Gaussian...
    #

    g_init = estimate_line_parameters(s_single, models.Gaussian1D())


    assert np.isclose(g_init.amplitude.value, 3., rtol=.2)
    assert np.isclose(g_init.mean.value, 6.3, rtol=.1)
    assert np.isclose(g_init.stddev.value, 0.8, rtol=.3)

    assert g_init.amplitude.unit == u.Jy
    assert g_init.mean.unit == u.um
    assert g_init.stddev.unit == u.um

    #
    # Estimate parameter Lorentz1D
    # unlike the Gaussian1D here we do hand-picked comparison values, because
    # the "single peak" is a Gaussian and therefore the Lorentzian fit shouldn't
    # be quite right anyway
    #

    g_init = estimate_line_parameters(s_single, models.Lorentz1D())

    assert np.isclose(g_init.amplitude.value, 3.354169257846847)
    assert np.isclose(g_init.x_0.value, 6.218588636687762)
    assert np.isclose(g_init.fwhm.value, 1.6339001193853715)

    assert g_init.amplitude.unit == u.Jy
    assert g_init.x_0.unit == u.um
    assert g_init.fwhm.unit == u.um

    #
    # Estimate parameter Voigt1D
    #

    g_init = estimate_line_parameters(s_single, models.Voigt1D())

    assert np.isclose(g_init.amplitude_L.value, 3.354169257846847)
    assert np.isclose(g_init.x_0.value, 6.218588636687762)
    assert np.isclose(g_init.fwhm_L.value, 1.1553418541989058)
    assert np.isclose(g_init.fwhm_G.value, 1.1553418541989058)

    assert g_init.amplitude_L.unit == u.Jy
    assert g_init.x_0.unit == u.um
    assert g_init.fwhm_L.unit == u.um
    assert g_init.fwhm_G.unit == u.um


    #
    # Estimate parameter RickerWavelet1D
    #
    mh = models.RickerWavelet1D()
    estimators = {
        'amplitude': lambda s: max(s.flux),
        'x_0': lambda s: centroid(s, region=None),
        'sigma': lambda s: fwhm(s)
    }
    #mh._constraints['parameter_estimator'] = estimators
    mh.amplitude.estimator = lambda s: max(s.flux)
    mh.x_0.estimator = lambda s: centroid(s, region=None)
    mh.sigma.estimator = lambda s: fwhm(s)

    g_init = estimate_line_parameters(s_single, mh)

    assert np.isclose(g_init.amplitude.value, 3.354169257846847)
    assert np.isclose(g_init.x_0.value, 6.218588636687762)
    assert np.isclose(g_init.sigma.value, 1.6339001193853715)

    assert g_init.amplitude.unit == u.Jy
    assert g_init.x_0.unit == u.um
    assert g_init.sigma.unit == u.um
Example no. 19
    def _fit_model_to_fluxOLD(self,
                              trace_names,
                              application_data,
                              fitting_models,
                              selected_data,
                              custom_model=None,
                              custom_fitter=None,
                              do_update_client=True,
                              include_fit_substracted_trace=False):
        # http://learn.astropy.org/rst-tutorials/User-Defined-Model.html
        # https://docs.astropy.org/en/stable/modeling/new-model.html
        # https://docs.astropy.org/en/stable/modeling/index.html
        # https://docs.astropy.org/en/stable/modeling/reference_api.html

        curve_mapping = {
            name: ind
            for ind, name in enumerate(application_data['traces'])
        }

        for fitting_model in fitting_models:
            for trace_name in trace_names:
                #ind = trace_indexes[trace_name]

                trace = application_data['traces'].get(trace_name)
                curve_number = curve_mapping[trace_name]

                x = np.asarray([
                    point['x'] for point in selected_data["points"]
                    if point['curveNumber'] == curve_number
                ])
                y = np.asarray([
                    point['y'] for point in selected_data["points"]
                    if point['curveNumber'] == curve_number
                ])
                ind = [
                    point['pointIndex'] for point in selected_data["points"]
                    if point['curveNumber'] == curve_number
                ]

                y_err = np.asarray(
                    trace["flux_error"]
                )[ind] if trace["flux_error"] is not None and len(
                    trace["flux_error"]) > 0 else None

                min_x, max_x = np.min(x), np.max(x)

                if custom_model is None and custom_fitter is None:

                    location_param = np.mean(x)
                    amplitude_param = np.max(np.abs(y))
                    spread_param = (max_x - min_x) / len(x)

                    if fitting_model == FittingModels.GAUSSIAN_PLUS_LINEAR:
                        data_model = models.Gaussian1D(
                            amplitude=amplitude_param,
                            mean=location_param,
                            stddev=spread_param) + models.Polynomial1D(
                                degree=1)
                    elif fitting_model == FittingModels.LORENTZIAN_PLUS_LINEAR:
                        data_model = models.Lorentz1D(
                            amplitude=amplitude_param,
                            x_0=location_param,
                            fwhm=spread_param) + models.Polynomial1D(degree=1)
                    elif fitting_model == FittingModels.VOIGT_PLUS_LINEAR:
                        data_model = models.Voigt1D(
                            x_0=location_param,
                            amplitude_L=amplitude_param,
                            fwhm_L=spread_param,
                            fwhm_G=spread_param) + models.Polynomial1D(
                                degree=1)
                    else:
                        raise Exception("Unsupported fitting model " +
                                        str(fitting_model))

                    fitting_model_name = fitting_model
                    fitter = fitting.LevMarLSQFitter()
                    fitted_model = fitter(data_model, x, y, weights=1. / y_err)

                else:
                    fitting_model_name = str(custom_model)
                    fitter = custom_fitter
                    fitted_model = fitter(custom_model,
                                          x,
                                          y,
                                          weights=1. / y_err)

                x_grid = np.linspace(min_x, max_x, 5 * len(x))
                y_grid = fitted_model(x_grid)

                parameter_errors = np.sqrt(
                    np.diag(fitter.fit_info['param_cov'])
                ) if fitter.fit_info['param_cov'] is not None else None

                fitted_trace_name = "fit" + str(
                    len(application_data['fitted_models']) +
                    1) + "_" + trace_name
                ancestors = trace['ancestors'] + [trace_name]
                flambda = [f for f in np.asarray(trace['flambda'])[ind]]
                fitted_trace = Trace(name=fitted_trace_name,
                                     wavelength=[x for x in x_grid],
                                     flux=[y for y in y_grid],
                                     ancestors=ancestors,
                                     spectrum_type=SpectrumType.FIT,
                                     color="black",
                                     linewidth=1,
                                     alpha=1.0,
                                     wavelength_unit=trace['wavelength_unit'],
                                     flux_unit=trace['flux_unit'],
                                     flambda=flambda,
                                     catalog=trace['catalog']).to_dict()

                self._set_color_for_new_trace(fitted_trace, application_data)
                self._add_trace_to_data(application_data, fitted_trace_name,
                                        fitted_trace, False)

                if include_fit_substracted_trace:
                    fitted_trace_name = "fit_substr_" + str(
                        len(application_data['fitted_models']) +
                        1) + "_" + trace_name
                    ancestors = trace['ancestors'] + [trace_name]

                    y_grid2 = fitted_model(x)
                    flux = y - y_grid2

                    f_labmda = fl.convert_flux(
                        flux=flux,
                        wavelength=x,
                        from_flux_unit=trace['flux_unit'],
                        to_flux_unit=FluxUnit.F_lambda,
                        to_wavelength_unit=trace['wavelength_unit'])

                    fitted_trace = Trace(
                        name=fitted_trace_name,
                        wavelength=[x for x in x],
                        flux=[y for y in flux],
                        ancestors=ancestors,
                        spectrum_type=SpectrumType.FIT,
                        color="black",
                        linewidth=1,
                        alpha=1.0,
                        wavelength_unit=trace['wavelength_unit'],
                        flux_unit=trace['flux_unit'],
                        flambda=f_labmda,
                        catalog=trace['catalog']).to_dict()

                    self._set_color_for_new_trace(fitted_trace,
                                                  application_data)
                    self._add_trace_to_data(application_data,
                                            fitted_trace_name,
                                            fitted_trace,
                                            do_update_client=False)

                fitted_info = {}
                fitted_info['name'] = fitted_trace_name
                fitted_info['ancestors'] = ancestors
                fitted_info['model'] = fitting_model_name
                fitted_info['parameters'] = {
                    x: y
                    for (x, y) in zip(fitted_model.param_names,
                                      fitted_model.parameters)
                }
                fitted_info['parameter_errors'] = {
                    x: y
                    for (x,
                         y) in zip(fitted_model.param_names, parameter_errors)
                } if parameter_errors is not None else None
                fitted_info['selection_indexes'] = ind
                fitted_info['wavelength_unit'] = trace['wavelength_unit']
                fitted_info['flux_unit'] = trace['flux_unit']

                # add to application data:
                fitted_models = application_data['fitted_models']
                fitted_models[fitted_trace_name] = fitted_info
                application_data['fitted_models'] = fitted_models

                application_data['traces'][fitted_trace_name] = fitted_trace
                application_data['fitted_models'][
                    fitted_trace_name] = fitted_info
                self.write_info(
                    "fitting model : " +
                    str(application_data['fitted_models'][fitted_trace_name]))

        if do_update_client:
            self.update_client()
Example no. 20
            values.append(value)

    a = np.asarray(values)

    #a  = np.loadtxt(batsman, skiprows=6).flatten()
    #a /= np.max(np.abs(a),axis=0)
    #a *= (39.0/a.max())
    #a = a.astype(int)

    current_histogram = np.histogram(a, bins=range(255))

    bin_heights, bin_borders = np.histogram(a, bins=range(255))
    bin_widths = np.diff(bin_borders)
    bin_centers = bin_borders[:-1] + bin_widths / 2

    t_init = models.Lorentz1D()
    fit_t = fitting.LevMarLSQFitter()
    t = fit_t(t_init, bin_centers, bin_heights)

    x_interval_for_fit = np.linspace(bin_borders[0], bin_borders[-1], 20000)
    plt.figure()
    plt.bar(bin_centers, bin_heights, width=bin_widths, label='histogram')
    plt.plot(x_interval_for_fit,
             t(x_interval_for_fit),
             label='fit',
             c='red',
             linestyle='--')
    plt.xlabel('Pixel Value')
    plt.ylabel('Count')
    plt.legend()
Example no. 21
'''some BrenDan math'''
v_x = optim_gx[2] * np.sqrt(-2 * np.log(hmaxx / optim_gx[0])) + optim_gx[1]
v_y = optim_gy[2] * np.sqrt(-2 * np.log(hmaxy / optim_gy[0])) + optim_gy[1]
'''half-width half maxes'''
hwhmx = abs(max_locx - v_x)
hwhmy = abs(max_locy - v_y)
'''fit the lorentzian with SciPy'''
optim_lx, covar_lx = curve_fit(lorentz, xra, sum_x, p0=[1, max_locx, hwhmx])
optim_ly, covar_ly = curve_fit(lorentz, yra, sum_y, p0=[1, max_locy, hwhmy])
'''FWHM and HWHM my way'''
gwx = FWHM(sum_x, optim_gx, gauss)
dif_gx = gwx[1] - gwx[0]
gwy = FWHM(sum_y, optim_gy, gauss)
dif_gy = gwy[1] - gwy[0]

lx_init = models.Lorentz1D(amplitude=120, x_0=max_locx, fwhm=dif_gx)
fit_lx = fitting.LevMarLSQFitter()
lx = fit_lx(lx_init, xra, sum_x)

ly_init = models.Lorentz1D(amplitude=120, x_0=max_locy, fwhm=dif_gy)
fit_ly = fitting.LevMarLSQFitter()
ly = fit_ly(ly_init, yra, sum_y)

print('=' * 50)
print('Lorentzian')
print('=' * 50)

plt.figure()
plt.plot(xra, sum_x, 'r*', label='data')
plt.plot(xra, lorentz(xra, *optim_lx), 'b-', label='scipy.curve_fit')
plt.plot(xra, lx(xra), 'g--', label='astropy.fitting')
Example no. 22
bin_centers_3 = 0.5 * (edges_3[:-1] + edges_3[1:])

Mbin_centers_0 = 0.5 * (Aedges_0[:-1] + Aedges_0[1:])
Mbin_centers_1 = 0.5 * (Aedges_1[:-1] + Aedges_1[1:])
Mbin_centers_2 = 0.5 * (Aedges_2[:-1] + Aedges_2[1:])
Mbin_centers_3 = 0.5 * (Aedges_3[:-1] + Aedges_3[1:])

# Axis 1

ax1.hist(NewFinalFIRST, bins=36, density=True, histtype='step', linewidth=0.2)
g_init = models.Gaussian1D(amplitude=1., mean=0, stddev=1)
fit_g = fitting.LevMarLSQFitter()
g = fit_g(g_init, bin_centers_0, nentries_0)
ax1.plot(bin_centers_0, g(bin_centers_0), label='Gaussian', linewidth=0.2)

l_init = models.Lorentz1D(amplitude=0.75, x_0=0.001, fwhm=1000)
fit_l = fitting.LevMarLSQFitter()
l = fit_l(l_init, bin_centers_0, nentries_0)
ax1.plot(bin_centers_0, l(bin_centers_0), label='Lorentzian', linewidth=0.2)
#ax1.errorbar(Mbin_centers_0, Mentries_0, yerr = Ayerr_0, elinewidth = 0.2, fmt = 'r.',markersize = 0.1, capsize = 1)
ax1.text(x=-1650, y=0.0025, s='3D Host-Satellite Distance:', fontsize=3.7)
ax1.text(x=-1680,
         y=0.00235,
         s='0.0014'
         r'$\leq$'
         'Distance [kpc]'
         r'$\leq$'
         '0.3816',
         fontsize=3.2)
ax1.text(x=1150, y=0.00229, s='Plot #1', fontsize=5.5)
#ax1.text(x = 900, y = 0.00079, s = '7221 Satellites', fontsize = 3.7)
Example no. 23
 astmodels.Box2D(amplitude=10., x_0=0.5, x_width=5., y_0=1.5, y_width=7.),
 astmodels.Const1D(amplitude=5.),
 astmodels.Const2D(amplitude=5.),
 astmodels.Disk2D(amplitude=10., x_0=0.5, y_0=1.5, R_0=5.),
 astmodels.Ellipse2D(amplitude=10., x_0=0.5, y_0=1.5, a=2., b=4.,
                     theta=0.1),
 astmodels.Exponential1D(amplitude=10., tau=3.5),
 astmodels.Gaussian1D(amplitude=10., mean=5., stddev=3.),
 astmodels.Gaussian2D(amplitude=10.,
                      x_mean=5.,
                      y_mean=5.,
                      x_stddev=3.,
                      y_stddev=3.),
 astmodels.KingProjectedAnalytic1D(amplitude=10., r_core=5., r_tide=2.),
 astmodels.Logarithmic1D(amplitude=10., tau=3.5),
 astmodels.Lorentz1D(amplitude=10., x_0=0.5, fwhm=2.5),
 astmodels.Moffat1D(amplitude=10., x_0=0.5, gamma=1.2, alpha=2.5),
 astmodels.Moffat2D(amplitude=10., x_0=0.5, y_0=1.5, gamma=1.2, alpha=2.5),
 astmodels.Planar2D(slope_x=0.5, slope_y=1.2, intercept=2.5),
 astmodels.RedshiftScaleFactor(z=2.5),
 astmodels.RickerWavelet1D(amplitude=10., x_0=0.5, sigma=1.2),
 astmodels.RickerWavelet2D(amplitude=10., x_0=0.5, y_0=1.5, sigma=1.2),
 astmodels.Ring2D(amplitude=10., x_0=0.5, y_0=1.5, r_in=5., width=10.),
 astmodels.Sersic1D(amplitude=10., r_eff=1., n=4.),
 astmodels.Sersic2D(amplitude=10.,
                    r_eff=1.,
                    n=4.,
                    x_0=0.5,
                    y_0=1.5,
                    ellip=0.0,
                    theta=0.0),
Example no. 24
print('Search for the best frequency')

# We will search for pulsations over a range of frequencies around the known pulsation period.

df = (period_ranges[1] - period_ranges[0]) / period_bins
frequencies = 1 / np.arange(period_ranges[0], period_ranges[1], df)

freq, efstat = epoch_folding_search(times, frequencies, nbin=nbin)
pulse_frequency = freq[np.where(efstat == max(efstat))[0][0]]

print('pulse frequency', pulse_frequency)
_ = write_files('pulse_frequency_NuSTAR', pulse_frequency)

#fitting epoch folding distribution with Lorentzian curve
g_init = models.Lorentz1D(
    amplitude=max(efstat) - min(efstat),
    x_0=pulse_frequency,
    fwhm=pulse_frequency / 500) + models.Const1D(amplitude=min(efstat))
fit_g = fitting.LevMarLSQFitter()
bin_max = [np.where(efstat == max(efstat))[0][0]][0]
bin_left_min = 0
bin_right_min = len(efstat)

for j in range(min(bin_max, len(efstat) - bin_max) - 2):
    d_efstat = efstat[bin_max - j - 1] - efstat[bin_max - j]
    if d_efstat > 0 and max(efstat) - efstat[bin_max - j] > (max(efstat) -
                                                             min(efstat)) / 2:
        bin_left_min = bin_max - j
        break
for j in range(min(bin_max, len(efstat) - bin_max) - 2):
    d_efstat = efstat[bin_max + j + 1] - efstat[bin_max + j]
    if d_efstat > 0 and max(efstat) - efstat[bin_max + j] > (max(efstat) -
Example no. 25
        print('Sigma Clipping Complete')

Mean_Poly_Fit = np.ma.polyfit(Mean_Range, Mean_Data_Clipped, Polydegree)
Mean_Range_Fit = np.linspace(0, Image_Data_All2.shape[1] - 1,
                             Image_Data_All2.shape[1])
Mean_Data_Fit = Mean_Poly_Fit[0] * Mean_Range_Fit**2 + Mean_Poly_Fit[
    1] * Mean_Range_Fit**1 + Mean_Poly_Fit[2]

plt.plot(Mean_Data_2)
plt.plot(Mean_Data_Fit)
plt.show()

##Normal Lorentz Model Fitting

Fit_Data_4 = []
Lorentz_Model = models.Lorentz1D(amplitude=1000, x_0=0, fwhm=1)
Lorentz_Model.x_0.fixed = False

#for i in range(0, Image_Data_All2.shape[1]):
#    Fit_Data_4.append(Fitting_Model(Lorentz_Model, x, Image_Data_All2[:,i]))

for i in range(0, Image_Data_All2.shape[1]):
    if Fit_Data_4:  # true if not an empty list
        #Amp_Data_4[1023] = Amp_Data_4[0]
        Lorentz_Model = models.Lorentz1D(
            amplitude=Amp_Data_2[-1],
            #x_0 = Mean_Data_4[i],
            x_0=Mean_Data_Fit[i],
            fwhm=1)
        Lorentz_Model.x_0.fixed = True
    Fit_Data_4.append(Fitting_Model(Lorentz_Model, x, Image_Data_All2[:, i]))
Example no. 26
x3 = np.empty((1024,3))

for n in range(1024):
    x3[n,:] = Fit_Data_2[n].parameters

Amp_Data_2 = x3[:,0]
Mean_Data_2 = x3[:,1]
Stdv_Data_2 = x3[:,2]


#for i in range(0, Image_Data_All2.shape[1]):
    #Fit_Data_4.append(Fitting_Model(Moffat_Model, x, Image_Data_All2[:,i]))

Moffat_Model = models.Moffat1D(amplitude = 1000, x_0 = 0, gamma = 1, alpha = 2)
Fit_Data_4 = []
Lorentz_Model = models.Lorentz1D(amplitude = 1000, x_0 = 0, fwhm = 1)
Lorentz_Model.x_0.fixed = False

for i in range(0, Image_Data_All2.shape[1]):
    Fit_Data_4.append(Fitting_Model(Lorentz_Model, x, Image_Data_All2[:,i]))

x5 = np.empty((1024,3))
for n in range(1024):
    x5[n,:] = Fit_Data_4[n].parameters

Amp_Data_4 = x5[:,0]
Mean_Data_4 = x5[:,1]
FWHM_Data_4 = x5[:,2]


plt.plot(x, Image_Data_All2[:,0])
Example no. 27
def test_single_peak_estimate():
    """
    Single Peak fit.
    """

    # Create the spectrum
    x_single, y_single = single_peak()
    s_single = Spectrum1D(flux=y_single*u.Jy, spectral_axis=x_single*u.um)

    #
    # Estimate parameter Gaussian1D
    #

    g_init = estimate_line_parameters(s_single, models.Gaussian1D())

    assert np.isclose(g_init.amplitude.value, 3.354169257846847)
    assert np.isclose(g_init.mean.value, 6.218588636687762)
    assert np.isclose(g_init.stddev.value, 1.6339001193853715)

    assert g_init.amplitude.unit == u.Jy
    assert g_init.mean.unit == u.um
    assert g_init.stddev.unit == u.um

    #
    # Estimate parameter Lorentz1D
    #

    g_init = estimate_line_parameters(s_single, models.Lorentz1D())

    assert np.isclose(g_init.amplitude.value, 3.354169257846847)
    assert np.isclose(g_init.x_0.value, 6.218588636687762)
    assert np.isclose(g_init.fwhm.value, 1.6339001193853715)

    assert g_init.amplitude.unit == u.Jy
    assert g_init.x_0.unit == u.um
    assert g_init.fwhm.unit == u.um

    #
    # Estimate parameter Voigt1D
    #

    g_init = estimate_line_parameters(s_single, models.Voigt1D())

    assert np.isclose(g_init.amplitude_L.value, 3.354169257846847)
    assert np.isclose(g_init.x_0.value, 6.218588636687762)
    assert np.isclose(g_init.fwhm_L.value, 1.1553418541989058)
    assert np.isclose(g_init.fwhm_G.value, 1.1553418541989058)

    assert g_init.amplitude_L.unit == u.Jy
    assert g_init.x_0.unit == u.um
    assert g_init.fwhm_L.unit == u.um
    assert g_init.fwhm_G.unit == u.um


    #
    # Estimate parameter MexicanHat1D
    #
    mh = models.MexicanHat1D()
    estimators = {
        'amplitude': lambda s: max(s.flux),
        'x_0': lambda s: centroid(s, region=None),
        'stddev': lambda s: fwhm(s)
    }
    mh._constraints['parameter_estimator'] = estimators

    g_init = estimate_line_parameters(s_single, mh)

    assert np.isclose(g_init.amplitude.value, 3.354169257846847)
    assert np.isclose(g_init.x_0.value, 6.218588636687762)
    assert np.isclose(g_init.stddev.value, 1.6339001193853715)

    assert g_init.amplitude.unit == u.Jy
    assert g_init.x_0.unit == u.um
    assert g_init.stddev.unit == u.um
Example no. 28
    def setup_class(cls):

        m = 1
        nfreq = 100
        freq = np.linspace(0, 10.0, nfreq + 1)[1:]


        rng = np.random.RandomState(100)  # set the seed for the random number generator
        noise = rng.exponential(size=nfreq)

        cls.model = models.Lorentz1D() + models.Const1D()

        cls.x_0_0 = 2.0
        cls.fwhm_0 = 0.05
        cls.amplitude_0 = 1000.0

        cls.amplitude_1 = 2.0
        cls.model.x_0_0 = cls.x_0_0
        cls.model.fwhm_0 = cls.fwhm_0
        cls.model.amplitude_0 = cls.amplitude_0
        cls.model.amplitude_1 = cls.amplitude_1

        p = cls.model(freq)

        np.random.seed(400)
        power = noise*p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1]-freq[0]
        ps.norm = "leahy"

        cls.ps = ps
        cls.a_mean, cls.a_var = 2.0, 1.0
        cls.a2_mean, cls.a2_var = 100.0, 10.0

        p_amplitude_1 = lambda amplitude: \
            scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

        p_x_0_0 = lambda alpha: \
            scipy.stats.uniform(0.0, 5.0).pdf(alpha)

        p_fwhm_0 = lambda alpha: \
            scipy.stats.uniform(0.0, 0.5).pdf(alpha)

        p_amplitude_0 = lambda amplitude: \
            scipy.stats.norm(loc=cls.a2_mean, scale=cls.a2_var).pdf(amplitude)

        cls.priors = {"amplitude_1": p_amplitude_1,
                      "amplitude_0": p_amplitude_0,
                      "x_0_0": p_x_0_0,
                      "fwhm_0": p_fwhm_0}

        cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
                                 cls.model, m=cls.ps.m)
        cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

        cls.fitmethod = "powell"
        cls.max_post = True
        cls.t0 = [cls.x_0_0, cls.fwhm_0, cls.amplitude_0, cls.amplitude_1]
        cls.neg = True
Example no. 29
    def get_model_with_fitter(model_type, x, y):
        min_x, max_x = np.min(x), np.max(x)
        location_param = np.mean(x)
        amplitude_param = np.max(np.abs(y))
        spread_param = (max_x - min_x) / len(x)

        if model_type == FittingModels.GAUSSIAN_PLUS_LINEAR:
            model = models.Gaussian1D(
                amplitude=amplitude_param,
                mean=location_param,
                stddev=spread_param) + models.Polynomial1D(degree=1)
            fitter = fitting.LevMarLSQFitter()

        elif model_type == FittingModels.LORENTZIAN_PLUS_LINEAR:
            model = models.Lorentz1D(
                amplitude=amplitude_param,
                x_0=location_param,
                fwhm=spread_param) + models.Polynomial1D(degree=1)
            fitter = fitting.LevMarLSQFitter()

        elif model_type == FittingModels.VOIGT_PLUS_LINEAR:
            model = models.Voigt1D(
                x_0=location_param,
                amplitude_L=amplitude_param,
                fwhm_L=spread_param,
                fwhm_G=spread_param) + models.Polynomial1D(degree=1)
            fitter = fitting.LevMarLSQFitter()

        elif model_type == FittingModels.GAUSSIAN:
            model = models.Gaussian1D(amplitude=amplitude_param,
                                      mean=location_param,
                                      stddev=spread_param)
            fitter = fitting.LevMarLSQFitter()

        elif model_type == FittingModels.LORENTZIAN:
            model = models.Lorentz1D(amplitude=amplitude_param,
                                     x_0=location_param,
                                     fwhm=spread_param)
            fitter = fitting.LevMarLSQFitter()

        elif model_type == FittingModels.VOIGT:
            model = models.Voigt1D(x_0=location_param,
                                   amplitude_L=amplitude_param,
                                   fwhm_L=spread_param,
                                   fwhm_G=spread_param)
            fitter = fitting.LevMarLSQFitter()

        elif model_type == FittingModels.CHEBYSHEV_3:
            model = models.Chebyshev1D(degree=3)
            fitter = fitting.LinearLSQFitter()

        elif model_type == FittingModels.POLYNOMIAL_1:
            model = models.Polynomial1D(degree=1)
            fitter = fitting.LinearLSQFitter()

        elif model_type == FittingModels.POLYNOMIAL_2:
            model = models.Polynomial1D(degree=2)
            fitter = fitting.LinearLSQFitter()

        elif model_type == FittingModels.POLYNOMIAL_3:
            model = models.Polynomial1D(degree=3)
            fitter = fitting.LinearLSQFitter()

        else:
            raise Exception("Model " + str(model_type) +
                            " not in default models list.")

        return model, fitter