Example #1
 def test_bounds_gauss2d_slsqp(self):
     X, Y = np.meshgrid(np.arange(11), np.arange(11))
     bounds = {
         "x_mean": [0., 11.],
         "y_mean": [0., 11.],
         "x_stddev": [1., 4],
         "y_stddev": [1., 4]
     }
     gauss = models.Gaussian2D(amplitude=10.,
                               x_mean=5.,
                               y_mean=5.,
                               x_stddev=4.,
                               y_stddev=4.,
                               theta=0.5,
                               bounds=bounds)
     gauss_fit = fitting.SLSQPLSQFitter()
     # Warning does not appear in all the CI jobs.
     # TODO: Rewrite the test for more consistent warning behavior.
     with warnings.catch_warnings():
         warnings.filterwarnings('ignore',
                                 message=r'.*The fit may be unsuccessful.*',
                                 category=AstropyUserWarning)
         model = gauss_fit(gauss, X, Y, self.data)
     x_mean = model.x_mean.value
     y_mean = model.y_mean.value
     x_stddev = model.x_stddev.value
     y_stddev = model.y_stddev.value
     assert x_mean + 10**-5 >= bounds['x_mean'][0]
     assert x_mean - 10**-5 <= bounds['x_mean'][1]
     assert y_mean + 10**-5 >= bounds['y_mean'][0]
     assert y_mean - 10**-5 <= bounds['y_mean'][1]
     assert x_stddev + 10**-5 >= bounds['x_stddev'][0]
     assert x_stddev - 10**-5 <= bounds['x_stddev'][1]
     assert y_stddev + 10**-5 >= bounds['y_stddev'][0]
     assert y_stddev - 10**-5 <= bounds['y_stddev'][1]
Example #2
 def test_bounds_gauss2d_slsqp(self):
     X, Y = np.meshgrid(np.arange(11), np.arange(11))
     bounds = {
         "x_mean": [0., 11.],
         "y_mean": [0., 11.],
         "x_stddev": [1., 4],
         "y_stddev": [1., 4]
     }
     gauss = models.Gaussian2D(amplitude=10.,
                               x_mean=5.,
                               y_mean=5.,
                               x_stddev=4.,
                               y_stddev=4.,
                               theta=0.5,
                               bounds=bounds)
     gauss_fit = fitting.SLSQPLSQFitter()
     # Warning does not appear in all the CI jobs.
     # TODO: Rewrite the test for more consistent warning behavior.
     with pytest.warns(None) as warning_lines:
         model = gauss_fit(gauss, X, Y, self.data)
         x_mean = model.x_mean.value
         y_mean = model.y_mean.value
         x_stddev = model.x_stddev.value
         y_stddev = model.y_stddev.value
         assert x_mean + 10**-5 >= bounds['x_mean'][0]
         assert x_mean - 10**-5 <= bounds['x_mean'][1]
         assert y_mean + 10**-5 >= bounds['y_mean'][0]
         assert y_mean - 10**-5 <= bounds['y_mean'][1]
         assert x_stddev + 10**-5 >= bounds['x_stddev'][0]
         assert x_stddev - 10**-5 <= bounds['x_stddev'][1]
         assert y_stddev + 10**-5 >= bounds['y_stddev'][0]
         assert y_stddev - 10**-5 <= bounds['y_stddev'][1]
     for w in warning_lines:
         assert issubclass(w.category, AstropyUserWarning)
         assert 'The fit may be unsuccessful' in str(w.message)
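Examples #1 and #2 are two versions of the same Astropy test: the first silences an AstropyUserWarning that only fires on some CI jobs, the second records whatever warnings happen to be raised. Newer pytest releases deprecate `pytest.warns(None)`, so here is a minimal, self-contained sketch (not the Astropy test itself; all names below are made up) of recording optional warnings with the standard library instead:

import warnings

class DemoWarning(UserWarning):
    """Stand-in for AstropyUserWarning in this sketch."""

def maybe_warn(flag):
    # The real fit only warns on some platforms; emulate that here.
    if flag:
        warnings.warn("The fit may be unsuccessful", DemoWarning)

with warnings.catch_warnings(record=True) as warning_lines:
    warnings.simplefilter("always")
    maybe_warn(True)

# Assert only on the warnings that were actually raised, as Example #2 does.
for w in warning_lines:
    assert issubclass(w.category, DemoWarning)
    assert "The fit may be unsuccessful" in str(w.message)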
Example #3
    def determine_gaussian_fit_models(self, gaussians, spectrum):
        fit_values = None
        optimizers.DEFAULT_MAXITER = 1000
        channels = np.arange(self.n_channels)

        # To fit the data create a new superposition with initial
        # guesses for the parameters:
        if len(gaussians) > 0:
            gg_init = gaussians[0]

            if len(gaussians) > 1:
                for i in range(1, len(gaussians)):
                    gg_init += gaussians[i]

            fitter = fitting.SLSQPLSQFitter()
            try:
                gg_fit = fitter(gg_init, channels, spectrum, disp=False)
            except TypeError:
                gg_fit = fitter(gg_init, channels, spectrum, verblevel=False)

            fit_values = []
            if len(gg_fit.param_sets) > 3:
                for i in range(len(gg_fit.submodel_names)):
                    fit_values.append([
                        gg_fit[i].amplitude.value, gg_fit[i].mean.value,
                        abs(gg_fit[i].stddev.value)
                    ])
            else:
                fit_values.append([
                    gg_fit.amplitude.value, gg_fit.mean.value,
                    abs(gg_fit.stddev.value)
                ])
        return fit_values
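Example #3 builds a compound model by summing `Gaussian1D` components and then fits it with `SLSQPLSQFitter`. A minimal self-contained sketch of the same pattern on synthetic data (all numbers here are invented):

import numpy as np
from astropy.modeling import models, fitting

channels = np.arange(200, dtype=float)
# Synthetic spectrum: two blended Gaussians plus a little noise.
rng = np.random.default_rng(0)
spectrum = (models.Gaussian1D(5.0, 60.0, 6.0)(channels)
            + models.Gaussian1D(3.0, 130.0, 10.0)(channels)
            + rng.normal(0.0, 0.05, channels.size))

# Initial guesses are summed into one compound model, as in the example above.
gg_init = models.Gaussian1D(4.0, 55.0, 5.0) + models.Gaussian1D(2.0, 135.0, 8.0)
gg_fit = fitting.SLSQPLSQFitter()(gg_init, channels, spectrum, verblevel=0)

# Submodels of the fitted compound model are accessible by index.
for i in range(len(gg_fit.submodel_names)):
    print(gg_fit[i].amplitude.value, gg_fit[i].mean.value,
          abs(gg_fit[i].stddev.value))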
Example #4
 def test_bounds_gauss2d_slsqp(self):
     X, Y = np.meshgrid(np.arange(11), np.arange(11))
     bounds = {
         "x_mean": [0., 11.],
         "y_mean": [0., 11.],
         "x_stddev": [1., 4],
         "y_stddev": [1., 4]
     }
     gauss = models.Gaussian2D(amplitude=10.,
                               x_mean=5.,
                               y_mean=5.,
                               x_stddev=4.,
                               y_stddev=4.,
                               theta=0.5,
                               bounds=bounds)
     gauss_fit = fitting.SLSQPLSQFitter()
     with ignore_non_integer_warning():
         model = gauss_fit(gauss, X, Y, self.data)
     x_mean = model.x_mean.value
     y_mean = model.y_mean.value
     x_stddev = model.x_stddev.value
     y_stddev = model.y_stddev.value
     assert x_mean + 10**-5 >= bounds['x_mean'][0]
     assert x_mean - 10**-5 <= bounds['x_mean'][1]
     assert y_mean + 10**-5 >= bounds['y_mean'][0]
     assert y_mean - 10**-5 <= bounds['y_mean'][1]
     assert x_stddev + 10**-5 >= bounds['x_stddev'][0]
     assert x_stddev - 10**-5 <= bounds['x_stddev'][1]
     assert y_stddev + 10**-5 >= bounds['y_stddev'][0]
     assert y_stddev - 10**-5 <= bounds['y_stddev'][1]
Example #5
 def _gaussian_fit(self, a, k):
     from astropy.modeling import fitting, models
     fitter = fitting.SLSQPLSQFitter()
     gaus = models.Gaussian1D(amplitude=1., mean=a, stddev=5.)
     # print(gaus)
     # print(a, k)
     y1 = a - 25
     y2 = a + 25
     y = np.arange(y1, y2)
     try:
         gfit = fitter(gaus, y, self.hrs.data[y, self.step * (k + 1)] / self.hrs.data[y, self.step * (k + 1)].max(), verblevel=0)
     except IndexError:
         return
     return gfit
Example #6
def gaussian_fit(RF):

    from astropy.modeling import models, fitting

    # RF = np.pad(RF, pad_width=((5,0), (0, 0)), mode='constant')
    bound = 10
    x = np.linspace(-bound, bound, RF.shape[0])
    y = np.linspace(-bound, bound, RF.shape[1])
    Y, X = np.meshgrid(x, y)

    g_init = models.Gaussian2D()
    fitter = fitting.SLSQPLSQFitter()
    g = fitter(g_init, X, Y, RF.T, verblevel=0)

    return g(X, Y).T
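A hedged usage sketch for the function above, with a synthetic receptive field standing in for a real `RF` array (the 21x21 size and the Gaussian blob are arbitrary); it reuses `gaussian_fit` as defined above:

import numpy as np

yy, xx = np.mgrid[0:21, 0:21]
rf = np.exp(-((xx - 10.0) ** 2 + (yy - 10.0) ** 2) / (2.0 * 3.0 ** 2))
smooth_rf = gaussian_fit(rf)  # the fitted Gaussian evaluated on the same grid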
Example #7
    def evaluate(self):
        relaxation_rate = 0.01  # Some initial guess
        model = ScatteringModel(self.beta.value,
                                self.baseline.value,
                                relaxation_rate=relaxation_rate)
        fitter = fitting.SLSQPLSQFitter()
        threshold = min(
            len(self.lag_steps.value),
            np.argmax(self.g2.value < self.correlation_threshold.value))

        fit = fitter(model, self.lag_steps.value[:threshold],
                     self.g2.value[:threshold])

        self.relaxation_rate.value = fit.relaxation_rate.value
        self.fit_curve.value = fit(self.lag_steps.value)
Example #8
    def create_fits(self):
        fit_p = fitting.SLSQPLSQFitter()
        maxpix = self.imagearray[self.spotloc[1], self.spotloc[0]]
        singl_gaus_mod = models.Gaussian2D(amplitude=1,
                                           x_mean=0,
                                           y_mean=0,
                                           bounds={
                                               'amplitude': (0.9, 1),
                                               'theta': (0, 0)
                                           })

        doubl_gaus_mod = doubl_gaus(amplitude=0.8,
                                    x_mean=0,
                                    y_mean=0,
                                    sigma_x1=1,
                                    sigma_y1=1,
                                    sigma_x2=20,
                                    sigma_y2=20,
                                    bounds={
                                        'amplitude': (0.7, 1),
                                        'sigma_x1': (1E-9, 100),
                                        'sigma_y1': (1E-9, 100),
                                        'sigma_x2': (1, 100),
                                        'sigma_y2': (1, 100),
                                        'theta': (0, 0)
                                    })

        xx = np.arange(0, len(self.cropspot[0]))
        yy = np.arange(0, len(self.cropspot))

        xx = np.true_divide(xx - self.cropspotcentre[0],
                            self.pixeldimensions[0])
        yy = np.true_divide(yy - self.cropspotcentre[1],
                            self.pixeldimensions[1])

        x, y = np.meshgrid(xx, yy)

        normarray = np.true_divide(self.cropspot, maxpix)
        print(f'Fitting Single Gaussian for {self.energy}')
        p1 = fit_p(singl_gaus_mod, x, y, normarray, verblevel=0)
        print(f'Fitting Double Gaussian for {self.energy}')
        p2 = fit_p(doubl_gaus_mod, x, y, normarray, verblevel=0)

        self.normcropspot = normarray
        self.singlefit = p1
        self.singlefitarray = p1(x, y)
        self.doublefit = p2
        self.doublefitarray = p2(x, y)
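`doubl_gaus` in Example #8 is a custom model defined elsewhere in that project. Purely to illustrate how such a model could be declared, here is a hypothetical stand-in built with `astropy.modeling.custom_model`; the narrow-plus-wide concentric Gaussian form and the parameter meanings are assumptions, and `theta` is accepted but ignored because the example pins it to (0, 0):

import numpy as np
from astropy.modeling import custom_model

@custom_model
def doubl_gaus_sketch(x, y, amplitude=0.8, x_mean=0.0, y_mean=0.0,
                      sigma_x1=1.0, sigma_y1=1.0,
                      sigma_x2=20.0, sigma_y2=20.0, theta=0.0):
    # Narrow core plus wide halo, both centred on (x_mean, y_mean).
    core = np.exp(-0.5 * (((x - x_mean) / sigma_x1) ** 2
                          + ((y - y_mean) / sigma_y1) ** 2))
    halo = np.exp(-0.5 * (((x - x_mean) / sigma_x2) ** 2
                          + ((y - y_mean) / sigma_y2) ** 2))
    return amplitude * core + (1.0 - amplitude) * halo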
Example #9
def fit_scattering_factor(
        g2: np.ndarray,
        tau: np.ndarray,
        beta: float = 1.0,
        baseline: float = 1.0,
        correlation_threshold: float = 1.5) -> Tuple[np.ndarray, float]:
    relaxation_rate = 0.01  # Some initial guess
    model = ScatteringModel(beta, baseline, relaxation_rate=relaxation_rate)
    fitting_algorithm = fitting.SLSQPLSQFitter()
    threshold = min(len(tau), np.argmax(g2 < correlation_threshold))

    fit = fitting_algorithm(model, tau[:threshold], g2[:threshold])

    relaxation_rate = fit.relaxation_rate
    fit_curve = fit(tau)

    return fit_curve, relaxation_rate
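`ScatteringModel` in Examples #7, #9 and #12 is defined elsewhere in those projects. Assuming the usual single-exponential form g2(tau) = baseline + beta * exp(-2 * relaxation_rate * tau), a hypothetical stand-in plus a direct SLSQP fit on synthetic data could look like the sketch below (everything here is an illustration, not the projects' code):

import numpy as np
from astropy.modeling import custom_model, fitting

@custom_model
def ScatteringSketch(tau, beta=1.0, baseline=1.0, relaxation_rate=0.01):
    return baseline + beta * np.exp(-2.0 * relaxation_rate * tau)

tau = np.logspace(-4, 0, 60)
g2 = 1.0 + 0.8 * np.exp(-2.0 * 35.0 * tau)  # synthetic correlation data

model = ScatteringSketch(beta=0.8, baseline=1.0, relaxation_rate=10.0)
fit = fitting.SLSQPLSQFitter()(model, tau, g2, verblevel=0)
print(fit.relaxation_rate.value)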
Example #10
    def test_bounds_slsqp(self):
        guess_slope = 1.1
        guess_intercept = 0.0
        bounds = {'slope': (-1.5, 5.0), 'intercept': (-1.0, 1.0)}
        line_model = models.Linear1D(guess_slope, guess_intercept,
                                     bounds=bounds)
        fitter = fitting.SLSQPLSQFitter()

        with ignore_non_integer_warning():
            model = fitter(line_model, self.x, self.y)

        slope = model.slope.value
        intercept = model.intercept.value
        assert slope + 10 ** -5 >= bounds['slope'][0]
        assert slope - 10 ** -5 <= bounds['slope'][1]
        assert intercept + 10 ** -5 >= bounds['intercept'][0]
        assert intercept - 10 ** -5 <= bounds['intercept'][1]
Example #11
    def test_bounds_slsqp(self):
        guess_slope = 1.1
        guess_intercept = 0.0
        bounds = {'slope': (-1.5, 5.0), 'intercept': (-1.0, 1.0)}
        line_model = models.Linear1D(guess_slope,
                                     guess_intercept,
                                     bounds=bounds)
        fitter = fitting.SLSQPLSQFitter()
        with pytest.warns(AstropyUserWarning,
                          match='consider using linear fitting methods'):
            model = fitter(line_model, self.x, self.y)

        slope = model.slope.value
        intercept = model.intercept.value
        assert slope + 10**-5 >= bounds['slope'][0]
        assert slope - 10**-5 <= bounds['slope'][1]
        assert intercept + 10**-5 >= bounds['intercept'][0]
        assert intercept - 10**-5 <= bounds['intercept'][1]
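The warning matched in Example #11 points out that SLSQP is overkill for a purely linear model; it is only needed in these tests because of the parameter bounds, which (to my knowledge) `LinearLSQFitter` does not enforce. For an unbounded `Linear1D`, the linear fitter is the simpler route; a small sketch with synthetic data:

import numpy as np
from astropy.modeling import models, fitting

x = np.linspace(0.0, 10.0, 50)
y = 1.2 * x + 0.3

# One-step linear least squares; no SLSQP convergence warning is involved.
fit = fitting.LinearLSQFitter()(models.Linear1D(slope=1.0, intercept=0.0), x, y)
print(fit.slope.value, fit.intercept.value)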
Example #12
def fit_scattering_factor(g2: np.ndarray,
                          tau: np.ndarray,
                          beta: float = 1.0,
                          baseline: float = 1.0,
                          relaxation_rate: float = 0.01,
                          correlation_threshold: float = 2) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    model = ScatteringModel(beta, baseline, relaxation_rate=relaxation_rate)
    fitting_algorithm = fitting.SLSQPLSQFitter()
    threshold = min(len(tau), np.argmax(g2 < correlation_threshold))

    if g2.ndim > 1:
        fits = [fitting_algorithm(model, tau[:threshold], g2[i][:threshold]) for i in range(len(g2))]
    else:
        fits = [fitting_algorithm(model, tau[:threshold], g2[:threshold])]

    relaxation_rates = np.asarray([fit.relaxation_rate.value for fit in fits]).squeeze()
    fit_curves = np.asarray([fit(tau) for fit in fits]).squeeze()

    return fit_curves, relaxation_rates, tau, g2
Example #13
class AstropyQSpectraFit(ProcessingPlugin):
    name = 'Q Fit (Astropy)'

    q = InOut(description='Q bin center positions',
              type=np.array)
    Iq = InOut(description='Q spectra bin intensities', type=np.array)
    model = Input(description='Fittable model class in the style of Astropy', type=Enum)
    domainmin = Input(description='Min bound on the domain of the input data', type=float)
    domainmax = Input(description='Max bound on the domain of the input data', type=float)
    fitter = Input(description='Fitting algorithm', default=fitting.LevMarLSQFitter(), type=Enum,
                   limits={'Linear LSQ': fitting.LinearLSQFitter(),
                           'Levenberg-Marquardt LSQ': fitting.LevMarLSQFitter(), 'SLSQP LSQ': fitting.SLSQPLSQFitter(),
                           'Simplex LSQ': fitting.SimplexLSQFitter()})

    fittedmodel = Output(description='A new model with the fitted parameters; behaves as parameterized function',
                         type=Fittable1DModel)
    fittedprofile = Output(
        description='The fitted profile from the evaluation of the resulting model over the input range.')

    hints = [PlotHint(q, Iq), PlotHint(q, fittedprofile)]

    modelvars = {}

    def __init__(self):
        super(AstropyQSpectraFit, self).__init__()
        self.model.limits = {plugin.name: plugin.plugin_object for plugin in
                             pluginmanager.getPluginsOfCategory('Fittable1DModelPlugin')}
        self.model.value = list(self.model.limits.values())[0]

    @property
    def parameter(self):

        # clear cached model variable inputs from the instance __dict__
        for input in self.modelvars:
            del self.__dict__[input]
        varcache = self.modelvars.copy()
        self.modelvars = {}
        self._inputs = None
        self._inverted_vars = None
        if hasattr(self, '_inverted_vars'): del self._inverted_vars

        for name in self.model.value.param_names:
            param = getattr(self.model.value, name)
            # TODO: CHECK NAMESPACE
            if name in varcache:
                input = varcache[name]
            else:
                input = InOut(name=name, default=param.default, limits=param.bounds, type=float, fixed=False,
                              fixable=True)
            setattr(self, name, input)
            self.modelvars[name] = input
        parameter = super(AstropyQSpectraFit, self).parameter
        parameter.child('model').sigValueChanged.connect(self.reset_parameter)
        return parameter

    def reset_parameter(self):
        # cache old parameter
        oldparam = self._param

        # empty it
        for child in oldparam.children():
            child.remove()

        # reset attribute so new parameter is generated
        self._param = None

        # add new children to old parameter
        for child in self.parameter.children():  # type: Parameter
            oldparam.addChild(child)

        # set old parameter to attribute
        self._param = oldparam

    def evaluate(self):
        if self.model.value is None or self.model.value == '----': return
        norange = self.domainmin.value == self.domainmax.value
        if self.domainmin.value is None and self.q.value is not None or norange:  # truncate the q and I arrays with limits
            self.domainmin.value = self.q.value.min()
        if self.domainmax.value is None and self.q.value is not None or norange:  # truncate the q and I arrays with limits
            self.domainmax.value = self.q.value.max()
        for name, input in self.modelvars.items():  # propagate user-defined values to the model
            getattr(self.model.value, name).value = input.value
            getattr(self.model.value, name).fixed = input.fixed
        filter = np.logical_and(self.domainmin.value <= self.q.value, self.q.value <= self.domainmax.value)
        q = self.q.value[filter]
        Iq = self.Iq.value[filter]
        self.fittedmodel.value = self.fitter.value(self.model.value, q, Iq)
        self.fittedprofile.value = self.fittedmodel.value(self.q.value)

    def getCategory() -> str:
        return "Fits"
Example #14
    def gauss_2_fit(hist_x,
                    hist_y,
                    gauss_1=(1, 0, 0.1),
                    gauss_2=(1, 0, 0.1),
                    show_plot=True,
                    fit_LevMar=False,
                    verbose=True,
                    vline=None):
        # ""
        # NOTEBOOK: "04_astrometry_extinction" [/sample_comp/]
        # ""
        # Pad input to facilitate the fitting ============
        step = hist_x[1] - hist_x[0]
        trim = 30
        for i in range(trim):
            hist_x = np.pad(hist_x, (1, 1),
                            'constant',
                            constant_values=(hist_x[0] - step,
                                             hist_x[-1] + step))
            hist_y = np.pad(hist_y, (1, 1), 'constant', constant_values=(0, 0))

        # Prepare fit ====================================
        if fit_LevMar:
            fitter = fitting.LevMarLSQFitter()
        else:
            fitter = fitting.SLSQPLSQFitter()

        # Construct individual Gaussians (first guess)====
        gaus_1 = models.Gaussian1D(gauss_1[0], gauss_1[1], gauss_1[2])
        gaus_2 = models.Gaussian1D(gauss_2[0], gauss_2[1], gauss_2[2])

        # Perform fit & extract models ===================
        gg_fit = fitter(gaus_1 + gaus_2, hist_x, hist_y)
        errors = np.sqrt(np.diag(
            fitter.fit_info['param_cov']))  # 1 Sigma Error

        gg_fit.amplitude_0_err = errors[0]
        gg_fit.mean_0_err = errors[1]
        gg_fit.stddev_0_err = errors[2]
        gg_fit.amplitude_1_err = errors[3]
        gg_fit.mean_1_err = errors[4]
        gg_fit.stddev_1_err = errors[5]

        # Construct individual Gaussians (model)==========
        fit_1 = models.Gaussian1D(gg_fit.amplitude_0, gg_fit.mean_0,
                                  gg_fit.stddev_0)
        fit_2 = models.Gaussian1D(gg_fit.amplitude_1, gg_fit.mean_1,
                                  gg_fit.stddev_1)

        if show_plot:
            hist_x = hist_x[trim:-trim]
            xi = np.min(hist_x)
            xf = np.max(hist_x)
            n_samps = 100
            xrange = np.arange(xi, xf, np.abs(xi - xf) / n_samps)

            linewidth = 3
            plt.plot(xrange,
                     gg_fit(xrange),
                     color='black',
                     label='',
                     linestyle='-',
                     linewidth=linewidth)
            plt.plot(xrange,
                     fit_1(xrange),
                     color='black',
                     label='',
                     linestyle='--',
                     linewidth=linewidth)
            plt.plot(xrange,
                     fit_2(xrange),
                     color='black',
                     label='',
                     linestyle=':',
                     linewidth=linewidth)

            if vline: plt.vlines(x=vline, ymin=0, ymax=100)

        if verbose:
            sample_comp.model_info(gg_fit)

        return gg_fit
Example #15
def collapsed_met_histogram():
    '''Plot the distribution of K Excesses in the cool, unevolved sample.'''
    targs = cache.apogee_splitter_with_DSEP()
    cooldwarfs = targs.subsample(["Dwarfs", "Cool Noev"])

    f, (ax1, ax2) = plt.subplots(1,
                                 2,
                                 figsize=(24, 12),
                                 sharex=True,
                                 sharey=True)
    cons_limit = -0.3
    arr1, bins, patches = ax1.hist(cooldwarfs["Corrected K Excess"],
                                   bins=60,
                                   color=bc.blue,
                                   alpha=0.5,
                                   range=(-1.6, 1.1),
                                   histtype="bar",
                                   label="")
    arr2, bins, patches = ax2.hist(cooldwarfs["Corrected K Solar"],
                                   bins=60,
                                   color=bc.red,
                                   alpha=0.5,
                                   range=(-1.6, 1.1),
                                   histtype="bar",
                                   label="")
    metarray = arr1
    nometarray = arr2
    singlemodel = models.Gaussian1D(100,
                                    0,
                                    0.1,
                                    bounds={
                                        "mean": (-0.5, 0.5),
                                        "stddev": (0.01, 0.5)
                                    })
    binarymodel = models.Gaussian1D(20,
                                    -0.75,
                                    0.1,
                                    bounds={
                                        "mean": (-1.5, 0.0),
                                        "stddev": (0.01, 0.5)
                                    })
    dualmodel = singlemodel + binarymodel
    fitter = fitting.SLSQPLSQFitter()
    fittedmet = fitter(dualmodel, (bins[1:] + bins[:-1]) / 2, metarray)
    inputexcesses = np.linspace(-1.6, 1.1, 200)
    metmodel = fittedmet(inputexcesses)
    fittednomet = fitter(dualmodel, (bins[1:] + bins[:-1]) / 2, nometarray)
    nometmodel = fittednomet(inputexcesses)
    ax1.plot(inputexcesses,
             metmodel,
             color=bc.blue,
             ls="-",
             lw=3,
             marker="",
             label="[Fe/H] Corrected")
    ax1.plot(inputexcesses,
             nometmodel,
             color=bc.red,
             ls="-",
             lw=3,
             marker="",
             label="[Fe/H] = 0.08")
    ax2.plot(inputexcesses, metmodel, color=bc.blue, ls="-", lw=3, marker="")
    ax2.plot(inputexcesses, nometmodel, color=bc.red, ls="-", lw=3, marker="")
    ax1.plot([cons_limit, cons_limit], [0, 100],
             marker="",
             ls="--",
             color=bc.violet,
             lw=4,
             zorder=3)
    ax2.plot([cons_limit, cons_limit], [0, 100],
             marker="",
             ls="--",
             color=bc.violet,
             lw=4,
             zorder=3)
    ax1.set_xlabel(r"Corrected {0} Excess".format(MKstr))
    ax1.set_ylabel("N")
    ax2.set_xlabel(r"Corrected {0} Excess".format(MKstr))
    ax1.set_ylabel("")
    ax1.set_ylim(0, 100)
    ax1.legend(loc="upper left")
Example #16
    def makeSlitIllum(self, adinputs=None, **params):
        """
        Makes the processed Slit Illumination Function by binning a 2D
        spectrum along the dispersion direction, fitting a smooth function
        for each bin, fitting a smooth 2D model, and reconstructing the 2D
        array using this last model.

        Its implementation is based on IRAF's `noao.twodspec.longslit.illumination`
        task, following the algorithm described in [Valdes, 1986].

        It expects the input calibration image to be a dispersed image of the
        slit without illumination problems (e.g., a twilight flat). The spectra
        are not required to be smooth in wavelength and may contain strong
        emission and absorption lines. The image should contain a `.mask`
        attribute in each extension, and it is expected to be overscan and bias
        corrected.

        Parameters
        ----------
        adinputs : list
            List of AstroData objects containing the dispersed image of the
            slit of a source free of illumination problems. The data needs to
            have been overscan and bias corrected and is expected to have a
            Data Quality mask.
        bins : {None, int}, optional
            Total number of bins across the dispersion axis. If None,
            the number of bins will match the number of extensions on each
            input AstroData object. If it is an int, it will create N bins
            of equal size.
        border : int, optional
            Border size that is added on every edge of the slit illumination
            image before cutting it down to the input AstroData frame.
        smooth_order : int, optional
            Order of the spline used to smooth the data in each bin
            (Default: 3).
        x_order : int, optional
            Order of the x-component in the Chebyshev2D model used to
            reconstruct the 2D data from the binned data.
        y_order : int, optional
            Order of the y-component in the Chebyshev2D model used to
            reconstruct the 2D data from the binned data.

        Returns
        -------
        List of AstroData : containing an AstroData with the Slit Illumination
            Response Function for each of the input objects.

        References
        ----------
        .. [Valdes, 1986] Francisco Valdes "Reduction Of Long Slit Spectra With
           IRAF", Proc. SPIE 0627, Instrumentation in Astronomy VI,
           (13 October 1986); https://doi.org/10.1117/12.968155
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        suffix = params["suffix"]
        bins = params["bins"]
        border = params["border"]
        debug_plot = params["debug_plot"]
        smooth_order = params["smooth_order"]
        cheb2d_x_order = params["x_order"]
        cheb2d_y_order = params["y_order"]

        ad_outputs = []
        for ad in adinputs:

            if len(ad) > 1 and "mosaic" not in ad[0].wcs.available_frames:

                log.info('Add "mosaic" gWCS frame to input data')
                geotable = import_module('.geometry_conf', self.inst_lookups)

                # deepcopy prevents modifying input `ad` inplace
                ad = transform.add_mosaic_wcs(deepcopy(ad), geotable)

                log.info("Temporarily mosaicking multi-extension file")
                mosaicked_ad = transform.resample_from_wcs(
                    ad,
                    "mosaic",
                    attributes=None,
                    order=1,
                    process_objcat=False)

            else:

                log.info('Input data already has one extension and has a '
                         '"mosaic" frame.')

                # deepcopy prevents modifying input `ad` inplace
                mosaicked_ad = deepcopy(ad)

            log.info("Transposing data if needed")
            dispaxis = 2 - mosaicked_ad[0].dispersion_axis()  # python sense
            should_transpose = dispaxis == 1

            data, mask, variance = _transpose_if_needed(
                mosaicked_ad[0].data,
                mosaicked_ad[0].mask,
                mosaicked_ad[0].variance,
                transpose=should_transpose)

            log.info("Masking data")
            data = np.ma.masked_array(data, mask=mask)
            variance = np.ma.masked_array(variance, mask=mask)
            std = np.sqrt(variance)  # Easier to work with

            log.info("Creating bins for data and variance")
            height = data.shape[0]
            width = data.shape[1]

            if bins is None:
                nbins = max(len(ad), 12)
                bin_limits = np.linspace(0, height, nbins + 1, dtype=int)
            elif isinstance(bins, int):
                nbins = bins
                bin_limits = np.linspace(0, height, nbins + 1, dtype=int)
            else:
                # ToDo: Handle input bins as array
                raise TypeError("Expected None or Int for `bins`. "
                                "Found: {}".format(type(bins)))

            bin_top = bin_limits[1:]
            bin_bot = bin_limits[:-1]
            binned_data = np.zeros_like(data)
            binned_std = np.zeros_like(std)

            log.info("Smooth binned data and variance, and normalize them by "
                     "smoothed central value")
            for bin_idx, (b0, b1) in enumerate(zip(bin_bot, bin_top)):

                rows = np.arange(width)

                avg_data = np.ma.mean(data[b0:b1], axis=0)
                model_1d_data = astromodels.UnivariateSplineWithOutlierRemoval(
                    rows, avg_data, order=smooth_order)

                avg_std = np.ma.mean(std[b0:b1], axis=0)
                model_1d_std = astromodels.UnivariateSplineWithOutlierRemoval(
                    rows, avg_std, order=smooth_order)

                slit_central_value = model_1d_data(rows)[width // 2]
                binned_data[b0:b1] = model_1d_data(rows) / slit_central_value
                binned_std[b0:b1] = model_1d_std(rows) / slit_central_value

            log.info("Reconstruct 2D mosaicked data")
            bin_center = np.array(0.5 * (bin_bot + bin_top), dtype=int)
            cols_fit, rows_fit = np.meshgrid(np.arange(width), bin_center)

            fitter = fitting.SLSQPLSQFitter()
            model_2d_init = models.Chebyshev2D(x_degree=cheb2d_x_order,
                                               x_domain=(0, width),
                                               y_degree=cheb2d_y_order,
                                               y_domain=(0, height))

            model_2d_data = fitter(model_2d_init, cols_fit, rows_fit,
                                   binned_data[rows_fit, cols_fit])

            model_2d_std = fitter(model_2d_init, cols_fit, rows_fit,
                                  binned_std[rows_fit, cols_fit])

            rows_val, cols_val = \
                np.mgrid[-border:height+border, -border:width+border]

            slit_response_data = model_2d_data(cols_val, rows_val)
            slit_response_mask = np.pad(
                mask, border, mode='edge')  # ToDo: any update to the mask?
            slit_response_std = model_2d_std(cols_val, rows_val)
            slit_response_var = slit_response_std**2

            del cols_fit, cols_val, rows_fit, rows_val

            _data, _mask, _variance = _transpose_if_needed(
                slit_response_data,
                slit_response_mask,
                slit_response_var,
                transpose=dispaxis == 1)

            log.info("Update slit response data and data_section")
            slit_response_ad = deepcopy(mosaicked_ad)
            slit_response_ad[0].data = _data
            slit_response_ad[0].mask = _mask
            slit_response_ad[0].variance = _variance

            if "mosaic" in ad[0].wcs.available_frames:

                log.info(
                    "Map coordinates between slit function and mosaicked data"
                )  # ToDo: Improve message?
                slit_response_ad = _split_mosaic_into_extensions(
                    ad, slit_response_ad, border_size=border)

            elif len(ad) == 1:

                log.info("Trim out borders")

                slit_response_ad[0].data = \
                    slit_response_ad[0].data[border:-border, border:-border]
                slit_response_ad[0].mask = \
                    slit_response_ad[0].mask[border:-border, border:-border]
                slit_response_ad[0].variance = \
                    slit_response_ad[0].variance[border:-border, border:-border]

            log.info("Update metadata and filename")
            gt.mark_history(slit_response_ad,
                            primname=self.myself(),
                            keyword=timestamp_key)

            slit_response_ad.update_filename(suffix=suffix, strip=True)
            ad_outputs.append(slit_response_ad)

            # Plotting ------
            if debug_plot:

                log.info("Creating plots")
                palette = copy(plt.cm.cividis)
                palette.set_bad('r', 0.75)

                norm = vis.ImageNormalize(data[~data.mask],
                                          stretch=vis.LinearStretch(),
                                          interval=vis.PercentileInterval(97))

                fig = plt.figure(num="Slit Response from MEF - {}".format(
                    ad.filename),
                                 figsize=(12, 9),
                                 dpi=110)

                gs = gridspec.GridSpec(nrows=2, ncols=3, figure=fig)

                # Display raw mosaicked data and its bins ---
                ax1 = fig.add_subplot(gs[0, 0])
                im1 = ax1.imshow(data,
                                 cmap=palette,
                                 origin='lower',
                                 vmin=norm.vmin,
                                 vmax=norm.vmax)

                ax1.set_title("Mosaicked Data\n and Spectral Bins",
                              fontsize=10)
                ax1.set_xlim(-1, data.shape[1])
                ax1.set_xticks([])
                ax1.set_ylim(-1, data.shape[0])
                ax1.set_yticks(bin_center)
                ax1.tick_params(axis=u'both', which=u'both', length=0)

                ax1.set_yticklabels(
                    ["Bin {}".format(i) for i in range(len(bin_center))],
                    fontsize=6)

                _ = [ax1.spines[s].set_visible(False) for s in ax1.spines]
                _ = [ax1.axhline(b, c='w', lw=0.5) for b in bin_limits]

                divider = make_axes_locatable(ax1)
                cax1 = divider.append_axes("right", size="5%", pad=0.05)
                plt.colorbar(im1, cax=cax1)

                # Display non-smoothed bins ---
                ax2 = fig.add_subplot(gs[0, 1])
                im2 = ax2.imshow(binned_data, cmap=palette, origin='lower')

                ax2.set_title("Binned, smoothed\n and normalized data ",
                              fontsize=10)
                ax2.set_xlim(0, data.shape[1])
                ax2.set_xticks([])
                ax2.set_ylim(0, data.shape[0])
                ax2.set_yticks(bin_center)
                ax2.tick_params(axis=u'both', which=u'both', length=0)

                ax2.set_yticklabels(
                    ["Bin {}".format(i) for i in range(len(bin_center))],
                    fontsize=6)

                _ = [ax2.spines[s].set_visible(False) for s in ax2.spines]
                _ = [ax2.axhline(b, c='w', lw=0.5) for b in bin_limits]

                divider = make_axes_locatable(ax2)
                cax2 = divider.append_axes("right", size="5%", pad=0.05)
                plt.colorbar(im2, cax=cax2)

                # Display reconstructed slit response ---
                vmin = slit_response_data.min()
                vmax = slit_response_data.max()

                ax3 = fig.add_subplot(gs[1, 0])
                im3 = ax3.imshow(slit_response_data,
                                 cmap=palette,
                                 origin='lower',
                                 vmin=vmin,
                                 vmax=vmax)

                ax3.set_title("Reconstructed\n Slit response", fontsize=10)
                ax3.set_xlim(0, data.shape[1])
                ax3.set_xticks([])
                ax3.set_ylim(0, data.shape[0])
                ax3.set_yticks([])
                ax3.tick_params(axis=u'both', which=u'both', length=0)
                _ = [ax3.spines[s].set_visible(False) for s in ax3.spines]

                divider = make_axes_locatable(ax3)
                cax3 = divider.append_axes("right", size="5%", pad=0.05)
                plt.colorbar(im3, cax=cax3)

                # Display extensions ---
                ax4 = fig.add_subplot(gs[1, 1])
                ax4.set_xticks([])
                ax4.set_yticks([])
                _ = [ax4.spines[s].set_visible(False) for s in ax4.spines]

                sub_gs4 = gridspec.GridSpecFromSubplotSpec(nrows=len(ad),
                                                           ncols=1,
                                                           subplot_spec=gs[1,
                                                                           1],
                                                           hspace=0.03)

                # The [::-1] is needed to put the first extension at the bottom
                for i, ext in enumerate(slit_response_ad[::-1]):

                    ext_data, ext_mask, ext_variance = _transpose_if_needed(
                        ext.data,
                        ext.mask,
                        ext.variance,
                        transpose=dispaxis == 1)

                    ext_data = np.ma.masked_array(ext_data, mask=ext_mask)

                    sub_ax = fig.add_subplot(sub_gs4[i])

                    im4 = sub_ax.imshow(ext_data,
                                        origin="lower",
                                        vmin=vmin,
                                        vmax=vmax,
                                        cmap=palette)

                    sub_ax.set_xlim(0, ext_data.shape[1])
                    sub_ax.set_xticks([])
                    sub_ax.set_ylim(0, ext_data.shape[0])
                    sub_ax.set_yticks([ext_data.shape[0] // 2])

                    sub_ax.set_yticklabels(
                        ["Ext {}".format(len(slit_response_ad) - i - 1)],
                        fontsize=6)

                    _ = [
                        sub_ax.spines[s].set_visible(False)
                        for s in sub_ax.spines
                    ]

                    if i == 0:
                        sub_ax.set_title(
                            "Multi-extension\n Slit Response Function")

                divider = make_axes_locatable(ax4)
                cax4 = divider.append_axes("right", size="5%", pad=0.05)
                plt.colorbar(im4, cax=cax4)

                # Display Signal-To-Noise Ratio ---
                snr = data / np.sqrt(variance)

                norm = vis.ImageNormalize(snr[~snr.mask],
                                          stretch=vis.LinearStretch(),
                                          interval=vis.PercentileInterval(97))

                ax5 = fig.add_subplot(gs[0, 2])

                im5 = ax5.imshow(snr,
                                 cmap=palette,
                                 origin='lower',
                                 vmin=norm.vmin,
                                 vmax=norm.vmax)

                ax5.set_title("Mosaicked Data SNR", fontsize=10)
                ax5.set_xlim(-1, data.shape[1])
                ax5.set_xticks([])
                ax5.set_ylim(-1, data.shape[0])
                ax5.set_yticks(bin_center)
                ax5.tick_params(axis=u'both', which=u'both', length=0)

                ax5.set_yticklabels(
                    ["Bin {}".format(i) for i in range(len(bin_center))],
                    fontsize=6)

                _ = [ax5.spines[s].set_visible(False) for s in ax5.spines]
                _ = [ax5.axhline(b, c='w', lw=0.5) for b in bin_limits]

                divider = make_axes_locatable(ax5)
                cax5 = divider.append_axes("right", size="5%", pad=0.05)
                plt.colorbar(im5, cax=cax5)

                # Display Signal-To-Noise Ratio of Slit Illumination ---
                slit_response_snr = np.ma.masked_array(
                    slit_response_data / np.sqrt(slit_response_var),
                    mask=slit_response_mask)

                ax6 = fig.add_subplot(gs[1, 2])

                im6 = ax6.imshow(slit_response_snr,
                                 origin="lower",
                                 vmin=norm.vmin,
                                 vmax=norm.vmax,
                                 cmap=palette)

                ax6.set_xlim(0, slit_response_snr.shape[1])
                ax6.set_xticks([])
                ax6.set_ylim(0, slit_response_snr.shape[0])
                ax6.set_yticks([])
                ax6.set_title("Reconstructed\n Slit Response SNR")

                _ = [ax6.spines[s].set_visible(False) for s in ax6.spines]

                divider = make_axes_locatable(ax6)
                cax6 = divider.append_axes("right", size="5%", pad=0.05)
                plt.colorbar(im6, cax=cax6)

                # Save plots ---
                fig.tight_layout(rect=[0, 0, 0.95, 1], pad=0.5)
                fname = slit_response_ad.filename.replace(".fits", ".png")
                log.info("Saving plots to {}".format(fname))
                plt.savefig(fname)

        return ad_outputs
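The core SLSQP step in `makeSlitIllum` above is the fit of a low-order `Chebyshev2D` surface to the binned, normalized data. Stripped of the AstroData bookkeeping, that step looks roughly like this sketch on synthetic data (grid sizes and degrees are arbitrary):

import numpy as np
from astropy.modeling import models, fitting

height, width = 64, 96
bin_center = np.arange(4, height, 8)
cols_fit, rows_fit = np.meshgrid(np.arange(width), bin_center)

# A gently sloping synthetic "slit response" sampled at the bin centres.
samples = 1.0 + 1e-3 * cols_fit + 5e-4 * rows_fit

model_2d_init = models.Chebyshev2D(x_degree=2, x_domain=(0, width),
                                   y_degree=2, y_domain=(0, height))
model_2d = fitting.SLSQPLSQFitter()(model_2d_init, cols_fit, rows_fit,
                                    samples, verblevel=0)

# Evaluate the reconstructed surface over the full frame.
rows_val, cols_val = np.mgrid[0:height, 0:width]
surface = model_2d(cols_val, rows_val)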
Example #17
def img_subtract_bright_star(img,
                             star,
                             x_col='x_pix',
                             y_col='y_pix',
                             gamma=5.0,
                             alpha=6.0,
                             sig=None,
                             x_buffer=4,
                             y_buffer=4,
                             img_maxsize=300):
    """Subtract a bright star from image using a Moffat model."""
    # Use the SLSQP fitter
    fitter_use = fitting.SLSQPLSQFitter()

    # Image dimension
    img_h, img_w = img.shape

    # Only fit the stars on the image
    if ((0 + x_buffer < int(star[x_col]) < img_w - x_buffer)
            and (0 + y_buffer < int(star[y_col]) < img_h - y_buffer)):
        # Get the center of the star
        x_cen, y_cen = int(star[x_col]), int(star[y_col])

        # If the image is too big, cut a part of it
        if (img_h >= img_maxsize) or (img_w >= img_maxsize):
            x_0 = int(x_cen -
                      img_maxsize / 2.0) if (x_cen -
                                             img_maxsize / 2.0) > 0 else 0
            x_1 = int(x_cen + img_maxsize /
                      2.0) if (x_cen + img_maxsize / 2.0) < img_w else (img_w -
                                                                        1)
            y_0 = int(y_cen -
                      img_maxsize / 2.0) if (y_cen -
                                             img_maxsize / 2.0) > 0 else 0
            y_1 = int(y_cen + img_maxsize /
                      2.0) if (y_cen + img_maxsize / 2.0) < img_h else (img_h -
                                                                        1)
            x_cen, y_cen = (x_cen - x_0), (y_cen - y_0)
        else:
            x_0, x_1 = 0, img_w + 1
            y_0, y_1 = 0, img_h + 1

        # Determine the weights for the fitting
        img_use = copy.deepcopy(img[y_0:y_1, x_0:x_1])

        weights = (1.0 / sig[y_0:y_1, x_0:x_1]) if (sig is not None) else None

        # X, Y grids
        y_size, x_size = img_use.shape
        y_arr, x_arr = np.mgrid[:y_size, :x_size]

        # Initial the Moffat model
        p_init = models.Moffat2D(x_0=x_cen,
                                 y_0=y_cen,
                                 # NumPy indexing is (row, col) = (y, x)
                                 amplitude=(img_use[int(y_cen),
                                                    int(x_cen)]),
                                 gamma=gamma,
                                 alpha=alpha,
                                 bounds={
                                     'x_0':
                                     [x_cen - x_buffer, x_cen + x_buffer],
                                     'y_0':
                                     [y_cen - y_buffer, y_cen + y_buffer]
                                 })

        try:
            with np.errstate(all='ignore'):
                best_fit = fitter_use(p_init,
                                      x_arr,
                                      y_arr,
                                      img_use,
                                      weights=weights,
                                      verblevel=0)

                img_new = copy.deepcopy(img)
                img_new[y_0:y_1, x_0:x_1] -= best_fit(x_arr, y_arr)

            return img_new

        except Exception:
            warnings.warn('# Star fitting failed!')
            return img
    else:
        return img
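A hedged usage sketch for the function above, on a synthetic frame containing a single Moffat-shaped star; the image size, star position and noise level are made up, and it reuses `img_subtract_bright_star` as defined above:

import numpy as np
from astropy.modeling import models

rng = np.random.default_rng(1)
yy, xx = np.mgrid[0:120, 0:120]
img = models.Moffat2D(amplitude=50.0, x_0=60.0, y_0=55.0,
                      gamma=5.0, alpha=6.0)(xx, yy)
img += rng.normal(0.0, 0.1, img.shape)

star = {'x_pix': 60.0, 'y_pix': 55.0}
residual = img_subtract_bright_star(img, star)  # star model subtracted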
Example #18
    def fit_refraction_function(self,
                                steps=10,
                                plot=False,
                                sample=None,
                                debug=False):
        """
        Fits a refraction function using a 3rd order Legendre
        Polynomial to the x and y pixel offsets caused by
        atmospheric refraction.

        Parameters
        ----------
        steps : int
            Number of segments of the data cube to consider. The
            larger this number, the finer the detail in fitting
            offsets.
        plot : bool
            Plots the function and the corresponding data points.
        sample : tuple, (w_0, w_1)
            Wavelength interval to be considered in the fit.
        debug : bool
            Plots debugging graphs to confirm that the shifts
            provided are actually matching the reference.

        Returns
        -------
        None
        """
        data = copy.deepcopy(self.science)

        d = np.array(
            [ma.median(_, axis=0) for _ in np.array_split(data, steps)])
        planes = np.array([((_[-1] + _[0]) / 2)
                           for _ in np.array_split(self.wavelength, steps)])

        for i, j in enumerate(d):
            d[i] /= j.max()

        md = d[int(len(d) / 2.0)]
        mid_point = planes[int(len(d) / 2.0)]

        if sample is None:
            sample = self.wavelength[[0, -1]]

        sample_mask = (planes >= sample[0]) & (planes <= sample[1])
        d = d[sample_mask]
        planes = planes[sample_mask]

        x_off, y_off = self._check_registration(reference=md, images=d)
        x_off *= self.sampling
        y_off *= self.sampling

        offsets, angle = self._offsets_rotation(x_off, y_off)
        self.refraction_angle = angle

        if debug:
            print(self.file_name)
            print('OFFSETS')
            print(offsets)

        model = DifferentialRefraction(temperature=self.temperature,
                                       pressure=self.pressure,
                                       air_mass=self.air_mass,
                                       wl_0=mid_point)
        model.wl_0.fixed = True
        fitter = fitting.FittingWithOutlierRemoval(fitting.SLSQPLSQFitter(),
                                                   sigma_clip,
                                                   niter=3,
                                                   sigma=3.0)
        rejected, shift = fitter(model, planes, offsets, acc=1e-12)

        if plot:
            fig, ax = plt.subplots(1, 1, sharex='col')

            ax.scatter(planes, offsets, c=planes)
            ax.set_ylabel('Differential refraction (arcsec)')
            ax.set_xlabel('Wavelength')
            ax.plot(self.wavelength, shift(self.wavelength))
            pars = [getattr(shift, _).value for _ in ['air_mass', 'wl_0']]
            pars.append(np.rad2deg(angle))
            ax.set_title(
                'secz = {:.2f}; wl_0 = {:.0f}; angle = {:.2f}'.format(*pars))
            ax.grid()

            plt.show()

        self.atmospheric_shift = shift

        if debug:
            self._debug_plots(d, planes)
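Example #18 wraps `SLSQPLSQFitter` in `FittingWithOutlierRemoval` with `sigma_clip`, so discrepant offsets are rejected while fitting. A minimal sketch of that pattern on synthetic 1D data follows; note that the order of the two return values has changed between astropy releases (recent versions document `fitted_model, mask`, while the example above unpacks an older order), so check the version you run against:

import numpy as np
from astropy.modeling import models, fitting
from astropy.stats import sigma_clip

rng = np.random.default_rng(2)
x = np.linspace(400.0, 700.0, 40)
y = 0.02 * (x - 550.0) + rng.normal(0.0, 0.05, x.size)
y[::10] += 2.0  # inject a few outliers

fitter = fitting.FittingWithOutlierRemoval(fitting.SLSQPLSQFitter(),
                                           sigma_clip, niter=3, sigma=3.0)
# Recent astropy: (fitted_model, mask); older releases used a different order.
fitted_model, mask = fitter(models.Linear1D(0.0, 0.0), x, y)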
        sigma = 2.
        p0 = [amplitude, sigma]
        coeff, var_matrix = curve_fit(rayleigh, x, n, p0=p0)
        amplitude = coeff[0]
        sigma = coeff[1]
        print "A*x*np.exp(-(x)**2/(2.*sigma**2)) / sigma**2"
        print "A, sigma = ", coeff
        fit = rayleigh(x, *coeff)
        distrib = "Rayleigh"
        pl.text(10, 0.1, 'A, sigma =  %s' % (coeff), color='red', fontsize=8)

    if method == 'G':
        gg_init = models.Gaussian1D(amplitude=1, mean=260,
                                    stddev=10.) + models.Gaussian1D(
                                        amplitude=1, mean=290, stddev=10.)
        fitter = fitting.SLSQPLSQFitter()
        gg_fit = fitter(gg_init, x, n)
    print(gg_fit)
        fit = gg_fit(x)
        distrib = "Two gaussian"
        pl.text(220,
                0.02,
                '%s \n %s' % (gg_fit.param_names, gg_fit.parameters),
                color='red',
                fontsize=8)

    pl.plot(x, fit, 'b-', label=distrib + ' distribution')
    pl.xlabel(name)
    pl.legend()
    pl.show()