Code example #1
 def test_from_obs(self):
     pha = [
         0., 6.31578947, 12.63157895, 18.94736842, 25.26315789, 31.57894737,
         37.89473684, 44.21052632, 50.52631579, 56.84210526, 63.15789474,
         69.47368421, 75.78947368, 82.10526316, 88.42105263, 94.73684211,
         101.05263158, 107.36842105, 113.68421053, 120.
     ] * u.deg
     data = [
         3.14451639, 4.06262914, 4.1154297, 4.54870242, 4.42265052,
         4.71990531, 5.1628504, 5.16098737, 5.20971821, 5.3032115,
         5.52976173, 5.64255607, 5.84536878, 6.13724017, 6.33675472,
         6.63099954, 7.2461781, 7.32734464, 8.00147425, 8.40595306
     ] * u.mag
     from astropy.modeling.fitting import LevMarLSQFitter
     fitter = LevMarLSQFitter()
     # test fit with one column
     m = HG.from_obs({'alpha': pha, 'mag': data}, fitter)
     assert isinstance(m, HG)
     assert isinstance(m.H, Parameter) & np.isclose(
         m.H.value, 3.436677) & (m.H.unit == u.mag)
     assert isinstance(m.G, Parameter) & np.isclose(
         m.G.value, 0.1857588) & (m.G.unit == u.dimensionless_unscaled)
     # test fit with one column and `init` parameters
     m = HG.from_obs({'alpha': pha, 'mag': data}, fitter, init=[3, 0.1])
     assert isinstance(m, HG)
     assert isinstance(m.H, Parameter) & np.isclose(
         m.H.value, 3.4366849) & (m.H.unit == u.mag)
     assert isinstance(m.G, Parameter) & np.isclose(
         m.G.value, 0.18576319) & (m.G.unit == u.dimensionless_unscaled)
     # test fit with more than one column
     m = HG.from_obs({
         'alpha': pha,
         'mag': data,
         'mag1': data,
         'mag2': data
     },
                     fitter,
                     fields=['mag', 'mag1', 'mag2'])
     assert isinstance(m, HG)
     assert isinstance(m.H, Parameter) & np.allclose(
         m.H.value, [3.436677] * 3) & (m.H.unit == u.mag)
     assert isinstance(m.G, Parameter) & np.allclose(
         m.G.value, [0.1857588] * 3) & (m.G.unit
                                        == u.dimensionless_unscaled)
     assert 'fields' in m.meta
     assert m.meta['fields'] == ['mag', 'mag1', 'mag2']
     # test fit with more than one column with `init` parameters
     m = HG.from_obs({
         'alpha': pha,
         'mag': data,
         'mag1': data,
         'mag2': data
     },
                     fitter,
                     fields=['mag', 'mag1', 'mag2'],
                     init=[[3., 3., 3.], [0.1, 0.1, 0.1]])
     assert isinstance(m, HG)
     assert isinstance(m.H, Parameter) & np.allclose(
         m.H.value, [3.4366849] * 3) & (m.H.unit == u.mag)
     assert isinstance(m.G, Parameter) & np.allclose(
         m.G.value, [0.18576319] * 3) & (m.G.unit
                                         == u.dimensionless_unscaled)
     assert 'fields' in m.meta
     assert m.meta['fields'] == ['mag', 'mag1', 'mag2']
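Every snippet on this page follows the same astropy idiom: build a model instance with initial guesses, create a LevMarLSQFitter, and call the fitter with the model and the data. A minimal self-contained sketch of that pattern (synthetic data, illustrative values only):

import numpy as np
from astropy.modeling import models
from astropy.modeling.fitting import LevMarLSQFitter

# synthetic noisy Gaussian profile
x = np.linspace(-5., 5., 200)
y = models.Gaussian1D(amplitude=10., mean=0.3, stddev=1.2)(x)
y += np.random.normal(0., 0.2, x.size)

# the fitter is a callable: fitter(initial_model, x, y) -> fitted model copy
fitter = LevMarLSQFitter()
fit = fitter(models.Gaussian1D(amplitude=5., mean=0., stddev=1.), x, y)
print(fit.amplitude.value, fit.mean.value, fit.stddev.value)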
Code example #2
def align_coordsystems(starlist,
                       psf_stars_x,
                       psf_stars_y,
                       shift_stars_x,
                       shift_stars_y,
                       wl_image,
                       plot_flag=True):
    epsf, gauss_std, n_resample = get_psf(wl_image,
                                          psf_stars_x,
                                          psf_stars_y,
                                          do_plot='yes')
    aper_rad = 4 * gauss_std / n_resample

    phot_psf = photometry.BasicPSFPhotometry(group_maker=DAOGroup(7.),
                                             psf_model=epsf,
                                             bkg_estimator=None,
                                             fitter=LevMarLSQFitter(),
                                             fitshape=(21, 21),
                                             aperture_radius=aper_rad)

    pos = Table(names=['x_0', 'y_0'], data=[shift_stars_x, shift_stars_y])

    # determine their positions in MUSE image by fitting their PSFs
    result_tab = phot_psf.do_photometry(image=wl_image, init_guesses=pos)
    shift_phot_x = [i for i in result_tab['x_fit']]
    shift_phot_y = [i for i in result_tab['y_fit']]

    # find closest HST star
    shift_starlist = [0] * len(shift_stars_x)

    for i in range(len(shift_phot_x)):
        distance = 1000.
        x_muse, y_muse = shift_phot_x[i], shift_phot_y[i]
        for star in starlist:
            dist = ((x_muse - star.xcoord)**2 + (y_muse - star.ycoord)**2)**0.5
            if dist < distance:
                shift_starlist[i] = star
                distance = dist

    # get array of coordinates
    x_hst_list = [i.xcoord for i in shift_starlist]
    y_hst_list = [i.ycoord for i in shift_starlist]

    # coordinate transformation parameters including translation and rotation
    params = lmfit.Parameters()
    params.add('delta_x', 1., min=-10, max=10, vary=True)
    params.add('delta_y', 1., min=-10, max=10, vary=True)
    params.add('theta', 0., min=-5., max=5., vary=True)

    # shift the stars and minimize their distance
    minimizer = lmfit.Minimizer(get_coordshift,
                                params,
                                fcn_args=(shift_phot_x, shift_phot_y,
                                          x_hst_list, y_hst_list))
    result = minimizer.minimize()
    print("Done with fitting the coordinate shift.")

    # best-fit parameters
    x_shift = result.params['delta_x']
    y_shift = result.params['delta_y']
    theta = result.params['theta']

    # adjust the coordinates of all stars
    for star in starlist:
        new_x, new_y = trans_rot(star.xcoord, star.ycoord, x_shift, y_shift,
                                 theta)
        star.xcoord, star.ycoord = new_x, new_y

    if plot_flag:
        plot_spatial(wl_image, plotfname='adjusted_coords.pdf', stars=starlist)

    # sort stars by x and y coordinate
    starlist.sort(key=lambda x: x.xcoord)
    starlist.sort(key=lambda x: x.ycoord)

    return starlist
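The helpers trans_rot and get_coordshift used above are not shown in this snippet. A plausible sketch of what they might look like, inferred only from how they are called (both implementations are assumptions, not the project's actual code):

import numpy as np

def _val(p):
    # hypothetical helper: accept either an lmfit Parameter or a plain number
    return getattr(p, 'value', p)

def trans_rot(x, y, delta_x, delta_y, theta):
    # hypothetical: translate by (delta_x, delta_y), then rotate by theta (deg)
    t = np.deg2rad(_val(theta))
    xs = np.asarray(x) + _val(delta_x)
    ys = np.asarray(y) + _val(delta_y)
    return (xs * np.cos(t) - ys * np.sin(t),
            xs * np.sin(t) + ys * np.cos(t))

def get_coordshift(params, x_muse, y_muse, x_hst, y_hst):
    # hypothetical lmfit residual: distances between the transformed HST
    # positions and the PSF-fitted MUSE positions
    xt, yt = trans_rot(x_hst, y_hst, params['delta_x'], params['delta_y'],
                       params['theta'])
    return np.hypot(np.asarray(x_muse) - xt, np.asarray(y_muse) - yt)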
Code example #3
File: fit_2d.py  Project: arainot/VIP
def fit_2dgaussian(array,
                   crop=False,
                   cent=None,
                   cropsize=15,
                   fwhmx=4,
                   fwhmy=4,
                   theta=0,
                   threshold=False,
                   sigfactor=6,
                   full_output=False,
                   debug=False):
    """ Fitting a 2D Gaussian to the 2D distribution of the data with photutils.
    
    Parameters
    ----------
    array : array_like
        Input frame with a single PSF.
    crop : {False, True}, optional
        If True a square subimage will be cropped.
    cent : tuple of int, optional
        X,Y integer position of source in the array for extracting the subimage. 
        If None the center of the frame is used for cropping the subframe (the 
        PSF is assumed to be ~ at the center of the frame). 
    cropsize : int, optional
        Size of the subimage.
    fwhmx, fwhmy : float, optional
        Initial values for the FWHM of the fitted Gaussian, in px (converted
        internally to standard deviations).
    theta : float, optional
        Angle of inclination of the 2d Gaussian counting from the positive X
        axis.
    threshold : {False, True}, optional
        If True the background pixels will be replaced by small random Gaussian 
        noise.
    sigfactor : int, optional
        The background pixels will be thresholded before fitting a 2d Gaussian
        to the data using sigma clipped statistics. All values smaller than
        (MEDIAN + sigfactor*STDDEV) will be replaced by small random Gaussian 
        noise. 
    full_output : {False, True}, optional
        If False it returns just the centroid, if True also returns the 
        FWHM in X and Y (in pixels), the amplitude and the rotation angle.
    debug : {True, False}, optional
        If True, the function prints out parameters of the fit and plots the
        data, model and residuals.
        
    Returns
    -------
    mean_y : float
        Source centroid y position on input array from fitting. 
    mean_x : float
        Source centroid x position on input array from fitting.
        
    If *full_output* is True it returns:
    mean_y, mean_x : floats
        Centroid. 
    fwhm_y : float
        FWHM in Y in pixels.
    fwhm_x : float
        FWHM in X in pixels.
    amplitude : float
        Amplitude of the Gaussian.
    theta : float
        Rotation angle.
    
    """
    if array.ndim != 2:
        raise TypeError('Input array is not a frame or 2d array')

    # If frame size is even we drop last row and last column
    if array.shape[0] % 2 == 0:
        array = array[:-1, :].copy()
    if array.shape[1] % 2 == 0:
        array = array[:, :-1].copy()

    if crop:
        if cent is None:
            ceny, cenx = frame_center(array)
        else:
            cenx, ceny = cent

        imside = array.shape[0]
        psf_subimage, suby, subx = get_square(array,
                                              min(cropsize, imside),
                                              ceny,
                                              cenx,
                                              position=True)
    else:
        psf_subimage = array.copy()

    if threshold:
        _, clipmed, clipstd = sigma_clipped_stats(psf_subimage, sigma=2)
        indi = np.where(psf_subimage <= clipmed + sigfactor * clipstd)
        subimnoise = np.random.randn(psf_subimage.shape[0],
                                     psf_subimage.shape[1]) * clipstd  #*50
        psf_subimage[indi] = subimnoise[indi]

    yme, xme = np.where(psf_subimage == psf_subimage.max())
    # Creating the 2D Gaussian model
    gauss = models.Gaussian2D(amplitude=psf_subimage.max(),
                              x_mean=xme,
                              y_mean=yme,
                              x_stddev=fwhmx * gaussian_fwhm_to_sigma,
                              y_stddev=fwhmy * gaussian_fwhm_to_sigma,
                              theta=theta)
    # Levenberg-Marquardt algorithm
    fitter = LevMarLSQFitter()
    y, x = np.indices(psf_subimage.shape)
    fit = fitter(gauss, x, y, psf_subimage, maxiter=1000, acc=1e-08)

    if crop:
        mean_y = fit.y_mean.value + suby
        mean_x = fit.x_mean.value + subx
    else:
        mean_y = fit.y_mean.value
        mean_x = fit.x_mean.value
    fwhm_y = fit.y_stddev.value * gaussian_sigma_to_fwhm
    fwhm_x = fit.x_stddev.value * gaussian_sigma_to_fwhm
    amplitude = fit.amplitude.value
    theta = fit.theta.value

    if debug:
        if threshold: msg = 'Subimage thresholded / Model / Residuals'
        else: msg = 'Subimage (no threshold) / Model / Residuals'
        pp_subplots(psf_subimage,
                    fit(x, y),
                    psf_subimage - fit(x, y),
                    colorb=True,
                    grid=True,
                    title=msg)
        print('FWHM_y =', fwhm_y)
        print('FWHM_x =', fwhm_x)
        print()
        print('centroid y =', mean_y)
        print('centroid x =', mean_x)
        print('centroid y subim =', fit.y_mean.value)
        print('centroid x subim =', fit.x_mean.value)
        print()
        print('peak =', amplitude)
        print('theta =', theta)

    if full_output:
        return pd.DataFrame({
            'centroid_y': mean_y,
            'centroid_x': mean_x,
            'fwhm_y': fwhm_y,
            'fwhm_x': fwhm_x,
            'amplitude': amplitude,
            'theta': theta
        }, index=[0])
    else:
        return mean_y, mean_x
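The gaussian_fwhm_to_sigma and gaussian_sigma_to_fwhm factors used above are plain constants from astropy.stats, with FWHM = 2*sqrt(2*ln 2)*sigma ~ 2.3548*sigma:

import numpy as np
from astropy.stats import gaussian_fwhm_to_sigma, gaussian_sigma_to_fwhm

# sanity check of the conversion constants used throughout these examples
assert np.isclose(gaussian_sigma_to_fwhm, 2. * np.sqrt(2. * np.log(2.)))
assert np.isclose(gaussian_fwhm_to_sigma * gaussian_sigma_to_fwhm, 1.)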
Code example #4
File: funcs.py  Project: LejayChen/photutils
def psf_photometry(data,
                   positions,
                   psf,
                   fitshape=None,
                   fitter=LevMarLSQFitter(),
                   unit=None,
                   wcs=None,
                   error=None,
                   mask=None,
                   pixelwise_error=True,
                   mode='sequential',
                   store_fit_info=False,
                   param_uncert=False):
    """
    Perform PSF/PRF photometry on the data.

    Given a PSF or PRF model, the model is fitted simultaneously or
    sequentially to the given positions to obtain an estimate of the
    flux. If required, coordinates are also tuned to match best the
    data.

    Parameters
    ----------
    data : array-like, `~astropy.io.fits.ImageHDU`, `~astropy.io.fits.HDUList`
        The 2-d array on which to perform photometry. ``data`` should be
        background-subtracted.  Units are used during the photometry,
        either provided along with the data array, or stored in the
        header keyword ``'BUNIT'``.
    positions : array-like of shape (2 or 3, N) or `~astropy.table.Table`
        Positions at which to *start* the fit for each object, in pixel
        coordinates. If array-like, it can be either (x_0, y_0) or (x_0,
        y_0, flux_0). If a table, the columns 'x_0' and 'y_0' must be
        present.  'flux_0' can also be provided to set initial fluxes.
        Additional columns of the form '<parametername>_0' will be used
        to set the initial guess for any parameters of the ``psf`` model
        that are not fixed.
    psf : `astropy.modeling.Fittable2DModel` instance
        PSF or PRF model to fit the data. Could be one of the models in
        this package like `~photutils.psf.sandbox.DiscretePRF`,
        `~photutils.psf.IntegratedGaussianPRF`, or any other suitable 2D
        model.  This function needs to identify three parameters
        (position of center in x and y coordinates and the flux) in
        order to set them to suitable starting values for each fit. The
        names of these parameters can be given as follows:

        - Set ``psf.psf_xname``, ``psf.psf_yname`` and
          ``psf.psf_fluxname`` to strings with the names of the respective
          psf model parameter.
        - If those attributes are not found, the names ``x_0``, ``y_0``
          and ``flux`` are assumed.

        `~photutils.psf.prepare_psf_model` can be used to prepare any 2D
        model to match these assumptions.
    fitshape : length-2 or None
        The shape of the region around the center of the target location
        to do the fitting in.  If None, fit the whole image without
        windowing. (See notes)
    fitter : an `astropy.modeling.fitting.Fitter` object
        The fitter object used to actually derive the fits. See
        `astropy.modeling.fitting` for more details on fitters.
    unit : `~astropy.units.UnitBase` instance, str
        An object that represents the unit associated with ``data``.
        Must be an `~astropy.units.UnitBase` object or a string
        parseable by the :mod:`~astropy.units` package. It overrides the
        ``data`` unit from the ``'BUNIT'`` header keyword and issues a
        warning if different. However an error is raised if ``data`` as
        an array already has a different unit.
    wcs : `~astropy.wcs.WCS`, optional
        Use this as the wcs transformation. It overrides any wcs
        transformation passed along with ``data`` either in the header
        or in an attribute.
    error : float or array_like, optional
        The pixel-wise Gaussian 1-sigma errors of the input ``data``.
        ``error`` is assumed to include *all* sources of error,
        including the Poisson error of the sources (see
        `~photutils.utils.calc_total_error`) .  ``error`` must have the
        same shape as the input ``data``.
    mask : array_like (bool), optional
        Mask to apply to the data.  Masked pixels are excluded/ignored.
    pixelwise_error : bool, optional
        If `True`, assume ``error`` varies significantly across the PSF
        and sum contribution from each pixel. If `False`, assume
        ``error`` does not vary significantly across the PSF and use the
        single value of ``error`` at the center of each PSF.  Default is
        `True`.
    mode : {'sequential'}
        One of the following modes to do PSF/PRF photometry:
            * 'sequential' (default)
                Fit PSF/PRF separately for the given positions.
            * (No other modes are yet implemented)
    store_fit_info : bool or list
        If False, the fitting information is discarded.  If True, the
        output table will have an additional column 'fit_message' with
        the message that came from the fit.  If a list, it will be
        populated with the ``fit_info`` dictionary of the fitter for
        each fit.
    param_uncert : bool (default=False)
        If True, the uncertainties on each parameter estimate will be
        stored in the output table. This option assumes that the fitter
        has the 'param_cov' key in its 'fit_info' dictionary.  See
        'fit_info' in `~astropy.modeling.fitting.LevMarLSQFitter`.

    Returns
    -------
    result_tab : `~astropy.table.Table`
        The results of the fitting procedure.  The fitted flux is in the
        column 'flux_fit', and the centroids are in 'x_fit' and 'y_fit'.
        If ``positions`` was a table, any columns in that table will be
        carried over to this table.  If any of the ``psf`` model
        parameters other than flux/x/y are not fixed, their results will
        be in the column '<parametername>_fit'.

    Notes
    -----
    Most fitters will not do well if ``fitshape`` is None because they
    will try to fit the whole image as just one star.

    This function is decorated with `~astropy.nddata.support_nddata` and
    thus supports `~astropy.nddata.NDData` objects as input.
    """

    (data, wcs_transformation, mask, error,
     pixelwise_error) = (_prepare_photometry_input(data, unit, wcs, mask,
                                                   error, pixelwise_error))

    # As long as models don't support quantities, we'll break that apart
    fluxunit = data.unit
    data = np.array(data)

    if error is not None:
        warnings.warn('Uncertainties are not yet supported in PSF fitting.',
                      AstropyUserWarning)
    weights = np.ones_like(data)

    # determine the names of the model's relevant attributes
    xname, yname, fluxname = _extract_psf_fitting_names(psf)

    # Prep the index arrays and the table for output
    indices = np.indices(data.shape)
    if hasattr(positions, 'colnames'):
        # quacks like a table, so assume it's a table
        if 'x_0' not in positions.colnames:
            raise ValueError('Input table does not have an x_0 column')
        if 'y_0' not in positions.colnames:
            raise ValueError('Input table does not have a y_0 column')
        result_tab = positions.copy()
    else:
        positions = np.array(positions, copy=False)
        if positions.shape[0] < 2 or positions.shape[0] > 3:
            raise ValueError('Positions should be a table or an array (2, N) '
                             'or (3, N)')

        result_tab = Table()
        result_tab['x_0'] = positions[0]
        result_tab['y_0'] = positions[1]
        if positions.shape[0] >= 3:
            result_tab['flux_0'] = positions[2]

    result_tab['x_fit'] = result_tab['x_0']
    result_tab['y_fit'] = result_tab['y_0']
    result_tab.add_column(
        Column(name='flux_fit',
               unit=fluxunit,
               data=np.empty(len(result_tab), dtype=data.dtype)))

    # prep for fitting
    psf = psf.copy()  # don't want to muck up whatever PSF the user gives us

    # maps input table name to parameter name
    pars_to_set = {'x_0': xname, 'y_0': yname}
    if 'flux_0' in result_tab.colnames:
        pars_to_set['flux_0'] = fluxname

    # maps output table name to parameter name
    pars_to_output = {'x_fit': xname, 'y_fit': yname, 'flux_fit': fluxname}

    for p, isfixed in psf.fixed.items():
        p0 = p + '_0'
        if p0 in result_tab.colnames and p not in (xname, yname, fluxname):
            pars_to_set[p0] = p
        pfit = p + '_fit'
        if not isfixed and p not in (xname, yname, fluxname):
            pars_to_output[pfit] = p

    fit_messages = None
    fit_infos = None
    if isinstance(store_fit_info, list):
        fit_infos = store_fit_info
    elif store_fit_info:
        fit_messages = []
    if param_uncert:
        if 'param_cov' in fitter.fit_info:
            uncert = []
        else:
            warnings.warn(
                'uncertainties on fitted parameters cannot be '
                'computed because fitter does not contain '
                '`param_cov` key in its `fit_info` dictionary.',
                AstropyUserWarning)
            param_uncert = False

    # Many fitters take a "weight" array, but no "mask".
    # Thus, we convert the mask to weights on 1 and 0. Unfortunately,
    # that only works if the values "behind the mask" are finite.
    if mask is not None:
        data = copy.deepcopy(data)
        data[mask] = 0
        weights[mask] = 0.

    if mode == 'sequential':
        for row in result_tab:
            for table_name, parameter_name in pars_to_set.items():
                setattr(psf, parameter_name, row[table_name])

            if fitshape is None:
                fitted = _call_fitter(fitter,
                                      psf,
                                      indices[1],
                                      indices[0],
                                      data,
                                      weights=weights)
            else:
                position = (row['y_0'], row['x_0'])
                y = extract_array(indices[0], fitshape, position)
                x = extract_array(indices[1], fitshape, position)
                sub_array_data = extract_array(data,
                                               fitshape,
                                               position,
                                               fill_value=0.)
                sub_array_weights = extract_array(weights,
                                                  fitshape,
                                                  position,
                                                  fill_value=0.)
                fitted = _call_fitter(fitter,
                                      psf,
                                      x,
                                      y,
                                      sub_array_data,
                                      weights=sub_array_weights)

            for table_name, parameter_name in pars_to_output.items():
                row[table_name] = getattr(fitted, parameter_name).value

            if fit_infos is not None:
                fit_infos.append(fitter.fit_info)
            if fit_messages is not None:
                fit_messages.append(fitter.fit_info['message'])
            if param_uncert:
                if fitter.fit_info['param_cov'] is not None:
                    uncert.append(
                        np.sqrt(np.diag(fitter.fit_info['param_cov'])))
                else:
                    warnings.warn(
                        'uncertainties on fitted parameters '
                        'cannot be computed because the fit may '
                        'be unsuccessful', AstropyUserWarning)
                    uncert.append((None, None, None))
    else:
        raise ValueError('Invalid photometry mode.')

    if fit_messages is not None:
        result_tab['fit_messages'] = fit_messages
    if param_uncert:
        uncert = np.array(uncert)
        i = 0
        for param in psf.param_names:
            if not getattr(psf, param).fixed:
                result_tab.add_column(
                    Column(name=param + "_fit_uncertainty",
                           unit=None,
                           data=uncert[:, i]))
                i += 1

    return result_tab
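The param_uncert branch above relies on LevMarLSQFitter exposing the parameter covariance matrix through fit_info['param_cov']. A standalone sketch of that extraction (synthetic data, illustrative values only):

import numpy as np
from astropy.modeling import models
from astropy.modeling.fitting import LevMarLSQFitter

x = np.linspace(0., 10., 100)
y = models.Gaussian1D(amplitude=3., mean=5., stddev=1.5)(x)
y += np.random.normal(0., 0.1, x.size)

fitter = LevMarLSQFitter()
fit = fitter(models.Gaussian1D(amplitude=2., mean=4., stddev=1.), x, y)

cov = fitter.fit_info['param_cov']  # None when the fit failed
if cov is not None:
    # 1-sigma uncertainties on (amplitude, mean, stddev), exactly as the
    # param_uncert branch of psf_photometry computes them
    print(np.sqrt(np.diag(cov)))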
Code example #5
File: gaussian.py  Project: Onoddil/photutils
def centroid_2dg(data, error=None, mask=None):
    """
    Calculate the centroid of a 2D array by fitting a 2D Gaussian (plus
    a constant) to the array.

    Non-finite values (e.g., NaN or inf) in the ``data`` or ``error``
    arrays are automatically masked. These masks are combined.

    Parameters
    ----------
    data : array_like
        The 2D data array.

    error : array_like, optional
        The 2D array of the 1-sigma errors of the input ``data``.

    mask : array_like (bool), optional
        A boolean mask, with the same shape as ``data``, where a `True`
        value indicates the corresponding element of ``data`` is masked.

    Returns
    -------
    centroid : `~numpy.ndarray`
        The ``x, y`` coordinates of the centroid.
    """
    from ..morphology import data_properties  # prevent circular imports

    data = np.ma.asanyarray(data)

    if mask is not None and mask is not np.ma.nomask:
        mask = np.asanyarray(mask)
        if data.shape != mask.shape:
            raise ValueError('data and mask must have the same shape.')
        data.mask |= mask

    if np.any(~np.isfinite(data)):
        data = np.ma.masked_invalid(data)
        warnings.warn(
            'Input data contains non-finite values (e.g., NaN or '
            'infs) that were automatically masked.', AstropyUserWarning)

    if error is not None:
        error = np.ma.masked_invalid(error)
        if data.shape != error.shape:
            raise ValueError('data and error must have the same shape.')
        data.mask |= error.mask
        weights = 1.0 / error.clip(min=1.e-30)
    else:
        weights = np.ones(data.shape)

    if np.ma.count(data) < 7:
        raise ValueError('Input data must have at least 7 unmasked values to '
                         'fit a 2D Gaussian plus a constant.')

    # assign zero weight to masked pixels
    if data.mask is not np.ma.nomask:
        weights[data.mask] = 0.

    mask = data.mask
    data.fill_value = 0.
    data = data.filled()

    # Subtract the minimum of the data as a rough background estimate.
    # This will also make the data values positive, preventing issues with
    # the moment estimation in data_properties. Moments from negative data
    # values can yield undefined Gaussian parameters, e.g., x/y_stddev.
    props = data_properties(data - np.min(data), mask=mask)

    constant_init = 0.  # subtracted data minimum above
    g_init = (Const2D(constant_init) +
              Gaussian2D(amplitude=np.ptp(data),
                         x_mean=props.xcentroid,
                         y_mean=props.ycentroid,
                         x_stddev=props.semimajor_sigma.value,
                         y_stddev=props.semiminor_sigma.value,
                         theta=props.orientation.value))
    fitter = LevMarLSQFitter()
    y, x = np.indices(data.shape)
    gfit = fitter(g_init, x, y, data, weights=weights)
    return np.array([gfit.x_mean_1.value, gfit.y_mean_1.value])
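A short usage sketch (centroid_2dg ships in photutils.centroids; the synthetic image below is purely illustrative):

import numpy as np
from astropy.modeling.models import Gaussian2D
from photutils.centroids import centroid_2dg

# synthetic star with a known sub-pixel centroid
y, x = np.mgrid[0:50, 0:50]
data = Gaussian2D(amplitude=100., x_mean=24.3, y_mean=25.7,
                  x_stddev=3., y_stddev=2.)(x, y)
print(centroid_2dg(data))  # ~ [24.3, 25.7]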
Code example #6
File: fiber_utils.py  Project: grzeimann/Remedy
def detect_sources(dx,
                   dy,
                   spec,
                   err,
                   mask,
                   def_wave,
                   psf,
                   ran,
                   scale,
                   log,
                   spec_res=5.6,
                   thresh=5.):
    '''
    Detection algorithm

    Parameters
    ----------
    dx : 1d numpy array
        delta_x or delta_ra in " for each fiber compared to a given 0, 0
    dy : 1d numpy array
        delta_y or delta_dec in " for each fiber compared to a given 0, 0
    spec : 2d numpy array
        spectrum (sky-subtracted) for each fiber
    err : 2d numpy array
        error spectrum for each fiber
    mask : 2d numpy array
        masked spectrum values for bad fiber to fiber, bad pixels, or bad chi2
        (note: recomputed below from the non-finite spectrum values)
    def_wave : 1d numpy array
        common wavelength grid for all spectra
    psf : list of 2d numpy arrays
        psf amplitude grid (psf[0]) and the x and y grids it is defined on
        (psf[1], psf[2])
    ran : list or tuple of 4 floats
        x and y limits of the output grid, (xmin, xmax, ymin, ymax), in "
    scale : float
        spaxel scale of the output grid in "
    log : logging.Logger
        logger for progress and diagnostic messages
    spec_res : float
        spectral resolution in pixels (2A) and refers to radius not fwhm
    thresh : float
        S/N threshold above which spaxels are treated as candidate detections

    Returns
    -------
    cube, errcube : 3d numpy arrays
        spectrally smoothed data and error cubes on the output grid
    origcube, origerrcube : 3d numpy arrays
        unsmoothed data and error cubes on the output grid
    L : 2d numpy array
        one row per detected source: x, y, wavelength, fwhm, chi2, amplitude,
        and S/N
    K : 3d numpy array
        per detected source: extracted spectrum, its error, and the fitted
        Gaussian model
    '''
    N1 = int((ran[1] - ran[0]) / scale) + 1
    N2 = int((ran[3] - ran[2]) / scale) + 1
    gridx, gridy = np.meshgrid(np.linspace(ran[0], ran[1], N1),
                               np.linspace(ran[2], ran[3], N2))
    T = np.array([psf[1].ravel(), psf[2].ravel()]).swapaxes(0, 1)
    I = LinearNDInterpolator(T, psf[0].ravel(), fill_value=0.0)
    cube = np.zeros((gridx.shape[0], gridx.shape[1], len(def_wave)))
    errcube = np.zeros((gridx.shape[0], gridx.shape[1], len(def_wave)))
    origcube = np.zeros((gridx.shape[0], gridx.shape[1], len(def_wave)))
    origerrcube = np.zeros((gridx.shape[0], gridx.shape[1], len(def_wave)))
    mask = ~np.isfinite(spec)  # note: this overwrites the input mask argument
    G = Gaussian1DKernel(spec_res / 2.35 / (def_wave[1] - def_wave[0]))
    cont = get_continuum(spec, nbins=25)
    S = spec - cont
    for i in np.arange(gridx.shape[0]):
        for j in np.arange(gridx.shape[1]):
            xg = gridx[i, j]
            yg = gridy[i, j]
            sel = np.where(np.sqrt((dx - xg)**2 + (dy - yg)**2) <= 4.0)[0]
            weights = I(dx[sel] - xg, dy[sel] - yg)
            norm = weights.sum()
            weights /= norm
            imask = ~(mask[sel])
            X = S[sel] * 1.
            X[mask[sel]] = 0.0
            Y = err[sel] * 1.
            Y[mask[sel]] = 0.0
            origcube[i,
                     j, :] = np.sum(weights[:, np.newaxis] * X * imask / Y**2,
                                    axis=0)
            origerrcube[i, j, :] = np.sqrt(
                (np.sum(weights[:, np.newaxis]**2 * imask / Y**2, axis=0)))
            w = np.sum(weights[:, np.newaxis] * imask, axis=0) * norm
            cube[i, j, w < 0.7] = np.nan
            errcube[i, j, w < 0.7] = np.nan
            WS = manual_convolution(origcube[i, j], G)
            WE = manual_convolution(origerrcube[i, j], G, error=True)
            #WS = origcube[i, j]
            #WE = origerrcube[i, j]
            cube[i, j, :] = WS
            errcube[i, j, :] = WE
    Y = cube / errcube
    bl, bm = biweight(Y.ravel(), calc_std=True)
    log.info('Error Correction: %0.2f' % bm)
    #Y[:] /= bm
    test = Y > thresh
    L = np.zeros((0, 7))
    K = np.zeros((0, len(def_wave), 3))
    log.info('Number of >thresh spaxels found: %i' % test.sum())
    if (test.sum() > 0) * (test.sum() < 2000):
        ids = np.where(test)
        Z = Y[ids[0], ids[1], ids[2]]
        sid = np.argsort(Z)[::-1]
        ids_sorted = (ids[0][sid], ids[1][sid], ids[2][sid])
        z = np.array([
            gridx[ids_sorted[0], ids_sorted[1]] * 3.,
            gridy[ids_sorted[0], ids_sorted[1]] * 3., def_wave[ids_sorted[2]]
        ]).swapaxes(0, 1)
        SN = Y[ids_sorted[0], ids_sorted[1], ids_sorted[2]]
        if z.shape[0] == 1:
            z = np.vstack([z, z])
            SN = np.hstack([SN, SN])
        clustering = AgglomerativeClustering(n_clusters=None,
                                             compute_full_tree=True,
                                             distance_threshold=50,
                                             linkage='complete').fit(z)

        z = np.array([
            gridx[ids_sorted[0], ids_sorted[1]],
            gridy[ids_sorted[0], ids_sorted[1]], def_wave[ids_sorted[2]]
        ]).swapaxes(0, 1)
        if z.shape[0] == 1:
            z = np.vstack([z, z])
        US = np.unique(clustering.labels_)
        log.info('Number of sources found: %i' % len(US))
        L = np.zeros((len(US), 7))
        K = np.zeros((len(US), len(def_wave), 3))
        fitter = LevMarLSQFitter()
        for i, ui in enumerate(US):
            sel = clustering.labels_ == ui
            L[i, 0] = np.mean(z[sel, 0])
            L[i, 1] = np.mean(z[sel, 1])
            L[i, 2] = np.mean(z[sel, 2])
            L[i, 6] = np.max(SN[sel])
            dsel = np.sqrt((gridx - L[i, 0])**2 + (gridy - L[i, 1])**2) < 2.5
            wi = int(np.interp(L[i, 2], def_wave, np.arange(len(def_wave))))
            x = gridx[dsel]
            y = gridy[dsel]
            v = cube[:, :, wi][dsel]
            fsel = np.isfinite(v)
            xc = np.sum(x[fsel] * v[fsel]) / np.sum(v[fsel])
            yc = np.sum(y[fsel] * v[fsel]) / np.sum(v[fsel])
            sel = np.where(np.sqrt((dx - xc)**2 + (dy - yc)**2) <= 4.0)[0]
            weights = I(dx[sel] - xc, dy[sel] - yc)
            imask = ~(mask[sel])
            X = S[sel] * 1.
            X[mask[sel]] = 0.0
            Y = err[sel] * 1.
            Y[mask[sel]] = 0.0
            spatial_spec = np.sum(
                weights[:, np.newaxis] * X * imask / Y**2, axis=0) / np.sum(
                    weights[:, np.newaxis]**2 * imask / Y**2, axis=0)
            spatial_spec_err = np.sqrt(
                np.sum(weights[:, np.newaxis] * imask, axis=0) /
                np.sum(weights[:, np.newaxis]**2 * imask / Y**2, axis=0))
            X = spec[sel] * 1.
            X[mask[sel]] = 0.0
            Y = err[sel] * 1.
            Y[mask[sel]] = 0.0
            spatial_spec_or = np.sum(
                weights[:, np.newaxis] * X * imask / Y**2, axis=0) / np.sum(
                    weights[:, np.newaxis]**2 * imask / Y**2, axis=0)
            spatial_spec_err_or = np.sqrt(
                np.sum(weights[:, np.newaxis] * imask, axis=0) /
                np.sum(weights[:, np.newaxis]**2 * imask / Y**2, axis=0))
            wsel = np.where(np.abs(def_wave - L[i, 2]) <= 8.)[0]
            if (~np.isfinite(spatial_spec[wsel])).sum() > 0.:
                L[i, :] = 0.0
                continue
            G = Gaussian1D(mean=L[i, 2], stddev=spec_res / 2.35)
            G.stddev.bounds = (4. / 2.35, 8. / 2.35)
            G.mean.bounds = (L[i, 2] - 4., L[i, 2] + 4.)
            fit = fitter(G, def_wave[wsel], spatial_spec[wsel])
            wc = fit.mean.value
            csel = np.where(np.abs(def_wave - L[i, 2]) <= 10.)[0]
            chi2 = (1. / (len(csel) - 3.) * np.sum(
                (fit(def_wave[csel]) - spatial_spec[csel])**2 /
                spatial_spec_err[csel]**2))
            L[i, 0] = xc
            L[i, 1] = yc
            L[i, 2] = wc
            L[i, 3] = fit.stddev.value * 2.35
            L[i, 4] = chi2
            L[i, 5] = fit.amplitude.value * 2.

            K[i, :, 0] = spatial_spec_or
            K[i, :, 1] = spatial_spec_err_or
            K[i, :, 2] = fit(def_wave)

    return cube, errcube, origcube, origerrcube, L, K
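get_continuum and manual_convolution are Remedy helpers not shown here. Given the error=True call above, a plausible reading of manual_convolution is a direct kernel sum with errors propagated in quadrature; this sketch is an assumption, not the project's source:

import numpy as np

def manual_convolution(a, kernel, error=False):
    # hypothetical: slide a normalized kernel along the spectrum; for error
    # spectra, add the weighted contributions in quadrature
    k = np.asarray(kernel.array if hasattr(kernel, 'array') else kernel, float)
    k = k / k.sum()
    a = np.asarray(a, dtype=float)
    half = len(k) // 2
    pad = np.pad(a, half, mode='edge')
    out = np.zeros_like(a)
    for i in range(len(a)):
        window = pad[i:i + len(k)]
        out[i] = (np.sqrt(np.sum((k * window)**2)) if error
                  else np.sum(k * window))
    return out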
Code example #7
def main():

    # setup and parse the command line
    parser = initialize_parser()
    args = parser.parse_args()

    # read in the observed spectrum
    # assumed to be astropy table compatible and include units
    specfile = args.spectrumfile
    outputname = specfile.split(".")[0]
    if not os.path.isfile(specfile):
        pack_path = pkg_resources.resource_filename("pahfit", "data/")
        test_specfile = "{}/{}".format(pack_path, specfile)
        if os.path.isfile(test_specfile):
            specfile = test_specfile
        else:
            raise ValueError(
                "Input spectrumfile {} not found".format(specfile))

    # get the table format (from extension of filename)
    tformat = specfile.split(".")[-1]
    if tformat == "ecsv":
        tformat = "ascii.ecsv"
    obs_spectrum = Table.read(specfile, format=tformat)
    obs_x = obs_spectrum["wavelength"].to(u.micron, equivalencies=u.spectral())
    obs_y = obs_spectrum["flux"].to(u.Jy,
                                    equivalencies=u.spectral_density(obs_x))
    obs_unc = obs_spectrum["sigma"].to(u.Jy,
                                       equivalencies=u.spectral_density(obs_x))

    # strip units as the observed spectrum is in the internal units
    obs_x = obs_x.value
    obs_y = obs_y.value
    weights = 1.0 / obs_unc.value

    # read in the pack file
    packfile = args.packfile
    if not os.path.isfile(packfile):
        pack_path = pkg_resources.resource_filename("pahfit", "packs/")
        test_packfile = "{}/{}".format(pack_path, packfile)
        if os.path.isfile(test_packfile):
            packfile = test_packfile
        else:
            raise ValueError("Input packfile {} not found".format(packfile))

    pmodel = PAHFITBase(obs_x,
                        obs_y,
                        estimate_start=args.estimate_start,
                        filename=packfile)

    # pick the fitter
    fit = LevMarLSQFitter()

    # fit
    obs_fit = fit(
        pmodel.model,
        obs_x,
        obs_y,
        weights=weights,
        maxiter=200,
        epsilon=1e-10,
        acc=1e-10,
    )
    print(fit.fit_info["message"])

    # save results to fits file
    pmodel.save(obs_fit, outputname, args.saveoutput)

    # plot result
    fontsize = 18
    font = {"size": fontsize}
    mpl.rc("font", **font)
    mpl.rc("lines", linewidth=2)
    mpl.rc("axes", linewidth=2)
    mpl.rc("xtick.major", width=2)
    mpl.rc("ytick.major", width=2)

    fig, ax = plt.subplots(figsize=(15, 10))

    pmodel.plot(ax, obs_x, obs_y, obs_fit)

    ax.set_yscale("linear")
    ax.set_xscale("log")

    # use the whitespace better
    fig.tight_layout()

    # show
    if args.showplot:
        plt.show()
    # save (always)
    fig.savefig("{}.{}".format(outputname, args.savefig))
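The unit handling above leans on astropy equivalencies: spectral() converts between wavelength and frequency, and spectral_density(wave) converts between flux-density units at the given wavelengths. A compact illustration with made-up values:

import astropy.units as u

wave = [5., 10., 20.] * u.micron
flux = [1., 2., 3.] * u.mJy

print(wave.to(u.GHz, equivalencies=u.spectral()))
print(flux.to(u.erg / u.s / u.cm**2 / u.AA,
              equivalencies=u.spectral_density(wave)))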
Code example #8
def __fit_PSF(image_file, mask_file=None, nstars=40,                
              thresh_sigma=5.0, pixelmin=20, elongation_lim=1.4, area_max=500,             
              cutout=35, 
              astrom_sigma=5.0, psf_sigma=5.0, alim=10000, clean=True, 
              source_lim=None, 
              write_ePSF=False, ePSF_output=None, 
              plot_ePSF=True, ePSF_plotname=None, 
              plot_residuals=False, resid_plotname=None,
              verbose=False):
    """    
    Input: 
        general:
        - filename for a **BACKGROUND-SUBTRACTED** image
        - filename for a mask image (optional; default None)
        - maximum number of stars to use (optional; default 40; set to None
          to impose no limit)
          
        source detection:
        - sigma threshold for source detection with image segmentation 
          (optional; default 5.0)
        - *minimum* number of isophotal pixels (optional; default 20)
        - *maximum* allowed elongation for sources found by image segmentation 
          (optional; default 1.4)
        - *maximum* allowed area for sources found by image segmentation 
          (optional; default 500 pix**2)
        - cutout size around each star in pix (optional; default 35 pix; must 
          be ODD, rounded down if even)
        
        astrometry.net:
        - sigma threshold for astrometry.net source detection image (optional; 
          default 5.0)
        - sigma of the Gaussian PSF of the image (optional; default 5.0)
        - maximum allowed source area in pix**2 for astrometry.net for 
          deblending (optional; default 10000; only relevant if no source list 
          file is provided)
        - whether to remove files output by image2xy once finished with them 
          (optional; default True)

        misc:
        - limit on number of sources to fit with ePSF (optional; default None 
          which imposes no limit)        
                
        writing, plotting, verbosity:
        - whether to write the derived ePSF to a fits file (optional; default 
          False)
        - name for output ePSF fits file (optional; default set below)
        - whether to plot the derived ePSF (optional; default True)
        - name for output ePSF plot (optional; default set below)
        - whether to plot the residuals of the iterative PSF fitting (optional;
          default False)
        - name for output residuals plot (optional; default set below)
        - be verbose (optional; default False)
    
    Uses image segmentation to obtain a list of sources in the image with their 
    x, y coordinates. Uses EPSFBuilder to empirically obtain the ePSF of these 
    stars. Optionally writes and/or plots the obtained ePSF. Finally, uses 
    astrometry.net to find all sources in the image, and fits them with the 
    empirically obtained ePSF.
    
    The ePSF obtained here should NOT be used in convolutions. Instead, it can 
    serve as a tool for estimating the seeing of an image. 
    
    Output: table containing the coordinates and instrumental magnitudes of the 
    detected, ePSF-fit sources
    """

    # load in data 
    image_data = fits.getdata(image_file)
    image_header = fits.getheader(image_file) 
    try:
        instrument = image_header["INSTRUME"]
    except KeyError:
        instrument = "Unknown"
    pixscale = image_header["PIXSCAL1"]
    
    ### SOURCE DETECTION

    ### use image segmentation to find sources with an area > pixelmin pix**2 
    ### which are above the threshold sigma*std 
    image_data = fits.getdata(image_file) # subfile data
    image_data = np.ma.masked_where(image_data==0.0, 
                                    image_data) # mask bad pixels
    
    ## build an actual mask
    mask = (image_data==0)
    if mask_file:
        mask = np.logical_or(mask, fits.getdata(mask_file))

    ## set detection standard deviation
    try:
        std = image_header["BKGSTD"] # header written by amakihi.bkgsub fn
    except KeyError:
        # make crude source mask, get standard deviation of background
        source_mask = make_source_mask(image_data, snr=3, npixels=5, 
                                       dilate_size=15, mask=mask)
        final_mask = np.logical_or(mask, source_mask)
        std = np.std(np.ma.masked_where(final_mask, image_data))
    
    ## use the segmentation image to get the source properties 
    # use <mask>, which does not mask sources
    segm = detect_sources(image_data, thresh_sigma*std, npixels=pixelmin,
                          mask=mask) 
    cat = source_properties(image_data, segm, mask=mask)

    ## get the catalog and coordinates for sources
    try:
        tbl = cat.to_table()
    except ValueError:
        print("SourceCatalog contains no sources. Exiting.")
        return
    
    # restrict elongation and area to obtain only unsaturated stars 
    tbl = tbl[(tbl["elongation"] <= elongation_lim)]
    tbl = tbl[(tbl["area"].value <= area_max)]

    sources = Table() # build a table 
    sources['x'] = tbl['xcentroid'] # for EPSFBuilder 
    sources['y'] = tbl['ycentroid']
    sources['flux'] = tbl['source_sum'].data/tbl["area"].data   
    sources.sort("flux")
    sources.reverse()
    
    if nstars:
        sources = sources[:min(nstars, len(sources))]

    ## setup: get WCS coords for all sources 
    w = wcs.WCS(image_header)
    sources["ra"], sources["dec"] = w.all_pix2world(sources["x"],
                                                    sources["y"], 1)
     
    ## mask out edge sources: 
    # a bounding circle for WIRCam, rectangle for MegaPrime
    xsize = image_data.shape[1]
    ysize = image_data.shape[0]
    if "WIRCam" in instrument:
        rad_limit = xsize/2.0
        dist_to_center = np.sqrt((sources['x']-xsize/2.0)**2 + 
                                 (sources['y']-ysize/2.0)**2)
        dmask = dist_to_center <= rad_limit
        sources = sources[dmask]
    else: 
        x_lims = [int(0.05*xsize), int(0.95*xsize)] 
        y_lims = [int(0.05*ysize), int(0.95*ysize)]
        dmask = (sources['x']>x_lims[0]) & (sources['x']<x_lims[1]) & (
                 sources['y']>y_lims[0]) & (sources['y']<y_lims[1])
        sources = sources[dmask]
        
    ## empirically obtain the effective Point Spread Function (ePSF)  
    nddata = NDData(image_data) # NDData object
    if mask_file: # supply a mask if needed 
        nddata.mask = fits.getdata(mask_file)
    if cutout%2 == 0: # if cutout even, subtract 1
        cutout -= 1
    stars = extract_stars(nddata, sources, size=cutout) # extract stars

    ## build the ePSF
    nstars_epsf = len(stars.all_stars) # no. of stars used in ePSF building
    
    if nstars_epsf == 0:
        print("\nNo valid sources were found to build the ePSF with the given"+
              " conditions. Exiting.")
        return
    
    if verbose:
        print(f"\n{nstars_epsf} stars used in building the ePSF")
        
    start = timer()
    epsf_builder = EPSFBuilder(oversampling=1, maxiters=7, # build it
                               progress_bar=False)
    epsf, fitted_stars = epsf_builder(stars)
    epsf_data = epsf.data
    
    end = timer() # timing 
    time_elaps = end-start
    
    # print ePSF FWHM, if desired
    print(f"Time required for ePSF building {time_elaps:.2f} s\n")
    if verbose: 
        ePSF_FWHM(epsf_data, True)

    epsf_hdu = fits.PrimaryHDU(data=epsf_data)
    if write_ePSF: # write, if desired
        if not(ePSF_output):
            ePSF_output = image_file.replace(".fits", "_ePSF.fits")
            
        epsf_hdu.writeto(ePSF_output, overwrite=True, output_verify="ignore")
    
    psf_model = epsf # set the model
    psf_model.x_0.fixed = True # fix centroids (known beforehand) 
    psf_model.y_0.fixed = True
 
    ### USE ASTROMETRY.NET TO FIND SOURCES TO FIT  
    # -b --> no background-subtraction
    # -O --> overwrite
    # -p <astrom_sigma> --> significance
    # -w <psf_sigma> --> estimated PSF sigma 
    # -m <alim> --> max object size for deblending is <alim>      
    options = f"-O -b -p {astrom_sigma} -w {psf_sigma}"
    options += f" -m {alim}"
    run(f"image2xy {options} {image_file}", shell=True)
    image_sources_file = image_file.replace(".fits", ".xy.fits")
    image_sources = fits.getdata(image_sources_file)
    if clean:
        run(f"rm {image_sources_file}", shell=True) # this file is not needed

    print(f'\n{len(image_sources)} stars at >{astrom_sigma}'+
          f' sigma found in image {re.sub(".*/", "", image_file)}'+
          ' with astrometry.net')   

    astrom_sources = Table() # build a table 
    astrom_sources['x_mean'] = image_sources['X'] # for BasicPSFPhotometry
    astrom_sources['y_mean'] = image_sources['Y']
    astrom_sources['flux'] = image_sources['FLUX']
    
    # initial guesses for centroids, fluxes
    pos = Table(names=['x_0', 'y_0','flux_0'], 
                data=[astrom_sources['x_mean'], astrom_sources['y_mean'], 
                      astrom_sources['flux']]) 

    ### FIT THE ePSF TO ALL DETECTED SOURCES 
    start = timer() # timing the fit 
    
    # sources separated by less than this critical separation are grouped 
    # together when fitting the PSF via the DAOGROUP algorithm
    sigma_psf = 2.0 # 2 pix
    crit_sep = 2.0*sigma_psf*gaussian_sigma_to_fwhm  # twice the PSF FWHM
    daogroup = DAOGroup(crit_sep) 

    # an astropy fitter, does Levenberg-Marquardt least-squares fitting
    fitter_tool = LevMarLSQFitter()
    
    # if we have a limit on the number of sources to fit
    if source_lim:
        try: 
            import random # pick a given no. of random sources 
            source_rows = random.choices(astrom_sources, k=source_lim)
            astrom_sources = Table(names=['x_mean', 'y_mean', 'flux'], 
                                   rows=source_rows)
            pos = Table(names=['x_0', 'y_0','flux_0'], 
                        data=[astrom_sources['x_mean'], 
                              astrom_sources['y_mean'], 
                              astrom_sources['flux']])
            
            
        except IndexError:
            print("The input source limit exceeds the number of sources"+
                  " detected by astrometry, so no limit is imposed.\n")
    
    photometry = BasicPSFPhotometry(group_maker=daogroup,
                            bkg_estimator=None, # bg subtract already done
                            psf_model=psf_model,
                            fitter=fitter_tool,
                            fitshape=(11,11))
    
    result_tab = photometry(image=image_data, init_guesses=pos) # results
    residual_image = photometry.get_residual_image() # residuals of PSF fit
    residual_image = np.ma.masked_where(mask, residual_image)
    residual_image.fill_value = 0 # set to zero
    residual_image = residual_image.filled()

    
    end = timer() # timing 
    time_elaps = end - start
    print(f"Time required fit ePSF to all sources {time_elaps:.2f} s\n")
    
    # include WCS coordinates
    pos["ra"], pos["dec"] = w.all_pix2world(pos["x_0"], pos["y_0"], 1)
    result_tab.add_column(pos['ra'])
    result_tab.add_column(pos['dec'])
    
    # mask out negative flux_fit values in the results 
    mask_flux = (result_tab['flux_fit'] >= 0.0)
    psf_sources = result_tab[mask_flux] # PSF-fit sources 
    
    # compute magnitudes and their errors and add to the table
    # error = (2.5/(ln(10)*flux_fit))*flux_unc
    mag_fit = -2.5*np.log10(psf_sources['flux_fit']) # instrumental mags
    mag_fit.name = 'mag_fit'
    mag_unc = 2.5/(psf_sources['flux_fit']*np.log(10))
    mag_unc *= psf_sources['flux_unc']
    mag_unc.name = 'mag_unc' 
    psf_sources['mag_fit'] = mag_fit
    psf_sources['mag_unc'] = mag_unc
    
    # mask entries with large magnitude uncertainties 
    mask_unc = psf_sources['mag_unc'] < 0.4
    psf_sources = psf_sources[mask_unc]
    
    if plot_ePSF: # if we wish to see the ePSF
        plt.figure(figsize=(10,9))
        plt.imshow(epsf_data, origin='lower', aspect=1, cmap='magma',
                   interpolation="nearest")
        plt.xlabel("Pixels", fontsize=16)
        plt.ylabel("Pixels", fontsize=16)
        plt.title("Effective Point-Spread Function (1 pixel = "
                                                    +str(pixscale)+
                                                    '")', fontsize=16)
        plt.colorbar(orientation="vertical", fraction=0.046, pad=0.08)
        plt.rc("xtick",labelsize=16) # not working?
        plt.rc("ytick",labelsize=16)
        
        if not(ePSF_plotname):
            ePSF_plotname = image_file.replace(".fits", "_ePSF.png")
        plt.savefig(ePSF_plotname, bbox_inches="tight")
        plt.close()
    
    if plot_residuals: # if we wish to see a plot of the residuals
        if "WIRCam" in instrument:
            plt.figure(figsize=(10,9))
        else:
            plt.figure(figsize=(12,14))
        ax = plt.subplot(projection=w)
        plt.imshow(residual_image, cmap='magma', aspect=1, 
                   interpolation='nearest', origin='lower')
        plt.xlabel("RA (J2000)", fontsize=16)
        plt.ylabel("Dec (J2000)", fontsize=16)
        plt.title("PSF residuals", fontsize=16)
        cb = plt.colorbar(orientation='vertical', fraction=0.046, pad=0.08) 
        cb.set_label(label="ADU", fontsize=16)
        ax.coords["ra"].set_ticklabel(size=15)
        ax.coords["dec"].set_ticklabel(size=15)
        
        if not(resid_plotname):
            resid_plotname = image_file.replace(".fits", "_ePSFresiduals.png")
        plt.savefig(resid_plotname, bbox_inches="tight")
        plt.close()
    
    return psf_sources     
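The magnitude uncertainty above is first-order error propagation of m = -2.5*log10(f), which gives sigma_m = 2.5/(ln(10)*f) * sigma_f. A quick numeric check with made-up values:

import numpy as np

flux_fit, flux_unc = 1.0e4, 1.0e2  # illustrative values
mag_fit = -2.5 * np.log10(flux_fit)
mag_unc = 2.5 / (np.log(10.) * flux_fit) * flux_unc
print(mag_fit, mag_unc)  # -10.0, ~0.011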
Code example #9
    parhist[band][len(source_pars)] = []
    result4b = lmfit.minimize(residual,
                              source_pars,
                              epsfcn=epsfcn,
                              kws=dict(data=data_nodisk, model=ptsrcmodel))
    print(
        "Smoothed linear fit parameters with horizontally smeared point source (many pars held fixed):"
    )
    result4b.params.pretty_print()
    #print("red Chi^2: {0:0.3g}".format(result4.chisqr / (ndata - result4.nvarys)))
    print("red Chi^2: {0:0.3g}".format(result4b.redchi))
    print(result4b.message)
    print()

    y_, x_ = np.indices(data_nodisk.shape)
    apylmfit = LevMarLSQFitter()
    # the weights don't matter.
    # I feel like they should, but maybe there's some theorem I'm not
    # appreciating that says that the parameter constraints are independent of
    # the errors on the data...
    # if I'm wrong, this is a bug in astropy.
    astropy_fit_results = apylmfit(
        Gaussian2D(
            amplitude=source_pars['ptsrcamp'].value,
            x_mean=source_pars['ptsrcx'].value,
            y_mean=source_pars['ptsrcy'].value,
            x_stddev=2,
        ),
        x_,
        y_,
        data_nodisk,
    )
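For reference on the comment above: the astropy fitter does accept per-point weights (conventionally 1/sigma) as fitter(model, x, y, z, weights=...); how they propagate into fit_info['param_cov'] is worth checking against your astropy version rather than assuming. A minimal sketch with synthetic data:

import numpy as np
from astropy.modeling.models import Gaussian2D
from astropy.modeling.fitting import LevMarLSQFitter

y_, x_ = np.indices((30, 30))
img = Gaussian2D(amplitude=5., x_mean=14., y_mean=15.,
                 x_stddev=2., y_stddev=2.)(x_, y_)
img += np.random.normal(0., 0.1, img.shape)

fitter = LevMarLSQFitter()
fit = fitter(Gaussian2D(amplitude=3., x_mean=13., y_mean=14.,
                        x_stddev=2., y_stddev=2.),
             x_, y_, img, weights=np.full(img.shape, 10.))
print(fitter.fit_info['param_cov'] is not None)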
Code example #10
def epsf_phot(target, centroided_sources, plots=False):
    def hmsm_to_days(hour=0, minute=0, sec=0, micro=0):
        """
        Convert hours, minutes, seconds, and microseconds to fractional days.
        
        """
        days = sec + (micro / 1.e6)
        days = minute + (days / 60.)
        days = hour + (days / 60.)
        return days / 24.
    
    def date_to_jd(year,month,day):
        """
        Convert a date to Julian Day.
        
        Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet', 
            4th ed., Duffet-Smith and Zwart, 2011.
        
        """
        if month == 1 or month == 2:
            yearp = year - 1
            monthp = month + 12
        else:
            yearp = year
            monthp = month
        
        # this checks where we are in relation to October 15, 1582, the beginning
        # of the Gregorian calendar.
        if ((year < 1582) or
            (year == 1582 and month < 10) or
            (year == 1582 and month == 10 and day < 15)):
            # before start of Gregorian calendar
            B = 0
        else:
            # after start of Gregorian calendar
            A = math.trunc(yearp / 100.)
            B = 2 - A + math.trunc(A / 4.)
            
        if yearp < 0:
            C = math.trunc((365.25 * yearp) - 0.75)
        else:
            C = math.trunc(365.25 * yearp)
            
        D = math.trunc(30.6001 * (monthp + 1))
        
        jd = B + C + D + day + 1720994.5
        
        return jd

    pines_path = pines_dir_check()
    short_name = short_name_creator(target)
    reduced_path = pines_path/('Objects/'+short_name+'/reduced/')
    reduced_filenames = natsort.natsorted([x.name for x in reduced_path.glob('*.fits')])
    reduced_files = np.array([reduced_path/i for i in reduced_filenames])

    centroided_sources.columns = centroided_sources.columns.str.strip()
    source_names = natsort.natsorted(list(set([i.split(' ')[0]+' '+i.split(' ')[1] for i in centroided_sources.keys() if (i[0] == '2') or (i[0] == 'R')])))
    
    #Create output plot directories for each source.
    if plots:
        for name in source_names:
            #If the folders are already there, delete them. 
            source_path = (pines_path/('Objects/'+short_name+'/psf_phot/'+name+'/'))
            if source_path.exists():
                shutil.rmtree(source_path)
            #Create folders.
            os.mkdir(source_path)

    #Declare a new dataframe to hold the information for all targets for this .
    columns = ['Filename', 'Time UT', 'Time JD', 'Airmass', 'Seeing']
    for i in range(0, len(source_names)):
        columns.append(source_names[i]+' Flux')
        columns.append(source_names[i]+' Flux Error')
    psf_df = pd.DataFrame(index=range(len(reduced_files)), columns=columns)
    output_filename = pines_path/('Objects/'+short_name+'/psf_phot/'+short_name+'_psf_phot.csv')

    for i in range(0, len(reduced_files)):
        #Read in image data/header. 
        file = reduced_files[i]
        data = fits.open(file)[0].data
        header = fits.open(file)[0].header
        print('{}, image {} of {}.'.format(file.name, i+1, len(reduced_files)))

        #Read in some supporting information.
        log_path = pines_path/('Logs/'+file.name.split('.')[0]+'_log.txt')
        log = pines_log_reader(log_path)
        date_obs = header['DATE-OBS']
        #Catch a case that can cause datetime strptime to crash; Mimir headers sometimes have DATE-OBS with seconds specified as 010.xx seconds, when it should be 10.xx seconds. 
        if len(date_obs.split(':')[-1].split('.')[0]) == 3:
            date_obs = date_obs.split(':')[0] + ':' + date_obs.split(':')[1] + ':' + date_obs.split(':')[-1][1:]
        #Keep a try/except clause here in case other unknown DATE-OBS formats pop up. 
        try:
            date = datetime.datetime.strptime(date_obs, '%Y-%m-%dT%H:%M:%S.%f')
        except ValueError:
            print('Header DATE-OBS format does not match the format code in strptime! Inspect/correct the DATE-OBS value.')
            pdb.set_trace()
        
        days = date.day + hmsm_to_days(date.hour,date.minute,date.second,date.microsecond)
        jd = date_to_jd(date.year,date.month,days)
        psf_df['Filename'][i] = file.name
        psf_df['Time UT'][i] = header['DATE-OBS']
        psf_df['Time JD'][i] = jd
        psf_df['Airmass'][i] = header['AIRMASS']
        psf_df['Seeing'][i] = log['X seeing'][np.where(log['Filename'] == file.name.split('_')[0]+'.fits')[0][0]]
        
        #Read in source centroids for this image
        x = np.zeros(len(source_names))
        y = np.zeros(len(source_names))
        for j in range(len(source_names)):
            source = source_names[j]
            x[j] = centroided_sources[source+' X'][i]
            y[j] = centroided_sources[source+' Y'][i]

        #Extract pixel cutouts of our stars. Stars too close to the image boundaries cannot be extracted, so they should be excluded.
        size = 13
        hsize = (size - 1) / 2
        #mask = ((x > hsize) & (x < (data.shape[1] -1 - hsize)) & (y > hsize) & (y < (data.shape[0] -1 - hsize)) & (y > 100) & (y < 923))

        #Create table of good star positions
        stars_tbl = Table()
        stars_tbl['x'] = x
        stars_tbl['y'] = y
        
        #Subtract background (star cutouts from which we build the ePSF must have background subtracted).
        mean_val, median_val, std_val = sigma_clipped_stats(data, sigma=2.)  
        data -= median_val
        
        #Replace nans in data using Gaussian. 
        # kernel = Gaussian2DKernel(x_stddev=0.5)
        # data = interpolate_replace_nans(data, kernel)

        #The extract_stars() function requires the input data as an NDData object. 
        nddata = NDData(data=data)  

        #Extract star cutouts.
        stars = extract_stars(nddata, stars_tbl, size=size)  
                        

        #Plot. 
        nrows = 5
        ncols = 5
        fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(10, 10), squeeze=True)
        ax = ax.ravel()
        for j in range(min(len(stars), nrows * ncols)):
            norm = simple_norm(stars[j], 'log', percent=99.)
            ax[j].imshow(stars[j].data, norm=norm, origin='lower', cmap='viridis')

        #pdb.set_trace()

        #Construct the ePSF using the star cutouts.
        epsf_fitter = EPSFFitter()
        epsf_builder = EPSFBuilder(maxiters=4, progress_bar=False, fitter=epsf_fitter)   

        try:
            epsf, fitted_stars = epsf_builder(stars)
            output_filename = pines_path/('Objects/'+short_name+'/psf_phot/'+short_name+'_psf_phot.csv')

            for j in range(len(stars)):
                star = stars[j]
                source_name = source_names[j]
                sigma_psf = 1.85

                dtype = [('x_0', 'f8'), ('y_0', 'f8')]
                pos = Table(data=np.zeros(1, dtype=dtype))
                source_x = stars_tbl['x'][j]
                source_y = stars_tbl['y'][j]
                pos['x_0'] = source_x - int(source_x - size/2 + 1)
                pos['y_0'] = source_y - int(source_y - size/2 + 1)

                daogroup = DAOGroup(4.0*sigma_psf*gaussian_sigma_to_fwhm)
                mmm_bkg = MMMBackground()
                photometry = BasicPSFPhotometry(group_maker=daogroup,
                                    bkg_estimator=mmm_bkg,
                                    psf_model=epsf,
                                    fitter=LevMarLSQFitter(),
                                    fitshape=(13,13),
                                    aperture_radius=4.)
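                #fitshape is the pixel region fit around each star;
                #aperture_radius sets the aperture for the initial flux estimate.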
                

                result_tab = photometry(image=star, init_guesses=pos)
                residual_image = photometry.get_residual_image()
                psf_df[source_name+' Flux'][i] = result_tab['flux_fit'][0]
                psf_df[source_name+' Flux Error'][i] = result_tab['flux_unc'][0]

                if plots:
                    fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(12,4))
                    im = ax[0].imshow(star, origin='lower')
                    divider = make_axes_locatable(ax[0])
                    cax = divider.append_axes('right', size='5%', pad=0.05)
                    fig.colorbar(im, cax=cax, orientation='vertical')
                    ax[0].plot(result_tab['x_fit'][0], result_tab['y_fit'][0], 'rx')
                    ax[0].set_title('Data')

                    im2 = ax[1].imshow(epsf.data, origin='lower')
                    ax[1].set_title('EPSF Model')
                    divider = make_axes_locatable(ax[1])
                    cax = divider.append_axes('right', size='5%', pad=0.05)
                    fig.colorbar(im2, cax=cax, orientation='vertical')

                    im3 = ax[2].imshow(residual_image, origin='lower')
                    ax[2].set_title('Residual Image')
                    divider = make_axes_locatable(ax[2])
                    cax = divider.append_axes('right', size='5%', pad=0.05)
                    fig.colorbar(im3, cax=cax, orientation='vertical')
                    plt.suptitle(source_name+'\n'+reduced_files[i].name+', image '+str(i+1)+' of '+str(len(reduced_files)))
                    plt.subplots_adjust(wspace=0.5, top=0.95, bottom = 0.05)
                    plot_output_name = pines_path/('Objects/'+short_name+'/psf_phot/'+source_name+'/'+str(i).zfill(4)+'.jpg')
                    plt.savefig(plot_output_name)
                    plt.close()
        except Exception:
            print('')
            print('EPSF BUILDER FAILED, SKIPPING IMAGE.')
            print('')
        #Plot the ePSF. 
        # plt.figure()
        # norm = simple_norm(epsf.data, 'log', percent=99.)
        # plt.imshow(epsf.data, norm=norm, origin='lower', cmap='viridis')
        # cb = plt.colorbar()
        # plt.tight_layout()   

        

    print('Saving psf photometry output to {}.'.format(output_filename))
    with open(output_filename, 'w') as f:
        for j in range(len(psf_df)):
            if j == 0:
                f.write('{:>21s}, {:>22s}, {:>17s}, {:>7s}, {:>7s}, '.format('Filename', 'Time UT', 'Time JD', 'Airmass', 'Seeing'))
                for i in range(len(source_names)):
                    if i != len(source_names) - 1:
                        f.write('{:>20s}, {:>26s}, '.format(source_names[i]+' Flux', source_names[i]+' Flux Error'))
                    else:
                        f.write('{:>20s}, {:>26s}\n'.format(source_names[i]+' Flux', source_names[i]+' Flux Error'))

            format_string = '{:21s}, {:22s}, {:17.9f}, {:7.2f}, {:7.1f}, '

            #If the seeing value for this image is 'nan' (a string), convert it to a float. 
            #TODO: Not sure why it's being read in as a string, fix that. 
            if isinstance(psf_df['Seeing'][j], str):
                psf_df['Seeing'][j] = float(psf_df['Seeing'][j])

            #Do a try/except clause for writeout, in case it breaks in the future. 
            try:
                f.write(format_string.format(psf_df['Filename'][j], psf_df['Time UT'][j], psf_df['Time JD'][j], psf_df['Airmass'][j], psf_df['Seeing'][j]))
            except Exception:
                print('Writeout failed! Inspect quantities you are trying to write out.')
                pdb.set_trace()
            for i in range(len(source_names)):                    
                if i != len(source_names) - 1:
                    format_string = '{:20.11f}, {:26.11f}, '
                else:
                    format_string = '{:20.11f}, {:26.11f}\n'
                
                f.write(format_string.format(psf_df[source_names[i]+' Flux'][j], psf_df[source_names[i]+' Flux Error'][j]))
    print('')    
    return
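
The example above interleaves PINES bookkeeping with the core photutils ePSF workflow. As a minimal sketch of just that chain (a hedged sketch, assuming the legacy photutils BasicPSFPhotometry API used throughout these examples, a background-subtracted 2D array data, and star position arrays x and y; the helper name is illustrative):

import numpy as np
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.nddata import NDData
from astropy.table import Table
from photutils.background import MMMBackground
from photutils.psf import (BasicPSFPhotometry, DAOGroup, EPSFBuilder,
                           extract_stars)

def epsf_photometry_sketch(data, x, y, size=13):
    #Cut out stamps around each star.
    stars_tbl = Table(data={'x': x, 'y': y})
    stars = extract_stars(NDData(data=data), stars_tbl, size=size)
    #Build the effective PSF from the stamps.
    epsf, fitted_stars = EPSFBuilder(maxiters=4, progress_bar=False)(stars)
    #Fit the ePSF to the image at the known positions.
    phot = BasicPSFPhotometry(group_maker=DAOGroup(7.),
                              bkg_estimator=MMMBackground(),
                              psf_model=epsf,
                              fitter=LevMarLSQFitter(),
                              fitshape=(size, size),
                              aperture_radius=4.)
    init = Table(data={'x_0': x, 'y_0': y})
    return phot(image=data, init_guesses=init)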
           
Code example #11
0
        def SubmitEvent(self):

            #Not a fan of globals but this is the easiest way to grab the file location
            global fileLocation
            #sigma_psf = 2.88
            #Grab the Sigma from the Entry box in the GUI
            SigmaPSF = SigmaPSFentry.get()
            #Turn the string into a float
            sigma_psf = float(SigmaPSF)
            #Grab the number of iterations from Entry box in GUI
            N_iters1 = nitersEntry.get()
            #Turn the string into a float
            N_iters = float(N_iters1)
            #Test cases to make sure that information was flowing from the GUI to the program
            #print(SigmaPSF)
            #print(N_iters)

            #Open the file as a fits (allows us to handle it) then turn that into readable data.
            with fits.open(fileLocation) as hdul:
                image = hdul[0].data

            #automatically gathered information needed to run the Star Finder
            bkgrms = MADStdBackgroundRMS()
            std = bkgrms(image)

            #Find the stars
            iraffind = IRAFStarFinder(threshold=3.5 * std,
                                      fwhm=sigma_psf * gaussian_sigma_to_fwhm,
                                      minsep_fwhm=0.01,
                                      roundhi=5.0,
                                      roundlo=-5.0,
                                      sharplo=0.0,
                                      sharphi=2.0)
            #Group the stars
            daogroup = DAOGroup(2.0 * sigma_psf * gaussian_sigma_to_fwhm)

            #More automatically gathered info needed for IS-PSFPhotometry to take place
            mmm_bkg = MMMBackground()
            fitter = LevMarLSQFitter()
            #Grabbed from the user input
            psf_model = IntegratedGaussianPRF(sigma=sigma_psf)
            #Run IS-PSFPhotometry
            photometry = IterativelySubtractedPSFPhotometry(
                finder=iraffind,
                group_maker=daogroup,
                bkg_estimator=mmm_bkg,
                psf_model=psf_model,
                fitter=LevMarLSQFitter(),
                niters=N_iters,
                fitshape=(11, 11))
            #Do photometry on the image
            result_tab = photometry(image=image)
            #grab the residual image
            residual_image = photometry.get_residual_image()

            #Get the results of the photometry and print the aspects we want.
            phot_results = result_tab  #Reuse the table from the photometry run above.
            with open("output.txt", "w") as text_file:
                print(phot_results['x_fit', 'y_fit', 'flux_fit'],
                      file=text_file)
            print(phot_results['x_fit', 'y_fit', 'flux_fit'])
            print("Sum of pixels: {}".format(sum(sum(residual_image))))
            #Plot images made#
            #Start by creating plots.
            plt.subplot(1, 5, 1)
            #Show the first plot (which is just the raw image)
            plt.imshow(image,
                       cmap='viridis',
                       aspect=1,
                       interpolation='nearest',
                       origin='lower')
            plt.title('Raw')
            plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04)
            #Create the second plot
            plt.subplot(1, 5, 2)
            #Show the residual_image
            plt.imshow(residual_image,
                       cmap='viridis',
                       aspect=1,
                       interpolation='nearest',
                       origin='lower')
            plt.title('PSF')
            plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04)
            #Draw in the sum of pixels.
            plt.text(0,
                     65,
                     "Sum of pixels: {}".format(sum(sum(residual_image))),
                     fontsize=7)
            #Create the third plot which is the subtracted images combined.
            sb = image - residual_image
            plt.subplot(1, 5, 3)
            plt.imshow(sb,
                       cmap='viridis',
                       aspect=1,
                       interpolation='nearest',
                       origin='lower')
            plt.title('PSF-S')
            plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04)

            with open("AP_RI.txt", "w") as f:
                for _ in range(len(residual_image)):
                    f.write(str(residual_image[_]))
            with open("AP_BS.txt", "w") as f:
                for _ in range(len(sb)):
                    f.write(str(sb[_]))

            print("Starting creation of CSV")
            subprocess.run(['py', 'create_CSV.py'], shell=False)

            print("Starting creation of Stats")
            subprocess.run(['py', 'create_info.py'], shell=False)

            print("Starting Threshold")
            subprocess.run(['py', 'threshold.py'], shell=False)

            with open("APC_Res.csv", "r") as f:
                APC_Res = f.read()
            APC_Res = APC_Res.split(",")
            APC_Res = [float(i) for i in APC_Res]

            #Reshape the flat APC_Res list into a square 2D array
            #(side length = the square root of the pixel count).
            SqrPixels = int(math.sqrt(len(APC_Res)))
            Corrected_Res = np.reshape(np.array(APC_Res[:SqrPixels * SqrPixels]),
                                       (SqrPixels, SqrPixels))

            Correct_BS = image - Corrected_Res
            plt.subplot(1, 5, 4)
            plt.imshow(Corrected_Res,
                       cmap='viridis',
                       aspect=1,
                       interpolation='nearest',
                       origin='lower')
            plt.title('CPSF')
            plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04)

            plt.subplot(1, 5, 5)
            plt.imshow(Correct_BS,
                       cmap='viridis',
                       aspect=1,
                       interpolation='nearest',
                       origin='lower')
            plt.title('CPSF-S')
            plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04)

            #Number of bins
            n_bins = 20
            #This layout took some tinkering and documentation examples to get right.
            fig, axs = plt.subplots(1, 2)

            # We can set the number of bins with the `bins` kwarg
            axs[0].hist(residual_image, bins=n_bins)
            plt.title('Residual Image Hist')
            axs[1].hist(sb, bins=n_bins)
            plt.title('Background Subtracted Hist')
            #plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04)
            #All Pixels from residual image

            fig = plt.figure()
            ax = fig.add_subplot(111, projection='3d')
            delta = (6 * (1 / len(sb)))

            nx = ny = np.arange(-3.0, 3.0, delta)
            X, Y = np.meshgrid(nx, ny)
            #print(X)
            #print(Y)
            x, y, z = X * len(sb), Y * len(sb), sb
            ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap='viridis')

            figi = plt.figure()
            axi = figi.add_subplot(111, projection='3d')
            deltai = (6 * (1 / len(sb)))

            nxi = nyi = np.arange(-3.0, 3.0, deltai)
            Xi, Yi = np.meshgrid(nxi, nyi)
            #print(X)
            #print(Y)
            xi, yi, zi = Xi * len(Correct_BS), Yi * len(Correct_BS), Correct_BS
            axi.plot_surface(xi, yi, zi, rstride=1, cstride=1, cmap='viridis')

            plt.show()
Code example #12
0
File: nikamap.py Project: abeelen/nikamap
    def phot_sources(self, sources=None, peak=True, psf=True):

        if sources is None:
            sources = self.sources

        xx, yy = self.wcs.world_to_pixel_values(sources["ra"], sources["dec"])

        x_idx = np.floor(xx + 0.5).astype(int)
        y_idx = np.floor(yy + 0.5).astype(int)
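
        # np.floor(x + 0.5) rounds to the nearest pixel, e.g. 3.6 -> 4 and
        # 3.4 -> 3, mapping world positions onto array indices.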

        if peak:
            # Crude Peak Photometry
            # From pixel indexes to array indexing

            sources["flux_peak"] = Column(self.data[y_idx, x_idx], unit=self.unit * u.beam).to(u.mJy)
            sources["eflux_peak"] = Column(self.uncertainty.array[y_idx, x_idx], unit=self.unit * u.beam).to(u.mJy)

        if psf:
            # BasicPSFPhotometry with fixed positions

            sigma_psf = self.beam.sigma_pix.value

            # Using an IntegratedGaussianPRF can cause bias in the photometry
            # TODO: Check the NIKA2 calibration scheme
            # from photutils.psf import IntegratedGaussianPRF
            # psf_model = IntegratedGaussianPRF(sigma=sigma_psf)
            psf_model = CircularGaussianPSF(sigma=sigma_psf)

            psf_model.x_0.fixed = True
            psf_model.y_0.fixed = True

            daogroup = DAOGroup(3 * self.beam.fwhm_pix.value)
            mmm_bkg = MedianBackground()

            photometry = BasicPSFPhotometry(group_maker=daogroup, bkg_estimator=mmm_bkg, psf_model=psf_model, fitter=LevMarLSQFitter(), fitshape=9)

            positions = Table([Column(xx, name="x_0"), Column(yy, name="y_0"), Column(self.data[y_idx, x_idx], name="flux_0")])

            # Fill the mask with nan to perform correct photometry on the edge
            # of the mask, and catch numpy & astropy warnings
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", AstropyWarning)
                warnings.simplefilter("ignore", RuntimeWarning)
                result_tab = photometry(image=np.ma.array(self.data, mask=self.mask).filled(np.nan), init_guesses=positions)

            result_tab.sort("id")
            for _source, _tab in zip(["flux_psf", "eflux_psf"], ["flux_fit", "flux_unc"]):
                sources[_source] = Column(result_tab[_tab] * psf_model(0, 0), unit=self.unit * u.beam).to(u.mJy)
            sources["group_id"] = result_tab["group_id"]

        self.sources = sources
Code example #13
0
    def Flux(self,x,y):
        x = int(x)
        y = int(y)
        r = 25
        data = self.hdulist[self.fz].data[x-r:x+r,y-r:y+r]
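        # lacosmic returns (cleaned_image, cosmic_ray_mask); [0] keeps the image.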
        data = (lacosmic.lacosmic(data,2,10,10, effective_gain = self.gain, readnoise = self.readnoise))[0]
        bkgrms = MADStdBackgroundRMS()
        std = bkgrms(data)
        iraffind = IRAFStarFinder(threshold=self.limit*std,
                                   fwhm=self.sigma_psf*gaussian_sigma_to_fwhm,
                                   minsep_fwhm=0.01, roundhi=5.0, roundlo=-5.0,
                                   sharplo=0.0, sharphi=2.0)
        daogroup = DAOGroup(2.0*self.sigma_psf*gaussian_sigma_to_fwhm)
        mmm_bkg = MMMBackground()
        psf_model = IntegratedGaussianPRF(sigma=self.sigma_psf)
        from photutils.psf import IterativelySubtractedPSFPhotometry
        photometry = IterativelySubtractedPSFPhotometry(finder=iraffind,
                                                         group_maker=daogroup,
                                                         bkg_estimator=mmm_bkg,
                                                         psf_model=psf_model,
                                                         fitter=LevMarLSQFitter(),
                                                         niters=1, fitshape=(21,21))
        

        
        result_tab = photometry(image=data)   
        
        """
        if plot == 1:
            residual_image = photometry.get_residual_image()
            print(result_tab['x_fit','y_fit'])
            plt.figure(self.filename+' data')
            plt.imshow(data, cmap='viridis',
                       aspect=1, interpolation='nearest', origin='lower')
            plt.show()
            plt.figure(self.filename+' residual')
            plt.imshow(residual_image, cmap='viridis',
                       aspect=1, interpolation='nearest', origin='lower')
            plt.show()
            plt.figure(self.filename+' PSF')
            plt.imshow(data-residual_image, cmap='viridis',
                       aspect=1, interpolation='nearest', origin='lower')
            plt.show()
        """
        
        if len(result_tab) > 5:
            return(0,0) 
        if len(result_tab) ==0:
            print('None')
            return(0,0) 
        result_tab['Minus'] = np.zeros(len(result_tab))
        for i in range(len(result_tab)):
            if 18.5 < result_tab['x_fit'][i] < 28.5 and 18.5 < result_tab['y_fit'][i] < 28.5:
            #if 15 < result_tab['x_fit'][i] < 25 and 15 < result_tab['y_fit'][i] < 25:
                result_tab['Minus'][i] = 1
            else:
                result_tab['Minus'][i] = 0
        mask = result_tab['Minus'] == 1.0
        result_tab = result_tab[mask]
        if len(result_tab) != 1:
            return(0,0)   
        flux_counts = float(result_tab['flux_fit'][0])
        flux_unc = float(result_tab['flux_unc'][0])
        flux_unc = flux_unc/flux_counts
        return(flux_counts,flux_unc)
Code example #14
0
          * starts with "lin": Linear (`LinearLSQFitter`)
          * starts with "sim": Simplex (`SimplexLSQFitter`)
          * starts with "sl": SLSQP (`SLSQPLSQFitter`)
          * starts with "jo": Joint (`JointFitter`)

    **kwargs :
        Keyword arguments for the fitter (name and astropy default values)::

          * `LinearLSQFitter`: `calc_uncertainties=False`
          * `LevMarLSQFitter`: `calc_uncertainties=False`
          * `SimplexLSQFitter`, `SLSQPLSQFitter`: N/A
          * `JointFitter`: `models`, `jointparameters`, `initvals` (must be given)
    """
    if fitter_name.lower().startswith("lev") or fitter_name.lower().startswith(
            "lm"):
        return LevMarLSQFitter(**kwargs)
    elif fitter_name.lower().startswith("lin"):
        return LinearLSQFitter(**kwargs)
    elif fitter_name.lower().startswith("sl"):
        return SLSQPLSQFitter(**kwargs)
    elif fitter_name.lower().startswith("sim"):
        return SimplexLSQFitter(**kwargs)
    elif fitter_name.lower().startswith("jo"):
        return JointFitter(**kwargs)
    else:
        return fitter_name(**kwargs)
        # ^ assume the `fitter_name` is already an astropy fitter
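
# Usage sketch (assumption: the enclosing helper is named get_fitter; its
# actual name is not shown here):
#   get_fitter("lm")      # -> LevMarLSQFitter()
#   get_fitter("linear")  # -> LinearLSQFitter()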


def get_model(model_name, *args, **kwargs):
    """ Finds and returns the model with the given name.
Code example #15
0
File: project2.py Project: YeongKyunJeong/JYK264
       zoom='',
       save='',
       quit='',
       grid='',
       yscale='',
       xscale='',
       all_axes='')


#%%
DISPAXIS = 2  # 1 = line = python_axis_1 // 2 = column = python_axis_0
FONTSIZE = 12  # Change it on your computer if you wish.
rcParams.update({'font.size': FONTSIZE})
COMPIMAGE = ppdpath / 'Comp-master.fits'  # Change directory if needed!
OBJIMAGE = ppdpath / 'NGC676-0001.fits'
LINE_FITTER = LevMarLSQFitter()
#%%
# Parameters for IDENTIFY
FITTING_MODEL_ID = 'Chebyshev'
ORDER_ID = 4
NSUM_ID = 10
FWHM_ID = 4  # rough guess of FWHM of lines in IDENTIFY (pixels)

# Parameters for REIDENTIFY
FITTING_MODEL_REID = 'Chebyshev'  # 2-D fitting function
ORDER_SPATIAL_REID = 6
ORDER_WAVELEN_REID = 6
STEP_REID = 15  # Reidentification step size in pixels (spatial direction)
NSUM_REID = 10
TOL_REID = 5  # tolerance to lose a line in pixels
Code example #16
0
def test_NFW_fit():
    """Test linear fitting of NFW model."""
    # Fixed parameters
    redshift = 0.63
    cosmo = cosmology.Planck15

    # Radial set
    r = np.array([
        1.00e+01, 1.00e+02, 2.00e+02, 2.50e+02, 3.00e+02, 4.00e+02, 5.00e+02,
        7.50e+02, 1.00e+03, 1.50e+03, 2.50e+03, 6.50e+03, 1.15e+04
    ]) * u.kpc

    # 200c Overdensity
    massfactor = ("critical", 200)

    density_r = np.array([
        1.77842761e+08, 9.75233623e+06, 2.93789626e+06, 1.90107238e+06,
        1.30776878e+06, 7.01004140e+05, 4.20678479e+05, 1.57421880e+05,
        7.54669701e+04, 2.56319769e+04, 6.21976562e+03, 3.96522424e+02,
        7.39336808e+01
    ]) * (u.solMass / u.kpc**3)

    fitter = LevMarLSQFitter()

    n200c = NFW(mass=1.8E15 * u.M_sun,
                concentration=7.0,
                redshift=redshift,
                cosmo=cosmo,
                massfactor=massfactor)
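    # Freeze the redshift so the fit varies only mass and concentration.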
    n200c.redshift.fixed = True

    n_fit = fitter(n200c, r, density_r, maxiter=1000)

    assert_quantity_allclose(n_fit.mass, 2.0000000000000E15 * u.M_sun)
    assert_quantity_allclose(n_fit.concentration, 8.5)

    # 200m Overdensity
    massfactor = ("mean", 200)

    density_r = np.array([
        1.35677282e+08, 7.95392979e+06, 2.50352599e+06, 1.64535870e+06,
        1.14642248e+06, 6.26805453e+05, 3.81691731e+05, 1.46294819e+05,
        7.11559560e+04, 2.45737796e+04, 6.05459585e+03, 3.92183991e+02,
        7.34674416e+01
    ]) * (u.solMass / u.kpc**3)

    fitter = LevMarLSQFitter()

    n200m = NFW(mass=1.8E15 * u.M_sun,
                concentration=7.0,
                redshift=redshift,
                cosmo=cosmo,
                massfactor=massfactor)
    n200m.redshift.fixed = True

    n_fit = fitter(n200m, r, density_r, maxiter=1000)

    assert_quantity_allclose(n_fit.mass, 2.0000000000000E15 * u.M_sun)
    assert_quantity_allclose(n_fit.concentration, 8.5)

    # Virial mass
    massfactor = ("virial", 200)

    density_r = np.array([
        1.44573515e+08, 8.34873998e+06, 2.60137484e+06, 1.70348738e+06,
        1.18337370e+06, 6.43994654e+05, 3.90800249e+05, 1.48930537e+05,
        7.21856397e+04, 2.48289464e+04, 6.09477095e+03, 3.93248818e+02,
        7.35821787e+01
    ]) * (u.solMass / u.kpc**3)

    fitter = LevMarLSQFitter()

    nvir = NFW(mass=1.8E15 * u.M_sun,
               concentration=7.0,
               redshift=redshift,
               cosmo=cosmo,
               massfactor=massfactor)
    nvir.redshift.fixed = True

    n_fit = fitter(nvir, r, density_r, maxiter=1000)

    assert_quantity_allclose(n_fit.mass, 2.0000000000000E15 * u.M_sun)
    assert_quantity_allclose(n_fit.concentration, 8.5)
Code example #17
0
def gaussian_fit_xc(x_correlation):
    """
    Using astropy's modeling and fitting modules to fit a Gaussian
    to the cross-correlation of two epochs in the JCMT transient survey.
    
    """
    import numpy as np
    from astropy.modeling.models import Gaussian2D
    from astropy.modeling.fitting import LevMarLSQFitter
    # figure out where we need to clip to
    y_center = x_correlation.shape[0] // 2
    x_center = x_correlation.shape[
        1] // 2  # centre of the Cross-Corr maps default: (200,200)
    width = 7
    y_max, x_max = np.where(x_correlation == x_correlation.max())
    y_max = int(y_max[0])
    x_max = int(x_max[0])

    # clipping map further to better fit a gaussian profile to it
    x_correlation = x_correlation[y_max - width:y_max + width + 1,
                                  x_max - width:x_max + width + 1]
    # subtracting half the side to then add the mean values after
    x_max -= x_correlation.shape[1] // 2
    y_max -= x_correlation.shape[0] // 2
    # generating the gaussian to fit.

    x_mesh, y_mesh = np.meshgrid(np.arange(x_correlation.shape[0]),
                                 np.arange(x_correlation.shape[1]))
    gauss_init = Gaussian2D(
        amplitude=x_correlation.max(),
        x_mean=np.where(x_correlation == x_correlation.max())
        [1],  # location to start fitting gaussian
        y_mean=np.where(x_correlation == x_correlation.max())
        [0],  # location to start fitting gaussian
        # fixed={},  # any fixed parameters
        bounds={
            # 'amplitude': (x_correlation.max() * 0.90, x_correlation.max() * 1.10),
            'x_mean':
            (int(np.where(x_correlation == x_correlation.max())[1]) - 1,
             int(np.where(x_correlation == x_correlation.max())[1]) + 1),
            'y_mean':
            (int(np.where(x_correlation == x_correlation.max())[0]) - 1,
             int(np.where(x_correlation == x_correlation.max())[0]) + 1)
        },  # allowing var in amplitude to better fit gauss
    )
    fitting_gauss = LevMarLSQFitter(
    )  # Fitting method; Levenberg-Marquardt Least Squares algorithm
    best_fit_gauss = fitting_gauss(gauss_init, x_mesh, y_mesh,
                                   x_correlation)  # The best fit for the map
    gauss_model = best_fit_gauss(
        x_mesh, y_mesh)  # the model itself (if we want to plot it)

    # now we can get the location of our peak fitted gaussian and add them back to get a total offset
    y_max += best_fit_gauss.y_mean.value  # add the fitted centre back to get
    x_max += best_fit_gauss.x_mean.value  # the total x and y offsets
    try:
        x_correlation_error = np.sqrt(
            np.diag(fitting_gauss.fit_info['param_cov']))
    except Exception:
        x_correlation_error = np.ones(10) * -5
    offset = (x_center - x_max, y_center - y_max)
    offset_err = (x_correlation_error[1], x_correlation_error[2])
    return offset, offset_err
Code example #18
0
def model_keplerian(positions,
                    velocities,
                    v_lsr=None,
                    fit_method=None,
                    flag_singularity=True,
                    flag_radius=None,
                    flag_intervals=None,
                    return_stddevs=True,
                    plot=False,
                    debug=False):
    """Fit a Keplerian velocity profile to position-velocity-data.

    Args:
        positions (np.ndarray or Quantity):
            Position offsets; converted to AU if given as a Quantity.
        velocities (np.ndarray or Quantity):
            Velocities; converted to km/s if given as a Quantity.
        v_lsr (float):
            Systemic velocity in units of km/s.
        fit_method (any, optional):
            Method to fit the model to the data.
        flag_singularity (bool, optional):
            Flag the zero position data points, to avoid running into trouble there during fitting.
        flag_radius (astropy.units.Quantity, optional):
            If given, then all data points within this given radius from the position_reference are flagged.
        flag_intervals (list of tuples of astropy.units.Quantity, optional):
            Similar to flag_radius, but arbitrary intervals may be flagged. Each interval is
            given as a tuple of two radial distances from the position_reference.
        return_stddevs (boolean, optional):
            The fit method LevMarLSQFitter is able to return the standard deviation of the fit parameters. Default is
            True.
        plot (boolean, optional):
            If True, the fit will be displayed as a matplotlib pyplot.
        debug (bool, optional):
            Stream debugging information to the terminal.

    Returns:
        best_fit (astropy.modeling.models.custom_model):
            Best fitting model.
        stddevs (numpy.array):
            Only if return_stddevs is True. The array entries correspond to the best_fit instance parameters in the
            same order.
        chi2 (float):
            chi-squared residual of the fit to the unflagged data.
    """

    # Transform Quantities to correct units
    if isinstance(positions, Quantity):
        positions = positions.to('AU').value
    if isinstance(velocities, Quantity):
        velocities = velocities.to('km/ s').value

    # Apply fall back values
    if fit_method is None:
        fit_method = LevMarLSQFitter()
    if v_lsr is None:
        v_lsr = 0

    # Create masked arrays
    xdata = np.ma.masked_array(positions, np.zeros(positions.shape,
                                                   dtype=bool))
    ydata = np.ma.masked_array(velocities,
                               np.zeros(velocities.shape, dtype=bool))

    # Mask the desired flags and intervals
    if flag_singularity:
        print('Flagging the singularity')
        singularity_mask = np.ma.masked_less(np.abs(xdata), 1e-3).mask
        xdata.mask = np.logical_or(xdata.mask, singularity_mask)
        ydata.mask = np.logical_or(ydata.mask, singularity_mask)
        print(f">> Done")
    else:
        print("Not masking the singularity")

    if flag_radius is not None:
        print(f"Flagging towards a radial distance of {flag_radius}")
        if isinstance(flag_radius, Quantity):
            flag_radius = flag_radius.to('au').value
        xdata = np.ma.masked_inside(xdata, -flag_radius, flag_radius)
        ydata.mask = np.logical_or(ydata.mask, xdata.mask)
        print(f">> Done")
        print(f"The mask is {xdata.mask}")
    else:
        print("No flag radius provided")

    if flag_intervals is not None:
        print('Flagging intervals...')
        for interval in flag_intervals:
            xdata = np.ma.masked_inside(xdata, interval[0], interval[1])
            ydata.mask = np.logical_or(ydata.mask, xdata.mask)
        print(f">> Flagged {np.sum(xdata.mask)} elements")
    else:
        print("No flag intervals provided")

    if debug:
        print('x data:', xdata)
        print('y data:', ydata)

    # Initialize the fit model
    print("Initializing the model...")
    model = Keplerian1D(mass=10., v0=v_lsr, r0=0, bounds={'mass': (0.0, None)})
    if debug:
        print(f"Initialize the model: {model}")

    # Fit the chosen model to the data
    print("Fitting the model to the data...")
    best_fit = fit_method(model, xdata.compressed(), ydata.compressed())
    if debug:
        print(fit_method.fit_info['message'])

    # Estimate chi2
    print("Computing the chi-squared value...")
    chi2 = np.sum(np.square(best_fit(xdata.compressed()) - ydata.compressed()))

    # Plot
    if plot:
        plt.plot(positions, velocities, 'o', label='data')
        plt.xlabel('Position offset (AU)')
        plt.ylabel('Velocity (km/ s)')
        plt.axhline(v_lsr, c='k', ls='--', label=r'$v_\mathrm{LSR}$')
        plt.plot(xdata, best_fit(xdata), label='model')
        plt.fill_between(xdata,
                         best_fit(xdata),
                         best_fit.v0,
                         facecolor='tab:orange',
                         alpha=.5)
        if debug:
            plt.plot(xdata, model(xdata), label='init')
        plt.grid()
        plt.legend()
        plt.show()
        plt.close()

    # Prepare the return
    stddevs = None
    if not isinstance(fit_method, LevMarLSQFitter):
        return_stddevs = False
    if return_stddevs:
        covariance = fit_method.fit_info['param_cov']
        if covariance is None:
            print(
                f"[ERROR] Unable to compute the covariance matrix and fit parameter uncertainties!"
            )
        else:
            stddevs = np.sqrt(np.diag(covariance))

    return best_fit, stddevs, chi2
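
A minimal invocation, as a hedged sketch (the 5.5 km/s systemic velocity is an illustrative value; positions and velocities are the arrays described in the docstring):

best_fit, stddevs, chi2 = model_keplerian(positions, velocities, v_lsr=5.5)
print(best_fit.mass.value, stddevs, chi2)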
Code example #19
0
def jwst_camera_fpa_data(data_dir,
                         pattern,
                         standardized_data_dir,
                         parameters,
                         overwrite_source_extraction=False):
    """Generate standardized focal plane alignment (fpa) data
       based on JWST camera image.
    """

    save_plot = parameters['save_plot']

    file_list = glob.glob(os.path.join(data_dir, '*{}'.format(pattern)))

    if len(file_list) == 0:
        raise RuntimeError('No data found')

    file_list.sort()
    for f in file_list:

        plt.close('all')

        print()
        print('Data directory: {}'.format(data_dir))
        print('Image being processed: {}'.format(f))

        im = datamodels.open(f)
        if hasattr(im, 'data') is False:
            im.data = fits.getdata(f)
            #im.dq    = np.zeros(im.data.shape)

        header_info = OrderedDict()

        for attribute in 'telescope'.split():
            header_info[attribute] = getattr(im.meta, attribute)

        # observations
        for attribute in 'date time visit_number visit_id visit_group activity_id program_number'.split(
        ):
            header_info['observation_{}'.format(attribute)] = getattr(
                im.meta.observation, attribute)

        header_info['epoch_isot'] = '{}T{}'.format(
            header_info['observation_date'], header_info['observation_time'])

        #  instrument
        for attribute in 'name filter pupil detector'.split():
            header_info['instrument_{}'.format(attribute)] = getattr(
                im.meta.instrument, attribute)

        # subarray
        for attribute in 'name'.split():
            header_info['subarray_{}'.format(attribute)] = getattr(
                im.meta.subarray, attribute)

        # aperture
        for attribute in 'name position_angle pps_name'.split():
            try:
                value = getattr(im.meta.aperture, attribute)
            except AttributeError:
                value = None

            header_info['aperture_{}'.format(attribute)] = value

        header_info['INSTRUME'] = header_info['instrument_name']
        header_info['SIAFAPER'] = header_info['aperture_name']

        instrument_name = getattr(im.meta.instrument, 'name')
        instrument_detector = getattr(im.meta.instrument, 'detector')
        instrument_filter = getattr(im.meta.instrument, 'filter')

        # temporary solution, this should come from populated aperture attributes
        #if header_info['subarray_name'] == 'FULL':
        #    master_apertures = pysiaf.read.read_siaf_detector_layout()
        #    if header_info['instrument_name'].lower() in ['niriss', 'miri']:
        #        header_info['SIAFAPER'] = master_apertures['AperName'][np.where(master_apertures['InstrName']==header_info['instrument_name'])[0][0]]
        #    elif header_info['instrument_name'].lower() in ['fgs']:
        #        header_info['SIAFAPER'] = 'FGS{}_FULL'.format(header_info['instrument_detector'][-1])
        #    elif header_info['instrument_name'].lower() in ['nircam']:
        #        header_info['SIAFAPER'] = header_info['aperture_name']
        #else:
        #    sys.exit('Only FULL arrays are currently supported.')

        # target
        for attribute in 'ra dec catalog_name proposer_name'.split():
            header_info['target_{}'.format(attribute)] = getattr(
                im.meta.target, attribute)

        # pointing
        for attribute in 'ra_v1 dec_v1 pa_v3'.split():
            try:
                value = getattr(im.meta.pointing, attribute)
            except AttributeError:
                value = None
            header_info['pointing_{}'.format(attribute)] = value

        # add HST style keywords
        header_info['PROGRAM_VISIT'] = '{}_{}'.format(
            header_info['observation_program_number'],
            header_info['observation_visit_id'])
        header_info['PROPOSID'] = header_info['observation_program_number']
        header_info['DATE-OBS'] = header_info['observation_date']
        header_info['TELESCOP'] = header_info['telescope']
        header_info['INSTRUME'] = header_info['instrument_name']
        try:
            header_info['APERTURE'] = header_info['SIAFAPER']
        except KeyError:
            header_info['APERTURE'] = None
        header_info['CHIP'] = 0

        # TBD: Need to remove making yet another directory
        #extracted_sources_dir = os.path.join(standardized_data_dir, 'extraction')
        #if os.path.isdir(extracted_sources_dir) is False:
        #    os.makedirs(extracted_sources_dir)
        extracted_sources_file = os.path.join(
            standardized_data_dir,  #extracted_sources_dir,
            '{}_extracted_sources.fits'.format(
                os.path.basename(f).split('.')[0]))

        mask_extreme_slope_values = False
        parameters['maximum_slope_value'] = 1000.

        # Check if extracted_sources_file exists, or overwrite_source_extraction is set to True
        if (not os.path.isfile(extracted_sources_file)) or (
                overwrite_source_extraction):
            data = copy.deepcopy(im.data)
            #dq = copy.deepcopy(im.dq)

            # Convert image data to counts per second
            photmjsr = getattr(im.meta.photometry, 'conversion_megajanskys')
            data_cps = data / photmjsr

            if mask_extreme_slope_values:
                # clean up extreme slope values
                bad_index = np.where(
                    np.abs(data) > parameters['maximum_slope_value'])
                data[bad_index] = 0.
                dq[bad_index] = -1

            bkgrms = MADStdBackgroundRMS()
            mmm_bkg = MMMBackground()
            bgrms = bkgrms(data_cps)
            bgavg = mmm_bkg(data_cps)

            # Default parameters that generally works for NIRCam/NIRISS images
            sigma_factor = 10
            round_lo, round_hi = 0.0, 0.6
            sharp_lo, sharp_hi = 0.3, 1.4
            fwhm_lo, fwhm_hi = 1.0, 20.0
            fwhm = 2.0
            minsep_fwhm = 7  # NOTE: minsep_fwhm>5 to reject artifacts around saturated stars
            flux_percent_lo, flux_percent_hi = 10, 99

            # if 'sharp_lo' in parameters:
            #    sharp_lo = parameters['sharp_lo']

            ###
            ### TBD1: Relocate params below to config parts/files
            ###
            # Use different criteria for selecting good stars
            if parameters['nominalpsf']:
                # If using Nominal PSF models
                if instrument_name == 'NIRISS':
                    #fwhm_lo, fwhm_hi = 1.0, 2.0
                    sharp_lo, sharp_hi = 0.6, 1.4
                elif instrument_name == 'FGS':
                    #fwhm_lo, fwhm_hi = 1.0, 1.4
                    sharp_lo, sharp_hi = 0.6, 1.4
                elif instrument_name == 'NIRCAM':
                    sharp_lo, sharp_hi = 0.6, 1.4
                elif instrument_name == 'MIRI':
                    sharp_lo, sharp_hi = 0.8, 1.0
                    fwhm_lo, fwhm_hi = 1.5, 2.2
                    sigma_factor = 3
                elif instrument_name == 'NIRSPEC':
                    sharp_lo, sharp_hi = 0.6, 0.8
                    round_lo, round_hi = 0.0, 0.3
                    fwhm_lo, fwhm_hi = 1.0, 1.75
            else:
                ###
                ### For OTE commissioning, tweak the params below after finding
                ### the correct ranges by running the photometry notebook.
                ###

                # If using Commissioning (non-phased) PSF models
                if instrument_name == 'NIRISS':
                    sharp_lo, sharp_hi = 0.6, 1.4
                    fwhm_lo, fwhm_hi = 1.4, 2.4

################################################################################
################################################################################
################################################################################

                elif instrument_name == 'FGS':
                    sigma_factor = 10
                    minsep_fwhm = 2.5
                    sharp_lo, sharp_hi = 0.45, 0.7
                    round_lo, round_hi = 0.0, 0.3
                    flux_percent_lo, flux_percent_hi = 2, 99
                    fwhm = 4

################################################################################
################################################################################
################################################################################

# Below works well for F200W and F356W images

                elif instrument_name == 'NIRCAM':
                    sigma_factor = 3
                    minsep_fwhm = 2.5
                    sharp_lo, sharp_hi = 0.5, 0.7
                    round_lo, round_hi = 0.0, 0.2
                    flux_percent_lo, flux_percent_hi = 2, 99
                    if 'F200W' in instrument_filter:
                        fwhm = 10
                    elif 'F356W' in instrument_filter:
                        fwhm = 8
                    elif 'F090W' in instrument_filter:
                        fwhm = 5.5
                    elif 'F277W' in instrument_filter:
                        fwhm = 6.5
                    else:
                        fwhm = 3


################################################################################
################################################################################
################################################################################

                elif instrument_name == 'MIRI':
                    sharp_lo, sharp_hi = 0.5, 1.0
                    fwhm_lo, fwhm_hi = 1.5, 2.2
                    sigma_factor = 3
                elif instrument_name == 'NIRSPEC':
                    sharp_lo, sharp_hi = 0.5, 0.8
                    round_lo, round_hi = 0.0, 0.3
                    fwhm_lo, fwhm_hi = 1.0, 1.75

            # Use IRAFStarFinder for source detection
            iraffind = IRAFStarFinder(threshold=sigma_factor * bgrms + bgavg,
                                      fwhm=fwhm,
                                      minsep_fwhm=minsep_fwhm,
                                      roundlo=round_lo,
                                      roundhi=round_hi,
                                      sharplo=sharp_lo,
                                      sharphi=sharp_hi)

            # Create default mask with all False values
            datamask = np.zeros(
                data_cps.shape,
                dtype=bool)  # This creates an array with all False

            # Mask the left (for NRS1) and right regions (for NRS2) for NIRSpec
            if instrument_detector == 'NRS1':
                datamask[:, :1023] = True  # Mask everything on the left side
            elif instrument_detector == 'NRS2':
                datamask[:, 1024:] = True  # Mask everything on the right side

            iraf_extracted_sources = iraffind(data_cps, mask=datamask)

            # Perform some basic filtering

            # Remove sources based on flux percentile
            # 10-99% works well for filtering out too faint or saturated sources
            flux_min = np.percentile(iraf_extracted_sources['flux'],
                                     flux_percent_lo)
            flux_max = np.percentile(iraf_extracted_sources['flux'],
                                     flux_percent_hi)
            iraf_extracted_sources.remove_rows(
                np.where(iraf_extracted_sources['flux'] < flux_min))
            iraf_extracted_sources.remove_rows(
                np.where(iraf_extracted_sources['flux'] > flux_max))

            # Also remove sources based on fwhm
            ###
            ### Don't use below for now - 2/23/2022 (Don't use it unless we get lots of bad sources)
            ###
            #iraf_extracted_sources.remove_rows(np.where(iraf_extracted_sources['fwhm']<fwhm_lo))
            #iraf_extracted_sources.remove_rows(np.where(iraf_extracted_sources['fwhm']>fwhm_hi))

            # Now improve the positions by re-running centroiding algorithm if necessary.
            # NOTE: For now, re-centroiding will be turned off

            ###
            ### TBD2: Add re-centroiding algorithm adopted from Paul here
            ###
            #xarr = sources_masked['xcentroid']
            #yarr = sources_masked['ycentroid']
            #newx, newy = centroid_sources(data_cps, xarr, yarr, box_size=5, centroid_func=centroid_2dg)
            #coords = np.column_stack((newx, newy))
            #srcaper = CircularAnnulus(coords, r_in=1, r_out=3)
            #srcaper_masks = srcaper.to_mask(method='center')
            #satflag = np.zeros((len(newx),),dtype=int)
            #i = 0
            #for mask in srcaper_masks:
            #    srcaper_dq = mask.multiply(dqarr)
            #    srcaper_dq_1d = srcaper_dq[mask.data>0]
            #    badpix = np.logical_and(srcaper_dq_1d>2, srcaper_dq_1d<7)
            #    reallybad = np.where(srcaper_dq_1d==1)
            #    if ((len(srcaper_dq_1d[badpix]) > 1) or (len(srcaper_dq_1d[reallybad]) > 0)):
            #        satflag[i] = 1
            #        i =+1
            #goodx = newx[np.where(satflag==0)]
            #goody = newy[np.where(satflag==0)]
            #print('Number of sources before removing saturated or bad pixels: ', len(xarr))
            #print('Number of sources without saturated or bad pixels: ', len(goodx))
            #print(' ')
            #coords = np.column_stack((goodx,goody))

            print('Number of extracted sources after filtering: {} sources'.
                  format(len(iraf_extracted_sources)))

            if parameters['use_epsf'] is True:
                size = 25
                hsize = (size - 1) / 2
                x = iraf_extracted_sources['xcentroid']
                y = iraf_extracted_sources['ycentroid']
                mask = ((x > hsize) & (x < (data_cps.shape[1] - 1 - hsize)) &
                        (y > hsize) & (y < (data_cps.shape[0] - 1 - hsize)))
                stars_tbl = Table()
                stars_tbl['x'] = x[mask]
                stars_tbl['y'] = y[mask]
                print('Using {} stars to build epsf'.format(len(stars_tbl)))

                data_cps_bkgsub = data_cps.copy()
                data_cps_bkgsub -= bgavg
                nddata = NDData(data=data_cps_bkgsub)
                stars = extract_stars(nddata, stars_tbl, size=size)

                #
                # Figure - PSF stars
                #
                nrows = 10
                ncols = 10
                fig, ax = plt.subplots(nrows=nrows,
                                       ncols=ncols,
                                       figsize=(20, 20),
                                       squeeze=True)
                ax = ax.ravel()
                for i in range(nrows * ncols):
                    if i <= len(stars) - 1:
                        norm = simple_norm(stars[i], 'log', percent=99.)
                        ax[i].imshow(stars[i],
                                     norm=norm,
                                     origin='lower',
                                     cmap='viridis')
                plt.title('{} sample stars for epsf'.format(
                    header_info['APERTURE']))
                if save_plot:
                    figname = os.path.join(
                        standardized_data_dir, '{}_sample_psfs.pdf'.format(
                            os.path.basename(f).split('.')[0]))
                    plt.savefig(figname)
                if parameters['show_extracted_sources']:
                    plt.show()

                #
                # Timer for ePSF construction
                #
                tic = time.perf_counter()
                epsf_builder = EPSFBuilder(oversampling=4,
                                           maxiters=3,
                                           progress_bar=False)
                print("Building ePSF ...")
                epsf, fitted_stars = epsf_builder(stars)
                toc = time.perf_counter()
                print("Time elapsed for building ePSF:", toc - tic)

                #
                # Figure - ePSF plot
                #
                norm_epsf = simple_norm(epsf.data, 'log', percent=99.)
                plt.figure()
                plt.imshow(epsf.data,
                           norm=norm_epsf,
                           origin='lower',
                           cmap='viridis')
                plt.colorbar()
                plt.title('{} epsf using {} stars'.format(
                    header_info['APERTURE'], len(stars_tbl)))
                if save_plot:
                    figname = os.path.join(
                        standardized_data_dir, '{}_epsf.pdf'.format(
                            os.path.basename(f).split('.')[0]))
                    plt.savefig(figname)
                if parameters['show_extracted_sources']:
                    plt.show()

                daogroup = DAOGroup(5.0 * 2.0)
                psf_model = epsf.copy()

                tic = time.perf_counter()
                photometry = IterativelySubtractedPSFPhotometry(
                    finder=iraffind,
                    group_maker=daogroup,
                    bkg_estimator=mmm_bkg,
                    psf_model=psf_model,
                    fitter=LevMarLSQFitter(),
                    niters=1,
                    fitshape=(11, 11),
                    aperture_radius=5)
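                # With niters=1 this is a single detect-fit-subtract pass; the
                # iterative machinery only matters for niters > 1.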
                print('Performing source extraction and photometry ...')
                epsf_extracted_sources = photometry(data_cps)
                toc = time.perf_counter()
                print("Time elapsed for PSF photometry:", toc - tic)
                print('Final source extraction with epsf: {} sources'.format(
                    len(epsf_extracted_sources)))

                epsf_extracted_sources['xcentroid'] = epsf_extracted_sources[
                    'x_fit']
                epsf_extracted_sources['ycentroid'] = epsf_extracted_sources[
                    'y_fit']
                extracted_sources = epsf_extracted_sources
                extracted_sources.write(extracted_sources_file, overwrite=True)

                norm = simple_norm(data_cps, 'sqrt', percent=99.)
                diff = photometry.get_residual_image()
                plt.figure()
                ax1 = plt.subplot(1, 2, 1)
                plt.xlabel("X [pix]")
                plt.ylabel("Y [pix]")
                ax1.imshow(data_cps, norm=norm, cmap='Greys')
                ax2 = plt.subplot(1, 2, 2)
                plt.xlabel("X [pix]")
                plt.ylabel("Y [pix]")
                ax2.imshow(diff, norm=norm, cmap='Greys')
                plt.title('PSF subtracted image for {}'.format(
                    os.path.basename(f)))
                if save_plot:
                    figname = os.path.join(
                        standardized_data_dir,
                        '{}_psfsubtracted_image.pdf'.format(
                            os.path.basename(f).split('.')[0]))
                    plt.savefig(figname)
                if parameters['show_psfsubtracted_image']:
                    plt.show()

            else:

                extracted_sources = iraf_extracted_sources
                extracted_sources.write(extracted_sources_file, overwrite=True)

            positions = np.transpose((extracted_sources['xcentroid'],
                                      extracted_sources['ycentroid']))
            apertures = CircularAperture(positions, r=10)
            norm = simple_norm(data_cps, 'sqrt', percent=99.)

            plt.figure(figsize=(12, 12))
            plt.xlabel("X [pix]")
            plt.ylabel("Y [pix]")
            plt.imshow(data_cps, norm=norm, cmap='Greys', origin='lower')
            apertures.plot(color='blue', lw=1.5, alpha=0.5)
            title_string = '{}: {} selected sources'.format(
                os.path.basename(f), len(extracted_sources))
            plt.title(title_string)
            plt.tight_layout()
            if save_plot:
                figname = os.path.join(
                    standardized_data_dir, '{}_extracted_sources.pdf'.format(
                        os.path.basename(f).split('.')[0]))
                plt.savefig(figname)
            if parameters['show_extracted_sources']:
                plt.show()
            plt.close()

        else:
            extracted_sources = Table.read(extracted_sources_file)

        print('Extracted {} sources from {}'.format(len(extracted_sources), f))
        impose_positive_flux = True
        if impose_positive_flux and parameters['use_epsf']:
            extracted_sources.remove_rows(
                np.where(extracted_sources['flux_fit'] < 0)[0])
            print('Only {} sources have positive flux'.format(
                len(extracted_sources)))

        astrometry_uncertainty_mas = 5

        if len(extracted_sources) > 0:
            # Cal images are in DMS coordinates which correspond to the SIAF Science (SCI) frame
            extracted_sources['x_SCI'], extracted_sources[
                'y_SCI'] = extracted_sources['xcentroid'], extracted_sources[
                    'ycentroid']

            # For now, astrometric uncertainty defaults to 5 mas for each source.
            extracted_sources['sigma_x_mas'] = np.ones(
                len(extracted_sources)) * astrometry_uncertainty_mas
            extracted_sources['sigma_y_mas'] = np.ones(
                len(extracted_sources)) * astrometry_uncertainty_mas

        # transfer info to astropy table header
        for key, value in header_info.items():
            extracted_sources.meta[key] = value

        extracted_sources.meta['DATAFILE'] = os.path.basename(f)
        extracted_sources.meta['DATAPATH'] = os.path.dirname(f)
        extracted_sources.meta['EPOCH'] = header_info['epoch_isot']

        out_file = os.path.join(
            standardized_data_dir, '{}_FPA_data.fits'.format(
                extracted_sources.meta['DATAFILE'].split('.')[0]))

        print('Writing {}'.format(out_file))
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', AstropyWarning, append=True)
            extracted_sources.write(out_file, overwrite=True)

    return im
Code example #20
File: core.py  Project: jacquesalice/photoutils
def fit_2dgaussian(data, error=None, mask=None):
    """
    Fit a 2D Gaussian plus a constant to a 2D image.

    Invalid values (e.g. NaNs or infs) in the ``data`` or ``error``
    arrays are automatically masked.  The mask for invalid values
    represents the combination of the invalid-value masks for the
    ``data`` and ``error`` arrays.

    Parameters
    ----------
    data : array_like
        The 2D array of the image.

    error : array_like, optional
        The 2D array of the 1-sigma errors of the input ``data``.

    mask : array_like (bool), optional
        A boolean mask, with the same shape as ``data``, where a `True`
        value indicates the corresponding element of ``data`` is masked.

    Returns
    -------
    result : A `GaussianConst2D` model instance.
        The best-fitting Gaussian 2D model.
    """

    from ..morphology import data_properties  # prevent circular imports

    data = np.ma.asanyarray(data)

    if mask is not None and mask is not np.ma.nomask:
        mask = np.asanyarray(mask)
        if data.shape != mask.shape:
            raise ValueError('data and mask must have the same shape.')
        data.mask |= mask

    if np.any(~np.isfinite(data)):
        data = np.ma.masked_invalid(data)
        warnings.warn(
            'Input data contains non-finite values (e.g. NaNs or infs), '
            'which were automatically masked.', AstropyUserWarning)

    if error is not None:
        error = np.ma.masked_invalid(error)
        if data.shape != error.shape:
            raise ValueError('data and error must have the same shape.')
        data.mask |= error.mask
        weights = 1.0 / error.clip(min=1.e-30)
    else:
        weights = np.ones(data.shape)

    if np.ma.count(data) < 7:
        raise ValueError('Input data must have at least 7 unmasked values to '
                         'fit a 2D Gaussian plus a constant.')

    # assign zero weight to masked pixels
    if data.mask is not np.ma.nomask:
        weights[data.mask] = 0.

    mask = data.mask
    data.fill_value = 0.0
    data = data.filled()

    # Subtract the minimum of the data as a crude background estimate.
    # This will also make the data values positive, preventing issues with
    # the moment estimation in data_properties (moments from negative data
    # values can yield undefined Gaussian parameters, e.g. x/y_stddev).
    props = data_properties(data - np.min(data), mask=mask)

    init_const = 0.  # subtracted data minimum above
    init_amplitude = np.ptp(data)
    g_init = GaussianConst2D(constant=init_const,
                             amplitude=init_amplitude,
                             x_mean=props.xcentroid.value,
                             y_mean=props.ycentroid.value,
                             x_stddev=props.semimajor_axis_sigma.value,
                             y_stddev=props.semiminor_axis_sigma.value,
                             theta=props.orientation.value)
    fitter = LevMarLSQFitter()
    y, x = np.indices(data.shape)
    gfit = fitter(g_init, x, y, data, weights=weights)

    return gfit
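
# Note: a minimal standalone sketch of the same fit (constant background plus
# a 2D Gaussian, via LevMarLSQFitter) using only astropy.modeling compound
# models. The synthetic data and starting values below are illustrative and
# are not part of the function above.
import numpy as np
from astropy.modeling.models import Const2D, Gaussian2D
from astropy.modeling.fitting import LevMarLSQFitter

y, x = np.mgrid[0:25, 0:25]
data = 3.0 + 50.0 * np.exp(-((x - 12.0)**2 + (y - 11.0)**2) / (2 * 2.5**2))

g_init = Const2D(amplitude=data.min()) + Gaussian2D(
    amplitude=np.ptp(data), x_mean=12., y_mean=11., x_stddev=2., y_stddev=2.)
fitter = LevMarLSQFitter()
g_fit = fitter(g_init, x, y, data)
print(g_fit.x_mean_1.value, g_fit.y_mean_1.value)  # recovers ~12, ~11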
Code example #21
File: get_fwhm.py  Project: rag9704/PTS
    def __init__(self,
                 psf_array,
                 amplitude,
                 x0,
                 y0,
                 subsampling=5,
                 recenter=True,
                 dx=None,
                 dy=None,
                 amp_err=None,
                 neighbor_positions=[],
                 neighbor_amplitudes=[]):
        # Set parameters defined by the psf to None so that they show
        # up as class attributes. self.set_psf_array will set all
        # of these parameters automatically
        self._subpixel_width = None
        self._width = None
        self._radius = None
        self._subsampling = None
        self._psf_array = None

        self.recenter = recenter
        self.fitter = LevMarLSQFitter()

        # New CrowdedPSF attributes
        if len(neighbor_positions) != len(neighbor_amplitudes):
            raise ValueError("neighbor_positions and neighbor_amplitudes "
                             "lengths {0}, {1} are not equal".format(
                                 len(neighbor_positions),
                                 len(neighbor_amplitudes)))
        self.src_count = len(neighbor_positions)
        self.x0_names = ['nx_{0}'.format(n) for n in range(self.src_count)]
        self.y0_names = ['ny_{0}'.format(n) for n in range(self.src_count)]
        self.amp_names = ['namp{0}'.format(n) for n in range(self.src_count)]
        self._param_names = tuple(['amplitude', 'x0', 'y0'] + self.x0_names +
                                  self.y0_names + self.amp_names)

        super(SinglePSF, self).__init__(n_models=1,
                                        x0=x0,
                                        y0=y0,
                                        amplitude=amplitude)

        # Choose whether or not the position of the PSF can be moved
        if False:  # fit_position:
            self.x0.fixed = False
            self.y0.fixed = False
            if pos_range > 0:
                self.x0.bounds = (self.x0 - pos_range, self.x0 + pos_range)
                self.y0.bounds = (self.y0 - pos_range, self.y0 + pos_range)
        else:
            pass
            # self.x0.fixed = True
            # self.y0.fixed = True
        self.set_psf_array(psf_array, subsampling=subsampling)
        amplitudes = neighbor_amplitudes
        # Set parameters for neighboring sources (if any exist)
        if self.src_count > 0:
            pos_array = np.array(neighbor_positions)
            x = pos_array[:, 0]
            y = pos_array[:, 1]
            kwargs = OrderedDict()

            for n in range(self.src_count):
                x0_name = self.x0_names[n]
                y0_name = self.y0_names[n]
                amp_name = self.amp_names[n]

                setattr(self, x0_name, x[n])
                setattr(self, y0_name, y[n])
                setattr(self, amp_name, amplitudes[n])

                if dx is not None:
                    x0 = getattr(self, x0_name)
                    x0.bounds = (x[n] - dx, x[n] + dx)
                if dy is not None:
                    y0 = getattr(self, y0_name)
                    y0.bounds = (y[n] - dy, y[n] + dy)
                if amp_err is not None:
                    amp = getattr(self, amp_name)
                    amp.bounds = (amplitudes[n] * (1 - amp_err),
                                  amplitudes[n] * (1 + amp_err))
Code example #22
File: galaxy.py  Project: cwfinn/igmtools
def line_measurements(name,
                      spec1d,
                      z,
                      sky=None,
                      spec2d=None,
                      resolution=200,
                      yposition=None,
                      sky_threshold=None,
                      fit_o2_doublet=False,
                      plot=False,
                      show_plot=False,
                      plot_directory=None,
                      sky_in_counts=False):
    """
    Measures emission line fluxes and equivalent widths from a galaxy
    spectrum with redshift z.

    Parameters
    ----------
    name : str
        Object ID.

    spec1d : `igmtools.data.Spectrum1D`
        1D spectrum.

    z : float
        Galaxy redshift.

    sky : array
        1D sky spectrum.

    spec2d : `igmtools.data.Spectrum2D`
        2D spectrum.

    resolution : int, optional
        Spectral resolution (default = 200).

    yposition : tuple, optional
        y-coordinates of the object on the 2D spectrum
        (edge 1, centre, edge 2).

    sky_threshold : float, optional
        Sky counts/flux value above which flag 3 is raised (useful for
        eliminating zero-order contamination and regions of bad sky
        subtraction).

    fit_o2_doublet : bool, optional
        Option to fit two Gaussian components to the OII line
        (default = False).

    plot : bool, optional
        Option to plot continuum estimates across bands where a measurement
        is executed, and the Gaussian fit, if performed (default = False).

    show_plot : bool, optional
        Option to show each plot in an interactive window (default = False).

    plot_directory : str, optional
        If specified, plots will be saved in this directory as PNG files.

    sky_in_counts : bool, optional
        Set to True if the sky spectrum is in counts rather than flux units.

    Returns
    -------
    measurements : `astropy.table.Table`
        The line measurements.

    Notes
    -----
    Measurements are made using the following line indices:

    OII : (3655.0, 3705.0, 3708.5, 3748.5, 3750.0, 3800.0)
    Hd : (4030.0, 4080.0, 4082.0, 4122.0, 4125.0, 4170.0)
    Hg : (4230.0, 4270.0, 4321.5, 4361.5, 4365.0, 4400.0)
    Hb : (4785.0, 4820.0, 4842.5, 4882.5, 5030.0, 5100.0)
    OIII : (4785.0, 4820.0, 4988.0, 5028.0, 5030.0, 5100.0)
    Ha : (6460.0, 6520.0, 6544.5, 6584.5, 6610.0, 6670.0),
    SII : (6640.0, 6700.0, 6713.0, 6753.0, 6760.0, 6810.0)

    These line indices are optimised for spectra with R ~ 200, and have
    measurement windows 20 Angstroms wide. We assume the maximum intrinsic
    line width to be that of a Gaussian with a standard deviation of 10
    Angstroms in the rest frame. Convolved with the instrument line spread
    function, this corresponds to measurement windows of width close to
    the maximum expected standard deviation of the Gaussian line profile.
    For specified instrument resolutions different from the default value
    of 200, measurement windows are scaled to preserve this feature.

    Lines with integrated fluxes measured at greater than 3 sigma
    significance are fitted with Gaussians. The measurements are then
    taken from the Gaussian fitting parameters and their errors computed
    from a Monte-Carlo type estimation. The maximum allowed Gaussian
    standard deviation corresponds to 10 Angstroms in the intrinsic line
    profile, and the minimum to 0.5 times that of the instrumental line
    spread function.

    If Hdelta is absorption dominated at a greater than 3 sigma level,
    a Gaussian absorption profile is fitted. This is motivated by the idea
    that Hdelta may be used as a proxy for the Balmer absorption correction.

    Equivalent widths are positive for emission lines, and negative for
    absorption lines.

    Warning flags are defined as follows:

    0 : No warnings.
    1 : Measurement may be affected by the OH forest between 8600 and 8700
        Angstrom.
    2 : Line was fit with the maximum/minimum allowed Gaussian standard
        deviation.
    3 : Line coincides with region above the specified sky threshold.
    4 : Line may be affected by O2 telluric absorption (7580 - 7680 Angstrom).
    5 : Bad continuum reduced chi squared (> 10).
    6 : No spectral coverage, or human verification failed.

    No measurement is recorded for flag 6 - all values are set to -99.0.

    """

    plot = True if show_plot else plot

    bands = {
        'OII': [3655.0, 3705.0, 3708.5, 3748.5, 3750.0, 3800.0],
        'Hd': [4030.0, 4080.0, 4082.0, 4122.0, 4125.0, 4170.0],
        'Hg': [4230.0, 4270.0, 4321.5, 4361.5, 4365.0, 4400.0],
        'Hb': [4785.0, 4820.0, 4842.5, 4882.5, 5030.0, 5100.0],
        'OIII': [4785.0, 4820.0, 4988.0, 5028.0, 5030.0, 5100.0],
        'Ha': [6460.0, 6520.0, 6544.5, 6584.5, 6610.0, 6670.0],
        'SII': [6640.0, 6700.0, 6713.0, 6753.0, 6760.0, 6810.0]
    }

    # Modify measurement windows if appropriate:
    if resolution != 200:

        sigma_max200 = 18.8  # Max Gaussian sigma for R = 200 (approx)
        dlambda = 7500 / resolution  # LSF FWHM (Angstrom) at a 7500 A reference
        sigma_lsf = dlambda / 2.35482  # FWHM -> sigma
        # sigma_max (the 10 A intrinsic maximum from the docstring) is assumed
        # to be defined at module scope; it is not set in this function
        sigma_max_convolved = np.sqrt(sigma_lsf**2 + sigma_max**2)
        scale_factor = sigma_max_convolved / sigma_max200

        for key in bands.keys():
            window = bands[key][3] - bands[key][2]
            window0 = window * scale_factor
            bands[key][1] += (window - window0) / 2
            bands[key][2] += (window - window0) / 2
            bands[key][3] -= (window - window0) / 2
            bands[key][4] -= (window - window0) / 2

    # Initialise dictionaries:
    (line_flux, continuum_flux, eqw, sn, continuum_params, line_params,
     flags) = {}, {}, {}, {}, {}, {}, {}

    # 1D spectrum arrays:
    wavelength = spec1d.wavelength.value
    flux = spec1d.flux.value
    error = spec1d.flux.uncertainty.value

    # Clean the spectrum:
    wavelength, flux, error, cond = clean_spectrum(wavelength, flux, error)

    if sky is not None:
        sky = sky[cond]

    # Do measurements:
    for key in bands.keys():

        # Initialise dictionary for continuum parameters:
        continuum_params[key] = {}

        # Line groupings:
        if (key == 'OII') and fit_o2_doublet:
            lines = ['OIIR', 'OIIB']
            rest_wavelengths = o2_doublet

        elif key == 'OIII':
            lines = ['OIIIR', 'OIIIB']
            rest_wavelengths = [
                transition_wavelengths['OIIIR'],
                transition_wavelengths['OIIIB']
            ]

        elif key == 'Ha':
            lines = ['NIIR', 'Ha', 'NIIB']
            rest_wavelengths = [
                transition_wavelengths['NIIR'], transition_wavelengths['Ha'],
                transition_wavelengths['NIIB']
            ]

        elif key == 'SII':
            lines = ['SIIR', 'SIIB']
            rest_wavelengths = [
                transition_wavelengths['SIIR'], transition_wavelengths['SIIB']
            ]

        else:
            lines = [key]
            rest_wavelengths = [transition_wavelengths[key]]

        # Initialise dictionaries for line parameters:
        for line in lines:
            line_params[line] = {}

        # Observed wavelengths of the lines:
        observed_wavelengths = [item * (1 + z) for item in rest_wavelengths]

        # Fitting/measurement regions:
        co_blue = ((wavelength >= bands[key][0] * (1 + z)) &
                   (wavelength < bands[key][1] * (1 + z)))
        co_red = ((wavelength >= bands[key][4] * (1 + z)) &
                  (wavelength < bands[key][5] * (1 + z)))
        co_region = co_red | co_blue
        line_region = ((wavelength >= bands[key][2] * (1 + z)) &
                       (wavelength <= bands[key][3] * (1 + z)))

        # Extended region around the measurement. Used for excluding
        # measurements affected by zero orders:
        centre = ((bands[key][2] * (1 + z)) + (bands[key][3] * (1 + z))) / 2
        centre_band = ((wavelength >= centre - 100) &
                       (wavelength <= centre + 100))

        # The full fitting region:
        region = ((wavelength >= bands[key][0] * (1 + z)) &
                  (wavelength < bands[key][5] * (1 + z)))

        # Masks to identify regions potentially affected by 7600A O2 telluric
        # absorption:
        o2_blue = ((wavelength[co_blue] >= o2[0]) &
                   (wavelength[co_blue] <= o2[1]))
        o2_red = (wavelength[co_red] >= o2[0]) & (wavelength[co_red] <= o2[1])
        o2_line = ((wavelength[line_region] >= o2[0]) &
                   (wavelength[line_region] <= o2[1]))

        # Masks to identify regions potentially affected by the OH forest:
        oh_blue = ((wavelength[co_blue] >= oh[0]) &
                   (wavelength[co_blue] <= oh[1]))
        oh_red = (wavelength[co_red] >= oh[0]) & (wavelength[co_red] <= oh[1])
        oh_line = ((wavelength[line_region] >= oh[0]) &
                   (wavelength[line_region] <= oh[1]))

        # Assume the measurement will be good at first:
        flags[key] = 0

        # Check that we have spectral coverage:
        if ((np.sum(co_blue) < 5) | (np.sum(co_red) < 5) |
            (flux[co_blue] == 0).all() | (flux[co_red] == 0).all()):

            # If no coverage, mark all measurements as -99.0 and assign flag
            # 6, then go to next iteration of the loop:
            for line in lines:
                line_flux[line] = (-99.0, -99.0)
                continuum_flux[line] = (-99.0, -99.0)
                eqw[line] = (-99.0, -99.0)
                line_params[line]['amplitude'] = -99.0
                line_params[line]['mean'] = -99.0
                line_params[line]['stddev'] = -99.0

            continuum_params[key]['gradient'] = -99.0
            continuum_params[key]['intercept'] = -99.0
            continuum_params[key]['chi2norm'] = -99.0
            sn[key] = -99.0
            flags[key] = 6
            continue

        # See if we're affected by 7600A O2 telluric absorption:
        if ((np.sum(o2_blue) > 0) | (np.sum(o2_red) > 0) |
            (np.sum(o2_line) > 0)):
            flags[key] = 4

        # See if we're affected by OH forest:
        if ((np.sum(oh_blue) > 0) | (np.sum(oh_red) > 0) |
            (np.sum(oh_line) > 0)):
            flags[key] = 1

        # Assign sky threshold flag if a value is specified and it exceeds
        # this:
        if sky_threshold is not None:
            if any(sky0 > sky_threshold for sky0 in sky[centre_band]):
                flags[key] = 3

        # Sigma clip the continuum, to ensure it's not affected by nearby
        # absorption features:
        filtered_blue = sigma_clip(flux[co_blue], 1.5)
        filtered_red = sigma_clip(flux[co_red], 1.5)

        # Take the mean value of the sigma clipped continuum either side of the
        # line:
        co_level1 = np.mean(filtered_blue)
        co_level1_error = np.std(filtered_blue)
        co_level2 = np.mean(filtered_red)
        co_level2_error = np.std(filtered_red)

        # Linearly interpolate between these values:
        continuum = (
            (co_level2 - co_level1) /
            (np.mean(wavelength[co_red]) - np.mean(wavelength[co_blue])) *
            (wavelength - np.mean(wavelength[co_blue])) + co_level1)
        continuum_error = np.sqrt(co_level1_error**2 + co_level2_error**2) / 2

        # Continuum gradient:
        gradient = ((continuum[1] - continuum[0]) /
                    (wavelength[1] - wavelength[0]))
        continuum_params[key]['gradient'] = gradient

        # Continuum intercept:
        intercept = continuum[0] - gradient * wavelength[0]
        continuum_params[key]['intercept'] = intercept

        # Flag if normalised continuum chi squared > 10:
        cont_chi2norm = (np.sum(
            (flux[co_region] - continuum[co_region])**2 / error[co_region]**2)
                         / (len(flux[co_region]) - 3))

        if cont_chi2norm > 10:
            flags[key] = 5

        continuum_params[key]['chi2norm'] = cont_chi2norm

        # Estimate integrated line flux and equivalent width (observed,
        # not rest frame):
        dl = np.mean(wavelength[line_region][1:] -
                     wavelength[line_region][:-1])
        n = np.sum(line_region)

        line_flux_value = dl * np.sum(flux[line_region] -
                                      continuum[line_region])
        line_flux_error = dl * np.sqrt(
            np.sum(error[line_region]**2) + n * continuum_error**2)

        eqw_value = dl * np.sum(flux[line_region] / continuum[line_region]) - n
        eqw_error = dl * np.sqrt(
            np.sum(error[line_region]**2 / continuum[line_region]**2) +
            n * continuum_error**2 *
            np.sum(flux[line_region]**2 / continuum[line_region]**4))

        # Continuum flux at the line centre:
        ind = np.abs(wavelength - observed_wavelengths[0]).argmin()
        centre_flux = continuum[ind]
        centre_flux_error = error[ind]

        # Estimate signal-to-noise ratio around the line:
        sn_blue = (filtered_blue[~filtered_blue.mask] /
                   error[co_blue][~filtered_blue.mask])
        sn_red = (filtered_red[~filtered_red.mask] /
                  error[co_red][~filtered_red.mask])
        sn_value = np.average(np.concatenate([sn_blue, sn_red]))

        # Calculate minimum and maximum allowed Gaussian standard
        # deviations:
        dlambda = rest_wavelengths[0] / resolution
        sigma_lsf = dlambda / 2.35482
        min_stddev = sigma_lsf / 2
        max_stddev = np.sqrt(sigma_lsf**2 + sigma_max**2) * (1 + z)

        # Fit Gaussian component(s) if the integrated line flux is
        # positive and has greater than 3 sigma significance:
        if (line_flux_value > 0) & ((line_flux_value / line_flux_error) > 3):

            amplitude = np.max(flux[line_region] - continuum[line_region])

            if ((key in ('Hg', 'Hd')) |
                ((key == 'OII') and not fit_o2_doublet)):

                # One component Gaussian fit for Hg, Hd, OII:
                # -------------------------------------------
                mean = observed_wavelengths[0]

                g_init = Gaussian1D(amplitude, mean, min_stddev)

                g_init.amplitude.min = 0.0

                g_init.mean.min = mean - (1000 * mean / c_kms)
                g_init.mean.max = mean + (1000 * mean / c_kms)

                g_init.stddev.min = min_stddev
                g_init.stddev.max = max_stddev
                # -------------------------------------------

            elif (key == 'OII') and fit_o2_doublet:

                # Optional two component Gaussian fit for OII:
                # --------------------------------------------
                mean_0 = observed_wavelengths[0]
                mean_1 = observed_wavelengths[1]

                tied_params = {'stddev_1': tie_sigma}

                g_init = TwoGaussians(amplitude,
                                      mean_0,
                                      min_stddev,
                                      amplitude,
                                      mean_1,
                                      min_stddev,
                                      tied=tied_params)

                g_init.amplitude_0.min = 0.0
                g_init.amplitude_1.min = 0.0

                g_init.mean_0.min = mean_0 - (1000 * mean_0 / c_kms)
                g_init.mean_0.max = mean_0 + (1000 * mean_0 / c_kms)
                g_init.mean_1.min = mean_1 - (1000 * mean_1 / c_kms)
                g_init.mean_1.max = mean_1 + (1000 * mean_1 / c_kms)

                g_init.stddev_0.min = min_stddev
                g_init.stddev_0.max = max_stddev
                g_init.stddev_1.min = min_stddev
                g_init.stddev_1.max = max_stddev
                # --------------------------------------------

            elif key == 'SII':

                # Two component Gaussian fit for SII:
                # -----------------------------------
                mean_0 = observed_wavelengths[0]
                mean_1 = observed_wavelengths[1]

                tied_params = {'amplitude_1': tie_sii, 'stddev_1': tie_sigma}

                g_init = TwoGaussians(amplitude,
                                      mean_0,
                                      min_stddev,
                                      amplitude,
                                      mean_1,
                                      min_stddev,
                                      tied=tied_params)

                g_init.amplitude_0.min = 0.0
                g_init.amplitude_1.min = 0.0

                g_init.mean_0.min = mean_0 - (1000 * mean_0 / c_kms)
                g_init.mean_0.max = mean_0 + (1000 * mean_0 / c_kms)
                g_init.mean_1.min = mean_1 - (1000 * mean_1 / c_kms)
                g_init.mean_1.max = mean_1 + (1000 * mean_1 / c_kms)

                g_init.stddev_0.min = min_stddev
                g_init.stddev_0.max = max_stddev
                g_init.stddev_1.min = min_stddev
                g_init.stddev_1.max = max_stddev
                # -----------------------------------

            elif key in ('Hb', 'OIII'):

                # Three component fit over Hb/OIII region:
                # ----------------------------------------
                mean_0 = observed_wavelengths[0]

                if key == 'Hb':
                    mean_1 = transition_wavelengths['OIIIB'] * (1 + z)
                    mean_2 = transition_wavelengths['OIIIR'] * (1 + z)

                    tied_params = {
                        'stddev_1': tie_sigma,
                        'stddev_2': tie_sigma
                    }

                else:
                    mean_1 = transition_wavelengths['OIIIB'] * (1 + z)
                    mean_2 = transition_wavelengths['Hb'] * (1 + z)

                    tied_params = {
                        'amplitude_1': tie_oiii,
                        'stddev_1': tie_sigma,
                        'stddev_2': tie_sigma
                    }

                g_init = ThreeGaussians(amplitude,
                                        mean_0,
                                        min_stddev,
                                        amplitude,
                                        mean_1,
                                        min_stddev,
                                        amplitude,
                                        mean_2,
                                        min_stddev,
                                        tied=tied_params)

                g_init.amplitude_0.min = 0.0
                g_init.amplitude_1.min = 0.0
                g_init.amplitude_2.min = 0.0

                g_init.mean_0.min = mean_0 - (1000 * mean_0 / c_kms)
                g_init.mean_0.max = mean_0 + (1000 * mean_0 / c_kms)
                g_init.mean_1.min = mean_1 - (1000 * mean_1 / c_kms)
                g_init.mean_1.max = mean_1 + (1000 * mean_1 / c_kms)
                g_init.mean_2.min = mean_2 - (1000 * mean_2 / c_kms)
                g_init.mean_2.max = mean_2 + (1000 * mean_2 / c_kms)

                g_init.stddev_0.min = min_stddev
                g_init.stddev_0.max = max_stddev
                g_init.stddev_1.min = min_stddev
                g_init.stddev_1.max = max_stddev
                g_init.stddev_2.min = min_stddev
                g_init.stddev_2.max = max_stddev
                # ----------------------------------------

            else:

                # Try one and three component fit over Ha/NII region:
                # ---------------------------------------------------
                mean_1 = observed_wavelengths[1]

                g_init = Gaussian1D(amplitude, mean_1, min_stddev)

                g_init.amplitude.min = 0.0

                g_init.mean.min = mean_1 - (1000 * mean_1 / c_kms)
                g_init.mean.max = mean_1 + (1000 * mean_1 / c_kms)

                g_init.stddev.min = min_stddev
                g_init.stddev.max = max_stddev

                mean_0 = observed_wavelengths[0]
                mean_2 = observed_wavelengths[2]

                tied_params = {
                    'amplitude_2': tie_nii,
                    'stddev_1': tie_sigma,
                    'stddev_2': tie_sigma
                }

                g_init2 = ThreeGaussians(amplitude,
                                         mean_0,
                                         min_stddev,
                                         amplitude,
                                         mean_1,
                                         min_stddev,
                                         amplitude,
                                         mean_2,
                                         min_stddev,
                                         tied=tied_params)

                g_init2.amplitude_0.min = 0.0
                g_init2.amplitude_1.min = 0.0
                g_init2.amplitude_2.min = 0.0

                g_init2.mean_0.min = mean_0 - (1000 * mean_0 / c_kms)
                g_init2.mean_0.max = mean_0 + (1000 * mean_0 / c_kms)
                g_init2.mean_1.min = mean_1 - (1000 * mean_1 / c_kms)
                g_init2.mean_1.max = mean_1 + (1000 * mean_1 / c_kms)
                g_init2.mean_2.min = mean_2 - (1000 * mean_2 / c_kms)
                g_init2.mean_2.max = mean_2 + (1000 * mean_2 / c_kms)

                g_init2.stddev_0.min = min_stddev
                g_init2.stddev_0.max = max_stddev
                g_init2.stddev_1.min = min_stddev
                g_init2.stddev_1.max = max_stddev
                g_init2.stddev_2.min = min_stddev
                g_init2.stddev_2.max = max_stddev
                # ---------------------------------------------------

            # Do the fitting:
            fit_g = LevMarLSQFitter()
            g = fit_g(g_init, wavelength[region],
                      flux[region] - continuum[region])

            # Chi2 on the fit:
            line_chi2 = np.sum(
                (flux[region] - continuum[region] - g(wavelength)[region])**2 /
                error[region]**2)

            # Monte carlo error estimation:
            g_errors = monte_carlo_error(wavelength[region], flux[region],
                                         error[region], continuum[region],
                                         fit_g, g_init)

            ha_3comp = False

            # Compare chi squared values for the two Ha/NII fits and adopt
            # the one that has the minimum chi squared:
            if key == 'Ha':
                # Three component fit of Ha/NII region:
                fit_g2 = LevMarLSQFitter()
                g2 = fit_g2(g_init2, wavelength[region],
                            flux[region] - continuum[region])

                # Monte carlo error estimation:
                g2_errors = monte_carlo_error(wavelength[region], flux[region],
                                              error[region], continuum[region],
                                              fit_g2, g_init2)

                # Chi2 on the fit:
                line_chi2_2 = np.sum(
                    (flux[region] - continuum[region] - g2(wavelength[region]))
                    **2 / error[region]**2)

                # Compare chi2:
                if line_chi2 > line_chi2_2:
                    g = g2
                    g_errors = g2_errors
                    ha_3comp = True

            # Get lists of best-fit Gaussian parameters:
            if ((key in ('Hg', 'Hd')) |
                ((key == 'OII') and not fit_o2_doublet)):
                amplitudes = [g.amplitude.value]
                amplitude_errors = [g_errors['amplitude']]
                means = [g.mean.value]
                stddevs = [g.stddev.value]
                stddev_errors = [g_errors['stddev']]

            elif ((key == 'OII') and fit_o2_doublet) | (key == 'SII'):
                amplitudes = [g.amplitude_0.value, g.amplitude_1.value]
                amplitude_errors = [
                    g_errors['amplitude_0'], g_errors['amplitude_1']
                ]
                means = [g.mean_0.value, g.mean_1.value]
                stddevs = [g.stddev_0.value, g.stddev_1.value]
                stddev_errors = [g_errors['stddev_0'], g_errors['stddev_1']]

            elif ((key == 'Ha') and ha_3comp) | (key in ('Hb', 'OIII')):
                amplitudes = [
                    g.amplitude_0.value, g.amplitude_1.value,
                    g.amplitude_2.value
                ]
                amplitude_errors = [
                    g_errors['amplitude_0'], g_errors['amplitude_1'],
                    g_errors['amplitude_2']
                ]
                means = [g.mean_0.value, g.mean_1.value, g.mean_2.value]
                stddevs = [g.stddev_0.value, g.stddev_1.value, g.stddev_2.value]
                stddev_errors = [
                    g_errors['stddev_0'], g_errors['stddev_1'],
                    g_errors['stddev_2']
                ]

            else:
                amplitudes = [g.amplitude.value, -99.0, -99.0]
                amplitude_errors = [g_errors['amplitude'], -99.0, -99.0]
                means = [g.mean.value, -99.0, -99.0]
                stddevs = [g.stddev.value, -99.0, -99.0]
                stddev_errors = [g_errors['stddev'], -99.0, -99.0]

            # Log these line by line:
            for i, line in enumerate(lines):

                # Log the line fitting parameters:
                line_params[line]['amplitude'] = amplitudes[i]
                line_params[line]['mean'] = means[i]
                line_params[line]['stddev'] = stddevs[i]

                # Only adopt the measurements if the fitted amplitude is
                # non-zero, otherwise, measurements from direct integration
                # of pixels are retained:
                if amplitudes[i] != 0:

                    # Integrated line flux:
                    line_flux_value = (amplitudes[i] * stddevs[i] *
                                       np.sqrt(2 * np.pi))

                    # Error on the integrated line flux:
                    line_flux_error = line_flux_value * np.sqrt(
                        (amplitude_errors[i] / amplitudes[i])**2 +
                        (stddev_errors[i] / stddevs[i])**2)

                    # Re-evaluate the continuum flux at the line centre:
                    ind = np.abs(wavelength - means[i]).argmin()
                    centre_flux = continuum[ind]
                    centre_flux_error = error[ind]

                    # Equivalent width:
                    eqw_value = line_flux_value / centre_flux

                    # Error on the equivalent width:
                    eqw_error = eqw_value * np.sqrt(
                        (line_flux_error / line_flux_value)**2 +
                        (centre_flux_error / centre_flux)**2)

                # Log the line flux, continuum flux and equivalent width:
                line_flux[line] = (line_flux_value, line_flux_error)
                continuum_flux[line] = (centre_flux, centre_flux_error)
                eqw[line] = (eqw_value, eqw_error)

            fit = True
            fit_hd = False

        # Fit single Gaussian absorption component to Hd if the integrated
        # line flux is negative and has greater than 3 sigma significance:
        elif ((key == 'Hd') & (line_flux_value < 0) &
              ((line_flux_value / line_flux_error) < -3)):

            amplitude = np.max(1 - flux[line_region] / continuum[line_region])
            mean = transition_wavelengths[key] * (1 + z)
            dm = 1000 * mean / c_kms

            g_init = GaussianAbsorption1D(amplitude, mean, min_stddev)

            g_init.mean.min = mean - dm
            g_init.mean.max = mean + dm

            g_init.stddev.min = min_stddev
            g_init.stddev.max = max_stddev

            # Do the fitting:
            fit_g = LevMarLSQFitter()
            g = fit_g(g_init, wavelength[region],
                      flux[region] / continuum[region])

            # Monte carlo error estimation:
            g_errors = monte_carlo_error(wavelength[region],
                                         flux[region],
                                         error[region],
                                         continuum[region],
                                         fit_g,
                                         g_init,
                                         absorption=True)

            # Equivalent width:
            eqw_value = (-g.amplitude.value * g.stddev.value *
                         np.sqrt(2 * np.pi) / (1 + z))

            # Error on the equivalent width:
            eqw_error = fabs(eqw_value) * np.sqrt(
                (g_errors['amplitude'] / g.amplitude.value)**2 +
                (g_errors['stddev'] / g.stddev.value)**2)

            for line in lines:

                # Log the line fitting parameters:
                line_params[line]['amplitude'] = g.amplitude.value
                line_params[line]['mean'] = g.mean.value
                line_params[line]['stddev'] = g.stddev.value

                # Log the line flux, continuum flux and equivalent width:
                line_flux[line] = (line_flux_value, line_flux_error)
                continuum_flux[line] = (centre_flux, centre_flux_error)
                eqw[line] = (eqw_value, eqw_error)

            fit = False
            fit_hd = True

        # Otherwise we won't do any line fitting:
        else:

            for line in lines:

                # Set all line fitting parameters to -99:
                line_params[line]['amplitude'] = -99.0
                line_params[line]['mean'] = -99.0
                line_params[line]['stddev'] = -99.0

                # Log the line flux, continuum flux and equivalent width:
                line_flux[line] = (line_flux_value, line_flux_error)
                continuum_flux[line] = (centre_flux, centre_flux_error)
                eqw[line] = (eqw_value, eqw_error)

            fit = False
            fit_hd = False

        sn[key] = sn_value

        # Make plots if that option is turned on:
        if plot:

            if sky is not None and spec2d is not None:
                n = 3
                p = Plot(n, 1, n, aspect=1, width=5.9, fontsize=12)

            elif ((sky is not None and spec2d is None) |
                  (spec2d is not None and sky is None)):
                n = 2
                p = Plot(n, 1, n, aspect=0.8, width=5.9, fontsize=12)

            else:
                n = 1
                p = Plot(n, 1, n, aspect=0.6, width=5.9, fontsize=12)

            centre = (bands[key][0] * (1 + z) + bands[key][5] * (1 + z)) / 2
            cond = (wavelength > centre - 250) & (wavelength < centre + 250)

            if spec2d is not None:
                cond2 = ((spec2d.wavelength.value > centre - 250) &
                         (spec2d.wavelength.value < centre + 250))

            # 2D spectrum plot:
            if spec2d is not None:

                n = 3 if n == 3 else 2

                # 2D spectrum parameters for plotting:
                i = min(spec2d.data.shape[0] // 2, 3)
                v1 = np.percentile(spec2d.data[i:-1, :].ravel(), 90)
                wdelt = spec2d.wavelength.value[1] - spec2d.wavelength.value[0]
                yvals = np.arange(spec2d.data.shape[0]) * wdelt

                p.axes[n - n].pcolormesh(spec2d.wavelength.value[cond2],
                                         yvals,
                                         spec2d.data[:, cond2],
                                         vmin=-v1 / 5,
                                         vmax=2 * v1,
                                         cmap=pl.cm.hot)

                if yposition is not None:
                    p.axes[n - n].axhline(wdelt * yposition[0],
                                          ls='--',
                                          lw=2,
                                          color='LawnGreen')
                    p.axes[n - n].axhline(wdelt * yposition[2],
                                          ls='--',
                                          lw=2,
                                          color='LawnGreen')

            # 1D spectrum plot:
            if (n == 3) | ((n == 2) & (sky is not None and spec2d is None)):
                n = 2

            else:
                n = 1

            p.axes[n - n].plot(wavelength[cond],
                               flux[cond] / 1e-16,
                               drawstyle='steps-mid',
                               color='k')
            p.axes[n - n].plot(wavelength[cond],
                               error[cond] / 1e-16,
                               drawstyle='steps-mid',
                               color='r')
            p.axes[n - n].plot(wavelength[region],
                               continuum[region] / 1e-16,
                               lw=3,
                               color='RoyalBlue')

            if fit:
                p.axes[n - n].plot(
                    wavelength[region],
                    (g(wavelength[region]) + continuum[region]) / 1e-16,
                    color='m',
                    lw=2)

            if fit_hd:
                p.axes[n - n].plot(
                    wavelength[region],
                    (g(wavelength[region]) * continuum[region]) / 1e-16,
                    color='m',
                    lw=2)

            p.axes[n - n].axvspan(bands[key][2] * (1 + z),
                                  bands[key][3] * (1 + z),
                                  facecolor='g',
                                  edgecolor='none',
                                  alpha=0.5)
            p.axes[n - n].annotate(key,
                                   xy=(0.05, 0.8),
                                   xycoords='axes fraction',
                                   horizontalalignment='left',
                                   fontsize=12,
                                   bbox=bbox,
                                   color='k')
            p.axes[n - n].annotate(name,
                                   xy=(0.95, 0.8),
                                   xycoords='axes fraction',
                                   horizontalalignment='right',
                                   fontsize=12,
                                   bbox=bbox,
                                   color='k')

            # Sky spectrum plot:
            if sky is not None:

                if sky_in_counts:
                    p.axes[n - 1].plot(wavelength[cond],
                                       sky[cond],
                                       drawstyle='steps-mid',
                                       color='MidnightBlue')

                else:
                    p.axes[n - 1].plot(wavelength[cond],
                                       sky[cond] / 1e-16,
                                       drawstyle='steps-mid',
                                       color='MidnightBlue')

                p.axes[n - 1].annotate('sky',
                                       xy=(0.05, 0.8),
                                       xycoords='axes fraction',
                                       horizontalalignment='left',
                                       fontsize=12,
                                       bbox=bbox,
                                       color='k')

            # Axis limits and labels, tidy up and display:
            region_min = np.min(error[region])
            region_max = np.max(flux[region])
            sn_spec = np.median(flux / error)

            for i in range(0, n):
                p.axes[i].set_xlim(wavelength[cond][0], wavelength[cond][-1])

            p.axes[n - 2].set_ylim(
                (region_min - 0.2 * sn_spec * region_min) / 1e-16,
                (region_max + 0.8 * region_max) / 1e-16)

            xlabel = r'Wavelength ($\AA$)'

            if spec1d.flux.unit == erg / s / cm**2 / angstrom:
                ylabel = r'Flux ($10^{-16}$ erg s$^{-1}$ cm$^{-2}$ $\AA^{-1}$)'

            else:
                ylabel = 'Flux ({0})'.format(
                    spec1d.flux.unit.to_string(format='latex'))

            p.tidy(shared_axes=True)
            p.labels(xlabel, ylabel)

            if plot_directory is not None:
                p.savefig('{0}/{1}_{2}.png'.format(plot_directory, name, key))

            if show_plot:
                p.display()

    return (line_flux, continuum_flux, eqw, sn, continuum_params, line_params,
            flags)
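
# Note: every bounded single-Gaussian fit above follows the same pattern.
# A self-contained sketch of that pattern on a synthetic, continuum-subtracted
# emission line (all numbers illustrative):
import numpy as np
from astropy.modeling.models import Gaussian1D
from astropy.modeling.fitting import LevMarLSQFitter

wave = np.linspace(4800., 4900., 200)
flux = 2.0 * np.exp(-(wave - 4862.)**2 / (2 * 4.0**2))

g_init = Gaussian1D(amplitude=flux.max(), mean=4861., stddev=3.)
g_init.amplitude.min = 0.0
g_init.mean.bounds = (4859., 4865.)   # e.g. a +/- velocity window
g_init.stddev.bounds = (1.0, 10.0)    # min/max allowed line widths

g = LevMarLSQFitter()(g_init, wave, flux)
line_flux = g.amplitude.value * g.stddev.value * np.sqrt(2 * np.pi)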
Code example #23
File: oned.py  Project: ennerchung/zachopy
def peaks(x,
          y,
          plot=False,
          xsmooth=30,
          threshold=100,
          edgebuffer=10,
          widthguess=1,
          maskwidth=3,
          returnfiltered=False):
    '''Return the significant peaks in a 1D array.

    required:
        x, y = two 1D arrays

    optional:
        plot          # should we show a plot?
        xsmooth       # half-width for median smoothing
        threshold     # how many MADs above background for peaks?
        edgebuffer    # reject peaks within this distance of an edge
        widthguess    # about how wide will the peaks be?
        maskwidth     # peak fits use x's within (maskwidth)*(widthguess)

    If returnfiltered==True, then will return filtered arrays:
        (xPeaks, yPeaks, xfiltered, yfiltered).

    If returnfiltered==False, then only returns the peaks:
        (xPeaks, yPeaks)
    '''

    # calculate a smoothed version of the curve
    smoothed = mediansmooth(x, y, xsmooth=xsmooth)

    filtered = (y - smoothed)

    # calculate the mad of the whole thing
    mad = np.median(np.abs(filtered))

    # normalize the filtered timeseries
    filtered /= mad

    # calculate the derivatives
    derivatives = (filtered[1:] - filtered[:-1]) / (x[1:] - x[:-1])

    # estimate peaks as zero crossings
    guesses = np.zeros_like(x).astype(bool)
    guesses[1:-1] = (derivatives[:-1] > 0) * (derivatives[1:] <= 0)

    # make sure the peak is high enough to be interesting
    guesses *= filtered > threshold

    # make sure the peak isn't too close to an edge
    guesses *= (x > np.min(x) + edgebuffer) * (x < np.max(x) - edgebuffer)

    if plot:
        # turn on interactive plotting
        plt.ion()

        # create a figure and gridspec
        fi = plt.figure('peak finding')
        gs = plt.matplotlib.gridspec.GridSpec(2, 1, hspace=0.03)

        # create axes for two kinds of plots
        ax_raw = plt.subplot(gs[0])
        plt.setp(ax_raw.get_xticklabels(), visible=False)
        ax_filtered = plt.subplot(gs[1], sharex=ax_raw)

        # plot the input vector
        kw = dict(alpha=1, color='gray', linewidth=1)
        ax_raw.plot(x, y, **kw)
        ax_filtered.plot(x, filtered, **kw)

        # plot the threshold
        kw = dict(alpha=0.5, color='royalblue', linewidth=1)
        ax_raw.plot(x, threshold * mad + smoothed, **kw)
        ax_filtered.plot(x, threshold + np.zeros_like(x), **kw)

        # set the scale
        ax_raw.set_yscale('log')
        ax_filtered.set_yscale('log')
        ax_filtered.set_ylim(mad, np.max(filtered))

        # plot the peak guesses
        markerkw = dict(marker='o',
                        markersize=6,
                        color='none',
                        markeredgecolor='tomato',
                        alpha=0.5)
        ax_raw.plot(x[guesses], y[guesses], **markerkw)
        ax_filtered.plot(x[guesses], filtered[guesses], **markerkw)

        # create an empty plot object for showing the fits in progress
        fitplotter = ax_filtered.plot([], [],
                                      alpha=0.5,
                                      color='red',
                                      linewidth=1)[0]

        plt.draw()
        a = raw_input("how 'bout them peaks?")

    # create empty lists of peaks
    xPeaks, yPeaks = [], []

    # create a fitter object
    fitter = LevMarLSQFitter()
    for g in np.nonzero(guesses)[0]:

        # initialize an approximate Gaussian
        gauss = Gaussian1D(mean=x[g], amplitude=filtered[g], stddev=widthguess)

        # which points are relevant to this fit?
        mask = np.abs(x - x[g]) <= maskwidth * widthguess

        # use LM to fit the peak position and width
        fit = fitter(gauss, x[mask], filtered[mask])

        # store the peak values
        distancemoved = np.abs((fit.mean.value - x[g]) / fit.stddev.value)
        if distancemoved <= 3.0:
            xPeaks.append(fit.mean.value)
            yPeaks.append(fit.amplitude.value)

            if plot:

                # update the Gaussian's parameters, and plot it
                gauss.parameters = fit.parameters
                xfine = np.linspace(*minmax(x[mask]), num=50)
                fitplotter.set_data(xfine, gauss(xfine))

                # plot the fitted peak
                markerkw['color'] = markerkw['markeredgecolor']
                markerkw['alpha'] = 1
                ax_filtered.plot(xPeaks[-1], yPeaks[-1], **markerkw)

                # set the xlimits
                #ax_filtered.set_xlim(*minmax(x[mask]))

                plt.draw()
                a = input('  and this one in particular?')

    if returnfiltered:
        return np.array(xPeaks), np.array(yPeaks), x, filtered
    else:
        return np.array(xPeaks), np.array(yPeaks)
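# Note: the detection step above (median-smooth, normalise by the MAD, then
# take derivative zero crossings above a threshold) can be exercised without
# the fitting machinery. A sketch with a plain running median standing in for
# mediansmooth (window size and threshold are illustrative):
import numpy as np

x = np.linspace(0., 100., 1001)
y = np.random.default_rng(0).normal(0., 1., x.size)
y += 50. * np.exp(-(x - 40.)**2 / (2 * 0.5**2))  # inject one strong peak

xsmooth = 5.
smoothed = np.array([np.median(y[np.abs(x - xi) <= xsmooth]) for xi in x])
filtered = (y - smoothed) / np.median(np.abs(y - smoothed))

derivatives = (filtered[1:] - filtered[:-1]) / (x[1:] - x[:-1])
guesses = np.zeros_like(x, dtype=bool)
guesses[1:-1] = (derivatives[:-1] > 0) & (derivatives[1:] <= 0)
guesses &= filtered > 10.  # threshold in MADs
print(x[guesses])  # expect a value near 40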
Code example #24
def pahfit(input_dir, output, max_iter, error, packfile, exclude_bf):

    os.makedirs(output, exist_ok=True)

    for subdir, dirs, files in os.walk(fr'./{input_dir}'):
        for specfile in files:
            try:
                if specfile.split('.')[-1] in ('txt', 'ipac'):

                    filepath = subdir + os.sep + specfile

                    outputname = specfile.split('.')[0]
                    typer.echo(
                        f'Performing PAHFit for {typer.style(outputname, bold=True, fg=typer.colors.MAGENTA)}'
                    )

                    obs_spectrum = Table.read(filepath, format='ipac')
                    obs_x = obs_spectrum['wavelength'].to(
                        u.micron, equivalencies=u.spectral())
                    obs_y = obs_spectrum['flux'].to(
                        u.Jy, equivalencies=u.spectral_density(obs_x))
                    obs_unc = obs_spectrum['sigma'].to(
                        u.Jy, equivalencies=u.spectral_density(obs_x))

                    obs_x = obs_x.value
                    obs_y = obs_y.value
                    weights = 1. / obs_unc.value

                    # packfile = fr'C:\Users\bpart\OneDrive\Desktop\astro\MASSIVE\data\packFile.ipac'

                    pmodel = PAHFITBase(obs_x, obs_y, filename=packfile)

                    # pick the fitter
                    fit = LevMarLSQFitter()

                    # fit
                    obs_fit = fit(pmodel.model,
                                  obs_x,
                                  obs_y,
                                  weights=weights,
                                  maxiter=max_iter,
                                  epsilon=1e-10,
                                  acc=error)

                    pmodel.save(obs_fit, os.path.join(output, outputname),
                                'ipac')

                    # plot result
                    fontsize = 18
                    font = {'size': fontsize}
                    mpl.rc('font', **font)
                    mpl.rc('lines', linewidth=2)
                    mpl.rc('axes', linewidth=2)
                    mpl.rc('xtick.major', width=2)
                    mpl.rc('ytick.major', width=2)

                    fig, ax = plt.subplots(figsize=(15, 10))

                    # modelfig, modelax = plt.subplots(figsize=(15, 10))

                    # modelax.plot(obs_x, obs_fit(obs_x) /obs_x, "g-")

                    # modelfig.tight_layout()

                    modelcsv = open(
                        os.path.join(output, f'{outputname}_model.csv'), 'w')

                    modelcsv.write(
                        'observed wavelength (microns), observed flux (Jy), model flux (Jy)\n'
                    )
                    ys = obs_fit(obs_x)
                    for i in range(len(obs_x)):

                        modelcsv.write(
                            f'{str(obs_x[i])}, {str(obs_y[i])}, {str(ys[i])}\n'
                        )
                    modelcsv.close()

                    pmodel.plot(ax, obs_x, obs_y, obs_fit)
                    ax.plot(obs_x, obs_y / obs_x, "ks", fillstyle="full")

                    ax.set_yscale('linear')
                    ax.set_xscale('log')

                    # use the whitespace better
                    fig.tight_layout()

                    fig.savefig(os.path.join(output, f'{outputname}.png'))

                    # chiSquared = calc_reduced_chi_square(obs_fit(obs_x), obs_x, obs_y, obs_unc, len(obs_x), 139)

                    print("SUCCESS", outputname)
            except Exception as exc:
                print('PAHFIT FAILED FOR', specfile, '-', exc)
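
# Note: the signature above suggests this function is registered as a typer
# command. One plausible, purely hypothetical wiring is sketched below; typer
# would need type annotations on the parameters to parse non-string options
# such as max_iter:
import typer

app = typer.Typer()
app.command()(pahfit)

if __name__ == '__main__':
    app()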
Code example #25
File: gaussian.py  Project: Onoddil/photutils
def centroid_1dg(data, error=None, mask=None):
    """
    Calculate the centroid of a 2D array by fitting 1D Gaussians to the
    marginal ``x`` and ``y`` distributions of the array.

    Non-finite values (e.g., NaN or inf) in the ``data`` or ``error``
    arrays are automatically masked. These masks are combined.

    Parameters
    ----------
    data : array_like
        The 2D data array.

    error : array_like, optional
        The 2D array of the 1-sigma errors of the input ``data``.

    mask : array_like (bool), optional
        A boolean mask, with the same shape as ``data``, where a `True`
        value indicates the corresponding element of ``data`` is masked.

    Returns
    -------
    centroid : `~numpy.ndarray`
        The ``x, y`` coordinates of the centroid.
    """
    data = np.ma.asanyarray(data)

    if mask is not None and mask is not np.ma.nomask:
        mask = np.asanyarray(mask)
        if data.shape != mask.shape:
            raise ValueError('data and mask must have the same shape.')
        data.mask |= mask

    if np.any(~np.isfinite(data)):
        data = np.ma.masked_invalid(data)
        warnings.warn(
            'Input data contains non-finite values (e.g., NaN or '
            'inf) that were automatically masked.', AstropyUserWarning)

    if error is not None:
        error = np.ma.masked_invalid(error)
        if data.shape != error.shape:
            raise ValueError('data and error must have the same shape.')
        data.mask |= error.mask
        error.mask = data.mask

        xy_error = [np.sqrt(np.ma.sum(error**2, axis=i)) for i in (0, 1)]
        xy_weights = [(1.0 / xy_error[i].clip(min=1.e-30)) for i in (0, 1)]
    else:
        xy_weights = [np.ones(data.shape[i]) for i in (1, 0)]

    # assign zero weight where an entire row or column is masked
    if np.any(data.mask):
        bad_idx = [np.all(data.mask, axis=i) for i in (0, 1)]
        for i in (0, 1):
            xy_weights[i][bad_idx[i]] = 0.

    xy_data = [np.ma.sum(data, axis=i).data for i in (0, 1)]

    constant_init = np.ma.min(data)
    centroid = []
    for (data_i, weights_i) in zip(xy_data, xy_weights):
        params_init = gaussian1d_moments(data_i)
        g_init = Const1D(constant_init) + Gaussian1D(*params_init)
        fitter = LevMarLSQFitter()
        x = np.arange(data_i.size)
        g_fit = fitter(g_init, x, data_i, weights=weights_i)
        centroid.append(g_fit.mean_1.value)

    return np.array(centroid)
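
# Note: usage sketch against the photutils public API (synthetic image; the
# import path assumes a released photutils):
import numpy as np
from photutils.centroids import centroid_1dg

y, x = np.mgrid[0:40, 0:40]
img = np.exp(-((x - 22.3)**2 + (y - 17.8)**2) / (2 * 3.0**2))
print(centroid_1dg(img))  # approximately [22.3, 17.8]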
Code example #26
def fit_2dgaussian(data, error=None, mask=None):
    """
    Fit a 2D Gaussian plus a constant to a 2D image.

    Parameters
    ----------
    data : array_like
        The 2D array of the image.

    error : array_like, optional
        The 2D array of the 1-sigma errors of the input ``data``.

    mask : array_like (bool), optional
        A boolean mask, with the same shape as ``data``, where a `True`
        value indicates the corresponding element of ``data`` is masked.

    Returns
    -------
    result : A `GaussianConst2D` model instance.
        The best-fitting Gaussian 2D model.
    """

    if data.size < 7:
        warnings.warn(
            'data array must have at least 7 values to fit a 2D '
            'Gaussian plus a constant', AstropyUserWarning)
        return None

    if error is not None:
        weights = 1.0 / error
    else:
        weights = None

    if mask is not None:
        mask = np.asanyarray(mask)
        if weights is None:
            weights = np.ones_like(data)
        # down-weight masked pixels
        weights[mask] = 1.e-30

    # Subtract the minimum of the data as a crude background estimate.
    # This will also make the data values positive, preventing issues with
    # the moment estimation in data_properties (moments from negative data
    # values can yield undefined Gaussian parameters, e.g. x/y_stddev).
    shift = np.min(data)
    data = np.copy(data) - shift
    props = data_properties(data, mask=mask)
    init_values = np.array([
        props.xcentroid.value, props.ycentroid.value,
        props.semimajor_axis_sigma.value, props.semiminor_axis_sigma.value,
        props.orientation.value
    ])

    init_const = 0.  # subtracted data minimum above
    init_amplitude = np.nanmax(data) - np.nanmin(data)
    g_init = GaussianConst2D(init_const, init_amplitude, *init_values)
    fitter = LevMarLSQFitter()
    y, x = np.indices(data.shape)
    gfit = fitter(g_init, x, y, data, weights=weights)
    gfit.amplitude_0 = gfit.amplitude_0 + shift
    return gfit
Code example #27
gauss = 0

if np.ma.count(image) >= 7:
    gauss = photutils.fit_2dgaussian(image[40:61, 40:61], mask=None)

fwhm = 0
if gauss != 0:
    fwhm = abs(gauss.x_stddev) * gaussian_sigma_to_fwhm

print(fwhm)

# sigma_psf is assumed to be defined earlier in the original script
daogroup = DAOGroup(5.0 * sigma_psf * gaussian_sigma_to_fwhm)
mmm_bkg = MMMBackground()
fitter = LevMarLSQFitter()
psf_model = IntegratedGaussianPRF(sigma=abs(gauss.x_stddev))

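# fitshape must be odd in each dimension; pad even sizes by one pixel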
fitshape = (int(3. * fwhm), int(3. * fwhm))
if int(3. * fwhm) % 2 == 0:
    fitshape = (int(3. * fwhm) + 1, int(3. * fwhm) + 1)

photometry = IterativelySubtractedPSFPhotometry(finder=iraffind,
                                                group_maker=daogroup,
                                                bkg_estimator=mmm_bkg,
                                                psf_model=psf_model,
                                                fitter=LevMarLSQFitter(),
                                                niters=1, fitshape=fitshape)
result_tab = photometry(image=image)
residual_image = photometry.get_residual_image()
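This snippet assumes `image`, `sigma_psf`, and `iraffind` were defined earlier in the script; a plausible setup sketch (the threshold factor and PSF sigma are illustrative, and the import paths follow recent photutils):

from astropy.stats import gaussian_sigma_to_fwhm
from photutils.background import MMMBackground
from photutils.detection import IRAFStarFinder

sigma_psf = 2.0  # assumed PSF sigma in pixels
bkg = MMMBackground()
iraffind = IRAFStarFinder(threshold=3.5 * bkg(image),
                          fwhm=sigma_psf * gaussian_sigma_to_fwhm,
                          roundhi=5.0, roundlo=-5.0,
                          sharplo=0.0, sharphi=2.0)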
Code example #28
0
    def __init__(self,
                 spectrumIn,
                 wline,
                 band,
                 polyOrder=1,
                 widthFactor=4.,
                 verbose=0):
        # =======================================================================
        # Initial check in spectral_axis
        # =======================================================================
        if not (isinstance(spectrumIn.spectral_axis, u.Quantity)
                and isinstance(spectrumIn.flux, u.Quantity)):
            raise ValueError(
                "Spectral axis and flux must be `Quantity` objects.")
        if spectrumIn.spectral_axis.unit != u.um:
            raise ValueError("Spectral axis is not in units of microns")

        self.polyOrder = polyOrder
        resv = self.get_spec_resolution(int(band[1]),
                                        np.array(wline, dtype=float))
        # =======================================================================
        # Convert delta velocity to delta lambda in microns with astropy units equivalencies
        # =======================================================================
        # c_kms = const.c.to('km/s')
        # fwhmum = (resv * u.kilometer/ u.s / c_kms) * wline
        rest_wline = wline * u.um
        fwhmum_q = (resv * u.km / u.s).to(
            u.um, equivalencies=u.doppler_optical(rest_wline)) - rest_wline
        fwhmum = fwhmum_q.value
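        # Gaussian FWHM -> sigma conversion: sigma = FWHM / (2*sqrt(2*ln 2))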
        fwhm_to_sigma = 1. / (8 * np.log(2))**0.5
        lineWidthEstimate = fwhmum * fwhm_to_sigma
        wmin, wmax = wline[0] - (widthFactor *
                                 fwhmum), wline[-1] + (widthFactor * fwhmum)
        if verbose:
            print("wline, wmin, wmax, resv, fwhmum, lineWidthEstimate: ",
                  wline, wmin, wmax, resv, fwhmum, lineWidthEstimate)

        self.wmin = wmin
        self.wmax = wmax
        self.spectrumIn = spectrumIn
        # =======================================================================
        # mask non finite elements
        # =======================================================================
        spectrum = self.__finite(spectrumIn, verbose=verbose)
        self.spectrum = spectrum

        wunit = self.spectrum.spectral_axis.unit
        region = SpectralRegion(self.wmin * wunit, self.wmax * wunit)
        spectrum_region = extract_region(self.spectrum, region)

        # =======================================================================
        # Compute peak flux estimates for model parameters starting values in the region
        # =======================================================================
        peakFluxEstimate = []
        for wsline in wline:
            wave = spectrum_region.spectral_axis.value  #just the ndarray and not Quantity
            flux = spectrum_region.flux.value
            wdist = np.abs(wsline - wave)
            if verbose: print(wsline, min(wdist), max(wdist))
            indexLine = np.where(wdist == min(wdist))[0][0]
            if verbose: print("indexLine= {}".format(indexLine))
            peakEstimate = np.mean(flux[indexLine - 1:indexLine + 1])
            if verbose:
                print('Estimates for peak init {}'.format(peakEstimate))
            # continuum estimated from the five edge samples on each side
            cont_sample = np.concatenate((flux[:5], flux[-5:]), axis=None)
            continuumEstimate = np.median(cont_sample)
            peakFluxEstimate = np.append(peakFluxEstimate,
                                         peakEstimate - continuumEstimate)
            if verbose:
                print('Estimates for peak & continuum {}, {}'.format(
                    peakFluxEstimate, continuumEstimate))

        # =======================================================================
        # Construct model compound (branching off lines+continuum or continuum)
        # =======================================================================

        try:
            lineModel_init = models.Polynomial1D(self.polyOrder,
                                                 c0=continuumEstimate,
                                                 name='cont')
            for xi in range(len(wline)):
                lineModel_init += models.Gaussian1D(
                    amplitude=peakFluxEstimate[xi],
                    mean=wline[xi],
                    stddev=lineWidthEstimate[xi],
                    name='g{}'.format(xi + 1))
            fitter = LevMarLSQFitter()
            lineModel = fit_lines(self.spectrum,
                                  lineModel_init,
                                  fitter=fitter,
                                  window=region)
            fitResult = lineModel(self.spectrum.spectral_axis)
            findLine = 1

            self.flux = []
            self.sigma = []

            for idx in range(len(wline)):
                #momentarily taking advantage of astropy units for conversion
                line_amp = (lineModel.unitless_model[idx + 1].amplitude.value *
                            u.Jy).to(u.Watt / u.m**2 / u.Hz)
                line_sig = (lineModel.unitless_model[idx + 1].stddev.value *
                            u.um).to(u.Hz, equivalencies=u.spectral())
                self.flux = np.append(self.flux, (line_amp * line_sig *
                                                  np.sqrt(2. * np.pi)).value)
                self.sigma = np.append(self.sigma, line_sig.value)
        except Exception:
            if verbose: print('Line fit failed; fitting continuum only')
            lineModel_init = models.Polynomial1D(self.polyOrder,
                                                 c0=continuumEstimate,
                                                 name='cont')
            fitter = LevMarLSQFitter()
            lineModel = fit_lines(
                self.spectrum, lineModel_init, fitter=fitter, window=region
            )  #the problem is narrow window where the contribution of the continuum sample is small
            fitResult = lineModel(self.spectrum.spectral_axis)
            findLine = 0
        self.model = lineModel
        self.fitResult = fitResult
        self.findLine = findLine
        self.fitter = fitter
        # =======================================================================
        # Preserve continuum Polynomial model
        # =======================================================================
        # there are two types of models, those that are based on
        # `~astropy.modeling.models.PolynomialModel` and therefore
        # require the ``degree`` parameter when instantiating the
        # class , and "everything else" that does not require an
        # "extra" parameter for class instantiation.
        compound_model = lineModel.n_submodels > 1
        if compound_model:
            self.continuumModel = lineModel.unitless_model[0]
        else:
            self.continuumModel = lineModel.unitless_model
        if findLine:
            self.continuum = []
            self.peak = []
            self.centre = []
            self.sigma = []
            self.fwhm = []
            self.stddev = np.sqrt(
                np.diag(fitter.fit_info['cov_x']
                        ))  #standard deviations pertaining to all parameters.
            params_idx = [
                int(param.split('_', -1)[-1])
                for param in self.model.param_names
            ]
            self.fluxError = []
            self.fluxErrorRelative = []
            for idx in range(len(wline)):
                self.continuum = np.append(
                    self.continuum,
                    self.continuumModel(
                        lineModel.unitless_model[idx + 1].mean.value))
                self.peak = np.append(
                    self.peak,
                    lineModel.unitless_model[idx + 1].amplitude.value)
                self.centre = np.append(
                    self.centre, lineModel.unitless_model[idx + 1].mean.value)
                self.sigma = np.append(
                    self.sigma, lineModel.unitless_model[idx + 1].stddev.value)
                self.fwhm = np.append(self.fwhm,
                                      self.sigma[-1] / fwhm_to_sigma)
                line_amp = (lineModel.unitless_model[idx + 1].amplitude.value *
                            u.Jy).to(u.Watt / u.m**2 / u.Hz)
                line_sig = (lineModel.unitless_model[idx + 1].stddev.value *
                            u.um).to(u.Hz, equivalencies=u.spectral())
                param_idx = [
                    i for i, value in enumerate(params_idx)
                    if value == (idx + 1)
                ]
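                # relative flux error: relative errors of the line amplitude
                # and stddev (first and last parameters of this Gaussian
                # submodel) added in quadrature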
                self.fluxErrorRelative = np.append(
                    self.fluxErrorRelative,
                    np.sqrt(
                        np.sum((self.stddev / self.model.parameters)[np.array(
                            [param_idx])][np.array([0, -1])]**2.)))
            self.fluxError = self.fluxErrorRelative * self.flux
            self.width = self.fwhm
        else:
            self.continuum = np.full(len(wline), np.median(flux))
            self.flux = self.peak = self.sigma = self.fwhm = self.width = \
                np.zeros(len(wline))
            self.centre = wline
            if verbose:
                print('Line Not Detected. Continuum: {}'.format(
                    self.continuum))
        self.line_spec = self.get_line_spec()
        self.chiSquared = (self.fitter.fit_info['fvec']**2).sum() / (
            len(self.fitter.fit_info['fvec']) -
            len(self.fitter.fit_info['param_cov']))

        return
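For reference, the per-line flux computed in the `try` branch is the analytic integral of a Gaussian, F = sqrt(2*pi) * A * sigma. A standalone sketch of the same unit conversion the class performs (amplitude and width values are illustrative):

import numpy as np
import astropy.units as u

line_amp = (0.5 * u.Jy).to(u.W / u.m**2 / u.Hz)
line_sig = (0.01 * u.um).to(u.Hz, equivalencies=u.spectral())
flux = np.sqrt(2. * np.pi) * line_amp * line_sig  # -> W / m**2
print(flux)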
Code example #29
0
                "amplitude": [0.0, None],
                "x_0": [1.0 / 22.0, 1.0 / 17.0],
                "fwhm": [0.0001, 0.5],
            },
        ))

    ponly.c0_0 = -1.0 * av_guess
    ponly.c0_0.bounds = (None, -1.0 * av_guess)
    ponly.c1_0 = 0.005
    ponly.c2_0 = -0.1
    ponly.c3_0 = 0.5
    ponly.c4_0 = -0.2
    ponly.c5_0 = 0.03

    # pick the fitter
    fit = LevMarLSQFitter()

    # fit the data to the P92 model using the fitter
    # p92_fit = fit(p92_init, x, y, weights=1.0 / y_unc, maxiter=1000)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UserWarning)
        p92_fit = fit(ponly,
                      x,
                      y,
                      weights=1.0 / y_unc,
                      maxiter=1000,
                      epsilon=0.001)

    for k, cur_pname in enumerate(p92_fit.param_names):
        print("{:12} {:6.4e}".format(cur_pname, p92_fit.parameters[k]))
Code example #30
0
File: std1dspec.py  Project: monodera/FOCASIFU
def std1dspec(infile, startz=2000, nsigma=5, overwrite=False):
    print('\n#############################')
    print('Making 1D spectrum')
                              
    hdl = fits.open(infile)
    hdr = hdl[0].header
    basename = hdr['FRAMEID']
    outfile = basename + '.1dspec.fits'
    if os.path.isfile(outfile) and not overwrite:
        print('\t 1D data already exists. '+outfile)
        print('\t This procedure is skipped.')
        return outfile, True
    
    scidata = hdl[0].data
    binfac1 = hdr['BIN-FCT1']

    # Showing the image 
    aspect = 0.43/(0.104*binfac1)
    fig=plt.figure()
    plt.title('Click on the star. ')
    plt.imshow(scidata[startz,:,:], aspect=aspect, \
               interpolation='nearest', origin='lower')

    global xc,yc
    xc = 0.0
    yc = 0.0

    def star_center(event):
        global xc,yc
        xc= event.xdata
        yc = event.ydata
        plt.close()
        return

    
    cid = fig.canvas.mpl_connect('button_press_event', star_center)
    print('\n\t Click near the star center.')
    plt.show()

    print('\t Initial star location: (%.2f, %.2f)'%(xc,yc))
    initc = np.array((xc,yc))
    cutdata, initp = cutout(scidata[startz,:,:], initc, w=10)
    g_init = Gaussian2D(amplitude=np.max(cutdata),
                         x_mean=initc[0]-initp[0],
                         y_mean=initc[1]-initp[1],
                         x_stddev=2.0,
                         y_stddev=1.0,
                         theta=3.1416/2)
    g_init.theta.fixed = True
    fitter = LevMarLSQFitter()
    y, x = np.indices(cutdata.shape)
    gfit = fitter(g_init, x, y, cutdata)
    print('\t Initial 2D Gaussian fitting result:')
    print(gfit)
    position0 = np.array([gfit.x_mean.value, gfit.y_mean.value])
    position0 = position0 + initp
    position = position0

    a = gfit.x_stddev.value * nsigma
    b = gfit.y_stddev.value * nsigma
    theta = gfit.theta.value

    plt.imshow(scidata[startz,:,:], aspect=aspect, \
               interpolation='nearest', origin='lower')
    apertures = EllipticalAperture(position, a=a, b=b, theta=theta)
    apertures.plot()
    print('\n\t Check the aperture, and close the plot window.')
    plt.title('Check the aperture')
    plt.show()

    global coords, ii, std1ddata, lam

    std1ddata = np.zeros(scidata.shape[0], dtype=np.float32)

    # Aperture photometry with increasing wavelength pix from startz
    for i in range(startz,scidata.shape[0]):
        cutdata, initp = cutout(scidata[i,:,:],position)
        if np.min(cutdata) == np.max(cutdata):
            print('\t Cutdata is empty at '+str(i)+' pix.')
            break
        position_pre = position
        position = centroid_com(cutdata)
        position = position + initp
        if np.linalg.norm(position-position_pre) > 2.:
            print('\t Centroid is not good at '+str(i)+' pix.')
            break
        apertures = EllipticalAperture(position, a=a, b=b, theta=theta)
        phot_table = aperture_photometry(scidata[i,:,:], apertures)
        std1ddata[i] = phot_table['aperture_sum'].data[0]

    # Aperture photometry with decreasing wavelength pix from startz 
    position = position0
    for i in range(startz-1,0,-1):
        cutdata, initp = cutout(scidata[i,:,:],position)
        if np.min(cutdata) == np.max(cutdata):
            print('\t Cutdata is empty at ' + str(i) + ' pix.')
            break
        position_pre = position
        position = centroid_com(cutdata)
        position = position + initp
        if np.linalg.norm(position-position_pre) > 2.:
            print('\t Centroid is not good at ' + str(i) + ' pix.')
            break
        apertures = EllipticalAperture(position, a=a, b=b, theta=theta)
        phot_table = aperture_photometry(scidata[i,:,:], apertures)
        std1ddata[i] = phot_table['aperture_sum'].data[0]


    # Plotting the 1D data & selecting the spectral range.
    crpix = hdr['CRPIX3']
    crval = hdr['CRVAL3']
    #cdelt = hdr['CDELT3']
    cdelt = hdr['CD3_3']
    object_name = hdr['OBJECT']

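    # linear wavelength solution from the FITS WCS keywords (1-indexed pixels):
    #   lam[i] = CRVAL3 + (i + 1 - CRPIX3) * CD3_3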
    npix = len(std1ddata)
    start = crval - (crpix-1)*cdelt
    stop = crval + (npix - crpix + 0.5)*cdelt
    lam = np.arange(start, stop, cdelt)
    
    coords = np.zeros((2,2))
    ii=0

    print('\n\t Press any key except \'q\' to specify a required range')
    
    def select_range(event):
        global coords, ii, std1ddata, lam
        if event.key == 'q':
            plt.close()
        elif ii == 0:
            coords[0,0] = event.xdata
            coords[0,1] = event.ydata
            ii = 1
            print('\t Press any key again except \'q\'')            
        elif ii == 1:
            coords[1,0] = event.xdata
            coords[1,1] = coords[0,1] 
            plt.plot(coords[:,0], coords[:,1])
            plt.draw()
            ii = 2
            print('\t Press \'q\' to quit, or any other key to redo.')
        elif ii == 2:
            plt.cla()
            plt.plot(lam, std1ddata)
            plt.draw()
            coords[0,0] = event.xdata
            coords[0,1] = event.ydata
            ii = 1
            print('\t Press any key except \'q\' to specify a required range')
        return

    fig=plt.figure()
    cid = fig.canvas.mpl_connect('key_press_event', select_range)
    plt.plot(lam,std1ddata)
    plt.title(object_name)
    plt.xlabel('Lambda (Angstrom)')
    plt.ylabel('Count')
    plt.show()

    num = 0
    while coords[0,0] > lam[num]:
        num += 1
    x1 = num
    crval = lam[num]

    while coords[1,0]> lam[num]:
        num += 1
    x2 = num

    if x1 > x2:
        x1, x2 = x2, x1

    # Saving the output fits file
    outhdu = fits.PrimaryHDU(data=std1ddata[x1:x2+1])
    outhdl = fits.HDUList([outhdu])
    
    outhdr = hdl[0].header
    outhdr['CTYPE1']  = hdl[0].header['CTYPE3'] 
    outhdr['CRVAL1']  = crval
    outhdr['CRPIX1']  = 1
    #outhdr['CDELT1']  = hdl[0].header['CDELT3']
    outhdr['CD1_1'] = hdl[0].header['CD3_3']
    outhdr['DISPAXIS'] = 1
    outhdr['WCSDIM'] = 1
    
    outhdr['XSTDDEV'] = (gfit.x_stddev.value, \
                         'X stddev of the star radial profile')
    outhdr['YSTDDEV'] = (gfit.y_stddev.value, \
                         'Y stddev of the star radial profile')
    outhdr['APNSIG'] = (nsigma, 'Number of sigmas for integration aperture')
    
    outhdr.remove('CTYPE2')
    outhdr.remove('CRVAL2') 
    outhdr.remove('CRPIX2') 
    #outhdr.remove('CDELT2') 
    outhdr.remove('CD2_2')
    outhdr.remove('LTM2_2')
    #outhdr.remove('CD1_1')
    outhdr.remove('LTM1_1')
    outhdr.remove('CTYPE3')
    outhdr.remove('CRVAL3') 
    outhdr.remove('CRPIX3') 
    #outhdr.remove('CDELT3') 
    outhdr.remove('CD3_3')
    outhdr.remove('LTM3_3')
    
    outhdl[0].header = outhdr
    outhdl.writeto(outfile, overwrite=overwrite)
    print('\t 1D data file: '+outfile)

    outhdl.close()
    hdl.close()
    return outfile, True
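A minimal call sketch for `std1dspec` (the cube filename is illustrative; the routine is interactive and opens matplotlib windows for the star click and range selection):

outfile, ok = std1dspec('standard_star_cube.fits', startz=2000, nsigma=5,
                        overwrite=False)
if ok:
    print('1D spectrum written to:', outfile)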