Example #1
    def sqrt(self):
        '''Computes the square root of the image data values'''
        # Grab the data
        selfData = self.data
        outData = np.sqrt(selfData)

        # Propagate uncertainty if it exists
        if self.uncertainty is not None:
            selfUncert = self.uncertainty
            outUncert = StdDevUncertainty(selfUncert / (2 * outData))
        else:
            outUncert = None

        # Attempt to take the square root of the units
        try:
            outUnit = np.sqrt(u.Quantity(1, self.unit)).unit
        except Exception:
            outUnit = u.dimensionless_unscaled

        # Compute the square root and store the propagated uncertainty
        outImg = self.copy()
        outImg._BaseImage__fullData = NDDataArray(
            outData,
            uncertainty=outUncert,
            unit=outUnit,
            wcs=self._BaseImage__fullData.wcs)

        return outImg
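
The rule applied above is the first-order propagation formula for a square root, sigma_out = sigma / (2 * sqrt(x)). Below is a minimal standalone sketch of the same bookkeeping using astropy's NDDataArray and StdDevUncertainty directly; the sample values and the `adu` unit are made up for illustration.

import numpy as np
import astropy.units as u
from astropy.nddata import NDDataArray, StdDevUncertainty

data = np.array([4.0, 9.0, 16.0])    # made-up pixel values, in ADU
sigma = np.array([0.4, 0.9, 1.6])    # made-up 1-sigma uncertainties

out_data = np.sqrt(data)
out_sigma = sigma / (2 * out_data)   # first-order propagation for sqrt

result = NDDataArray(out_data,
                     uncertainty=StdDevUncertainty(out_sigma),
                     unit=np.sqrt(u.Quantity(1, u.adu)).unit)
print(result.data, result.uncertainty.array, result.unit)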
Example #2
    def log10(self):
        '''Computes the base-10 log of the image data values'''
        # Check that the image data are dimensionless
        if not self.has_dimensionless_units:
            raise TypeError(
                'Can only apply `log10` function to dimensionless quantities')

        # Grab the data
        selfData = self.data
        outData = np.log10(selfData)

        # Propagate uncertainty if it exists
        if self.uncertainty is not None:
            selfUncert = self.uncertainty
            outUncert = StdDevUncertainty(selfUncert / (selfData * np.log(10)))
        else:
            outUncert = None

        # Compute the base-10 log and store the propagated uncertainty
        outImg = self.copy()
        outImg._BaseImage__fullData = NDDataArray(
            outData,
            uncertainty=outUncert,
            unit=u.dimensionless_unscaled,
            wcs=self._BaseImage__fullData.wcs)

        return outImg
Example #3
    def arctan(self):
        '''Computes the arctan of the image data values'''
        # Check if the image is a dimensionless quantity
        if not self.has_dimensionless_units:
            raise TypeError(
                'Can only apply `arctan` function to dimensionless quantities')

        # Grab the data
        selfData = self.data

        # Propagate uncertainty if it exists
        if self.uncertainty is not None:
            selfUncert = self.uncertainty
            outUncert = StdDevUncertainty(selfUncert / (1.0 + selfData**2))
        else:
            outUncert = None

        # Compute the arctan and store the propagated uncertainty
        outImg = self.copy()
        outImg._BaseImage__fullData = NDDataArray(
            np.arctan(selfData),
            uncertainty=outUncert,
            unit=u.rad,
            wcs=self._BaseImage__fullData.wcs)

        return outImg
Example #4
def _calculate_size_of_image(ccd, combine_uncertainty_function):
    # If uncertainty_func is given for combine this will create an uncertainty
    # even if the originals did not have one. In that case we need to create
    # an empty placeholder.
    if ccd.uncertainty is None and combine_uncertainty_function is not None:
        ccd.uncertainty = StdDevUncertainty(np.zeros(ccd.data.shape))

    size_of_an_img = ccd.data.nbytes
    try:
        size_of_an_img += ccd.uncertainty.array.nbytes
    # In case uncertainty is None it has no "array" and in case the "array" is
    # not a numpy array:
    except AttributeError:
        pass
    # Mask is enforced to be a numpy.array across astropy versions
    if ccd.mask is not None:
        size_of_an_img += ccd.mask.nbytes
    # flags is not necessarily a numpy array so do not fail with an
    # AttributeError in case something was set!
    # TODO: Flags are not taken into account in Combiner. This number is added
    #       nevertheless for future compatibility.
    try:
        size_of_an_img += ccd.flags.nbytes
    except AttributeError:
        pass

    return size_of_an_img
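
As a rough, hypothetical sanity check of the bookkeeping above: a 2048 x 2048 float64 frame costs 32 MiB for the data, another 32 MiB for a standard-deviation uncertainty of the same shape, and 4 MiB for a boolean mask, so the helper would report about 68 MiB per image.

import numpy as np
from astropy.nddata import CCDData, StdDevUncertainty

shape = (2048, 2048)    # assumed frame size
ccd = CCDData(np.zeros(shape), unit='adu',
              uncertainty=StdDevUncertainty(np.zeros(shape)),
              mask=np.zeros(shape, dtype=bool))

size = ccd.data.nbytes + ccd.uncertainty.array.nbytes + ccd.mask.nbytes
print('{:.1f} MiB per image'.format(size / 1024**2))   # 68.0 MiB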
Example #5
    def sum_combine(self, sum_func=ma.sum, scale_to=None,
                    uncertainty_func=ma.std):
        """
        Sum combine together a set of arrays.

        A `~astropy.nddata.CCDData` object is returned with the data property
        set to the sum of the arrays. If the data were masked or any
        data have been rejected, those pixels will not be included in the
        sum. A mask will be returned, and if a pixel has been
        rejected in all images, it will be masked. The uncertainty of
        the combined image is the per-pixel standard deviation of the
        inputs multiplied by the square root of the number of images.
        Because sum_combine returns the 'pure sum' with masked pixels
        ignored, a rescaled sum requires using average_combine instead and
        multiplying its result by the number of images combined.

        Parameters
        ----------
        sum_func : function, optional
            Function to calculate the sum. Defaults to
            `numpy.ma.sum`.

        scale_to : float or None, optional
            Scaling factor used in the sum combined image. If given,
            it overrides `scaling`. Defaults to ``None``.

        uncertainty_func : function, optional
            Function to calculate uncertainty. Defaults to `numpy.ma.std`.

        Returns
        -------
        combined_image: `~astropy.nddata.CCDData`
            CCDData object based on the combined input of CCDData objects.
        """
        # set up the data
        data = sum_func(self._get_scaled_data(scale_to), axis=0)

        # set up the mask
        masked_values = self.data_arr.mask.sum(axis=0)
        mask = (masked_values == len(self.data_arr))

        # set up the deviation
        uncertainty = uncertainty_func(self.data_arr, axis=0)
        # Divide the standard deviation by the square root of the number
        # of unmasked images (#309)
        uncertainty /= np.sqrt(len(self.data_arr) - masked_values)
        # Convert uncertainty to plain numpy array (#351)
        uncertainty = np.asarray(uncertainty)
        # Multiply by the number of unmasked images; together with the
        # division above this scales the standard deviation by sqrt(N)
        uncertainty *= len(self.data_arr) - masked_values

        # create the combined image with a dtype that matches the combiner
        combined_image = CCDData(np.asarray(data.data, dtype=self.dtype),
                                 mask=mask, unit=self.unit,
                                 uncertainty=StdDevUncertainty(uncertainty))

        # update the meta data
        combined_image.meta['NCOMBINE'] = len(self.data_arr)

        # return the combined image
        return combined_image
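
A minimal usage sketch, assuming this method is ccdproc's Combiner.sum_combine as shipped in the package; the three constant 10 x 10 frames are made up. The combined data is the pixel-wise sum, and the attached uncertainty is the per-pixel standard deviation of the inputs scaled by sqrt(N).

import numpy as np
from astropy.nddata import CCDData
from ccdproc import Combiner

ccds = [CCDData(np.full((10, 10), value, dtype=float), unit='adu')
        for value in (1.0, 2.0, 3.0)]

combiner = Combiner(ccds)
summed = combiner.sum_combine()

print(summed.data[0, 0])               # 6.0
print(summed.uncertainty.array[0, 0])  # std([1, 2, 3]) * sqrt(3) ~ 1.41
print(summed.meta['NCOMBINE'])         # 3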
Example #6
def test_with_uncert_from_knots():
    wave_val = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    flux_val = np.array([2, 4, 6, 8, 10, 12, 14, 16, 18, 20])
    uncert_val = flux_val / 10.

    uncert = StdDevUncertainty(uncert_val * u.mJy)

    input_spectrum = Spectrum1D(spectral_axis=wave_val * u.AA,
                                flux=flux_val * u.mJy,
                                uncertainty=uncert)

    spline_knots = [3.5, 4.7, 6.8, 7.1] * u.AA

    # When replacing directly at spline knots, the spectral region is
    # redundant and must be omitted.
    result = model_replace(input_spectrum, None, model=spline_knots)

    assert isinstance(result.uncertainty, StdDevUncertainty)
    assert result.flux.unit == result.uncertainty.unit

    assert_quantity_allclose(result.uncertainty.quantity, uncert_val * u.mJy)

    # Now try the non-default no-uncertainty mode: the result should
    # have no uncertainty even when the input has one.
    result = model_replace(input_spectrum,
                           None,
                           model=spline_knots,
                           interpolate_uncertainty=False)

    assert result.uncertainty is None
Example #7
def aspcapStar_loader(file_obj, **kwargs):
    """
    Loader for APOGEE aspcapStar files.

    Parameters
    ----------
    file_obj: str or file-like
        FITS file name or object (provided from name by Astropy I/O Registry).

    Returns
    -------
    data: Spectrum1D
        The spectrum that is represented by the data in this table.
    """

    with read_fileobj_or_hdulist(file_obj, **kwargs) as hdulist:
        header = hdulist[0].header
        meta = {'header': header}
        wcs = WCS(hdulist[1].header)

        data = hdulist[1].data  # spectrum in the first extension
        unit = def_unit('arbitrary units')

        uncertainty = StdDevUncertainty(hdulist[2].data)

    # dispersion from the WCS but convert out of logspace
    dispersion = 10**wcs.all_pix2world(np.arange(data.shape[0]), 0)[0]
    dispersion_unit = Unit('Angstrom')

    return Spectrum1D(data=data * unit,
                      uncertainty=uncertainty,
                      spectral_axis=dispersion * dispersion_unit,
                      meta=meta,
                      wcs=wcs)
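
The final dispersion step relies on aspcapStar files storing a log10-linear wavelength solution in the FITS header. A small sketch of that conversion in isolation; the CRVAL1/CDELT1 values here are assumptions chosen to resemble an APOGEE H-band grid, not values read from a real file.

import numpy as np
from astropy.wcs import WCS

wcs = WCS(naxis=1)
wcs.wcs.crpix = [1]
wcs.wcs.crval = [np.log10(15100.0)]   # assumed log10(wavelength / Angstrom) at the reference pixel
wcs.wcs.cdelt = [6e-6]                # assumed log10 step per pixel

pixels = np.arange(5)
wavelength = 10 ** wcs.all_pix2world(pixels, 0)[0]   # convert out of log space
print(wavelength)   # monotonically increasing wavelengths near 15100 Angstrom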
Example #8
def test_from_fitted_model():
    wave_val = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    flux_val = np.array([1, 1.1, 0.9, 4., 10., 5., 2., 1., 1.2, 1.1])
    uncert_val = flux_val / 10.

    uncert = StdDevUncertainty(uncert_val * u.mJy)

    input_spectrum = Spectrum1D(spectral_axis=wave_val * u.AA,
                                flux=flux_val * u.mJy,
                                uncertainty=uncert)

    model = models.Gaussian1D(10, 5.6, 1.2)
    fitted_model = fit_lines(input_spectrum, model)
    region = SpectralRegion(3.5 * u.AA, 7.1 * u.AA)

    result = model_replace(input_spectrum, region, model=fitted_model)

    assert result.uncertainty is None
    assert result.flux.unit == input_spectrum.flux.unit

    expected_flux = np.array([
        1., 1.1, 0.9, 4.40801804, 9.58271877, 5.61238054, 0.88556096, 1., 1.2,
        1.1
    ]) * u.mJy

    assert_quantity_allclose(result.flux, expected_flux)
    assert_quantity_allclose(result.spectral_axis,
                             input_spectrum.spectral_axis)
Example #9
    def median_combine(self, median_func=ma.median, scale_to=None):
        """Median combine a set of arrays.

           A `~ccdproc.CCDData` object is returned
           with the data property set to the median of the arrays.  If the data
           was masked or any data have been rejected, those pixels will not be
           included in the median.   A mask will be returned, and if a pixel
           has been rejected in all images, it will be masked.   The
           uncertainty of the combined image is set by 1.4826 times the median
           absolute deviation of all input images.

           Parameters
           ----------
           median_func : function, optional
               Function that calculates median of a `~numpy.ma.masked_array`.
               Default is to use `numpy.ma.median` to calculate median.

           scale_to : float, optional
               Scaling factor used in the average combined image. If given,
               it overrides ``CCDData.scaling``. Defaults to None.

           Returns
           -------
           combined_image: `~ccdproc.CCDData`
               CCDData object based on the combined input of CCDData objects.

           Warnings
           --------
           The uncertainty currently calculated using the median absolute
           deviation does not account for rejected pixels.

        """
        if scale_to is not None:
            scalings = scale_to
        elif self.scaling is not None:
            scalings = self.scaling
        else:
            scalings = 1.0

        # set the data
        data = median_func(scalings * self.data_arr, axis=0)

        # set the mask
        mask = self.data_arr.mask.sum(axis=0)
        mask = (mask == len(self.data_arr))

        # set the uncertainty
        uncertainty = 1.4826 * median_absolute_deviation(self.data_arr.data,
                                                         axis=0)

        # create the combined image with a dtype matching the combiner
        combined_image = CCDData(np.asarray(data.data, dtype=self.dtype),
                                 mask=mask, unit=self.unit,
                                 uncertainty=StdDevUncertainty(uncertainty))

        # update the meta data
        combined_image.meta['NCOMBINE'] = len(self.data_arr)

        # return the combined image
        return combined_image
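
The 1.4826 factor mentioned in the docstring is the standard conversion from a median absolute deviation to an equivalent Gaussian standard deviation. A short sketch of that relation with synthetic Gaussian noise (astropy.stats.mad_std applies the same scaling in one call):

import numpy as np
from astropy.stats import median_absolute_deviation

rng = np.random.default_rng(42)
samples = rng.normal(loc=0.0, scale=2.0, size=100_000)

print(1.4826 * median_absolute_deviation(samples))   # ~2.0, robust estimate of sigma
print(np.std(samples))                               # ~2.0, for comparison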
Example #10
def test_bounding_region(simulated_spectra):
    np.random.seed(42)

    spectrum = simulated_spectra.s1_um_mJy_e1
    uncertainty = StdDevUncertainty(
        0.1 * np.random.random(len(spectrum.flux)) * u.mJy)
    spectrum.uncertainty = uncertainty

    region = SpectralRegion([(0.6 * u.um, 0.8 * u.um),
                             (0.86 * u.um, 0.89 * u.um)])

    extracted_spectrum = extract_bounding_spectral_region(spectrum, region)

    # Confirm the end points are correct
    assert quantity_allclose(extracted_spectrum.spectral_axis[[0, -1]],
                             [0.6035353535353536, 0.8858585858585859] * u.um)

    flux_expected = [
        FLUX_ARRAY + [
            948.81864554, 1197.84859443, 1069.75268943, 1118.27269184,
            1301.7695563, 1206.62880648, 1518.16549319, 1256.84259015,
            1638.76791267, 1562.05642302, 1337.65312465, 1263.48914109,
            1589.81797876, 1548.46068415
        ]
    ] * u.mJy

    assert quantity_allclose(extracted_spectrum.flux, flux_expected)

    # also ensure this works if the multi-region is expressed as a single
    # Quantity
    region2 = SpectralRegion([(0.6, 0.8), (0.86, 0.89)] * u.um)
    extracted_spectrum2 = extract_bounding_spectral_region(spectrum, region2)
    assert quantity_allclose(extracted_spectrum2.spectral_axis[[0, -1]],
                             [0.6035353535353536, 0.8858585858585859] * u.um)
    assert quantity_allclose(extracted_spectrum2.flux, flux_expected)
Example #11
def test_line_flux_masked():

    np.random.seed(42)

    N = 100

    wavelengths = np.linspace(0.4, 1.05, N) * u.um

    g = models.Gaussian1D(amplitude=2000 * u.mJy,
                          mean=0.56 * u.um,
                          stddev=0.01 * u.um)
    flux = g(wavelengths) + 1000 * u.mJy
    noise = 400 * np.random.random(flux.shape) * u.mJy
    flux += noise

    spectrum = Spectrum1D(spectral_axis=wavelengths, flux=flux)
    spectrum.uncertainty = StdDevUncertainty(noise)

    spectrum_masked = snr_threshold(spectrum, 10.)

    # Ensure we have at least 50% of the data being masked.
    assert len(np.where(spectrum_masked.mask)[0]) > N / 2

    result = line_flux(spectrum_masked)

    assert result.unit.is_equivalent(u.Jy * u.um)

    assert quantity_allclose(result.value, 720.52992, atol=0.001)

    # With flux conserving resampler
    result = line_flux(spectrum_masked,
                       mask_interpolation=FluxConservingResampler)
    assert quantity_allclose(result.value, 720.61116, atol=0.001)
Example #12
    def tan(self):
        '''Computes the tangent of the image data values'''
        # Check if the image has angle units
        if not self.has_angle_units:
            raise TypeError(
                'Can only apply `tan` function to quantities with angle units')

        selfRadData = (self.data * self.unit).to(u.rad)
        selfData = selfRadData.value

        # Propagate uncertainty if it exists
        if self.uncertainty is not None:
            selfRadUncert = (self.uncertainty * self.unit).to(u.rad)
            selfUncert = selfRadUncert.value
            outUncert = StdDevUncertainty(selfUncert / (np.cos(selfData)**2))
        else:
            outUncert = None

        # Compute the tangent and store the propagated uncertainty
        outImg = self.copy()
        outImg._BaseImage__fullData = NDDataArray(
            np.tan(selfData),
            uncertainty=outUncert,
            unit=u.dimensionless_unscaled,
            wcs=self.wcs)

        return outImg
Example #13
def test_centroid(simulated_spectra):
    """
    Test the simple version of the spectral centroid.
    """

    np.random.seed(42)

    #
    #  Set up the data and add the uncertainty and calculate the expected SNR
    #

    spectrum = simulated_spectra.s1_um_mJy_e1
    uncertainty = StdDevUncertainty(
        0.1 * np.random.random(len(spectrum.flux)) * u.mJy)
    spectrum.uncertainty = uncertainty

    wavelengths = spectrum.spectral_axis
    flux = spectrum.flux

    spec_centroid_expected = np.sum(flux * wavelengths) / np.sum(flux)

    #
    # SNR of the whole spectrum
    #

    spec_centroid = centroid(spectrum, None)

    assert isinstance(spec_centroid, u.Quantity)
    assert np.allclose(spec_centroid.value, spec_centroid_expected.value)
Example #14
def test_centroid_masked(simulated_spectra):
    """
    Test centroid with masked spectrum.
    """

    np.random.seed(42)

    # Same as in test for unmasked spectrum, but using
    # masked version of same spectrum.
    spectrum = simulated_spectra.s1_um_mJy_e1_masked
    uncertainty = StdDevUncertainty(
        0.1 * np.random.random(len(spectrum.flux)) * u.mJy)
    spectrum.uncertainty = uncertainty

    # Use masked flux and dispersion arrays to compute
    # the expected value for centroid.
    wavelengths = spectrum.spectral_axis[~spectrum.mask]
    flux = spectrum.flux[~spectrum.mask]

    spec_centroid_expected = np.sum(flux * wavelengths) / np.sum(flux)

    spec_centroid = centroid(spectrum, None)

    assert isinstance(spec_centroid, u.Quantity)
    assert np.allclose(spec_centroid.value, spec_centroid_expected.value)
Example #15
def test_snr(simulated_spectra):
    """
    Test the simple version of the spectral SNR.
    """

    np.random.seed(42)

    #
    #  Set up the data and add the uncertainty and calculate the expected SNR
    #

    spectrum = simulated_spectra.s1_um_mJy_e1
    uncertainty = StdDevUncertainty(
        0.1 * np.random.random(len(spectrum.flux)) * u.mJy)
    spectrum.uncertainty = uncertainty

    flux = spectrum.flux

    spec_snr_expected = np.mean(flux / (uncertainty.array * uncertainty.unit))

    #
    # SNR of the whole spectrum
    #

    spec_snr = snr(spectrum)

    assert isinstance(spec_snr, u.Quantity)
    assert np.allclose(spec_snr.value, spec_snr_expected.value)
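
The quantity under test is simply the mean of the flux divided by the per-pixel standard deviation. A tiny self-contained check with made-up numbers, assuming specutils is installed:

import numpy as np
import astropy.units as u
from astropy.nddata import StdDevUncertainty
from specutils import Spectrum1D
from specutils.analysis import snr

flux = np.array([10.0, 20.0, 30.0]) * u.mJy
sigma = np.array([1.0, 2.0, 3.0]) * u.mJy
spec = Spectrum1D(spectral_axis=np.array([1.0, 2.0, 3.0]) * u.um,
                  flux=flux,
                  uncertainty=StdDevUncertainty(sigma))

print(snr(spec))              # 10.0 (dimensionless)
print(np.mean(flux / sigma))  # same value by definition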
Example #16
def test_snr_single_region(simulated_spectra):
    """
    Test the simple version of the spectral SNR over a region of the spectrum.
    """

    np.random.seed(42)

    region = SpectralRegion(0.52 * u.um, 0.59 * u.um)

    #
    #  Set up the data
    #

    spectrum = simulated_spectra.s1_um_mJy_e1
    uncertainty = StdDevUncertainty(
        0.1 * np.random.random(len(spectrum.flux)) * u.mJy)
    spectrum.uncertainty = uncertainty

    wavelengths = spectrum.spectral_axis
    flux = spectrum.flux

    # +1 because we want to include it in the calculation
    l = np.nonzero(wavelengths > region.lower)[0][0]
    r = np.nonzero(wavelengths < region.upper)[0][-1] + 1

    spec_snr_expected = np.mean(flux[l:r] /
                                (uncertainty.array[l:r] * uncertainty.unit))

    #
    # SNR over the specified region
    #

    spec_snr = snr(spectrum, region)

    assert np.allclose(spec_snr.value, spec_snr_expected.value)
Example #17
    def __flex_load__(cls, header, data):
        ext = MultipleDataExtension._parse(header, data)

        wavelength = ext.data["wavelength"]
        wavelength = wavelength << ext.header["wavelength_unit"]

        flux = ext.data["flux"]
        flux = flux << ext.header["flux_unit"]

        if "uncertainty" in ext.data:
            uncertainty = ext.data["uncertainty"]
            uncertainty = uncertainty << ext.header["uncertainty_unit"]
            uncertainty = StdDevUncertainty(uncertainty)
        else:
            uncertainty = None

        exceptions = [
            "__module__",
            "__class__",
            "__header__",
            "uncertainty_unit",
            "wavelength_unit",
            "flux_unit",
        ]
        meta = {k: v for k, v in ext.header.items() if k not in exceptions}
        meta["spectral_axis"] = wavelength
        meta["flux"] = flux
        meta["uncertainty"] = uncertainty

        return cls(**meta)
Example #18
def test_tabular_fits_2d(tmpdir, spectral_axis):
    wlu = {
        'wavelength': u.AA,
        'frequency': u.GHz,
        'energy': u.eV,
        'wavenumber': u.cm**-1
    }
    # Create a small data set with 2D flux + uncertainty
    disp = np.arange(1, 1.1, 0.01) * wlu[spectral_axis]
    flux = (np.ones((3, len(disp))) *
            np.arange(1, len(disp) + 1)**2 * 1.e-14 * u.Jy)
    unc = StdDevUncertainty(0.01 * np.random.rand(3, len(disp)))
    if spectral_axis not in ('wavelength', ):
        disp = np.flip(disp)

    spectrum = Spectrum1D(flux=flux, spectral_axis=disp, uncertainty=unc)
    tmpfile = str(tmpdir.join('_tst.fits'))
    spectrum.write(tmpfile, format='tabular-fits')

    # Read it in and check against the original
    spec = Spectrum1D.read(tmpfile)
    assert spec.flux.unit == spectrum.flux.unit
    assert spec.spectral_axis.unit == spectrum.spectral_axis.unit
    assert spec.flux.shape == flux.shape
    assert spec.uncertainty.array.shape == flux.shape
    assert quantity_allclose(spec.spectral_axis, spectrum.spectral_axis)
    assert quantity_allclose(spec.flux, spectrum.flux)
    assert quantity_allclose(spec.uncertainty.quantity,
                             spectrum.uncertainty.quantity)
Example #19
def muscles_sed(file_name, **kwargs):
    """
    Load spectrum from a MUSCLES SED FITS file.

    Parameters
    ----------
    file_name: str
        The path to the FITS file.

    Returns
    -------
    data: Spectrum1D
        The spectrum that is represented by the data in this table.
    """
    # name is not used; what was it for?
    # name = os.path.basename(file_name.rstrip(os.sep)).rsplit('.', 1)[0]

    with fits.open(file_name, **kwargs) as hdulist:
        header = hdulist[0].header

        tab = Table.read(hdulist)

        meta = {'header': header}
        uncertainty = StdDevUncertainty(tab["ERROR"])
        data = tab["FLUX"]
        wavelength = tab["WAVELENGTH"]

    return Spectrum1D(flux=data, spectral_axis=wavelength,
                      uncertainty=uncertainty, meta=meta,
                      unit=data.unit,
                      spectral_axis_unit=wavelength.unit)
Example #20
def test_collection_without_optional_arguments():
    # Without uncertainties
    flux = u.Quantity(np.random.sample((5, 10)), unit='Jy')
    spectral_axis = u.Quantity(np.arange(50).reshape((5, 10)), unit='AA')
    uncertainty = StdDevUncertainty(np.random.sample((5, 10)), unit='Jy')
    wcs = np.array([gwcs_from_array(x) for x in spectral_axis])
    mask = np.ones((5, 10)).astype(bool)
    meta = [{'test': 5, 'info': [1, 2, 3]} for i in range(5)]

    spec_coll = SpectrumCollection(flux=flux,
                                   spectral_axis=spectral_axis,
                                   wcs=wcs,
                                   mask=mask,
                                   meta=meta)

    # Without mask
    spec_coll = SpectrumCollection(flux=flux,
                                   spectral_axis=spectral_axis,
                                   uncertainty=uncertainty,
                                   wcs=wcs,
                                   meta=meta)

    # Without meta
    spec_coll = SpectrumCollection(flux=flux,
                                   spectral_axis=spectral_axis,
                                   uncertainty=uncertainty,
                                   wcs=wcs,
                                   mask=mask)
Example #21
def test_nikamap_init_uncertainty():
    data = np.array([1, 2, 3])
    uncertainty = np.array([1, 1, 1])

    # Default to StdDevUncertainty...
    nm = NikaMap(data, uncertainty=uncertainty)
    assert isinstance(nm.uncertainty, StdDevUncertainty)
    assert np.all(nm.uncertainty.array == np.array([1, 1, 1]))
    assert nm.unit == nm.uncertainty.unit

    nm_mean = nm.add(nm).divide(2)
    assert np.all(nm_mean.data == nm.data)
    npt.assert_allclose(nm_mean.uncertainty.array,
                        np.array([1, 1, 1]) / np.sqrt(2))

    # Wrong size
    with pytest.raises(ValueError):
        nm = NikaMap(data, uncertainty=uncertainty[1:])

    # Wrong type raises TypeError
    with pytest.raises(TypeError):
        nm = NikaMap(data, uncertainty=list(uncertainty))

    # Different Units
    st_uncertainty = StdDevUncertainty(uncertainty * 1e-3, unit=u.Jy)
    nm = NikaMap(data * u.mJy, uncertainty=st_uncertainty)
    assert nm.uncertainty.unit == nm.unit
    npt.assert_equal(nm.uncertainty.array, uncertainty)
Example #22
    def sum(self, ims, return_list=False, **kwargs):
        """ Coadd input images
        """
        allcube = self.getcube(ims, **kwargs)
        nframe = len(allcube)

        out = []
        for chip in range(self.nchip):
            datacube = []
            varcube = []
            maskcube = []
            for im in range(nframe):
                datacube.append(allcube[im][chip].data)
                varcube.append(allcube[im][chip].uncertainty.array**2)
                maskcube.append(allcube[im][chip].mask)
            # Sum the data, add the variances in quadrature, and OR the masks
            datasum = np.sum(np.array(datacube), axis=0)
            sig = np.sqrt(np.sum(np.array(varcube), axis=0))
            mask = np.any(maskcube, axis=0)
            out.append(CCDData(datasum, uncertainty=StdDevUncertainty(sig),
                               mask=mask, unit='adu'))

        # return the coadded frame(s)
        if len(out) == 1:
            if return_list:
                return [out[0]]
            return out[0]
        return out
Example #23
def snr_spec(flux, wl, n):

    sample = len(wl)
    noise = n * np.asarray(random.sample(range(0, len(wl)), sample)) / len(wl)
    unc = StdDevUncertainty(noise)

    # Add the noise realization to the flux, element by element
    fluxn = [flux[i] + noise[i] for i in range(len(wl))]

    spec1d = Spectrum1D(spectral_axis=wl * u.AA,
                        flux=fluxn * u.Jy,
                        uncertainty=unc)

    #ax = plt.subplots()[1]
    #ax.plot(spec1d.spectral_axis, spec1d.flux)
    #ax.set_xlim([3520,3550])

    sn1 = snr(spec1d, SpectralRegion(3070 * u.AA, 3090 * u.AA))
    #sn  = snr_derived(spec1d,SpectralRegion(3070*u.AA, 3090*u.AA))
    #print('SNR1: '+ str(snr(spec1d)), SpectralRegion(3500*u.AA, 3550*u.AA))
    print('S/N: ' + str(sn1))
    #print('SNR: '+ str(sn))
    #print('FWHM:'+str(fwhm(spec1d)))

    #0.042 = snr 50
    #

    return fluxn
Example #24
def test_nddata_input():
    data = np.arange(400).reshape((20, 20))
    error = np.sqrt(data)
    mask = np.zeros((20, 20), dtype=bool)
    mask[8:13, 8:13] = True
    unit = 'adu'
    wcs = make_wcs(data.shape)
    try:
        skycoord = wcs.pixel_to_world(10, 10)
    except AttributeError:
        # for Astropy < 3.1
        skycoord = pixel_to_skycoord(10, 10, wcs)

    aper = SkyCircularAperture(skycoord, r=0.7 * u.arcsec)

    tbl1 = aperture_photometry(data * u.adu,
                               aper,
                               error=error * u.adu,
                               mask=mask,
                               wcs=wcs)

    uncertainty = StdDevUncertainty(error)
    nddata = NDData(data,
                    uncertainty=uncertainty,
                    mask=mask,
                    wcs=wcs,
                    unit=unit)
    tbl2 = aperture_photometry(nddata, aper)

    for column in tbl1.columns:
        if column == 'sky_center':  # cannot test SkyCoord equality
            continue
        assert_allclose(tbl1[column], tbl2[column])
Example #25
def test_block_replicate():
    ccd = CCDData(np.ones((4, 4)),
                  unit='adu',
                  meta={'testkw': 1},
                  mask=np.zeros((4, 4), dtype=bool),
                  uncertainty=StdDevUncertainty(np.ones((4, 4))),
                  wcs=np.zeros((4, 4)))
    with catch_warnings(AstropyUserWarning) as w:
        ccd_repl = block_replicate(ccd, (2, 2))
    assert len(w) == 1
    assert 'following attributes were set' in str(w[0].message)

    assert isinstance(ccd_repl, CCDData)
    assert np.all(ccd_repl.data == 0.25)
    assert ccd_repl.data.shape == (8, 8)
    assert ccd_repl.unit == u.adu
    # Other attributes are set to None. In case the function is modified to
    # work on these attributes correctly those tests need to be updated!
    assert ccd_repl.meta == {'testkw': 1}
    assert ccd_repl.mask is None
    assert ccd_repl.wcs is None
    assert ccd_repl.uncertainty is None

    # Make sure meta is copied
    ccd_repl.meta['testkw2'] = 10
    assert 'testkw2' not in ccd.meta
Example #26
def test_arithmetic_overload_ccddata_operand(ccd_data):
    ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
    operand = ccd_data.copy()
    result = ccd_data.add(operand)
    assert len(result.meta) == 0
    np.testing.assert_array_equal(result.data, 2 * ccd_data.data)
    np.testing.assert_array_equal(result.uncertainty.array,
                                  np.sqrt(2) * ccd_data.uncertainty.array)

    result = ccd_data.subtract(operand)
    assert len(result.meta) == 0
    np.testing.assert_array_equal(result.data, 0 * ccd_data.data)
    np.testing.assert_array_equal(result.uncertainty.array,
                                  np.sqrt(2) * ccd_data.uncertainty.array)

    result = ccd_data.multiply(operand)
    assert len(result.meta) == 0
    np.testing.assert_array_equal(result.data, ccd_data.data**2)
    expected_uncertainty = (np.sqrt(2) * np.abs(ccd_data.data) *
                            ccd_data.uncertainty.array)
    np.testing.assert_allclose(result.uncertainty.array, expected_uncertainty)

    result = ccd_data.divide(operand)
    assert len(result.meta) == 0
    np.testing.assert_array_equal(result.data, np.ones_like(ccd_data.data))
    expected_uncertainty = (np.sqrt(2) / np.abs(ccd_data.data) *
                            ccd_data.uncertainty.array)
    np.testing.assert_allclose(result.uncertainty.array, expected_uncertainty)
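
The expected uncertainties in this test follow the usual uncorrelated-error rules: quadrature sums for add/subtract and relative quadrature sums for multiply/divide. The same propagation can be reproduced with a bare NDDataArray, which uses the same astropy arithmetic machinery as CCDData; the numbers below are made up.

import numpy as np
from astropy.nddata import NDDataArray, StdDevUncertainty

a = NDDataArray([10.0], uncertainty=StdDevUncertainty([1.0]))
b = NDDataArray([10.0], uncertainty=StdDevUncertainty([1.0]))

print(a.add(b).uncertainty.array)       # [1.414...] = sqrt(1**2 + 1**2)
print(a.multiply(b).uncertainty.array)  # [14.14...] = 100 * sqrt(0.1**2 + 0.1**2)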
Example #27
def test_create_collection_from_spectrum1D():
    spec = Spectrum1D(spectral_axis=np.linspace(0, 50, 50) * u.AA,
                      flux=np.random.randn(50) * u.Jy,
                      uncertainty=StdDevUncertainty(np.random.sample(50),
                                                    unit='Jy'))
    spec1 = Spectrum1D(spectral_axis=np.linspace(20, 60, 50) * u.AA,
                       flux=np.random.randn(50) * u.Jy,
                       uncertainty=StdDevUncertainty(np.random.sample(50),
                                                     unit='Jy'))

    spec_coll = SpectrumCollection.from_spectra([spec, spec1])

    assert spec_coll.ndim == 1
    assert spec_coll.shape == (2, )
    assert spec_coll.nspectral == 50
    assert isinstance(spec_coll.flux, u.Quantity)
    assert isinstance(spec_coll.spectral_axis, u.Quantity)
Example #28
def test_template_redshift_with_multiple_template_spectra_in_match():
    # Seed np.random so that results are consistent
    np.random.seed(42)

    # Create test spectra
    spec_axis = np.linspace(0, 50, 50) * u.AA
    perm_flux = np.random.randn(50) * u.Jy

    # Test redshift
    redshift = 3

    # Observed spectrum
    spec = Spectrum1D(spectral_axis=spec_axis * (1+redshift),
                      flux=perm_flux,
                      uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))

    # Template spectrum
    spec1 = Spectrum1D(spectral_axis=spec_axis,
                       flux=np.random.randn(50) * u.Jy,
                       uncertainty=StdDevUncertainty(np.random.sample(50)))
    spec2 = Spectrum1D(spectral_axis=spec_axis,
                       flux=np.random.randn(50) * u.Jy,
                       uncertainty=StdDevUncertainty(np.random.sample(50)))

    # Combine spectra into SpectrumCollection object
    spec_coll = SpectrumCollection.from_spectra([spec1, spec2])

    # Test redshift parameters
    min_redshift = .5
    max_redshift = 5.5
    delta_redshift = .25
    redshift_trial_values = np.arange(min_redshift, max_redshift+delta_redshift, delta_redshift)

    tm_result = template_comparison.template_match(
        observed_spectrum=spec, spectral_templates=spec_coll,
        resample_method="flux_conserving", redshift=redshift_trial_values)
    assert len(tm_result) == 4
    np.testing.assert_almost_equal(tm_result[1], 6803.922741644725)

    # When a spectrum collection is matched with a redshift
    # grid, a list-of-lists is returned with the trial chi2
    # values computed for every combination redshift-template.
    # The external list spans the templates in the collection,
    # while each internal list contains all chi2 values
    # for a given template.
    assert len(tm_result[3]) == 2
Example #29
def create_spectrum1d(xmin, xmax, uncertainty=None):
    flux = np.ones(xmax - xmin) * u.Jy
    wavelength = np.arange(xmin, xmax) * 0.1 * u.nm
    if uncertainty is not None:
        uncertainty = StdDevUncertainty(np.ones(xmax - xmin) * u.Jy)
    return Spectrum1D(spectral_axis=wavelength,
                      flux=flux,
                      uncertainty=uncertainty)
Example #30
    def rd(self, num, ext=0):
        """ Read an image

        Args:
            num (str or int) : name or number of image to read
        Returns:
            image (CCDData) : CCDData object
        """
        out = []
        # loop over different channels (if any)
        idet = 0
        for form, gain, rn in zip(self.formstr, self.gain, self.rn):
            # find the files that match the directory/format
            if type(num) is int:
                search = self.dir + '/' + self.root + form.format(num) + '.fits*'
            elif type(num) is str or type(num) is np.str_:
                if num.find('/') >= 0:
                    search = num + '*'
                else:
                    search = self.dir + '/*' + num + '*'
            else:
                print('stopping in rd... num:', num)
                pdb.set_trace()
            file = glob.glob(search)
            if len(file) == 0:
                print('cannot find file matching: ' + search)
                return
            elif len(file) > 1:
                if self.verbose:
                    print('more than one match found, using first!', file)
            file = file[0]

            # read the file into a CCDData object
            if self.verbose:
                print('  Reading file: {:s}'.format(file))
            try:
                im = CCDData.read(file, hdu=ext, unit='adu')
            except Exception:
                raise RuntimeError('Error reading file: {:s}'.format(file))
            im.header['FILE'] = os.path.basename(file)
            if 'OBJECT' not in im.header:
                try:
                    im.header['OBJECT'] = im.header['OBJNAME']
                except KeyError:
                    im.header['OBJECT'] = im.header['FILE']

            # Add uncertainty (incorrect while any overscan is still present,
            # but it is redone after overscan subtraction later)
            data = copy.copy(im.data)
            data[data < 0] = 0.
            im.uncertainty = StdDevUncertainty(np.sqrt(data/gain + (rn/gain)**2))

            # Add mask
            if self.mask is not None:
                im.mask = self.mask
            else:
                im.mask = np.zeros(im.data.shape, dtype=bool)
            if self.badpix is not None:
                for badpix in self.badpix[idet]:
                    badpix.setval(im.mask, True)

            out.append(im)
            idet += 1

        # return the data
        if len(out) == 1:
            return out[0]
        return out
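
The uncertainty attached in `rd` is the standard CCD noise model, Poisson shot noise plus read noise, expressed in ADU. A minimal standalone sketch of that formula with assumed gain and read-noise values:

import numpy as np
from astropy.nddata import CCDData, StdDevUncertainty

gain = 2.0    # assumed gain, e-/ADU
rn = 5.0      # assumed read noise, e-
data = np.full((5, 5), 400.0)                    # ADU

sigma = np.sqrt(data / gain + (rn / gain)**2)    # shot noise + read noise, in ADU
im = CCDData(data, unit='adu', uncertainty=StdDevUncertainty(sigma))
print(im.uncertainty.array[0, 0])                # ~14.4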