Example No. 1
def read_fits_spectrum1d(filename, dispersion_unit=None, flux_unit=None):
    """
    1D reader for spectra in FITS format. This function determines the format
    of the FITS file and attempts to read the spectrum. It uses only the
    primary extension of the FITS file, reading the data and header from it.
    It returns a Spectrum1D object if the data are linear, or a list of
    Spectrum1D objects if the data are in multispec format.

    Parameters
    ----------

    filename : str
        FITS filename

    dispersion_unit : ~astropy.units.Unit, optional
        unit of the dispersion axis; overrides any unit information given
        in the FITS keywords
        default = None

    flux_unit : ~astropy.units.Unit, optional
        unit of the flux

    Raises
    ------
    NotImplementedError
        If the format can't be read currently
    """
    if dispersion_unit:
        dispersion_unit = u.Unit(dispersion_unit)

    data = fits.getdata(filename)
    header = fits.getheader(filename)

    wcs_info = FITSWCSSpectrum(header)

    if wcs_info.naxis == 1:
        wcs = read_fits_wcs_linear1d(wcs_info, dispersion_unit=dispersion_unit)
        return Spectrum1D(data, wcs=wcs)
    elif wcs_info.naxis == 2 and \
            wcs_info.affine_transform_dict['ctype'] == ["MULTISPE", "MULTISPE"]:
        multi_wcs = multispec_wcs_reader(wcs_info, dispersion_unit=dispersion_unit)
        multispec = []
        for spectrum_data, spectrum_wcs in zip(data, multi_wcs.values()):
            multispec.append(
                Spectrum1D(spectrum_data, wcs=spectrum_wcs))
        return multispec

    elif wcs_info.naxis == 3 and \
            wcs_info.affine_transform_dict['ctype'] == ["LINEAR","LINEAR","LINEAR"]:
        wcs = read_fits_wcs_linear1d(wcs_info, dispersion_unit=dispersion_unit)
        equispec = []
        for i in range(data.shape[0]):
            equispec.append(
                Spectrum1D(data[i][0], wcs=wcs))
        return equispec

    elif wcs_info.naxis == 3 and \
            wcs_info.affine_transform_dict['ctype'] == ["MULTISPE", "MULTISPE","LINEAR"]:
        multi_wcs = multispec_wcs_reader(wcs_info, dispersion_unit=dispersion_unit)
        multispec = []
        for j in range(data.shape[1]):
            equispec = []
            for i in range(data.shape[0]):
                equispec.append(
                    Spectrum1D(data[i][j], wcs=list(multi_wcs.values())[j]))
            multispec.append(equispec)
        return multispec

    else:
        raise NotImplementedError("Either the FITS file does not represent a 1D"
                                  " spectrum or the format isn't supported yet")
Example No. 2
def test_fermi_isotropic():
    filename = "$GAMMAPY_DATA/fermi_3fhl/iso_P8R2_SOURCE_V6_v06.txt"
    model = TableModel.read_fermi_isotropic_model(filename)
    assert_quantity_allclose(
        model(50 * u.GeV), 1.463 * u.Unit("1e-13 MeV-1 cm-2 s-1 sr-1"), rtol=1e-3
    )
Example No. 3
 def unit(self, val):
     self._unit = u.Unit(val)
Example No. 4
def read_file_spectrum(file,
                       wave_units=q.um,
                       flux_units=q.erg / q.s / q.cm**2 / q.AA,
                       survey=None):
    """Create a spectrum from an ASCII, XML, or FITS file

    Parameters
    ----------
    file: str
        The path to the file
    wave_units: str, astropy.units.quantity.Quantity
        The wavelength units
    flux_units: str, astropy.units.quantity.Quantity, None
        The flux units
    survey: str
        The name of the survey, ['SDSS']

    Returns
    -------
    list
        The [wavelength, flux, error] of the file spectrum with units
    """
    # Read the fits data...
    if file.endswith('.fits'):

        raw, head = fits.getdata(file, header=True)

        # Check if it's an SDSS spectrum with a log-linear wavelength solution
        if survey == 'SDSS':
            flux_units = 1E-17 * q.erg / q.s / q.cm**2 / q.AA
            wave_units = q.AA
            log_w = head['COEFF0'] + head['COEFF1'] * np.arange(len(raw.flux))
            data = [10**log_w, raw.flux, raw.ivar]

        # Check if it is a recarray with named columns
        elif isinstance(raw, fits.fitsrec.FITS_rec):
            data = raw['WAVELENGTH'], raw['FLUX'], raw['ERROR']

        # Otherwise assume a plain [wavelength, flux, (error)] array
        elif isinstance(raw, np.ndarray):
            data = raw

        else:
            print("Sorry, I cannot read the file at", file)

    # ...or the ascii data...
    elif file.endswith('.txt'):
        data = np.genfromtxt(file, unpack=True)

    # ...or the VO Table
    elif file.endswith('.xml'):
        vot = vo.parse_single_table(file)
        data = np.array([list(i) for i in vot.array]).T

    else:
        raise IOError('The file needs to be ASCII, XML, or FITS.')

    # Make sure units are astropy quantities
    if isinstance(wave_units, str):
        wave_units = q.Unit(wave_units)
    if isinstance(flux_units, str):
        flux_units = q.Unit(flux_units)

    # Sanity check for wave_units
    if data[0].min() > 100 and wave_units == q.um:
        print(
            "WARNING: Your wavelength range ({} - {}) looks like Angstroms. Are you sure it's {}?"
            .format(data[0].min(), data[0].max(), wave_units))

    # Apply units
    wave = data[0] * wave_units
    flux = data[1] * (flux_units or 1.)
    if len(data) > 2:
        unc = data[2] * (flux_units or 1.)
    else:
        unc = None

    return [wave, flux, unc]
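A hedged usage sketch for the ASCII branch (the file name is hypothetical; a two- or three-column text file is assumed):

import astropy.units as q

wave, flux, unc = read_file_spectrum('spectrum.txt',
                                     wave_units=q.um,
                                     flux_units=q.erg / q.s / q.cm**2 / q.AA)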
Example No. 5
datastore = DataStore.from_dir("$HESS_DATA")
src = SkyCoord.from_name(name)
sep = src.separation(datastore.obs_table.pointing_radec)
srcruns = datastore.obs_table[sep < 2.0 * u.deg]
obsid = srcruns['OBS_ID'].data
mylist = datastore.obs_list((obsid[0],))

# Define obs parameters
livetime_src = 10.0 * u.hr
livetime = mylist[0].observation_live_time_duration
offset = mylist[0].pointing_radec.separation(src)
lo_threshold = 0.1 * u.TeV
hi_threshold = 60 * u.TeV

# Define spectral model
index = 2.0 * u.Unit('')
amplitude = 2.5 * 1e-10 * u.Unit('cm-2 s-1 TeV-1')
reference = 1 * u.TeV
model = PowerLaw(index=index, amplitude=amplitude, reference=reference)

# do a user-defined model too!


edisp = mylist[0].edisp.to_energy_dispersion(offset=offset)
aeff = mylist[0].aeff.to_effective_area_table(offset=offset)

aeff.lo_threshold = lo_threshold
aeff.hi_threshold = hi_threshold
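As a quick sanity check on the model defined above, the spectral model can be evaluated directly at an energy (a sketch; the callable-model behaviour is assumed from the gammapy version this script targets):

# differential flux at the reference energy equals the amplitude
print(model(reference))
print(model(5 * u.TeV))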
Example No. 6
import pytest
import numpy as np

from astropy.wcs import WCS
from astropy import units as u
from astropy.io import fits

from ..spectral_cube import SpectralCube
from .helpers import assert_allclose

# the back of the book
dv = 3e-2 * u.Unit('m/s')
dy = 2e-5 * u.Unit('deg')
dx = 1e-5 * u.Unit('deg')
data_unit = u.dimensionless_unscaled

m0v = np.array([[27, 30, 33], [36, 39, 42], [45, 48, 51]]) * data_unit * dv
m0y = np.array([[9, 12, 15], [36, 39, 42], [63, 66, 69]]) * data_unit * dy
m0x = np.array([[3, 12, 21], [30, 39, 48], [57, 66, 75]]) * data_unit * dx

# M1V is a special case, where we return the actual coordinate
m1v = np.array([[1.66666667, 1.6, 1.54545455], [1.5, 1.46153846, 1.42857143],
                [1.4, 1.375, 1.35294118]]) * dv + 2 * u.Unit('m/s')
m1y = np.array([[1.66666667, 1.5, 1.4], [1.16666667, 1.15384615, 1.14285714],
                [1.0952381, 1.09090909, 1.08695652]]) * dy
m1x = np.array([[1.66666667, 1.16666667, 1.0952381],
                [1.06666667, 1.05128205, 1.04166667],
                [1.03508772, 1.03030303, 1.02666667]]) * dx

m2v = np.array([[0.22222222, 0.30666667, 0.36914601],
                [0.41666667, 0.45364892, 0.4829932],
Example No. 7
 def evaluate(x):
     return np.exp(x * u.Unit('1 / TeV'))
Example No. 8
def afrho2fluxd(wave,
                afrho,
                rap,
                geom,
                sun=None,
                unit=u.Unit('W/(m2 um)'),
                bandpass=None):
    """Convert A(th)frho to flux density.

    See A'Hearn et al. (1984) for the definition of Afrho.

    Parameters
    ----------
    wave : Quantity
      The wavelength of the measurement.
    afrho : Quantity
      The Afrho parameter.
    rap : Quantity
      The aperture radius.  May be angular size or projected linear
      size at the distance of the comet.
    geom : dictionary of Quantities or ephem.Geom
      The observing geometry via keywords `rh`, `delta`.
    sun : Quantity, optional
      Use this value for the solar flux density at 1 AU, or `None` to
      use `calib.solar_flux`.
    unit : Unit, optional
      Unit to pass to `calib.solar_flux`.
    bandpass : dict, optional
      Instead of using `sun`, set to a dictionary of keywords to pass,
      along with the solar spectrum, to `util.bandpass`.

    Returns
    -------
    fluxd : Quantity
      The flux density of the comet.

    Notes
    -----
    Farnham, Schleicher, and A'Hearn (2000), Hale-Bopp
    filter set:

      UC = 0.3449 * u.um, qUC = 2.716e17 -> 908.9 * u.Unit('W/m2/um')
      BC = 0.4453 * u.um, qBC = 1.276e17 -> 1934 * u.Unit('W/m2/um')
      GC = 0.5259 * u.um, qGC = 1.341e17 -> 1841 * u.Unit('W/m2/um')
      RC = 0.7133 * u.um, qRC = 1.975e17 -> 1250 * u.Unit('W/m2/um')

    """

    from . import util
    from . import calib

    # parameter check
    assert wave.unit.is_equivalent(u.um)
    assert afrho.unit.is_equivalent(u.um)
    assert geom['rh'].unit.is_equivalent(u.um)
    assert geom['delta'].unit.is_equivalent(u.um)

    if rap.unit.is_equivalent(u.cm):
        rho = rap.to(afrho.unit)
    elif rap.unit.is_equivalent(u.arcsec):
        rho = geom['delta'].to(afrho.unit) * rap.to(u.rad).value
    else:
        raise ValueError("rap must have angular or length units.")

    if sun is None:
        assert unit.is_equivalent('W/(m2 um)', u.spectral_density(wave))

        if bandpass is None:
            sun = calib.solar_flux(wave, unit=unit)
        else:
            sw, sf = calib.e490(smooth=True, unit=unit)
            sun = util.bandpass(sw.to(u.um).value, sf.value, **bandpass)[1]
            sun *= unit
    else:
        assert sun.unit.is_equivalent('W/(m2 um)', u.spectral_density(wave))

    fluxd = (afrho * rho * sun * (1 * u.au / geom['rh'])**2 / 4. /
             geom['delta']**2)
    return fluxd.to(unit)
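A hedged usage sketch with illustrative numbers (it assumes `calib.solar_flux` can supply the solar spectrum at the requested wavelength):

import astropy.units as u

geom = {'rh': 1.5 * u.au, 'delta': 1.0 * u.au}
fluxd = afrho2fluxd(0.55 * u.um, 1000 * u.cm, 5 * u.arcsec, geom)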
Example No. 9
def afrho2Q(Afrho,
            rap,
            geom,
            k,
            v1,
            u1=-0.5,
            u2=-1.0,
            Ap=0.05,
            rho_g=1 * u.Unit('g/cm3'),
            arange=[0.1, 1e4] * u.um):
    """Convert Afrho to dust production rate.

    The conversion assumes Afrho is measured within the 1/rho regime,
    and allows for a size-dependent expansion speed, and power-law
    size distributions.

      Q = 2 / (rho pi) \int_a0^a1 n(a) m(a) v(a) da

    where rho is the projected linear aperture radius at the distance
    of the comet, the particle radii range from a0 to a1, n(a) is the
    differential size distribution, m(a) is the mass of a grain with
    radius a, v(a) is the expansion speed of a grain with radius a.

    Parameters
    ----------
    Afrho : Quantity
      The Afrho value (at a phase angle of 0 deg).
    rap : Quantity
      The angular or linear radius of the projected aperture within
      which `Afrho` was measured.
    geom : dictionary of Quantity
      The observation geometry via keywords `rh`, `delta`.
    k : float
      The power-law slope of the differential size distribution.
    v1 : Quantity
      The expansion speed of 1 micron radius grains ejected at 1 AU
      from the sun.
    u1, u2 : float, optional
      Defines the relationship between expansion speed, grain radius,
      and heliocentric distance: v = v1 a^{u1} rh^{u2}.
    Ap : float, optional
      The geometric albedo of the dust at the same wavelength as
      `Afrho` is measured.
    rho_g : Quantity, optional
      The dust grain density.
    arange : Quantity array, optional
      The minimum and maximum grain sizes in the coma.

    Returns
    -------
    Q : Quantity
      The dust mass production rate.

    """

    from scipy.integrate import quad
    from numpy import pi

    Afrho = u.Quantity(Afrho, u.m)
    rh = u.Quantity(geom['rh'], u.au)
    delta = u.Quantity(geom['delta'], u.au)

    try:
        rho = u.Quantity(rap, u.m)
    except u.UnitsError:
        try:
            rho = (u.Quantity(rap, u.arcsec) * 725e3 * u.m / u.arcsec / u.au *
                   delta)
        except u.UnitsError:
            print('rap must have units of length or angular size.')
            raise

    v1 = u.Quantity(v1, u.m / u.s)
    rho_g = u.Quantity(rho_g, u.kg / u.m**3)
    arange = u.Quantity(arange, u.m)

    A = 4 * Ap
    cs = pi * quad(lambda a: a**(2 + k), *arange.value)[0] * u.m**2
    N = (Afrho * pi * rho / A / cs).decompose().value
    q = (4 * pi / 3 * rho_g * v1 * (rh / (1 * u.au))**u2 * 1e6**u1 *
         quad(lambda a: a**(k + 3 + u1), *arange.value)[0] * u.m**3)
    Q = (N * 2 / pi / rho * q).to(u.kg / u.s)

    return Q
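A hedged usage sketch with illustrative inputs (k = -3.5 is a commonly assumed differential size-distribution slope; the other values are made up for the example):

import astropy.units as u

geom = {'rh': 1.5 * u.au, 'delta': 1.0 * u.au}
Q = afrho2Q(1000 * u.cm, 5 * u.arcsec, geom, -3.5, 300 * u.m / u.s)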
Example No. 10
        spin_2y = data['spin2y'][j]
        spin_2z = data['spin2z'][j]
        iota = data['inclination'][j]
        phase = data['coa_phase'][j]
        distance = data['distance'][j]
        time = float(data['geocent_end_time'][j] + \
                     data['geocent_end_time_ns'][j] * 1.0e-9)
        pol = data['polarization'][j]
        lon = data['longitude'][j]
        lat = data['latitude'][j]
        lambda_1 = 0.0
        lambda_2 = lambdas[j]

        # convert longitude and latitude (in radians) to RA, DEC (in radians)
        t = at.Time(time, format='gps')
        sc = ac.SkyCoord(lon, lat, obstime=t, unit=au.Unit('rad'))
        ra = sc.ra.rad
        dec = sc.dec.rad

        # convert LALInference spin parameters into form desired by
        # waveform generators
        converted = inverse_transform_precessing_spins(iota, spin_1x, spin_1y, \
                                                       spin_1z, spin_2x, spin_2y, \
                                                       spin_2z, mass_1, mass_2, \
                                                       reference_frequency, phase)
        theta_jn, phi_jl, tilt_1, tilt_2, phi_12, a_1, a_2 = converted

        # We are going to inject a binary neutron star waveform.  We first establish a
        # dictionary of parameters that includes all of the different waveform
        # parameters, including masses of the black hole (mass_1) and NS (mass_2),
        # spins of both objects (a, tilt, phi), and deformabilities (lambdas,
Example No. 11
    def fluxd(self,
              observer,
              date,
              wave,
              rap=1.0 * u.arcsec,
              reflected=True,
              thermal=True,
              nucleus=True,
              coma=True,
              ltt=False,
              unit=u.Unit('W / (m2 um)'),
              **kwargs):
        """Total flux density as seen by an observer.

        Parameters
        ----------
        observer : SolarSysObject
          The observer.
        date : string, float, astropy Time, datetime
          The time of the observation in any format acceptable to
          `observer`.
        wave : Quantity
          The wavelengths to compute `fluxd`.
        rap : Quantity, optional
          The aperture radius, angular or projected distance at the
          comet.
        reflected : bool, optional
          If `True` include the reflected light model.
        thermal : bool, optional
          If `True` include the thermal emission model.
        nucleus : bool, optional
          If `True` include the nucleus.
        coma : bool, optional
          If `True` include the coma.
        ltt : bool, optional
          Set to `True` to correct the object's position for light
          travel time.
        unit : astropy Unit
          The return unit, must be flux density.
        
        Returns
        -------
        fluxd : Quantity

        """

        fluxd = np.zeros(np.size(wave.value)) * unit

        if nucleus:
            fluxd += self.nucleus.fluxd(observer,
                                        date,
                                        wave,
                                        reflected=reflected,
                                        thermal=thermal,
                                        unit=unit)
        if coma:
            fluxd += self.coma.fluxd(observer,
                                     date,
                                     wave,
                                     rap=rap,
                                     reflected=reflected,
                                     thermal=thermal,
                                     unit=unit)

        return fluxd
Example No. 12
def test_detector_units(name):
    # just check that such a unit exists and doesn't evaluate to False
    assert units.Unit(name)
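The `name` argument is presumably supplied by a pytest parametrization; a minimal sketch of how such a test is usually wired up (the unit strings here are illustrative, not the project's actual detector units):

import pytest
from astropy import units


@pytest.mark.parametrize('name', ['m', 'km / s', 'W'])
def test_detector_units(name):
    # just check that such a unit exists and doesn't evaluate to False
    assert units.Unit(name)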
Example No. 13
import pytest

from astropy import units

from ..units import parse_unit

__author__ = 'Duncan Macleod <*****@*****.**>'


@pytest.mark.parametrize('arg, unit', [
    (None, None),
    (units.m, units.m),
    ('meter', units.m),
    ('Volts', units.V),
    ('Meters/Second', units.m / units.s),
    ('Amp', units.ampere),
    ('MPC', units.megaparsec),
    ('degrees_C', units.Unit('Celsius')),
    ('DegC', units.Unit('Celsius')),
    ('degrees_F', units.Unit('Fahrenheit')),
])
def test_parse_unit(arg, unit):
    assert parse_unit(arg, parse_strict='silent') == unit


def test_parse_unit_strict():
    # check that errors get raised appropriately
    with pytest.raises(ValueError) as exc:
        parse_unit('metre', parse_strict='raise')

    # check that warnings get posted, and a custom NamedUnit gets returned
    with pytest.warns(units.UnitsWarning) as exc:
        u = parse_unit('metre', parse_strict='warn')
Example No. 14
    def __init__(self, wave, trans, wave_unit=u.AA,
                 trans_unit=u.dimensionless_unscaled, normalize=False,
                 name=None, trim_level=None):
        wave = np.asarray(wave, dtype=np.float64)
        trans = np.asarray(trans, dtype=np.float64)
        if wave.shape != trans.shape:
            raise ValueError('shape of wave and trans must match')
        if wave.ndim != 1:
            raise ValueError('only 1-d arrays supported')

        # Ensure that units are actually units and not quantities, so that
        # `to` method returns a float and not a Quantity.
        wave_unit = u.Unit(wave_unit)
        trans_unit = u.Unit(trans_unit)

        if wave_unit != u.AA:
            wave = wave_unit.to(u.AA, wave, u.spectral())

        # If transmission is in units of inverse energy, convert to
        # unitless transmission:
        #
        # (transmitted photons / incident photons) =
        #      (photon energy) * (transmitted photons / incident energy)
        #
        # where photon energy = h * c / lambda
        if trans_unit != u.dimensionless_unscaled:
            trans = (HC_ERG_AA / wave) * trans_unit.to(u.erg**-1, trans)

        # Check that values are monotonically increasing.
        # We could sort them, but if this happens, it is more likely a user
        # error or faulty bandpass definition. So we leave it to the user to
        # sort them.
        if not np.all(np.ediff1d(wave) > 0.):
            raise ValueError('bandpass wavelength values must be monotonically'
                             ' increasing when supplied in wavelength or '
                             'decreasing when supplied in energy/frequency.')

        if normalize:
            trans /= np.max(trans)

        # Trim "out-of-band" transmission
        if trim_level is not None:
            s = slice_exclude_below(trans, np.max(trans) * trim_level, grow=1)
            wave = wave[s]
            trans = trans[s]

        # if more than one leading or trailing transmissions are zero, we
        # can remove them.
        if ((trans[0] == 0.0 and trans[1] == 0.0) or (trans[-1] == 0.0 and
                                                      trans[-2] == 0.0)):
            i = 0
            while i < len(trans) and trans[i] == 0.0:
                i += 1
            if i == len(trans):
                raise ValueError('all zero transmission')
            j = len(trans) - 1
            while j >= 0 and trans[j] == 0.0:
                j -= 1

            # back out to include a single zero
            if i > 0:
                i -= 1
            if j < len(trans) - 1:
                j += 1

            wave = wave[i:j+1]
            trans = trans[i:j+1]

        self.wave = wave
        self.trans = trans

        # Set up interpolation.
        # This appears to be the fastest-evaluating interpolant in
        # scipy.interpolate.
        self._tck = splrep(self.wave, self.trans, k=1)

        self.name = name
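A short construction sketch, assuming the surrounding class is named `Bandpass` (only its `__init__` is shown above) and that `u` is `astropy.units`:

import numpy as np
import astropy.units as u

# hypothetical top-hat transmission curve between 4000 and 5000 Angstrom
wave = np.linspace(3500.0, 5500.0, 201)
trans = ((wave > 4000.0) & (wave < 5000.0)).astype(float)
band = Bandpass(wave, trans, wave_unit=u.AA, name='tophat')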
Example No. 15
# ---------------------------- astropy.units
# http://docs.astropy.org/en/stable/units/

import astropy.units as u

# defining a quantity: a value with a unit
print(1 * u.m)
print(type(1 * u.m))

# or...
print(1 * u.Unit('m'))
print(type(1 * u.Unit('m')))

# units can be used as you would expect:

# you can form multiplicative units (in this case a velocity)
a = 1 * u.m / u.s
print(a)

# # but you cannot add quantities with different units
# print(1*u.m + 2*u.s)

# units propagate properly through most astropy functions and the basic
# numpy functions
import numpy as np
b = 2 * u.m * u.m
print(b**2)
print(np.sqrt(b))

# in case a function cannot deal with units, you can strip the quantity
# into its unit and its plain numerical value
print(a.unit)
print(a.value)
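One more basic operation worth adding to this tour (standard astropy.units API): converting a quantity to compatible units with .to().

# convert a quantity to other compatible units
print(a.to(u.km / u.h))
print((1 * u.m).to(u.cm))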
Example No. 16
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import OrderedDict
from numpy.testing import assert_allclose
import pytest
from astropy import units as u
from ...utils.testing import requires_data, requires_dependency
from ...utils.testing import assert_quantity_allclose
from ..gammacat import SourceCatalogGammaCat
from ..gammacat import GammaCatResource, GammaCatResourceIndex

SOURCES = [
    {
        'name': 'Vela X',
        'spec_type': 'ecpl',
        'dnde_1TeV': 1.36e-11 * u.Unit('cm-2 s-1 TeV-1'),
        'dnde_1TeV_err': 7.531e-13 * u.Unit('cm-2 s-1 TeV-1'),
        'flux_1TeV': 2.104e-11 * u.Unit('cm-2 s-1'),
        'flux_1TeV_err': 1.973e-12 * u.Unit('cm-2 s-1'),
        'eflux_1_10TeV': 9.265778680255336e-11 * u.Unit('erg cm-2 s-1'),
        'eflux_1_10TeV_err': 9.590978299538194e-12 * u.Unit('erg cm-2 s-1'),
        'n_flux_points': 24,
        'is_pointlike': False,
        'spatial_model': 'SkyGaussian',
    },
    {
        'name': 'HESS J1848-018',
        'spec_type': 'pl',
        'dnde_1TeV': 3.7e-12 * u.Unit('cm-2 s-1 TeV-1'),
        'dnde_1TeV_err': 4e-13 * u.Unit('cm-2 s-1 TeV-1'),
        'flux_1TeV': 2.056e-12 * u.Unit('cm-2 s-1'),
Example No. 17
def plot_spectra(sampler,
                 mle_result,
                 fit_range=[0.03, 30] * u.TeV,
                 min_sample=50):
    joint_model = Log10Parabola(
        amplitude=3.78 * 1e-11 * u.Unit('cm-2 s-1 TeV-1'),
        reference=1 * u.Unit('TeV'),
        alpha=2.49 * u.Unit(''),
        beta=0.22 * u.Unit(''),
    )
    joint_model.plot(energy_range=fit_range,
                     energy_power=2,
                     color='black',
                     label='joint')

    r = np.median(sampler.chain[:, min_sample:, :3], axis=(0, 1))
    fitted_model = Log10Parabola(
        amplitude=r[0] * 1e-11 * u.Unit('cm-2 s-1 TeV-1'),
        reference=1 * u.Unit('TeV'),
        alpha=r[1] * u.Unit(''),
        beta=r[2] * u.Unit(''),
    )
    fitted_model.plot(energy_range=fit_range,
                      energy_power=2,
                      color='crimson',
                      label='mcmc')

    mle_model = Log10Parabola(
        amplitude=mle_result.x[0] * 1e-11 * u.Unit('cm-2 s-1 TeV-1'),
        reference=1 * u.Unit('TeV'),
        alpha=mle_result.x[1] * u.Unit(''),
        beta=mle_result.x[2] * u.Unit(''),
    )
    mle_model.plot(energy_range=fit_range,
                   energy_power=2,
                   color='orange',
                   ls='--',
                   label='mle')

    fact_model = Log10Parabola(
        amplitude=3.47 * 1e-11 * u.Unit('cm-2 s-1 TeV-1'),
        reference=1 * u.Unit('TeV'),
        alpha=2.56 * u.Unit(''),
        beta=0.4 * u.Unit(''),
    )
    fact_model.plot(energy_range=fit_range,
                    energy_power=2,
                    color='gray',
                    label='fact')

    magic_model = Log10Parabola(
        amplitude=4.20 * 1e-11 * u.Unit('cm-2 s-1 TeV-1'),
        reference=1 * u.Unit('TeV'),
        alpha=2.58 * u.Unit(''),
        beta=0.43 * u.Unit(''),
    )
    magic_model.plot(energy_range=fit_range,
                     energy_power=2,
                     color='gray',
                     ls='--',
                     label='magic')

    CrabSpectrum(reference='meyer').model.plot(energy_range=[0.01, 100] *
                                               u.TeV,
                                               energy_power=2,
                                               color='black',
                                               ls=':')
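A hedged usage sketch (it assumes `sampler` and `mle_result` come from the MCMC and MLE steps elsewhere in this script, and that matplotlib is used for display, as the gammapy plot helpers do):

import matplotlib.pyplot as plt

plot_spectra(sampler, mle_result, fit_range=[0.03, 30] * u.TeV)
plt.legend()
plt.show()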
Example No. 18
    def get_model(self, identifier, unit=None, ext="fits"):
        '''Get a specific model by its identifier

        :param identifier: a :class:`~pdrtpy.measurement.Measurement` ID. It can be an intensity or a ratio,
            e.g., "CII_158" or "CI_609/FIR"
        :type identifier: str
        :returns: The model matching the identifier
        :rtype: :class:`~pdrtpy.measurement.Measurement`
        :raises: KeyError if identifier not found in this ModelSet
        '''

        if identifier not in self.table["ratio"]:
            raise KeyError(f"{identifier} is not in this ModelSet")

        d = model_dir()
        _thefile = d + self._tabrow["path"] + self.table.loc[identifier][
            "filename"] + "." + ext
        _title = self._table.loc[identifier]['title']
        if unit is None:
            # make a guess at the unit
            if '/' in identifier:
                unit = self._default_unit["ratio"]
                modeltype = "ratio"
            else:
                unit = self._default_unit['intensity']
                modeltype = "intensity"
        else:
            if unit == u.dimensionless_unscaled:
                modeltype = "ratio"
            else:
                modeltype = "intensity"
        _model = Measurement.read(_thefile,
                                  title=_title,
                                  unit=unit,
                                  identifier=identifier)
        _wcs = _model.wcs
        _model.header["MODELTYP"] = modeltype
        _model.modeltype = modeltype
        #@todo this is messy.  clean up by doing if wcs.. first?
        if self.is_wk2006 or self.name == "smc":
            # fix WK2006 model headers
            if _wcs.wcs.cunit[0] == "":
                _model.header["CUNIT1"] = "cm^-3"
                _wcs.wcs.cunit[0] = u.Unit("cm^-3")
            else:
                _model.header["CUNIT1"] = str(_wcs.wcs.cunit[0])
            if _wcs.wcs.cunit[1] == "":
                _model.header["CUNIT2"] = "Habing"
                # Raises UnitScaleError:
                # "The FITS unit format is not able to represent scales that are not powers of 10.  Multiply your data by 1.600000e-03."
                # This causes all sorts of downstream problems.  Workaround in LineRatioFit.read_models().
                #_model.wcs.wcs.cunit[1] = habing_unit
        elif self.code == "KOSMA-tau":
            # fix KosmaTau model headers
            if _wcs.wcs.cunit[0] == "":
                _model.header["CUNIT1"] = "cm^-3"
                _wcs.wcs.cunit[0] = u.Unit("cm^-3")
            else:
                _model.header["CUNIT1"] = str(_wcs.wcs.cunit[0])
            if _wcs.wcs.cunit[1] == "":
                _model.header["CUNIT2"] = "Draine"
            else:
                _model.header["CUNIT2"] = str(_wcs.wcs.cunit[1])
        else:
            # copy wcs cunit to header. used later.
            _model.header["CUNIT1"] = str(_wcs.wcs.cunit[0])
            _model.header["CUNIT2"] = str(_wcs.wcs.cunit[1])

        return _model
Example No. 19
def calculate_flux(trace, percentiles=[16, 50, 84]):
    norm = 1 * u.Unit('km-2 s-1 TeV-1')
    flux = (trace['expected_counts'][:, :] * norm).to_value(
        1 / (u.TeV * u.s * u.cm**2))

    return np.percentile(flux, q=percentiles, axis=0)
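A usage note, as a sketch: the returned array has one row per requested percentile, so the default 16th/50th/84th bands unpack directly (assuming `trace` comes from the sampling step elsewhere in this script):

flux_lo, flux_med, flux_hi = calculate_flux(trace)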
Example No. 20
    def vue_fit_model_to_cube(self, *args, **kwargs):
        data = self.app.data_collection[self._selected_data_label]

        # First, ensure that the selected data is cube-like. It is possible
        # that the user has selected a pre-existing 1d data object.
        if data.ndim != 3:
            snackbar_message = SnackbarMessage(
                f"Selected data {self._selected_data_label} is not cube-like",
                color='error',
                sender=self)
            self.hub.broadcast(snackbar_message)
            return

        # Get the primary data component
        attribute = data.main_components[0]
        component = data.get_component(attribute)
        temp_values = data.get_data(attribute)

        # Transpose the axis order
        values = np.moveaxis(temp_values, 0, -1) * u.Unit(component.units)

        # We manually create a Spectrum1D object from the flux information
        #  in the cube we select
        wcs = data.coords.sub([WCSSUB_SPECTRAL])
        spec = Spectrum1D(flux=values, wcs=wcs)

        # TODO: in vuetify >2.3, timeout should be set to -1 to keep open
        #  indefinitely
        snackbar_message = SnackbarMessage(
            "Fitting model to cube...",
            loading=True, timeout=0, sender=self)
        self.hub.broadcast(snackbar_message)

        fitted_model, fitted_spectrum = fit_model_to_spectrum(
            spec,
            self._initialized_models.values(),
            self.model_equation,
            run_fitter=True)

        # Transpose the axis order back
        values = np.moveaxis(fitted_spectrum.flux.value, -1, 0)

        count = max(map(lambda s: int(next(iter(re.findall(r"\d$", s)), 0)),
                        self.data_collection.labels)) + 1

        label = f"{self.model_label} [Cube] {count}"

        # Create new glue data object
        output_cube = Data(label=label,
                           coords=data.coords)
        output_cube['flux'] = values
        output_cube.get_component('flux').units = \
            fitted_spectrum.flux.unit.to_string()

        # Add to data collection
        self.app.data_collection.append(output_cube)

        snackbar_message = SnackbarMessage(
            "Finished cube fitting",
            color='success', loading=False, sender=self)
        self.hub.broadcast(snackbar_message)
Example No. 21
 def integral(self, xmin, xmax):
     return np.exp(xmax * u.Unit('1 / TeV')) - np.exp(
         xmin * u.Unit('1 / TeV'))
Example No. 22
def table_model():
    energy = MapAxis.from_energy_bounds(0.1 * u.TeV, 100 * u.TeV, 1000).center

    model = PowerLawSpectralModel(
        index=2.3, amplitude="4 cm-2 s-1 TeV-1", reference="1 TeV"
    )
    dnde = model(energy)

    return TemplateSpectralModel(energy, dnde, 1)


TEST_MODELS = [
    dict(
        name="powerlaw",
        model=PowerLawSpectralModel(
            index=2.3 * u.Unit(""),
            amplitude=4 / u.cm ** 2 / u.s / u.TeV,
            reference=1 * u.TeV,
        ),
        val_at_2TeV=u.Quantity(4 * 2.0 ** (-2.3), "cm-2 s-1 TeV-1"),
        integral_1_10TeV=u.Quantity(2.9227116204223784, "cm-2 s-1"),
        eflux_1_10TeV=u.Quantity(6.650836884969039, "TeV cm-2 s-1"),
    ),
    dict(
        name="powerlaw",
        model=PowerLawSpectralModel(
            index=2 * u.Unit(""),
            amplitude=4 / u.cm ** 2 / u.s / u.TeV,
            reference=1 * u.TeV,
        ),
        val_at_2TeV=u.Quantity(1.0, "cm-2 s-1 TeV-1"),
Example No. 23
from math import *
import numpy as np
from astropy import constants as cts
from astropy import units as u
from astropy.constants import G, M_earth, R_earth


mu = G.value * M_earth.value
Re = R_earth.value
rad_earth = 6378.13  # Earth radius [km]
r_test = np.array([Re + 600.0 * 1000, 0, 50])
v_test = np.array([0, 6.5 * 1000, 0])
t = 0
M_fact = 7.292115E-5
mu_Sun = cts.GM_sun.to(u.Unit('au3 / day2')).value
F = 1 / 298.257223563  # WGS-84 flattening
mu_Earth = cts.GM_earth.to(u.Unit('km3 / s2')).value



def julian_day_from_utc(utc_time):
    """Returns julian day.
    """
    return days_since_2000(utc_time) + 2451545



def cartesian_to_spherical(data):
    '''
    Takes as an input a data set containing points in cartesian format (time, x, y, z) and returns the computed
Example No. 24
def test_ecpl_integrate():
    # regression test to check the numerical integration for small energy bins
    ecpl = ExpCutoffPowerLawSpectralModel()
    value = ecpl.integral(1 * u.TeV, 1.1 * u.TeV)
    assert_quantity_allclose(value, 8.380761e-14 * u.Unit("s-1 cm-2"))
Example No. 25
    energy = energy_edges.log_centers

    index = 2.3 * u.Unit("")
    amplitude = 4 / u.cm ** 2 / u.s / u.TeV
    reference = 1 * u.TeV
    pl = PowerLaw(index, amplitude, reference)
    flux = pl(energy)

    return TableModel(energy, flux, 1 * u.Unit(""))


TEST_MODELS = [
    dict(
        name="powerlaw",
        model=PowerLaw(
            index=2.3 * u.Unit(""),
            amplitude=4 / u.cm ** 2 / u.s / u.TeV,
            reference=1 * u.TeV,
        ),
        val_at_2TeV=u.Quantity(4 * 2.0 ** (-2.3), "cm-2 s-1 TeV-1"),
        integral_1_10TeV=u.Quantity(2.9227116204223784, "cm-2 s-1"),
        eflux_1_10TeV=u.Quantity(6.650836884969039, "TeV cm-2 s-1"),
    ),
    dict(
        name="powerlaw",
        model=PowerLaw(
            index=2 * u.Unit(""),
            amplitude=4 / u.cm ** 2 / u.s / u.TeV,
            reference=1 * u.TeV,
        ),
        val_at_2TeV=u.Quantity(1.0, "cm-2 s-1 TeV-1"),
Example No. 26
 def evaluate(x):
     return np.exp(x * u.Unit("1 / TeV"))
Example No. 27
    def from_columns(cls, columns, names, units=None, meta={}, **kwargs):
        """Create `~sbpy.data.DataClass` object from a sequence. If that
        sequence is one-dimensional, it is interpreted as
        a single column; if the sequence is two-dimensional, it is
        interpreted as a sequence of columns.

        Parameters
        ----------
        columns: list, `~numpy.ndarray`, tuple, or `~astropy.units.Quantity`
            Data that will be ingested in `DataClass` object. A
            one-dimensional sequence is interpreted as a single column.
            A two-dimensional sequence is interpreted as a sequence of
            columns, each of which must have the same length.
        names: str or list-like
            Field names, must have the same number of names as data columns.
            Please note that in order to make use of :ref:`fieldnames`
            and to ensure compatibility with sbpy functionality, the
            field names chosen must be in the list of :ref:`field name list`.
        units: str or list-like, optional
            Unit labels (as provided by `~astropy.units.Unit`) in which
            the data provided in ``columns`` will be stored in the underlying
            table. If None, the units as provided by ``columns``
            are used. If the units provided in ``units`` differ from those
            used in ``columns``, ``columns`` will be transformed to the units
            provided in ``units``. Must have the same length as ``names``
            and the individual data columns in ``columns``. Default: None
        meta: dictionary, optional
            Meta data that will be stored in the data table. Default:
            empty dictionary
        kwargs: additional keyword arguments, optional
            Additional keyword arguments that will be passed on to
            `~astropy.table.QTable` in the creation of the underlying
            data table.

        Returns
        -------
        `DataClass` object

        Examples
        --------
        The following example creates a single-column `~sbpy.data.Ephem`
        object.

        >>> from sbpy.data import Ephem
        >>> import astropy.units as u
        >>> eph = Ephem.from_columns([1, 2, 3, 4]*u.au,
        ...                          names='a')
        >>> eph
        <QTable length=4>
           a
           AU
        float64
        -------
            1.0
            2.0
            3.0
            4.0

        This example creates a two-column `~sbpy.data.Ephem` object in which
        units are assigned using the optional ``units`` keyword argument.

        >>> eph = Ephem.from_columns([[1, 2, 3, 4],
        ...                           [90, 50, 30, 10]],
        ...                          names=['r', 'alpha'],
        ...                          units=['au', 'deg'])
        >>> eph
        <QTable length=4>
           r     alpha
           AU     deg
        float64 float64
        ------- -------
            1.0    90.0
            2.0    50.0
            3.0    30.0
            4.0    10.0

        If units are provided in ``columns`` and ``units``, those units in
        ``columns`` will be transformed into those units in ``units`` on a
        per-column basis.

        >>> eph = Ephem.from_columns([[1, 2, 3, 4]*u.au,
        ...                           [90, 50, 30, 10]*u.deg],
        ...                           names=['r', 'alpha'],
        ...                           units=['km', 'rad'])
        >>> eph
        <QTable length=4>
                r                 alpha
                km                 rad
             float64             float64
        ------------------ -------------------
               149597870.7  1.5707963267948966
               299195741.4  0.8726646259971648
        448793612.09999996  0.5235987755982988
               598391482.8 0.17453292519943295
        """

        # turn single column name to a list
        if isinstance(names, str):
            names = [names]

        # turn single column to a list
        if not iterable(columns[0]):
            columns = [columns]
        elif isinstance(columns[0], (str, bytes)):
            columns = [columns]

        if units is not None:
            if all([isinstance(col, u.Quantity) for col in columns]):
                # if all columns have units, transform to `units`
                columns = [
                    val.to(unit) for val, unit in list(zip(columns, units))
                ]
            else:
                # if columns has no units, apply `units`
                columns = [
                    val * u.Unit(unit) if unit is not None else val
                    for val, unit in list(zip(columns, units))
                ]

        self = cls()
        self._table = QTable(columns, names=names, meta=meta, **kwargs)
        return self
Example No. 28
 def integral(self, xmin, xmax, **kwargs):
     return np.exp(xmax * u.Unit("1 / TeV")) - np.exp(
         xmin * u.Unit("1 / TeV"))
Example No. 29
 def test_velocities(self):
     assert self.model.v_inner.unit == u.Unit('cm/s')
     assert_almost_equal(self.model.v_inner[0].to(u.km / u.s).value, 9000)
Example No. 30
def read_table_fits(input, hdu=None, astropy_native=False, memmap=False,
                    character_as_bytes=True):
    """
    Read a Table object from a FITS file

    If the ``astropy_native`` argument is ``True``, then input FITS columns
    which are representations of an astropy core object will be converted to
    that class and stored in the ``Table`` as "mixin columns".  Currently this
    is limited to FITS columns which adhere to the FITS Time standard, in which
    case they will be converted to a `~astropy.time.Time` column in the output
    table.

    Parameters
    ----------
    input : str or file-like object or compatible `astropy.io.fits` HDU object
        If a string, the filename to read the table from. If a file object, or
        a compatible HDU object, the object to extract the table from. The
        following `astropy.io.fits` HDU objects can be used as input:
        - :class:`~astropy.io.fits.hdu.table.TableHDU`
        - :class:`~astropy.io.fits.hdu.table.BinTableHDU`
        - :class:`~astropy.io.fits.hdu.table.GroupsHDU`
        - :class:`~astropy.io.fits.hdu.hdulist.HDUList`
    hdu : int or str, optional
        The HDU to read the table from.
    astropy_native : bool, optional
        Read in FITS columns as native astropy objects where possible instead
        of standard Table Column objects. Default is False.
    memmap : bool, optional
        Whether to use memory mapping, which accesses data on disk as needed. If
        you are only accessing part of the data, this is often more efficient.
        If you want to access all the values in the table, and you are able to
        fit the table in memory, you may be better off leaving memory mapping
        off. However, if your table would not fit in memory, you should set this
        to `True`.
    character_as_bytes : bool, optional
        If `True`, string columns are stored as Numpy byte arrays (dtype ``S``)
        and are converted on-the-fly to unicode strings when accessing
        individual elements. If you need to use Numpy unicode arrays (dtype
        ``U``) internally, you should set this to `False`, but note that this
        will use more memory. If set to `False`, string columns will not be
        memory-mapped even if ``memmap`` is `True`.
    """

    if isinstance(input, HDUList):

        # Parse all table objects
        tables = dict()
        for ihdu, hdu_item in enumerate(input):
            if isinstance(hdu_item, (TableHDU, BinTableHDU, GroupsHDU)):
                tables[ihdu] = hdu_item

        if len(tables) > 1:
            if hdu is None:
                warnings.warn("hdu= was not specified but multiple tables"
                              " are present, reading in first available"
                              f" table (hdu={first(tables)})",
                              AstropyUserWarning)
                hdu = first(tables)

            # hdu might not be an integer, so we first need to convert it
            # to the correct HDU index
            hdu = input.index_of(hdu)

            if hdu in tables:
                table = tables[hdu]
            else:
                raise ValueError(f"No table found in hdu={hdu}")

        elif len(tables) == 1:
            if hdu is not None:
                msg = None
                try:
                    hdi = input.index_of(hdu)
                except KeyError:
                    msg = f"Specified hdu={hdu} not found"
                else:
                    if hdi >= len(input):
                        msg = f"Specified hdu={hdu} not found"
                    elif hdi not in tables:
                        msg = f"No table found in specified hdu={hdu}"
                if msg is not None:
                    warnings.warn(f"{msg}, reading in first available table "
                                  f"(hdu={first(tables)}) instead. This will"
                                  " result in an error in future versions!",
                                  AstropyDeprecationWarning)
            table = tables[first(tables)]

        else:
            raise ValueError("No table found")

    elif isinstance(input, (TableHDU, BinTableHDU, GroupsHDU)):

        table = input

    else:

        hdulist = fits_open(input, character_as_bytes=character_as_bytes,
                            memmap=memmap)

        try:
            return read_table_fits(hdulist, hdu=hdu,
                                   astropy_native=astropy_native)
        finally:
            hdulist.close()

    # Check if table is masked
    masked = any(col.null is not None for col in table.columns)

    # TODO: in future, it may make more sense to do this column-by-column,
    # rather than via the structured array.

    # In the loop below we access the data using data[col.name] rather than
    # col.array to make sure that the data is scaled correctly if needed.
    data = table.data

    columns = []
    for col in data.columns:

        # Set column data
        if masked:
            column = MaskedColumn(data=data[col.name], name=col.name, copy=False)
            if col.null is not None:
                column.set_fill_value(col.null)
                column.mask[column.data == col.null] = True
        else:
            column = Column(data=data[col.name], name=col.name, copy=False)

        # Copy over units
        if col.unit is not None:
            column.unit = u.Unit(col.unit, format='fits', parse_strict='silent')

        # Copy over display format
        if col.disp is not None:
            column.format = _fortran_to_python_format(col.disp)

        columns.append(column)

    # Create Table object
    t = Table(columns, masked=masked, copy=False)

    # TODO: deal properly with unsigned integers

    hdr = table.header
    if astropy_native:
        # Avoid circular imports, and also only import if necessary.
        from .fitstime import fits_to_time
        hdr = fits_to_time(hdr, t)

    for key, value, comment in hdr.cards:

        if key in ['COMMENT', 'HISTORY']:
            # Convert to io.ascii format
            if key == 'COMMENT':
                key = 'comments'

            if key in t.meta:
                t.meta[key].append(value)
            else:
                t.meta[key] = [value]

        elif key in t.meta:  # key is duplicate

            if isinstance(t.meta[key], list):
                t.meta[key].append(value)
            else:
                t.meta[key] = [t.meta[key], value]

        elif is_column_keyword(key) or key in REMOVE_KEYWORDS:

            pass

        else:

            t.meta[key] = value

    # TODO: implement masking

    # Decode any mixin columns that have been stored as standard Columns.
    t = _decode_mixins(t)

    return t
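A hedged usage sketch (the file name and HDU are illustrative; this calls the function defined above rather than the public Table.read interface):

# read the table in the first extension of a hypothetical catalogue file
t = read_table_fits('catalog.fits', hdu=1)
print(t.colnames)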