Example #1
    def set_units(self, disp_unit, data_unit):
        if self.dispersion_unit.is_equivalent(disp_unit,
                                              equivalencies=spectral()):
            self._dispersion = self.dispersion.data.to(
                disp_unit, equivalencies=spectral()).value

            # Finally, change the unit
            self.dispersion_unit = disp_unit
        else:
            logging.warning("Units are not compatible.")

        if self.unit.is_equivalent(data_unit,
                                   equivalencies=spectral_density(
                                       self.dispersion.data)):
            self._data = self.data.data.to(
                data_unit, equivalencies=spectral_density(
                    self.dispersion.data)).value

            self._uncertainty = self._uncertainty.__class__(
                self.raw_uncertainty.data.to(
                    data_unit, equivalencies=spectral_density(
                        self.dispersion.data)).value)

            # Finally, change the unit
            self._unit = data_unit
        else:
            logging.warning("Units are not compatible.")
Example #2
    def bandflux(self, band):
        """Perform synthentic photometry in a given bandpass.
      
        The bandpass transmission is interpolated onto the wavelength grid
        of the spectrum. The result is a weighted sum of the spectral flux
        density values (weighted by transmission values).
        
        Parameters
        ----------
        band : Bandpass object or name of registered bandpass.

        Returns
        -------
        bandflux : float
            Total flux in ph/s/cm^2. If part of bandpass falls
            outside the spectrum, `None` is returned instead.
        bandfluxerr : float
            Error on flux. Only returned if the `error` attribute is not
            `None`.
        """

        band = get_bandpass(band)
        bwave, btrans = band.to_unit(self._wunit)

        if (bwave[0] < self._wave[0] or
            bwave[-1] > self._wave[-1]):
            return None

        idx = ((self._wave > bwave[0]) & 
               (self._wave < bwave[-1]))
        d = self._wave[idx]
        f = self._flux[idx]

        #TODO: use spectral density equivalencies once they can do photons.
        # first convert to ergs / s /cm^2 / (wavelength unit)
        target_unit = u.erg / u.s / u.cm**2 / self._wunit
        if self._unit != target_unit:
            f = self._unit.to(target_unit, f, 
                              u.spectral_density(self._wunit, d))

        # Then convert ergs to photons: photons = Energy / (h * nu)
        f = f / const.h.cgs.value / self._wunit.to(u.Hz, d, u.spectral())

        trans = np.interp(d, bwave, btrans)
        binw = np.gradient(d)
        ftot = np.sum(f * trans * binw)

        if self._error is None:
            return ftot

        else:
            e = self._error[idx]

            # Do the same conversion as above
            if self._unit != target_unit:
                e = self._unit.to(target_unit, e, 
                                  u.spectral_density(self._wunit, d))
            e = e / const.h.cgs.value / self._wunit.to(u.Hz, d, u.spectral())
            etot = np.sqrt(np.sum((e * binw) ** 2 * trans))
            return ftot, etot
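The erg-to-photon step in bandflux relies on E = h * nu; a standalone sketch with hypothetical flux values:

import numpy as np
import astropy.units as u
from astropy import constants as const

# hypothetical flux density in erg / s / cm^2 / Angstrom on a small grid
wave = np.array([4000.0, 5000.0, 6000.0])    # Angstrom
flam = np.array([1.0e-13, 2.0e-13, 1.5e-13])

# photon energy is h * nu, with nu obtained via the spectral() equivalency
nu = u.AA.to(u.Hz, wave, u.spectral())
photon_flux = flam / (const.h.cgs.value * nu)   # photons / s / cm^2 / Angstrom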
Example #3
    def from_config(cls, config, **kwargs):
        """
        Create a new Simulation instance from a Configuration object.

        Parameters
        ----------
        config : tardis.io.config_reader.Configuration
        **kwargs
            Allow overriding some structures, such as model, plasma, atomic data
            and the runner, instead of creating them from the configuration
            object.

        Returns
        -------
        Simulation

        """
        # Allow overriding some config structures. This is useful in some
        # unit tests, and could be extended in all the from_config classmethods.
        if 'model' in kwargs:
            model = kwargs['model']
        else:
            model = Radial1DModel.from_config(config)
        if 'plasma' in kwargs:
            plasma = kwargs['plasma']
        else:
            plasma = assemble_plasma(config, model,
                                     atom_data=kwargs.get('atom_data', None))
        if 'runner' in kwargs:
            runner = kwargs['runner']
        else:
            runner = MontecarloRunner.from_config(config)

        luminosity_nu_start = config.supernova.luminosity_wavelength_end.to(
                u.Hz, u.spectral())

        try:
            luminosity_nu_end = config.supernova.luminosity_wavelength_start.to(
                u.Hz, u.spectral())
        except ZeroDivisionError:
            luminosity_nu_end = np.inf * u.Hz

        last_no_of_packets = config.montecarlo.last_no_of_packets
        if last_no_of_packets is None or last_no_of_packets < 0:
            last_no_of_packets =  config.montecarlo.no_of_packets
        last_no_of_packets = int(last_no_of_packets)

        return cls(iterations=config.montecarlo.iterations,
                   model=model,
                   plasma=plasma,
                   runner=runner,
                   no_of_packets=int(config.montecarlo.no_of_packets),
                   no_of_virtual_packets=int(
                       config.montecarlo.no_of_virtual_packets),
                   luminosity_nu_start=luminosity_nu_start,
                   luminosity_nu_end=luminosity_nu_end,
                   last_no_of_packets=last_no_of_packets,
                   luminosity_requested=config.supernova.luminosity_requested.cgs,
                   convergence_strategy=config.montecarlo.convergence_strategy,
                   nthreads=config.montecarlo.nthreads)
Example #4
    def evaluate(self, packet_nu, packet_energy, virtual_nu, virtual_energy,
                 param_names, param_values):
        packet_lambda = packet_nu.to(u.angstrom, u.spectral())
        bin_counts = np.histogram(packet_lambda, bins=self.wavelength_bins)[0]

        uncertainty = (np.sqrt(bin_counts) * np.mean(packet_energy)
                       / np.diff(self.wavelength_bins))

        if self.mode == 'normal':
            luminosity = np.histogram(packet_lambda,
                          weights=packet_energy,
                          bins=self.wavelength_bins)[0]
        elif self.mode == 'virtual':
            virtual_packet_lambda = virtual_nu.to(u.angstrom, u.spectral())
            luminosity = np.histogram(virtual_packet_lambda,
                          weights=virtual_energy,
                          bins=self.wavelength_bins)[0]
            uncertainty /= self.virtual_uncertainty_scaling

        luminosity_density = luminosity / np.diff(self.wavelength_bins)

        self.luminosity_density = luminosity_density
        self.uncertainty = uncertainty.value
        return (self.observed_wavelength, luminosity_density, uncertainty.value,
                param_names, param_values)
Example #5
def test_equivalency_context_manager():
    base_registry = u.get_current_unit_registry()

    def just_to_from_units(equivalencies):
        return [(equiv[0], equiv[1]) for equiv in equivalencies]

    tf_dimensionless_angles = just_to_from_units(u.dimensionless_angles())
    tf_spectral = just_to_from_units(u.spectral())
    assert base_registry.equivalencies == []
    with u.set_enabled_equivalencies(u.dimensionless_angles()):
        new_registry = u.get_current_unit_registry()
        assert (set(just_to_from_units(new_registry.equivalencies)) ==
                set(tf_dimensionless_angles))
        assert set(new_registry.all_units) == set(base_registry.all_units)
        with u.set_enabled_equivalencies(u.spectral()):
            newer_registry = u.get_current_unit_registry()
            assert (set(just_to_from_units(newer_registry.equivalencies)) ==
                    set(tf_spectral))
            assert (set(newer_registry.all_units) ==
                    set(base_registry.all_units))

        assert (set(just_to_from_units(new_registry.equivalencies)) ==
                set(tf_dimensionless_angles))
        assert set(new_registry.all_units) == set(base_registry.all_units)
        with u.add_enabled_equivalencies(u.spectral()):
            newer_registry = u.get_current_unit_registry()
            assert (set(just_to_from_units(newer_registry.equivalencies)) ==
                    set(tf_dimensionless_angles) | set(tf_spectral))
            assert (set(newer_registry.all_units) ==
                    set(base_registry.all_units))

    assert base_registry is u.get_current_unit_registry()
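The context managers tested above let spectral() apply implicitly; a minimal usage sketch:

import astropy.units as u

# while spectral() is globally enabled, wavelength converts to frequency
# without passing equivalencies to every .to() call
with u.set_enabled_equivalencies(u.spectral()):
    nu = (500 * u.nm).to(u.Hz)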
Example #6
    def test_convert_back(self, unit_from, unit_to, convention, ref_unit):
        if unit_from in ('cms','cm/s','centimeter/second') or unit_to in ('cms','cm/s','centimeter/second'):
            xvals = np.linspace(1000,10000,3)
        else:
            xvals = np.linspace(1,10,3)
        assert not np.any(xvals==0.0)
        # all conversions include a * or / by speedoflight_ms
        threshold = np.spacing(units.speedoflight_ms) * 100
        if 'megameter' in unit_from or 'Mm' in unit_from:
            threshold *= 10
        if 'centimeter' in unit_from or 'cm' in unit_from:
            threshold *= 10

        unit_to = u.Unit(unit_to)
        unit_from = u.Unit(unit_from)
        # come up with a sane reference value
        if unit_from.physical_type in ('frequency','length'):
            refX = u.Quantity(5, unit_from).to(ref_unit, u.spectral())
        elif unit_to.physical_type in ('frequency','length'):
            refX = u.Quantity(5, unit_to).to(ref_unit, u.spectral())
        else:
            refX = u.Quantity(5, ref_unit)
        xarr = units.SpectroscopicAxis(np.copy(xvals), unit=unit_from, refX=refX,
                                       velocity_convention=convention)
        xarr.convert_to_unit(unit_to, velocity_convention=convention,
                             make_dxarr=False)
        xarr.convert_to_unit(unit_from, velocity_convention=convention,
                             make_dxarr=False)
        infinites = np.isinf(xarr.value)
        assert all(np.abs((xarr.value - xvals)/xvals)[~infinites] < threshold)
        assert xarr.unit == unit_from
Example #7
    def pixel_range(self, waverange, **kwargs):
        """Calculate the number of pixels within the given wavelength
        range and ``self.binwave``.

        Parameters
        ----------
        waverange : tuple of float or `astropy.units.quantity.Quantity`
            Lower and upper limits of the desired wavelength range.
            If not a Quantity, assumed to be in the same unit as
            ``self.binwave``.

        kwargs : dict
            Keywords accepted by :func:`synphot.binning.pixel_range`.

        Returns
        -------
        npix : number
            Number of pixels.

        Raises
        ------
        synphot.exceptions.UndefinedBinset
            Missing binned data.

        """
        self._set_data(True)  # Use binned data

        w1 = units.validate_quantity(
            waverange[0], self._wave.unit, equivalencies=u.spectral())
        w2 = units.validate_quantity(
            waverange[-1], self._wave.unit, equivalencies=u.spectral())

        return binning.pixel_range(
            self._wave.value, (w1.value, w2.value), **kwargs)
Example #8
def test_equivalent_units2():
    units = set(u.Hz.find_equivalent_units(u.spectral()))
    match = set(
        [u.AU, u.Angstrom, u.Hz, u.J, u.Ry, u.cm, u.eV, u.erg, u.lyr,
         u.m, u.micron, u.pc, u.solRad, u.Bq, u.Ci, u.k, u.earthRad,
         u.jupiterRad])
    assert units == match

    from astropy.units import imperial
    with u.add_enabled_units(imperial):
        units = set(u.Hz.find_equivalent_units(u.spectral()))
        match = set(
            [u.AU, u.Angstrom, imperial.BTU, u.Hz, u.J, u.Ry,
             imperial.cal, u.cm, u.eV, u.erg, imperial.ft, imperial.fur,
             imperial.inch, imperial.kcal, u.lyr, u.m, imperial.mi,
             imperial.mil, u.micron, u.pc, u.solRad, imperial.yd, u.Bq, u.Ci,
             imperial.nmi, u.k, u.earthRad, u.jupiterRad])
        assert units == match

    units = set(u.Hz.find_equivalent_units(u.spectral()))
    match = set(
        [u.AU, u.Angstrom, u.Hz, u.J, u.Ry, u.cm, u.eV, u.erg, u.lyr,
         u.m, u.micron, u.pc, u.solRad, u.Bq, u.Ci, u.k, u.earthRad,
         u.jupiterRad])
    assert units == match
Example #9
def parse_spectrum_list2dict(spectrum_list):
    """
    Parse the spectrum list [start, stop, num] into a spectrum configuration dictionary
    """
    if 'start' in spectrum_list and 'stop' in spectrum_list \
            and 'num' in spectrum_list:
        spectrum_list = [spectrum_list['start'], spectrum_list['stop'],
                         spectrum_list['num']]
    if spectrum_list[0].unit.physical_type != 'length' or \
            spectrum_list[1].unit.physical_type != 'length':
        raise ValueError('start and end of spectrum need to be a length')


    spectrum_config_dict = {}
    spectrum_config_dict['start'] = spectrum_list[0]
    spectrum_config_dict['end'] = spectrum_list[1]
    spectrum_config_dict['bins'] = spectrum_list[2]

    spectrum_frequency = quantity_linspace(
        spectrum_config_dict['end'].to('Hz', u.spectral()),
        spectrum_config_dict['start'].to('Hz', u.spectral()),
        num=spectrum_config_dict['bins'] + 1)

    spectrum_config_dict['frequency'] = spectrum_frequency

    return spectrum_config_dict
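The frequency grid built by quantity_linspace above can be approximated directly with np.linspace on Quantities (assuming a recent numpy/astropy where np.linspace accepts Quantity arguments); hypothetical wavelength limits:

import numpy as np
import astropy.units as u

# convert the wavelength limits to Hz and lay down a uniform frequency grid
nu_lo = (700 * u.nm).to(u.Hz, u.spectral())
nu_hi = (400 * u.nm).to(u.Hz, u.spectral())
frequency = np.linspace(nu_lo, nu_hi, num=101)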
Example #10
    def from_config(cls, config):
        """
        Create a new MontecarloRunner instance from a Configuration object.

        Parameters
        ----------
        config : tardis.io.config_reader.Configuration

        Returns
        -------
        MontecarloRunner

        """
        if config.plasma.disable_electron_scattering:
            logger.warning('Disabling electron scattering - this is not physical')
            sigma_thomson = 1e-200 / (u.cm ** 2)
        else:
            logger.debug("Electron scattering switched on")
            sigma_thomson = 6.652486e-25 / (u.cm ** 2)

        spectrum_frequency = quantity_linspace(
            config.spectrum.stop.to('Hz', u.spectral()),
            config.spectrum.start.to('Hz', u.spectral()),
            num=config.spectrum.num + 1)

        return cls(seed=config.montecarlo.seed,
                   spectrum_frequency=spectrum_frequency,
                   virtual_spectrum_range=config.montecarlo.virtual_spectrum_range,
                   sigma_thomson=sigma_thomson,
                   enable_reflective_inner_boundary=config.montecarlo.enable_reflective_inner_boundary,
                   inner_boundary_albedo=config.montecarlo.inner_boundary_albedo,
                   line_interaction_type=config.plasma.line_interaction_type,
                   distance=config.supernova.get('distance', None))
Example #11
def kappa(nu, nu0=271.1*u.GHz, kappa0=0.0114*u.cm**2*u.g**-1, beta=1.75):
    """
    Compute the opacity $\kappa$ given a reference frequency (or wavelength)
    and a power law governing the opacity as a function of frequency:

    $$ \kappa = \kappa_0 \left(\\frac{\\nu}{\\nu_0}\\right)^{\\beta} $$

    The default kappa=0.0114 at 271.1 GHz comes from extrapolating the
    Ossenkopf & Henning 1994 opacities for the thin-ice-mantle, 10^6 year model
    anchored at 1.0 mm with an assumed beta of 1.75.

    Parameters
    ----------
    nu: astropy.Quantity [u.spectral() equivalent]
        The frequency at which to evaluate kappa
    nu0: astropy.Quantity [u.spectral() equivalent]
        The reference frequency at which $\kappa$ is defined
    kappa0: astropy.Quantity [cm^2/g]
        The dust opacity per gram of H2 along the line of sight.  Because of
        the H2 conversion, this factor implicitly includes a dust to gas ratio
        (usually assumed 100)
    beta: float
        The power-law index governing kappa as a function of nu
    """
    return (kappa0 * (nu.to(u.GHz, u.spectral()) /
                      nu0.to(u.GHz, u.spectral()))**beta).to(u.cm**2 / u.g)
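Because kappa converts its argument to GHz through spectral(), a wavelength works just as well as a frequency; a usage sketch assuming the function above is in scope:

import astropy.units as u

# evaluate the opacity at 1.1 mm; the wavelength is converted to GHz internally
k = kappa(1.1 * u.mm)
print(k.to(u.cm**2 / u.g))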
Example #12
    def to_hdf5_group(self, group, name):

        self.check_all_set()

        # Get spectral coordinate in Hz and transmission in fractional terms
        nu = self.spectral_coord.to(u.Hz, equivalencies=u.spectral()).value
        tr = self.transmission.to(u.one).value

        # Sort in order of increasing Hz
        order = np.argsort(nu)
        nu = nu[order]
        tr = tr[order]

        # Get other parameters for the normalization
        nu0 = self.central_spectral_coord.to(u.Hz, equivalencies=u.spectral()).value
        alpha = self.alpha
        beta = self._beta

        # Here we normalize the filter before passing it to Hyperion
        tr_norm = (tr / nu ** (1 + beta) / nu0 ** alpha
                   / integrate(nu, tr / nu ** (1.0 + alpha + beta)))

        # Now multiply by nu so that Hyperion returns nu * Fnu
        tr_norm *= nu

        dset = group.create_dataset(
            name, data=np.array(list(zip(nu, tr, tr_norm)), dtype=[("nu", float), ("tr", float), ("tn", float)])
        )

        dset.attrs["name"] = np.string_(self.name)

        dset.attrs["alpha"] = self.alpha
        dset.attrs["beta"] = self._beta
        dset.attrs["nu0"] = self.central_spectral_coord.to(u.Hz, equivalencies=u.spectral()).value
Example #13
    def __readCDMS(self):
        
        '''
        Read data from CDMS line list catalogs for a specific molecule.
        
        '''
        
        data = DataIO.readFile(self.fn, replace_spaces=0)
        print('Reading data from CDMS database for')
        print(self.fn)
        
        #-- If the uncertainties are negative, change the unit of min/max to 
        #   cm-1
        uncertainties = [float(line[13:21]) for line in data]
        if min(uncertainties) < 0 and max(uncertainties) == 0:
            self.x_min = self.x_min.to(1./u.cm,equivalencies=u.spectral())
            self.x_max = self.x_max.to(1./u.cm,equivalencies=u.spectral())
        elif min(uncertainties) < 0 and max(uncertainties) > 0:
            raise ValueError('Uncertainties in CDMS input file for ' + \
                             'file %s are ambiguous.'\
                             %self.fn)

        data = self.__parseCatalog(data)

        #-- If unit was changed, change the f values to MHz, the default unit
        rcm = u.Unit("1 / cm")
        if self.x_min.unit == rcm:
            data = sorted([[(entry*rcm).to(u.MHz,equivalencies=u.spectral()) 
                                if not i else entry
                            for i,entry in enumerate(line)] 
                           for line in data])
        self.line_list = data
Example #14
    def _xunit_is_equivalent(self, unit):
        from .datasource import UtilsUnits as Utils
        from astropy.units import spectral
        assert not Utils.is_dimensionless(self.xunit)
        _a = Utils.are_equivalents(unit, self._xunit_kind, spectral())
        _b = Utils.are_equivalents(unit, self.xunit, spectral())
        return _a and _b
Example #15
def parse_spectral_bin(spectral_bin_boundary_1, spectral_bin_boundary_2):
    spectral_bin_boundary_1 = parse_quantity(spectral_bin_boundary_1).to('Angstrom', u.spectral())
    spectral_bin_boundary_2 = parse_quantity(spectral_bin_boundary_2).to('Angstrom', u.spectral())

    spectrum_start_wavelength = min(spectral_bin_boundary_1, spectral_bin_boundary_2)
    spectrum_end_wavelength = max(spectral_bin_boundary_1, spectral_bin_boundary_2)

    return spectrum_start_wavelength, spectrum_end_wavelength
Example #16
def cdelt_derivative(crval, cdelt, intype, outtype, linear=False, rest=None):
    if intype == outtype:
        return cdelt
    elif set((outtype, intype)) == set(("length", "frequency")):
        # Symmetric equations!
        return (-constants.c / crval ** 2 * cdelt).to(PHYS_UNIT_DICT[outtype])
    elif outtype in ("frequency", "length") and intype == "speed":
        if linear:
            numer = cdelt * rest.to(PHYS_UNIT_DICT[outtype], u.spectral())
            denom = constants.c
        else:
            numer = cdelt * constants.c * rest.to(PHYS_UNIT_DICT[outtype], u.spectral())
            denom = (constants.c + crval) * (constants.c ** 2 - crval ** 2) ** 0.5
        if outtype == "frequency":
            return (-numer / denom).to(PHYS_UNIT_DICT[outtype], u.spectral())
        else:
            return (numer / denom).to(PHYS_UNIT_DICT[outtype], u.spectral())
    elif outtype == "speed" and intype in ("frequency", "length"):

        if linear:
            numer = cdelt * constants.c
            denom = rest.to(PHYS_UNIT_DICT[intype], u.spectral())
        else:
            numer = 4 * constants.c * crval * rest.to(crval.unit, u.spectral()) ** 2 * cdelt
            denom = (crval ** 2 + rest.to(crval.unit, u.spectral()) ** 2) ** 2
        if intype == "frequency":
            return (-numer / denom).to(PHYS_UNIT_DICT[outtype], u.spectral())
        else:
            return (numer / denom).to(PHYS_UNIT_DICT[outtype], u.spectral())
    elif intype == "air wavelength":
        raise TypeError("Air wavelength should be converted to vacuum earlier.")
    elif outtype == "air wavelength":
        raise TypeError("Conversion to air wavelength not supported.")
    else:
        raise ValueError("Invalid in/out frames")
Example #17
def cdelt_derivative(crval, cdelt, intype, outtype, linear=False, rest=None):
    if intype == outtype:
        return cdelt
    elif set((outtype,intype)) == set(('length','frequency')):
        # Symmetric equations!
        return (-constants.c / crval**2 * cdelt).to(PHYS_UNIT_DICT[outtype])
    elif outtype in ('frequency','length') and intype == 'speed':
        if linear:
            numer = cdelt * rest.to(PHYS_UNIT_DICT[outtype], u.spectral())
            denom = constants.c
        else:
            numer = cdelt * constants.c * rest.to(PHYS_UNIT_DICT[outtype], u.spectral())
            denom = (constants.c + crval)*(constants.c**2 - crval**2)**0.5
        if outtype == 'frequency':
            return (-numer/denom).to(PHYS_UNIT_DICT[outtype], u.spectral())
        else:
            return (numer/denom).to(PHYS_UNIT_DICT[outtype], u.spectral())
    elif outtype == 'speed' and intype in ('frequency','length'):

        if linear:
            numer = cdelt * constants.c
            denom = rest.to(PHYS_UNIT_DICT[intype], u.spectral())
        else:
            numer = 4 * constants.c * crval * rest.to(crval.unit, u.spectral())**2 * cdelt
            denom = (crval**2 + rest.to(crval.unit, u.spectral())**2)**2
        if intype == 'frequency':
            return (-numer/denom).to(PHYS_UNIT_DICT[outtype], u.spectral())
        else:
            return (numer/denom).to(PHYS_UNIT_DICT[outtype], u.spectral())
    else:
        raise ValueError("Invalid in/out frames")
Example #18
def bbfunc(wavelengths, temperature):
    """Planck law for blackbody radiation in PHOTLAM per steradian.

    .. warning::

        Data points where overflow or underflow occurs will be set
        to zeroes.

    Parameters
    ----------
    wavelengths : array_like or `~astropy.units.quantity.Quantity`
        Wavelength values. If not a Quantity, assumed to be in Angstrom.

    temperature : float or `~astropy.units.quantity.Quantity`
        Blackbody temperature. If not a Quantity, assumed to be in Kelvin.

    Returns
    -------
    fluxes : `~astropy.units.quantity.Quantity`
        Blackbody radiation in PHOTLAM per steradian.

    """
    # Silence Numpy
    old_np_err_cfg = np.seterr(all='ignore')

    # Calculations must use Angstrom
    wavelengths = units.validate_quantity(
        wavelengths, u.AA, equivalencies=u.spectral()).astype(np.float64)

    # Calculations must use Kelvin
    temperature = units.validate_quantity(temperature, u.K).astype(np.float64)

    x = wavelengths * temperature

    # Catch division by zero
    mask = x > 0
    x = np.where(mask, units.HC / (const.k_B.cgs * x), 0.0)

    # Catch overflow/underflow
    mask = (x >= _VERY_SMALL) & (x < _VERY_LARGE)
    factor = np.where(mask, 1.0 / np.expm1(x), 0.0)

    # Convert FNU to PHOTLAM
    freq = u.Quantity(np.where(
        factor, wavelengths.to(u.Hz, equivalencies=u.spectral()), 0.0), u.Hz)
    bb_nu = 2.0 * const.h * factor * freq * freq * freq / const.c ** 2
    bb_lam = np.where(
        factor, units.FNU.to(units.PHOTLAM, bb_nu.cgs.value,
                             equivalencies=u.spectral_density(wavelengths)),
        0.0)

    # Restore Numpy settings
    dummy = np.seterr(**old_np_err_cfg)

    return u.Quantity(bb_lam, unit=units.PHOTLAM/u.sr)
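The frequencies entering the 2 h nu^3 / c^2 factor in bbfunc come from the wavelength grid through spectral(); a reduced Planck-law sketch with hypothetical inputs (no overflow handling):

import numpy as np
import astropy.units as u
from astropy import constants as const

# B_nu for a 5000 K blackbody on a small wavelength grid
wavelengths = np.linspace(1000.0, 20000.0, 5) * u.AA
freq = wavelengths.to(u.Hz, equivalencies=u.spectral())
temperature = 5000 * u.K
x = (const.h * freq / (const.k_B * temperature)).to(u.one)
bb_nu = (2 * const.h * freq**3 / const.c**2) / np.expm1(x)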
Example #19
def _(attr, results):
    return set(
        it for it in results
        if
        it.wave.wavemax is not None
        and
        attr.min <= it.wave.wavemax.to(u.angstrom, equivalencies=u.spectral())
        and
        it.wave.wavemin is not None
        and
        attr.max >= it.wave.wavemin.to(u.angstrom, equivalencies=u.spectral())
    )
Example #20
    def shift_rv(self, rv):
        '''Shift spectrum by rv

        Parameters
        ----------
        rv : :class:`~astropy.units.Quantity`
            radial velocity (positive value will red-shift the spectrum, negative
            value will blue-shift the spectrum)
        '''
        self[self.dispersion] = (self.disp.to(u.m, equivalencies=u.spectral()) * (
                1. * u.dimensionless_unscaled + rv / const.c)).to(
                self.disp.unit, equivalencies=u.spectral()).value
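The shift applied above is the non-relativistic Doppler relation lambda' = lambda * (1 + v/c); a numeric sketch with a hypothetical radial velocity:

import astropy.units as u
from astropy import constants as const

# +300 km/s red-shifts H-alpha at 6563 Angstrom by roughly 6.6 Angstrom
disp = 6563 * u.AA
rv = 300 * u.km / u.s
shifted = (disp.to(u.m, equivalencies=u.spectral()) * (1 + rv / const.c)).to(
    disp.unit, equivalencies=u.spectral())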
Example #21
    def test_broadband(self, tmpdir):

        np.random.seed(12345)

        from ..filter import Filter

        f1_wav = np.linspace(5., 1., 100) * u.micron

        f1 = Filter()
        f1.name = 'alice'
        f1.central_wavelength = 7. * u.micron
        f1.nu = f1_wav.to(u.Hz, equivalencies=u.spectral())
        f1.response = np.random.random(100)
        f1.normalize()

        f2_wav = np.linspace(15., 10., 100) * u.micron

        f2 = Filter()
        f2.name = 'bob'
        f2.central_wavelength = 12. * u.micron
        f2.nu = f2_wav.to(u.Hz, equivalencies=u.spectral())
        f2.response = np.random.random(100)
        f2.normalize()

        f3_wav = np.linspace(25., 15., 100) * u.micron

        f3 = Filter()
        f3.name = 'eve'
        f3.central_wavelength = 20. * u.micron
        f3.nu = f3_wav.to(u.Hz, equivalencies=u.spectral())
        f3.response = np.random.random(100)
        f3.normalize()

        from ..convolve import convolve_model_dir

        convolve_model_dir(self.tmpdir, filters=[f1, f2, f3])

        from ..fit import fit

        data_file = tmpdir.join('data').strpath
        open(data_file, 'w').write(DATA.strip())

        output_file = tmpdir.join('output').strpath

        fit(data_file, ['bob', 'alice', 'eve'], [1., 3., 3.] * u.arcsec, self.tmpdir, output_file,
            extinction_law=self.extinction,
            distance_range=[1., 2.] * u.kpc,
            av_range=[0., 0.1],
            output_format=('F', 3.),
            output_convolved=False)

        self._postprocess(output_file, tmpdir)
Example #22
    def nu_bins(self):
        """frequency grid for the opacity evaluation"""
        if self._nu_bins is None:
            nu_max = self.lam_min.to("Hz", equivalencies=units.spectral())
            nu_min = self.lam_max.to("Hz", equivalencies=units.spectral())
            if self.bin_scaling == "log":
                nu_bins = (np.logspace(np.log10(nu_min.value),
                                       np.log10(nu_max.value), self.nbins+1) *
                           units.Hz)
            elif self.bin_scaling == "linear":
                nu_bins = np.linspace(nu_min, nu_max, self.nbins+1)
            self._nu_bins = nu_bins
        return self._nu_bins
Example #23
def write_input(temperature=10, column=1e12, collider_densities={'H2':1},
        bw=0.01, tbg=2.73, species='co', velocity_gradient=1.0, minfreq=1,
        maxfreq=10, delete_tempfile=True):
    """
    Write radex.inp file parameters

    Parameters
    ----------
    temperature : float
        Kinetic temperature (K)
    collider_densities : dict
        Collider names and their number densities
    column : float
        column density of the molecule
    species : str
        Name of the molecule (specifically, the prefix for the file name, e.g.
        for "co.dat", species='co').  Case sensitive!
    tbg : float
        Temperature of the background radiation (e.g. CMB)
    velocity_gradient : float
        Velocity gradient per pc in km/s
    """

    if hasattr(minfreq, 'unit'):
        minfreq = unitless(minfreq.to('GHz',u.spectral()))
    if hasattr(maxfreq, 'unit'):
        maxfreq = unitless(maxfreq.to('GHz',u.spectral()))

    infile = tempfile.NamedTemporaryFile(mode='w', delete=delete_tempfile)
    outfile = tempfile.NamedTemporaryFile(mode='w', delete=delete_tempfile)
    infile.write(species+'.dat\n')
    infile.write(outfile.name+'\n')
    infile.write(str(minfreq)+' '+str(maxfreq)+'\n')
    infile.write(str(temperature)+'\n')

    # RADEX doesn't allow densities < 1e-3
    for k in list(collider_densities.keys()):
        if collider_densities[k] < 1e-3:
            collider_densities.pop(k)

    infile.write('%s\n' % len(collider_densities))
    for name, dens in collider_densities.items():
        infile.write('%s\n' % name)
        infile.write(str(dens)+'\n')
    infile.write(str(tbg)+'\n')
    infile.write(str(column)+'\n')
    infile.write(str(velocity_gradient)+'\n')
    # end the input file
    infile.write('0\n')
    infile.flush()
    return infile,outfile
Example #24
def parse_supernova_section(supernova_dict):
    """
    Parse the supernova section

    Parameters
    ----------

    supernova_dict: dict
        YAML parsed supernova dict

    Returns
    -------

    config_dict: dict

    """
    config_dict = {}

    # parse luminosity
    luminosity_value, luminosity_unit = supernova_dict["luminosity_requested"].strip().split()

    if luminosity_unit == "log_lsun":
        config_dict["luminosity_requested"] = (
            10 ** (float(luminosity_value) + np.log10(constants.L_sun.cgs.value)) * u.erg / u.s
        )
    else:
        config_dict["luminosity_requested"] = (float(luminosity_value) * u.Unit(luminosity_unit)).to("erg/s")

    config_dict["time_explosion"] = parse_quantity(supernova_dict["time_explosion"]).to("s")

    if "distance" in supernova_dict:
        config_dict["distance"] = parse_quantity(supernova_dict["distance"])
    else:
        config_dict["distance"] = None

    if "luminosity_wavelength_start" in supernova_dict:
        config_dict["luminosity_nu_end"] = parse_quantity(supernova_dict["luminosity_wavelength_start"]).to(
            "Hz", u.spectral()
        )
    else:
        config_dict["luminosity_nu_end"] = np.inf * u.Hz

    if "luminosity_wavelength_end" in supernova_dict:
        config_dict["luminosity_nu_start"] = parse_quantity(supernova_dict["luminosity_wavelength_end"]).to(
            "Hz", u.spectral()
        )
    else:
        config_dict["luminosity_nu_start"] = 0.0 * u.Hz

    return config_dict
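The log_lsun branch above turns a base-10 logarithm of the luminosity in solar units into erg/s; a standalone sketch with a hypothetical value:

import numpy as np
import astropy.units as u
from astropy import constants

# "9.44 log_lsun" means 10**9.44 solar luminosities, expressed in erg / s
log_l = 9.44
luminosity = 10 ** (log_l + np.log10(constants.L_sun.cgs.value)) * u.erg / u.s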
Example #25
def test_spectral4(in_val, in_unit):
    """Wave number conversion w.r.t. wavelength, freq, and energy."""
    # Spectroscopic and angular
    out_units = [u.micron ** -1, u.radian / u.micron]
    answers = [[1e+5, 2.0, 1.0], [6.28318531e+05, 12.5663706, 6.28318531]]

    for out_unit, ans in zip(out_units, answers):
        # Forward
        a = in_unit.to(out_unit, in_val, u.spectral())
        assert_allclose(a, ans)

        # Backward
        b = out_unit.to(in_unit, ans, u.spectral())
        assert_allclose(b, in_val)
Example #26
    def test_input_units_equivalencies(self):

        self.model._input_units = {'a': u.micron}

        with pytest.raises(UnitsError) as exc:
            self.model(3 * u.PHz, 3)
        assert exc.value.args[0] == ("MyTestModel: Units of input 'a', PHz (frequency), could "
                                     "not be converted to required input units of "
                                     "micron (length)")

        self.model.input_units_equivalencies = {'a': u.spectral()}

        assert_quantity_allclose(self.model(3 * u.PHz, 3),
                                3 * (3 * u.PHz).to(u.micron, equivalencies=u.spectral()))
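The same mechanism works on stock astropy models; a sketch (assuming a recent astropy.modeling with quantity-parameter support) that attaches spectral() to a Gaussian defined in wavelength so it can be evaluated at a frequency:

import astropy.units as u
from astropy.modeling.models import Gaussian1D

g = Gaussian1D(amplitude=1.0, mean=0.5 * u.micron, stddev=0.1 * u.micron)
g.input_units_equivalencies = {'x': u.spectral()}
y = g(600 * u.THz)   # 600 THz is close to 0.5 micron, so y is near the peak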
Example #27
    def _get_range_from_textfields(self, min_text, max_text, linelist_units, plot_units):
        amin = amax = None
        if min_text.hasAcceptableInput() and max_text.hasAcceptableInput():

            amin = float(min_text.text())
            amax = float(max_text.text())

            amin = Quantity(amin, plot_units)
            amax = Quantity(amax, plot_units)

            amin = amin.to(linelist_units, equivalencies=u.spectral())
            amax = amax.to(linelist_units, equivalencies=u.spectral())

        return (amin, amax)
Example #28
    def convert_ha(self):

        """
        This function ...
        :return:
        """

        # What I thought the unit conversion should be:

        # Conversion from erg / [s * cm2] (per pixel2) to erg / [s * cm2 * micron] (per pixel2) --> divide by eff. bandwidth
        #self.conversion_factor *= 1.0 / self.image.filter.effective_bandwidth()

        # Conversion from erg / [s * cm2 * micron] (per pixel2) to erg / [s * cm2 * Hz] (per pixel2)
        #self.conversion_factor *= self.spectral_factor_micron_to_hz(self.image.wavelength)

        # What Ilse says it should be:

        # Get the frequency of Ha
        frequency = self.image.wavelength.to("Hz", equivalencies=spectral())

        # Conversion from erg / [s * cm2] (per pixel2) to erg / [s * cm2 * Hz] (per pixel2)
        self.conversion_factor *= 1.0 / frequency.value

        # Conversion from erg / [s * cm2 * Hz] per pixel2 to the target unit (MJy / sr)
        self.convert_from_ergscmhz()

        # Correct for contribution of NII (Bendo et al. 2015) NII/Halpha = 0.55 for M81 => divide by a factor of 1.55
        self.conversion_factor *= 1.0 / 1.55
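The division by the H-alpha frequency above turns an integrated line flux into a flux density; isolated, with a hypothetical flux value:

import astropy.units as u

# frequency of H-alpha via the spectral() equivalency
frequency = (656.28 * u.nm).to(u.Hz, equivalencies=u.spectral())

flux = 1e-13 * u.erg / u.s / u.cm**2          # hypothetical integrated flux
flux_density = flux / frequency               # erg / s / cm^2 / Hz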
Example #29
    def test_with_spectral_unit(self, name, masktype):
        cube, data = cube_and_raw(name + '.fits')
        cube_freq = cube.with_spectral_unit(u.Hz)

        if masktype == BooleanArrayMask:
            mask = BooleanArrayMask(data>0, wcs=cube._wcs)
        elif masktype == LazyMask:
            mask = LazyMask(lambda x: x>0, cube=cube)
        elif masktype == FunctionMask:
            mask = FunctionMask(lambda x: x>0)
        elif masktype == CompositeMask:
            mask1 = FunctionMask(lambda x: x>0)
            mask2 = LazyMask(lambda x: x>0, cube)
            mask = CompositeMask(mask1, mask2)

        cube2 = cube.with_mask(mask)
        cube_masked_freq = cube2.with_spectral_unit(u.Hz)

        assert cube_freq._wcs.wcs.ctype[cube_freq._wcs.wcs.spec] == 'FREQ-W2F'
        assert cube_masked_freq._wcs.wcs.ctype[cube_masked_freq._wcs.wcs.spec] == 'FREQ-W2F'
        assert cube_masked_freq._mask._wcs.wcs.ctype[cube_masked_freq._mask._wcs.wcs.spec] == 'FREQ-W2F'

        # values taken from header
        rest = 1.42040571841E+09*u.Hz
        crval = -3.21214698632E+05*u.m/u.s
        outcv = crval.to(u.m, u.doppler_optical(rest)).to(u.Hz, u.spectral())

        assert_allclose(cube_freq._wcs.wcs.crval[cube_freq._wcs.wcs.spec],
                        outcv.to(u.Hz).value)
        assert_allclose(cube_masked_freq._wcs.wcs.crval[cube_masked_freq._wcs.wcs.spec],
                        outcv.to(u.Hz).value)
        assert_allclose(cube_masked_freq._mask._wcs.wcs.crval[cube_masked_freq._mask._wcs.wcs.spec],
                        outcv.to(u.Hz).value)
Example #30
    def calculate_spectrum(self):

        if self.tardis_config.sn_distance is None:
            logger.info('Distance to supernova not selected, assuming 10 pc for calculation of spectra')
            distance = units.Quantity(10, 'pc').to('cm').value
        else:
            distance = self.tardis_config.sn_distance
        self.spec_flux_nu = np.histogram(self.montecarlo_nu[self.montecarlo_nu > 0],
                                         weights=self.montecarlo_energies[self.montecarlo_energies > 0],
                                         bins=self.spec_nu_bins)[0]

        flux_scale = (self.time_of_simulation * (self.spec_nu[1] - self.spec_nu[0]) * (4 * np.pi * distance ** 2))

        self.spec_flux_nu /= flux_scale

        self.spec_virtual_flux_nu /= flux_scale

        self.spec_reabsorbed_nu = \
            np.histogram(self.montecarlo_nu[self.montecarlo_nu < 0],
                         weights=self.montecarlo_energies[self.montecarlo_nu < 0], bins=self.spec_nu_bins)[0]
        self.spec_reabsorbed_nu /= flux_scale

        self.spec_angstrom = units.Unit('Hz').to('angstrom', self.spec_nu, units.spectral())

        self.spec_flux_angstrom = (self.spec_flux_nu * self.spec_nu ** 2 / constants.c.cgs.value / 1e8)
        self.spec_reabsorbed_angstrom = (self.spec_reabsorbed_nu * self.spec_nu ** 2 / constants.c.cgs.value / 1e8)
        self.spec_virtual_flux_angstrom = (self.spec_virtual_flux_nu * self.spec_nu ** 2 / constants.c.cgs.value / 1e8)
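The nu**2 / c factor in calculate_spectrum is the Jacobian mapping F_nu to F_lambda; with Quantities the unit bookkeeping (including the 1e8 Hz-to-per-Angstrom factor) is handled automatically. A sketch with hypothetical numbers:

import astropy.units as u
from astropy import constants as const

# F_lambda = F_nu * nu**2 / c
nu = 6e14 * u.Hz
f_nu = 1e-26 * u.erg / u.s / u.cm**2 / u.Hz   # hypothetical flux density
f_lam = (f_nu * nu**2 / const.c).to(u.erg / u.s / u.cm**2 / u.AA)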
Example #31
    def __getitem__(self, item):
        """
        Override the class indexer. We do this here because there are two cases
        for slicing on a ``Spectrum1D``:

            1.) When the flux is one dimensional, indexing represents a single
                flux value at a particular spectral axis bin, and returns a new
                ``Spectrum1D`` where all attributes are sliced.
            2.) When flux is multi-dimensional (i.e. several fluxes over the
                same spectral axis), indexing returns a new ``Spectrum1D`` with
                the sliced flux range and a deep copy of all other attributes.

        The first case is handled by the parent class, while the second is
        handled here.
        """

        if self.flux.ndim > 1 or (type(item) == tuple and item[0] == Ellipsis):
            if type(item) == tuple:
                if len(item) == len(self.flux.shape) or item[0] == Ellipsis:
                    spec_item = item[-1]
                    if not isinstance(spec_item, slice):
                        if isinstance(item, u.Quantity):
                            raise ValueError(
                                "Indexing on single spectral axis "
                                "values is not currently allowed, "
                                "please use a slice.")
                        spec_item = slice(spec_item, spec_item + 1, None)
                        item = item[:-1] + (spec_item, )
                else:
                    # Slicing on less than the full number of axes means we want
                    # to keep the whole spectral axis
                    spec_item = slice(None, None, None)
            elif isinstance(item,
                            slice) and (isinstance(item.start, u.Quantity)
                                        or isinstance(item.stop, u.Quantity)):
                # We only allow slicing with world coordinates along the spectral
                # axis for now
                for attr in ("start", "stop"):
                    if getattr(item, attr) is None:
                        continue
                    if not getattr(item, attr).unit.is_equivalent(
                            u.AA, equivalencies=u.spectral()):
                        raise ValueError(
                            "Slicing with world coordinates is only"
                            " enabled for spectral coordinates.")
                spec_item = item
            else:
                # Slicing with a single integer or slice uses the leading axis,
                # so we keep the whole spectral axis, which is last
                spec_item = slice(None, None, None)

            if (isinstance(spec_item.start, u.Quantity)
                    or isinstance(spec_item.stop, u.Quantity)):
                temp_spec = self._spectral_slice(spec_item)
                if spec_item is item:
                    return temp_spec
                else:
                    # Drop the spectral axis slice and perform only the spatial part
                    return temp_spec[item[:-1]]

            if "original_wcs" not in self.meta:
                new_meta = deepcopy(self.meta)
                new_meta["original_wcs"] = deepcopy(self.wcs)
            else:
                new_meta = deepcopy(self.meta)

            return self._copy(
                flux=self.flux[item],
                spectral_axis=self.spectral_axis[spec_item],
                uncertainty=self.uncertainty[item]
                if self.uncertainty is not None else None,
                mask=self.mask[item] if self.mask is not None else None,
                meta=new_meta,
                wcs=None)

        if not isinstance(item, slice):
            if isinstance(item, u.Quantity):
                raise ValueError(
                    "Indexing on a single spectral axis value is not"
                    " currently allowed, please use a slice.")
            # Handle tuple slice as input by NDCube crop method
            elif isinstance(item, tuple):
                if len(item) == 1 and isinstance(item[0], slice):
                    item = item[0]
                else:
                    raise ValueError(f"Unclear how to slice with tuple {item}")
            else:
                item = slice(item, item + 1, None)
        elif (isinstance(item.start, u.Quantity)
              or isinstance(item.stop, u.Quantity)):
            return self._spectral_slice(item)

        tmp_spec = super().__getitem__(item)

        # TODO: this is a workaround until we figure out how to deal with non-
        #  strictly ascending spectral axes. Currently, the wcs is created from
        #  a spectral axis array by converting to a length physical type. On
        #  a regular slicing operation, the wcs is handed back to the
        #  initializer and a new spectral axis is created. This would then also
        #  be in length units, which may not be the units used initially. So,
        #  we create a new ``Spectrum1D`` that includes the sliced spectral
        #  axis. This means that a new wcs object will be created with the
        #  appropriate unit translation handling.
        if "original_wcs" not in self.meta:
            new_meta = deepcopy(self.meta)
            new_meta["original_wcs"] = deepcopy(self.wcs)
        else:
            new_meta = deepcopy(self.meta)

        return tmp_spec._copy(spectral_axis=self.spectral_axis[item],
                              wcs=None,
                              meta=new_meta)
Example #32
    def __init__(self,
                 flux=None,
                 spectral_axis=None,
                 wcs=None,
                 velocity_convention=None,
                 rest_value=None,
                 redshift=None,
                 radial_velocity=None,
                 bin_specification=None,
                 **kwargs):
        # Check for pre-defined entries in the kwargs dictionary.
        unknown_kwargs = set(kwargs).difference({
            'data', 'unit', 'uncertainty', 'meta', 'mask', 'copy',
            'extra_coords'
        })

        if len(unknown_kwargs) > 0:
            raise ValueError("Initializer contains unknown arguments(s): {}."
                             "".format(', '.join(map(str, unknown_kwargs))))

        # If the flux (data) argument is already a Spectrum1D (as it would
        # be for internal arithmetic operations), avoid setup entirely.
        if isinstance(flux, Spectrum1D):
            super().__init__(flux)
            return

        # Handle initializing from NDCube objects
        elif isinstance(flux, NDCube):
            if flux.unit is None:
                raise ValueError("Input NDCube missing unit parameter")

            # Change the flux array from bare ndarray to a Quantity
            q_flux = flux.data << u.Unit(flux.unit)

            self.__init__(flux=q_flux,
                          wcs=flux.wcs,
                          mask=flux.mask,
                          uncertainty=flux.uncertainty)
            return

        # If the mask kwarg is not passed to the constructor, but the flux array
        # contains NaNs, add the NaN locations to the mask.
        if "mask" not in kwargs and flux is not None:
            nan_mask = np.isnan(flux)
            if nan_mask.any():
                if hasattr(self, "mask"):
                    kwargs["mask"] = np.logical_or(nan_mask, self.mask)
                else:
                    kwargs["mask"] = nan_mask.copy()
            del nan_mask

        # Ensure that the flux argument is an astropy quantity
        if flux is not None:
            if not isinstance(flux, u.Quantity):
                raise ValueError("Flux must be a `Quantity` object.")
            elif flux.isscalar:
                flux = u.Quantity([flux])

        # Ensure that only one or neither of these parameters is set
        if redshift is not None and radial_velocity is not None:
            raise ValueError("Cannot set both radial_velocity and redshift at "
                             "the same time.")

        # In cases of slicing, new objects will be initialized with `data`
        # instead of ``flux``. Ensure we grab the `data` argument.
        if flux is None and 'data' in kwargs:
            flux = kwargs.pop('data')

        # Ensure that the unit information codified in the quantity object is
        # the One True Unit.
        kwargs.setdefault(
            'unit',
            flux.unit if isinstance(flux, u.Quantity) else kwargs.get('unit'))

        # In the case where the arithmetic operation is being performed with
        # a single float, int, or array object, just go ahead and ignore wcs
        # requirements
        if (not isinstance(flux, u.Quantity) or isinstance(flux, float)
                or isinstance(flux, int)) and np.ndim(flux) == 0:

            super(Spectrum1D, self).__init__(data=flux, wcs=wcs, **kwargs)
            return

        if rest_value is None:
            if hasattr(wcs, 'rest_frequency') and wcs.rest_frequency != 0:
                rest_value = wcs.rest_frequency * u.Hz
            elif hasattr(wcs, 'rest_wavelength') and wcs.rest_wavelength != 0:
                rest_value = wcs.rest_wavelength * u.AA
            elif hasattr(wcs, 'wcs') and hasattr(
                    wcs.wcs, 'restfrq') and wcs.wcs.restfrq > 0:
                rest_value = wcs.wcs.restfrq * u.Hz
            elif hasattr(wcs, 'wcs') and hasattr(
                    wcs.wcs, 'restwav') and wcs.wcs.restwav > 0:
                rest_value = wcs.wcs.restwav * u.m
            else:
                rest_value = None
        else:
            if not isinstance(rest_value, u.Quantity):
                warnings.warn(
                    "No unit information provided with rest value. "
                    f"Assuming units of spectral axis ('{spectral_axis.unit}')."
                )
                rest_value = u.Quantity(rest_value, spectral_axis.unit)
            elif not rest_value.unit.is_equivalent(u.AA,
                                                   equivalencies=u.spectral()):
                raise u.UnitsError("Rest value must be "
                                   "energy/wavelength/frequency equivalent.")

        # If flux and spectral axis are both specified, check that their lengths
        # match or are off by one (implying the spectral axis stores bin edges)
        if flux is not None and spectral_axis is not None:
            if spectral_axis.shape[0] == flux.shape[-1]:
                if bin_specification == "edges":
                    raise ValueError(
                        "A spectral axis input as bin edges"
                        "must have length one greater than the flux axis")
                bin_specification = "centers"
            elif spectral_axis.shape[0] == flux.shape[-1] + 1:
                if bin_specification == "centers":
                    raise ValueError(
                        "A spectral axis input as bin centers"
                        "must be the same length as the flux axis")
                bin_specification = "edges"
            else:
                raise ValueError(
                    "Spectral axis length ({}) must be the same size or one "
                    "greater (if specifying bin edges) than that of the last "
                    "flux axis ({})".format(spectral_axis.shape[0],
                                            flux.shape[-1]))

        # If a WCS is provided, check that the spectral axis is last and reorder
        # the arrays if not
        if wcs is not None and hasattr(wcs, "naxis"):
            if wcs.naxis > 1:
                temp_axes = []
                phys_axes = wcs.world_axis_physical_types
                for i in range(len(phys_axes)):
                    if phys_axes[i] is None:
                        continue
                    if phys_axes[i][0:2] == "em" or phys_axes[i][
                            0:5] == "spect":
                        temp_axes.append(i)
                if len(temp_axes) != 1:
                    raise ValueError(
                        "Input WCS must have exactly one axis with "
                        "spectral units, found {}".format(len(temp_axes)))

                # Due to FITS conventions, a WCS with spectral axis first corresponds
                # to a flux array with spectral axis last.
                if temp_axes[0] != 0:
                    warnings.warn(
                        "Input WCS indicates that the spectral axis is not"
                        " last. Reshaping arrays to put spectral axis last.")
                    wcs = wcs.swapaxes(0, temp_axes[0])
                    if flux is not None:
                        flux = np.swapaxes(flux,
                                           len(flux.shape) - temp_axes[0] - 1,
                                           -1)
                    if "mask" in kwargs:
                        if kwargs["mask"] is not None:
                            kwargs["mask"] = np.swapaxes(
                                kwargs["mask"],
                                len(kwargs["mask"].shape) - temp_axes[0] - 1,
                                -1)
                    if "uncertainty" in kwargs:
                        if kwargs["uncertainty"] is not None:
                            if isinstance(kwargs["uncertainty"],
                                          NDUncertainty):
                                # Account for Astropy uncertainty types
                                unc_len = len(
                                    kwargs["uncertainty"].array.shape)
                                temp_unc = np.swapaxes(
                                    kwargs["uncertainty"].array,
                                    unc_len - temp_axes[0] - 1, -1)
                                if kwargs["uncertainty"].unit is not None:
                                    temp_unc = temp_unc * u.Unit(
                                        kwargs["uncertainty"].unit)
                                kwargs["uncertainty"] = type(
                                    kwargs["uncertainty"])(temp_unc)
                            else:
                                kwargs["uncertainty"] = np.swapaxes(
                                    kwargs["uncertainty"],
                                    len(kwargs["uncertainty"].shape) -
                                    temp_axes[0] - 1, -1)

        # Attempt to parse the spectral axis. If none is given, try instead to
        # parse a given wcs. This is put into a GWCS object to
        # then be used behind-the-scenes for all specutils operations.
        if spectral_axis is not None:
            # Ensure that the spectral axis is an astropy Quantity
            if not isinstance(spectral_axis, u.Quantity):
                raise ValueError("Spectral axis must be a `Quantity` or "
                                 "`SpectralAxis` object.")

            # If spectral axis is provided as an astropy Quantity, convert it
            # to a specutils SpectralAxis object.
            if not isinstance(spectral_axis, SpectralAxis):
                if spectral_axis.shape[0] == flux.shape[-1] + 1:
                    bin_specification = "edges"
                else:
                    bin_specification = "centers"
                self._spectral_axis = SpectralAxis(
                    spectral_axis,
                    redshift=redshift,
                    radial_velocity=radial_velocity,
                    doppler_rest=rest_value,
                    doppler_convention=velocity_convention,
                    bin_specification=bin_specification)
            # If a SpectralAxis object is provided, we assume it doesn't need
            # information from other keywords added
            else:
                for a in [radial_velocity, redshift]:
                    if a is not None:
                        raise ValueError("Cannot separately set redshift or "
                                         "radial_velocity if a SpectralAxis "
                                         "object is input to spectral_axis")

                self._spectral_axis = spectral_axis

            if wcs is None:
                wcs = gwcs_from_array(self._spectral_axis)

        elif wcs is None:
            # If no spectral axis or wcs information is provided, initialize
            # with an empty gwcs based on the flux.
            size = flux.shape[-1] if not flux.isscalar else 1
            wcs = gwcs_from_array(np.arange(size) * u.Unit(""))

        super().__init__(
            data=flux.value if isinstance(flux, u.Quantity) else flux,
            wcs=wcs,
            **kwargs)

        # If no spectral_axis was provided, create a SpectralCoord based on
        # the WCS
        if spectral_axis is None:
            # If the WCS doesn't have a spectral attribute, we assume it's the
            # dummy GWCS we created or a solely spectral WCS
            if hasattr(self.wcs, "spectral"):
                # Handle generated 1D WCS that aren't set to spectral
                if not self.wcs.is_spectral and self.wcs.naxis == 1:
                    spec_axis = self.wcs.pixel_to_world(
                        np.arange(self.flux.shape[-1]))
                else:
                    spec_axis = self.wcs.spectral.pixel_to_world(
                        np.arange(self.flux.shape[-1]))
            else:
                spec_axis = self.wcs.pixel_to_world(
                    np.arange(self.flux.shape[-1]))

            try:
                if spec_axis.unit.is_equivalent(u.one):
                    spec_axis = spec_axis * u.pixel
            except AttributeError:
                raise AttributeError(f"spec_axis does not have unit: "
                                     f"{type(spec_axis)} {spec_axis}")

            self._spectral_axis = SpectralAxis(
                spec_axis,
                redshift=redshift,
                radial_velocity=radial_velocity,
                doppler_rest=rest_value,
                doppler_convention=velocity_convention)

        if hasattr(self, 'uncertainty') and self.uncertainty is not None:
            if not flux.shape == self.uncertainty.array.shape:
                raise ValueError(
                    "Flux axis ({}) and uncertainty ({}) shapes must be the "
                    "same.".format(flux.shape, self.uncertainty.array.shape))
Example #33
    def wavelength(self):
        """
        The `spectral_axis` as a `~astropy.units.Quantity` in units of Angstroms
        """
        return self.spectral_axis.to(u.AA, u.spectral())
Example #34
    def from_config(cls,
                    config,
                    packet_source=None,
                    virtual_packet_logging=False,
                    **kwargs):
        """
        Create a new Simulation instance from a Configuration object.

        Parameters
        ----------
        config : tardis.io.config_reader.Configuration

        **kwargs
            Allow overriding some structures, such as model, plasma, atomic data
            and the runner, instead of creating them from the configuration
            object.

        Returns
        -------
        Simulation
        """
        # Allow overriding some config structures. This is useful in some
        # unit tests, and could be extended in all the from_config classmethods.
        if "model" in kwargs:
            model = kwargs["model"]
        else:
            if hasattr(config, "csvy_model"):
                model = Radial1DModel.from_csvy(config)
            else:
                model = Radial1DModel.from_config(config)
        if "plasma" in kwargs:
            plasma = kwargs["plasma"]
        else:
            plasma = assemble_plasma(config,
                                     model,
                                     atom_data=kwargs.get("atom_data", None))
        if "runner" in kwargs:
            if packet_source is not None:
                raise ConfigurationError(
                    "Cannot specify packet_source and runner at the same time."
                )
            runner = kwargs["runner"]
        else:
            runner = MontecarloRunner.from_config(
                config,
                packet_source=packet_source,
                virtual_packet_logging=virtual_packet_logging,
            )

        luminosity_nu_start = config.supernova.luminosity_wavelength_end.to(
            u.Hz, u.spectral())

        if u.isclose(config.supernova.luminosity_wavelength_start,
                     0 * u.angstrom):
            luminosity_nu_end = np.inf * u.Hz
        else:
            luminosity_nu_end = (
                const.c / config.supernova.luminosity_wavelength_start).to(
                    u.Hz)

        last_no_of_packets = config.montecarlo.last_no_of_packets
        if last_no_of_packets is None or last_no_of_packets < 0:
            last_no_of_packets = config.montecarlo.no_of_packets
        last_no_of_packets = int(last_no_of_packets)

        return cls(
            iterations=config.montecarlo.iterations,
            model=model,
            plasma=plasma,
            runner=runner,
            no_of_packets=int(config.montecarlo.no_of_packets),
            no_of_virtual_packets=int(config.montecarlo.no_of_virtual_packets),
            luminosity_nu_start=luminosity_nu_start,
            luminosity_nu_end=luminosity_nu_end,
            last_no_of_packets=last_no_of_packets,
            luminosity_requested=config.supernova.luminosity_requested.cgs,
            convergence_strategy=config.montecarlo.convergence_strategy,
            nthreads=config.montecarlo.nthreads,
        )
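
A standalone sketch (plain astropy, not TARDIS itself) of the bound handling above: wavelength limits map to frequency limits in reverse order, which is why luminosity_wavelength_end becomes luminosity_nu_start, and a zero start wavelength corresponds to an infinite end frequency.

import numpy as np
import astropy.units as u

wl_start, wl_end = 3000 * u.angstrom, 9000 * u.angstrom
nu_start = wl_end.to(u.Hz, u.spectral())
nu_end = (np.inf * u.Hz if u.isclose(wl_start, 0 * u.angstrom)
          else wl_start.to(u.Hz, u.spectral()))
assert nu_start < nu_end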
Beispiel #35
0

def test_validate_unit_exceptions():
    """Test that unit validation raises appropriate exceptions."""
    with pytest.raises(exceptions.SynphotError):
        x = units.validate_unit(10)
    with pytest.raises(ValueError):
        x = units.validate_unit('foo')
    with pytest.raises(exceptions.SynphotError):
        x = units.validate_wave_unit('Kelvin')


@pytest.mark.parametrize(
    ('in_val', 'out_u', 'eqv', 'ans'),
    [(100.0, units.AREA, [], 100.0), (100.0 * units.AREA, u.m * u.m, [], 0.01),
     (_wave_angstrom, u.micron**-1, u.spectral(), _wavenum_micron.value)])
def test_validate_quantity(in_val, out_u, eqv, ans):
    """Test quantity validation."""
    result = units.validate_quantity(in_val, out_u, equivalencies=eqv)
    np.testing.assert_allclose(result.value, ans)
    assert result.unit == out_u


@pytest.mark.parametrize(('in_q', 'out_u', 'ans'),
                         [(_wave_angstrom, u.Hz, _freq),
                          (_freq, u.AA, _wave_angstrom),
                          (_wave_angstrom, u.micron**-1, _wavenum_micron),
                          (_wavenum_micron, u.AA, _wave_angstrom),
                          (_freq, u.micron**-1, _wavenum_micron),
                          (_wavenum_micron, u.Hz, _freq)])
def test_wave_conversion(in_q, out_u, ans):
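    # NOTE: the body of this example is truncated in the listing. A minimal
    # sketch of the check (an assumption, not the original test code) would
    # convert with the spectral equivalencies and compare to the expectation:
    result = in_q.to(out_u, equivalencies=u.spectral())
    np.testing.assert_allclose(result.value, ans.value)
    assert result.unit == ans.unit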
Beispiel #36
0
                if ppcat['Fpeak6cm_Becker'].unit is None:
                    ppcat['Fpeak6cm_Becker'].unit = u.mJy
                if ppcat['Fpeak6cm_MAGPIS'].unit is None:
                    ppcat['Fpeak6cm_MAGPIS'].unit = u.Jy
                for key in (70, 160, 250, 350, 500):
                    if ppcat[f'Fpeak{key}um'].unit is None:
                        ppcat[f'Fpeak{key}um'].unit = u.Jy / u.sr

                ppcat['x_cen'].unit = u.deg
                ppcat['y_cen'].unit = u.deg

                ppcat['3mm20cmindex_THOR'] = np.log(
                    ppcat['MUSTANG_10as_peak'] /
                    (ppcat['Fpeak20cm_THOR'])) / np.log(
                        constants.mustang_central_frequency /
                        (20 * u.cm).to(u.GHz, u.spectral()))
                ppcat['3mm20cmindex'] = np.log(
                    ppcat['MUSTANG_10as_peak'] /
                    (ppcat['Fpeak20cm'])) / np.log(
                        constants.mustang_central_frequency /
                        (20 * u.cm).to(u.GHz, u.spectral()))
                ppcat['3mm1mmindex'] = np.log(
                    ppcat['MUSTANG_10as_peak'] /
                    (ppcat['Fint1100um_40as'] * 1.46)) / np.log(
                        constants.mustang_central_frequency / (271.1 * u.GHz))
                ppcat['3mm6cmindex_MAGPIS'] = np.log(
                    ppcat['MUSTANG_10as_peak'] /
                    (ppcat['Fpeak6cm_MAGPIS'].quantity.to(u.Jy).value)
                ) / np.log(constants.mustang_central_frequency / (5 * u.GHz))
                ppcat['3mm6cmindex_CORNISH'] = np.log(
                    ppcat['MUSTANG_10as_peak'] /
Beispiel #37
0
def main():

    # setup and parse the command line
    parser = initialize_parser()
    args = parser.parse_args()

    # read in the observed spectrum
    # assumed to be astropy Table compatible and to include units
    specfile = args.spectrumfile
    outputname = specfile.split(".")[0]
    if not os.path.isfile(specfile):
        pack_path = pkg_resources.resource_filename("pahfit", "data/")
        test_specfile = "{}/{}".format(pack_path, specfile)
        if os.path.isfile(test_specfile):
            specfile = test_specfile
        else:
            raise ValueError(
                "Input spectrumfile {} not found".format(specfile))

    # get the table format (from extension of filename)
    tformat = specfile.split(".")[-1]
    if tformat == "ecsv":
        tformat = "ascii.ecsv"
    obs_spectrum = Table.read(specfile, format=tformat)
    obs_x = obs_spectrum["wavelength"].to(u.micron, equivalencies=u.spectral())
    obs_y = obs_spectrum["flux"].to(u.Jy,
                                    equivalencies=u.spectral_density(obs_x))
    obs_unc = obs_spectrum["sigma"].to(u.Jy,
                                       equivalencies=u.spectral_density(obs_x))

    # strip units as the observed spectrum is in the internal units
    obs_x = obs_x.value
    obs_y = obs_y.value
    weights = 1.0 / obs_unc.value

    # read in the pack file
    packfile = args.packfile
    if not os.path.isfile(packfile):
        pack_path = pkg_resources.resource_filename("pahfit", "packs/")
        test_packfile = "{}/{}".format(pack_path, packfile)
        if os.path.isfile(test_packfile):
            packfile = test_packfile
        else:
            raise ValueError("Input packfile {} not found".format(packfile))

    pmodel = PAHFITBase(obs_x,
                        obs_y,
                        estimate_start=args.estimate_start,
                        filename=packfile)

    # pick the fitter
    fit = LevMarLSQFitter()

    # fit
    obs_fit = fit(
        pmodel.model,
        obs_x,
        obs_y,
        weights=weights,
        maxiter=200,
        epsilon=1e-10,
        acc=1e-10,
    )
    print(fit.fit_info["message"])

    # save results to fits file
    pmodel.save(obs_fit, outputname, args.saveoutput)

    # plot result
    fontsize = 18
    font = {"size": fontsize}
    mpl.rc("font", **font)
    mpl.rc("lines", linewidth=2)
    mpl.rc("axes", linewidth=2)
    mpl.rc("xtick.major", size=5, width=1)
    mpl.rc("ytick.major", size=5, width=1)
    mpl.rc("xtick.minor", size=3, width=1)
    mpl.rc("ytick.minor", size=3, width=1)

    fig, axs = plt.subplots(ncols=1,
                            nrows=2,
                            figsize=(15, 10),
                            gridspec_kw={'height_ratios': [3, 1]},
                            sharex=True)

    pmodel.plot(axs,
                obs_x,
                obs_y,
                obs_unc.value,
                obs_fit,
                scalefac_resid=args.scalefac_resid)

    # use the whitespace better
    fig.subplots_adjust(hspace=0)

    # show
    if args.showplot:
        plt.show()
    # save (always)
    fig.savefig("{}.{}".format(outputname, args.savefig))
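
A hedged, standalone sketch of the unit handling at the top of main(): a spectral flux density only converts to Jy with the spectral_density equivalency, which needs the matching wavelength grid (values here are illustrative).

import numpy as np
import astropy.units as u

wav = np.array([5.0, 10.0, 20.0]) * u.micron
flux = np.array([1.0, 2.0, 4.0]) * u.W / u.m**2 / u.micron
flux_jy = flux.to(u.Jy, equivalencies=u.spectral_density(wav))
unc_jy = (0.1 * flux).to(u.Jy, equivalencies=u.spectral_density(wav))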
Beispiel #38
0
    def evaluate(self, x, temperature, scale):
        """Evaluate the model.

        Parameters
        ----------
        x : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
            Frequency at which to compute the blackbody. If no units are given,
            this defaults to Hz.

        temperature : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
            Temperature of the blackbody. If no units are given, this defaults
            to Kelvin.

        scale : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
            Desired scale for the blackbody.

        Returns
        -------
        y : number or ndarray
            Blackbody spectrum. The units are determined from the units of
            ``scale``.

        .. note::

            Use `numpy.errstate` to suppress Numpy warnings, if desired.

        .. warning::

            Output values might contain ``nan`` and ``inf``.

        Raises
        ------
        ValueError
            Invalid temperature.

        ZeroDivisionError
            Wavelength is zero (when converting to frequency).
        """
        if not isinstance(temperature, u.Quantity):
            in_temp = u.Quantity(temperature, u.K)
        else:
            in_temp = temperature

        # Convert to units for calculations, also force double precision
        with u.add_enabled_equivalencies(u.spectral() + u.temperature()):
            freq = u.Quantity(x, u.Hz, dtype=np.float64)
            temp = u.Quantity(in_temp, u.K)

        # check the units of scale and setup the output units
        bb_unit = u.erg / (u.cm ** 2 * u.s * u.Hz * u.sr)  # default unit
        # use the scale unit set at initialization to determine the units to
        # return, so the right units come back during fitting, when the units
        # are stripped
        if hasattr(self.scale, "unit") and self.scale.unit is not None:
            # check that the units of scale are convertible to surface brightness units
            if not self.scale.unit.is_equivalent(bb_unit, u.spectral_density(x)):
                raise ValueError(
                    f"scale units not surface brightness: {self.scale.unit}"
                )
            # use the scale passed to get the value for scaling
            if hasattr(scale, "unit"):
                mult_scale = scale.value
            else:
                mult_scale = scale
            bb_unit = self.scale.unit
        else:
            mult_scale = scale

        # Check if input values are physically possible
        if np.any(temp < 0):
            raise ValueError(f"Temperature should be positive: {temp}")
        if not np.all(np.isfinite(freq)) or np.any(freq <= 0):
            warnings.warn(
                "Input contains invalid wavelength/frequency value(s)",
                AstropyUserWarning,
            )

        log_boltz = const.h * freq / (const.k_B * temp)
        boltzm1 = np.expm1(log_boltz)

        # Calculate blackbody flux
        bb_nu = 2.0 * const.h * freq ** 3 / (const.c ** 2 * boltzm1) / u.sr

        y = mult_scale * bb_nu.to(bb_unit, u.spectral_density(freq))

        # If the temperature parameter has no unit, we should return a unitless
        # value. This occurs for instance during fitting, since we drop the
        # units temporarily.
        if hasattr(temperature, "unit"):
            return y
        else:
            return y.value
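
A standalone sketch of the final conversion step above, shown here for a plain flux density (the model does the analogous conversion for the per-steradian quantity): with spectral_density evaluated at the same frequency, a per-Hz value can be re-expressed per wavelength.

import astropy.units as u

freq = 5e14 * u.Hz
f_nu = 1e-5 * u.erg / (u.cm**2 * u.s * u.Hz)
f_lam = f_nu.to(u.erg / (u.cm**2 * u.s * u.AA),
                equivalencies=u.spectral_density(freq))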
Beispiel #39
0
    def _new_spectral_wcs(self, unit, velocity_convention=None,
                          rest_value=None):
        """
        Returns a new WCS with a different Spectral Axis unit

        Parameters
        ----------
        unit : :class:`~astropy.units.Unit`
            Any valid spectral unit: velocity, (wave)length, or frequency.
            Only vacuum units are supported.
        velocity_convention : 'relativistic', 'radio', or 'optical'
            The velocity convention to use for the output velocity axis.
            Required if the output type is velocity. This can be either one
            of the above strings, or an `astropy.units` equivalency.
        rest_value : :class:`~astropy.units.Quantity`
            A rest wavelength or frequency with appropriate units.  Required if
            output type is velocity.  The cube's WCS should include this
            already if the *input* type is velocity, but the WCS's rest
            wavelength/frequency can be overridden with this parameter.

            .. note: This must be the rest frequency/wavelength *in vacuum*,
                     even if your cube has air wavelength units

        """
        from .spectral_axis import (convert_spectral_axis,
                                    determine_ctype_from_vconv)

        # Allow string specification of units, for example
        if not isinstance(unit, u.Unit):
            unit = u.Unit(unit)

        # Velocity conventions: required for frq <-> velo
        # convert_spectral_axis will handle the case of no velocity
        # convention specified & one is required
        if velocity_convention in DOPPLER_CONVENTIONS:
            velocity_convention = DOPPLER_CONVENTIONS[velocity_convention]
        elif (velocity_convention is not None and
              velocity_convention not in DOPPLER_CONVENTIONS.values()):
            raise ValueError("Velocity convention must be radio, optical, "
                             "or relativistic.")

        # If rest value is specified, it must be a quantity
        if (rest_value is not None and
            (not hasattr(rest_value, 'unit') or
             not rest_value.unit.is_equivalent(u.m, u.spectral()))):
            raise ValueError("Rest value must be specified as an astropy "
                             "quantity with spectral equivalence.")

        # Shorter versions to keep lines under 80
        ctype_from_vconv = determine_ctype_from_vconv

        meta = self._meta.copy()
        if 'Original Unit' not in self._meta:
            meta['Original Unit'] = self._wcs.wcs.cunit[self._wcs.wcs.spec]
            meta['Original Type'] = self._wcs.wcs.ctype[self._wcs.wcs.spec]

        out_ctype = ctype_from_vconv(self._wcs.wcs.ctype[self._wcs.wcs.spec],
                                     unit,
                                     velocity_convention=velocity_convention)

        newwcs = convert_spectral_axis(self._wcs, unit, out_ctype,
                                       rest_value=rest_value)

        newwcs.wcs.set()
        return newwcs, meta
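
A standalone sketch (plain astropy equivalencies, not the spectral_cube conversion itself) of why both a convention and a rest value are needed: going from frequency to velocity is only defined relative to a rest frequency, and the answer depends on the chosen convention.

import astropy.units as u

restfreq = 230.538 * u.GHz          # e.g. CO(2-1); illustrative value
freq = 230.0 * u.GHz
v_radio = freq.to(u.km / u.s, equivalencies=u.doppler_radio(restfreq))
v_optical = freq.to(u.km / u.s, equivalencies=u.doppler_optical(restfreq))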
Beispiel #40
0
class BlackBody(Fittable1DModel):
    """
    Blackbody model using the Planck function.

    Parameters
    ----------
    temperature : :class:`~astropy.units.Quantity`
        Blackbody temperature.

    scale : float or :class:`~astropy.units.Quantity`
        Scale factor

    Notes
    -----

    Model formula:

        .. math:: B_{\\nu}(T) = A \\frac{2 h \\nu^{3} / c^{2}}{exp(h \\nu / k T) - 1}

    Examples
    --------
    >>> from astropy.modeling import models
    >>> from astropy import units as u
    >>> bb = models.BlackBody(temperature=5000*u.K)
    >>> bb(6000 * u.AA)  # doctest: +FLOAT_CMP
    <Quantity 1.53254685e-05 erg / (cm2 Hz s sr)>

    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt

        from astropy.modeling.models import BlackBody
        from astropy import units as u
        from astropy.visualization import quantity_support

        bb = BlackBody(temperature=5778*u.K)
        wav = np.arange(1000, 110000) * u.AA
        flux = bb(wav)

        with quantity_support():
            plt.figure()
            plt.semilogx(wav, flux)
            plt.axvline(bb.nu_max.to(u.AA, equivalencies=u.spectral()).value, ls='--')
            plt.show()
    """

    # We parametrize this model with a temperature and a scale.
    temperature = Parameter(default=5000.0, min=0, unit=u.K)
    scale = Parameter(default=1.0, min=0)

    # We allow values without units to be passed when evaluating the model, and
    # in this case the input x values are assumed to be frequencies in Hz.
    _input_units_allow_dimensionless = True

    # We enable the spectral equivalency by default for the spectral axis
    input_units_equivalencies = {"x": u.spectral()}

    def evaluate(self, x, temperature, scale):
        """Evaluate the model.

        Parameters
        ----------
        x : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
            Frequency at which to compute the blackbody. If no units are given,
            this defaults to Hz.

        temperature : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
            Temperature of the blackbody. If no units are given, this defaults
            to Kelvin.

        scale : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
            Desired scale for the blackbody.

        Returns
        -------
        y : number or ndarray
            Blackbody spectrum. The units are determined from the units of
            ``scale``.

        .. note::

            Use `numpy.errstate` to suppress Numpy warnings, if desired.

        .. warning::

            Output values might contain ``nan`` and ``inf``.

        Raises
        ------
        ValueError
            Invalid temperature.

        ZeroDivisionError
            Wavelength is zero (when converting to frequency).
        """
        if not isinstance(temperature, u.Quantity):
            in_temp = u.Quantity(temperature, u.K)
        else:
            in_temp = temperature

        # Convert to units for calculations, also force double precision
        with u.add_enabled_equivalencies(u.spectral() + u.temperature()):
            freq = u.Quantity(x, u.Hz, dtype=np.float64)
            temp = u.Quantity(in_temp, u.K)

        # check the units of scale and setup the output units
        bb_unit = u.erg / (u.cm ** 2 * u.s * u.Hz * u.sr)  # default unit
        # use the scale unit set at initialization to determine the units to
        # return, so the right units come back during fitting, when the units
        # are stripped
        if hasattr(self.scale, "unit") and self.scale.unit is not None:
            # check that the units of scale are convertible to surface brightness units
            if not self.scale.unit.is_equivalent(bb_unit, u.spectral_density(x)):
                raise ValueError(
                    f"scale units not surface brightness: {self.scale.unit}"
                )
            # use the scale passed to get the value for scaling
            if hasattr(scale, "unit"):
                mult_scale = scale.value
            else:
                mult_scale = scale
            bb_unit = self.scale.unit
        else:
            mult_scale = scale

        # Check if input values are physically possible
        if np.any(temp < 0):
            raise ValueError(f"Temperature should be positive: {temp}")
        if not np.all(np.isfinite(freq)) or np.any(freq <= 0):
            warnings.warn(
                "Input contains invalid wavelength/frequency value(s)",
                AstropyUserWarning,
            )

        log_boltz = const.h * freq / (const.k_B * temp)
        boltzm1 = np.expm1(log_boltz)

        # Calculate blackbody flux
        bb_nu = 2.0 * const.h * freq ** 3 / (const.c ** 2 * boltzm1) / u.sr

        y = mult_scale * bb_nu.to(bb_unit, u.spectral_density(freq))

        # If the temperature parameter has no unit, we should return a unitless
        # value. This occurs for instance during fitting, since we drop the
        # units temporarily.
        if hasattr(temperature, "unit"):
            return y
        else:
            return y.value

    @property
    def input_units(self):
        # The input units are those of the 'x' value, which should always be
        # Hz. Because we do this, and because input_units_allow_dimensionless
        # is set to True, dimensionless values are assumed to be in Hz.
        return {"x": u.Hz}

    def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
        return {"temperature": u.K}

    @property
    def bolometric_flux(self):
        """Bolometric flux."""
        # bolometric flux in the native units of the planck function
        native_bolflux = (
            self.scale.value * const.sigma_sb * self.temperature ** 4 / np.pi
        )
        # return in more "astro" units
        return native_bolflux.to(u.erg / (u.cm ** 2 * u.s))

    @property
    def lambda_max(self):
        """Peak wavelength when the curve is expressed as power density."""
        return const.b_wien / self.temperature

    @property
    def nu_max(self):
        """Peak frequency when the curve is expressed as power density."""
        return 2.8214391 * const.k_B * self.temperature / const.h
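
A short usage sketch for the convenience properties above (approximate values noted in comments, not asserted):

from astropy.modeling.models import BlackBody
import astropy.units as u

bb = BlackBody(temperature=5778 * u.K)
print(bb.lambda_max.to(u.nm))      # ~501 nm (Wien displacement law)
print(bb.nu_max.to(u.THz))         # ~340 THz
print(bb.bolometric_flux)          # sigma_sb * T**4 / pi, ~2.0e10 erg / (cm2 s)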
Beispiel #41
0
def ncrit(lamda_tables,
          transition_upper,
          transition_lower,
          temperature,
          OPR=3,
          partners=['H2', 'OH2', 'PH2']):
    """
    Compute the critical density for a transition given its temperature.

    The critical density is defined as the Einstein A value divided by the sum
    of the collision rates out of the upper state, to both lower and higher
    levels.  See Shirley et al 2015, eqn 4
    (http://esoads.eso.org/cgi-bin/bib_query?arXiv:1501.01629)

    Parameters
    ----------
    lamda_tables : list
        The list of LAMDA tables returned from a Lamda.query operation.
        Should be [ collision_rates_dict, Avals/Freqs, Energy Levels ]
    transition_upper : int
        The upper transition number as indexed in the lamda catalog
    transition_lower : int
        The lower transition number as indexed in the lamda catalog
    temperature : float
        Kinetic temperature in Kelvin.  Will be interpolated as appropriate.
        Extrapolation uses nearest value
    OPR : float
        ortho/para ratio of h2 if para/ortho h2 are included as colliders
    partners : list
        A list of valid partners.  It probably does not make sense to include
        both electrons and H2 because they'll have different densities.

    Returns
    -------
    ncrit : astropy.units.Quantity
        A quantity with units cm^-3
    """

    fortho = OPR / (1 + OPR)  # fraction of H2 colliders that are ortho-H2

    # exclude partners that are explicitly excluded
    crates = {
        coll: val
        for coll, val in lamda_tables[0].items() if coll in partners
    }
    avals = lamda_tables[1]
    enlevs = lamda_tables[2]

    aval = avals[(avals['Upper'] == transition_upper)
                 & (avals['Lower'] == transition_lower)]['EinsteinA'][0]

    temperature_re = re.compile(r"C_ij\(T=([0-9]*)\)")
    crate_temperatures = np.array([
        int(temperature_re.search(cn).groups()[0])
        for cn in crates[list(crates.keys())[0]].keys()
        if temperature_re.search(cn)
    ])

    if temperature < crate_temperatures.min():
        crates_ji_all = {
            coll: cr['C_ij(T={0})'.format(crate_temperatures.min())]
            for coll, cr in crates.items()
        }
    elif temperature > crate_temperatures.max():
        crates_ji_all = {
            coll: cr['C_ij(T={0})'.format(crate_temperatures.max())]
            for coll, cr in crates.items()
        }
    elif temperature in crate_temperatures:
        crates_ji_all = {
            coll: cr['C_ij(T={0})'.format(temperature)]
            for coll, cr in crates.items()
        }
    else:  # interpolate
        nearest = np.argmin(np.abs(temperature - crate_temperatures))
        if crate_temperatures[nearest] < temperature:
            low, high = (crate_temperatures[nearest],
                         crate_temperatures[nearest + 1])
        else:
            low, high = (crate_temperatures[nearest - 1],
                         crate_temperatures[nearest])
        crates_ji_all = {
            coll:
            (cr['C_ij(T={0})'.format(high)] - cr['C_ij(T={0})'.format(low)]) *
            (temperature - low) / (high - low) + cr['C_ij(T={0})'.format(low)]
            for coll, cr in crates.items()
        }

    transition_indices_ji = {
        coll: np.nonzero(cr['Upper'] == transition_upper)[0]
        for coll, cr in crates.items()
    }

    crates_ji = {
        coll: crates_ji_all[coll][transition_indices_ji[coll]]
        for coll in crates
    }

    # i > j: collisions from higher levels
    transition_indices_ij = {
        coll: np.nonzero(cr['Lower'] == transition_upper)[0]
        for coll, cr in crates.items()
    }
    crates_ij = {}
    for coll in crates.keys():
        crates_ind = crates[coll][transition_indices_ij[coll]]
        degeneracies_i = enlevs['Weight'][crates_ind['Upper'] - 1]
        degeneracies_j = enlevs['Weight'][crates_ind['Lower'] - 1]
        energy_i = enlevs['Energy'][crates_ind['Upper'] - 1] * u.cm**-1
        energy_j = enlevs['Energy'][crates_ind['Lower'] - 1] * u.cm**-1
        # Shirley 2015 eqn 4 (detailed balance for the upward rates):
        crates_ij[coll] = (crates_ji_all[coll][transition_indices_ij[coll]] *
                           degeneracies_i / degeneracies_j.astype('float') *
                           np.exp(
                               (energy_j - energy_i).to(u.erg, u.spectral()) /
                               (constants.k_B * temperature * u.K)))

    crates_tot_percollider = {
        coll:
        (np.sum(crates_ij[coll]) + np.sum(crates_ji[coll])) * u.cm**3 / u.s
        for coll in crates
    }

    if 'OH2' in crates:
        crates_tot = (fortho * crates_tot_percollider['OH2'] +
                      (1 - fortho) * crates_tot_percollider['PH2'])
    elif 'PH2' in crates:
        crates_tot = crates_tot_percollider['PH2']
    elif 'H2' in crates:
        crates_tot = crates_tot_percollider['H2']

    return ((aval * u.s**-1) / crates_tot).to(u.cm**-3)
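
A hedged usage sketch (assuming astroquery's Lamda interface; the transition indices are illustrative): the lamda_tables argument is the triple returned by Lamda.query, here for CO levels 3 and 2 (J=2-1) at 20 K.

from astroquery.lamda import Lamda

tables = Lamda.query(mol='co')      # (collision rates, transitions, levels)
n_crit = ncrit(tables, transition_upper=3, transition_lower=2,
               temperature=20, partners=['PH2', 'OH2'])
print(n_crit)                       # astropy Quantity in cm^-3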
Beispiel #42
0
import astropy.units as u

# Can't find the vptable by default. So save it and feed it in manually
# vp.setpbimage(telescope='NOEMA')

# Create a Gaussian pb model b/c casa doesn't have a NOEMA/PdBI beam model?
fwhm = (((230.538 * u.GHz).to(u.m, u.spectral()) / (15 * u.m)) * u.rad).to(
    u.deg).value
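# Rough sanity check on the value above: lambda = c / 230.538 GHz ~ 1.30 mm,
# so lambda / D = 1.30e-3 m / 15 m ~ 8.7e-5 rad ~ 4.97e-3 deg (~17.9 arcsec).
# This is a plain lambda/D estimate; no ~1.2 beam-taper factor is applied.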
vp.setpbgauss(telescope='NOEMA',
              dopb=True,
              halfwidth=fwhm,
              maxrad=4 * fwhm,
              reffreq='230.538GHz')

vp.saveastable('noema_pb.tab')

tclean(
    vis='meas_sets/M33-ARM05.ms',
    datacolumn='data',
    imagename="imaging/M33-ARM05_tclean_dirty",
    field='M33*',
    imsize=[1024, 700],
    cell='0.2arcsec',
    specmode='cube',
    start=1,
    width=1,
    nchan=-1,
    startmodel=None,
    gridder='mosaic',
    weighting='natural',
    niter=0,
Beispiel #43
0
    def _get_url_for_timerange(self, timerange, **kwargs):
        """
        Returns urls to the SUVI data for the specified time range.

        Parameters
        ----------
        timerange: `sunpy.time.TimeRange`
            Time range for which data is to be downloaded.
        level : `str`, optional
            The level of the data. Possible values are 1b and 2 (default).
        wavelength : `astropy.units.Quantity` or `tuple`, optional
            Wavelength band. If not given, all wavelengths are returned.
        satellitenumber : `int`, optional
            GOES satellite number. Must be >= 16. Default is 16.
        """
        base_url = "https://data.ngdc.noaa.gov/platforms/solar-space-observing-satellites/goes/goes{goes_number}/"
        supported_waves = [94, 131, 171, 195, 284, 304]
        supported_levels = ("2", "1b")

        # these are optional requirements so if not provided assume defaults
        # if wavelength is not provided assuming all of them
        if "wavelength" in kwargs.keys():
            wavelength_input = kwargs.get("wavelength")
            if isinstance(wavelength_input, u.Quantity):  # not a range
                if int(wavelength_input.to_value('Angstrom')) not in supported_waves:
                    raise ValueError(f"Wavelength {kwargs.get('wavelength')} not supported.")
                else:
                    wavelength = [kwargs.get("wavelength")]
            else:  # Range was provided
                compress_index = [wavelength_input.wavemin <= this_wave <=
                                  wavelength_input.wavemax for this_wave in (supported_waves * u.Angstrom)]
                if not any(compress_index):
                    raise ValueError(
                        f"Wavelength {wavelength_input} not supported.")
                else:
                    wavelength = list(compress(supported_waves, compress_index)) * u.Angstrom
        else:  # no wavelength provided return all of them
            wavelength = supported_waves * u.Angstrom
        # check that the input wavelength can be converted to angstrom
        waves = [int(this_wave.to_value('angstrom', equivalencies=u.spectral()))
                 for this_wave in wavelength]
        # use the given satellite number or choose the best one
        satellitenumber = int(kwargs.get(
            "satellitenumber", self._get_goes_sat_num(timerange.start)))
        if satellitenumber < 16:
            raise ValueError(f"Satellite number {satellitenumber} not supported.")
        # default to the highest level of data
        level = str(kwargs.get("level", "2"))  # make string in case the input is a number

        if level not in supported_levels:
            raise ValueError(f"Level {level} is not supported.")

        results = []
        for this_wave in waves:
            if level == "2":
                search_pattern = base_url + \
                    'l{level}/data/suvi-l{level}-ci{wave:03}/%Y/%m/%d/dr_suvi-l{level}-ci{wave:03}_g{goes_number}_s%Y%m%dT%H%M%SZ_.*\.fits'
            elif level == "1b":
                if this_wave in [131, 171, 195, 284]:
                    search_pattern = base_url + \
                        'l{level}/suvi-l{level}-fe{wave:03}/%Y/%m/%d/OR_SUVI-L{level}-Fe{wave:03}_G{goes_number}_s%Y%j%H%M%S.*\.fits.gz'
                elif this_wave == 304:
                    search_pattern = base_url + \
                        'l{level}/suvi-l{level}-he{wave:03}/%Y/%m/%d/OR_SUVI-L{level}-He{wave_minus1:03}_G{goes_number}_s%Y%j%H%M%S.*\.fits.gz'
                elif this_wave == 94:
                    search_pattern = base_url + \
                        'l{level}/suvi-l{level}-fe{wave:03}/%Y/%m/%d/OR_SUVI-L{level}-Fe{wave_minus1:03}_G{goes_number}_s%Y%j%H%M%S.*\.fits.gz'

            if search_pattern.count('wave_minus1'):
                scraper = Scraper(search_pattern, level=level, wave=this_wave,
                                  goes_number=satellitenumber, wave_minus1=this_wave-1)
            else:
                scraper = Scraper(search_pattern, level=level, wave=this_wave,
                                  goes_number=satellitenumber)
            results.extend(scraper.filelist(timerange))
        return results
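
A standalone sketch of the wavelength normalisation above: with u.spectral(), a caller can pass the band as a wavelength in any unit, or even as a frequency, and still land on the supported Angstrom values.

import astropy.units as u

band = 17.1 * u.nm
print(band.to_value('angstrom', equivalencies=u.spectral()))        # ~171.0
freq_band = (171 * u.AA).to(u.Hz, u.spectral())
print(freq_band.to_value('angstrom', equivalencies=u.spectral()))   # ~171.0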
Beispiel #44
0
def write_fits_image(dataset,
                     image,
                     image_parameters,
                     filename,
                     channel,
                     beam=None,
                     bunit='Jy/beam',
                     extra_fits_headers=None):
    """Write an image to a FITS file.

    Parameters
    ----------
    dataset : :class:`katsdpimager.loader_core.LoaderBase`
        Source dataset (used to set metadata such as phase centre)
    image : :class:`numpy.ndarray`
        Image data in Jy/beam, indexed by polarization, m, l. For
        a 2M x 2N image, the phase centre is at coordinates (M, N).
    image_parameters : :class:`katsdpimager.parameters.ImageParameters`
        Metadata associated with the image
    filename : str
        File to write. It is silently overwritten if already present.
    channel : int
        Channel number to substitute into `filename` with printf formatting.
    beam : :class:`katsdpimager.beam.Beam`, optional
        Synthesized beam model to write to the header
    bunit : str, optional
        Value for the ``BUNIT`` header in the file. It can be explicitly set
        to ``None`` to avoid writing this key.
    extra_fits_headers : mapping, optional
        Extra headers to add to the FITS headers present.

    Returns
    -------
    image
        Image data that is written to the file (in the order expected by
        :mod:`astropy.io.fits`).
    header
        FITS headers.

    Raises
    ------
    ValueError
        If the set of `polarizations` cannot be represented as a linear
        transform in the FITS header.
    """
    header = fits.Header()
    if bunit is not None:
        header['BUNIT'] = bunit
    header['ORIGIN'] = 'katsdpimager'
    header['HISTORY'] = f'Created by katsdpimager {katsdpimager.__version__}'
    header['TIMESYS'] = 'UTC'
    header['DATE'] = Time.now().utc.isot

    # Transformation from pixel coordinates to intermediate world coordinates,
    # which are taken to be l, m coordinates. The reference point is currently
    # taken to be the centre of the image (actually half a pixel beyond the
    # centre, because of the way fftshift works).  Note that astropy.io.fits
    # reverses the axis order. The X coordinate is computed differently
    # because the X axis is flipped to allow RA to increase right-to-left.
    header['CRPIX1'] = image.shape[2] * 0.5
    header['CRPIX2'] = image.shape[1] * 0.5 + 1.0
    header['CRPIX4'] = 1.0
    # FITS uses degrees; and RA increases right-to-left
    delt = np.arcsin(image_parameters.pixel_size).to(units.deg).value
    header['CDELT1'] = -delt
    header['CDELT2'] = delt
    header['CDELT4'] = 1.0

    # Transformation from intermediate world coordinates to world
    # coordinates (celestial coordinates in this case).
    # TODO: get equinox from input
    phase_centre = dataset.phase_centre()
    header['EQUINOX'] = 2000.0
    header['RADESYS'] = 'FK5'  # Julian equinox
    header['CUNIT1'] = 'deg'
    header['CUNIT2'] = 'deg'
    header['CUNIT4'] = 'Hz'
    header['CTYPE1'] = 'RA---SIN'
    header['CTYPE2'] = 'DEC--SIN'
    header['CTYPE4'] = 'FREQ    '
    header['CRVAL1'] = phase_centre[0].to(units.deg).value
    header['CRVAL2'] = phase_centre[1].to(units.deg).value
    header['CRVAL4'] = image_parameters.wavelength.to(
        units.Hz, equivalencies=units.spectral()).value
    if beam is not None:
        major = beam.major * image_parameters.pixel_size * units.rad
        minor = beam.minor * image_parameters.pixel_size * units.rad
        header['BMAJ'] = major.to(units.deg).value
        header['BMIN'] = minor.to(units.deg).value
        header['BPA'] = beam.theta.to(units.deg).value
    _fits_polarizations(header, 3, image_parameters.fixed.polarizations)
    # This is basically np.nanmin and np.nanmax, but the implementations of
    # those take a slow, safe path if the array is a subclass of ndarray. In
    # our case it is, but the fast path still works, so we use it directly.
    datamin = float(np.fmin.reduce(image, axis=None))
    datamax = float(np.fmax.reduce(image, axis=None))
    if not np.isnan(datamin):
        header['DATAMIN'] = datamin
        header['DATAMAX'] = datamax

    header.update(dataset.extra_fits_headers())
    if extra_fits_headers is not None:
        header.update(extra_fits_headers)

    # l axis is reversed, because RA increases right-to-left.
    # The np.newaxis adds an axis for frequency. While it's not required for
    # the FITS file to be valid (it's legal for the WCS transformations to
    # reference additional virtual axes), aplpy 1.1.1 doesn't handle it
    # (https://github.com/aplpy/aplpy/issues/350).
    image = image[np.newaxis, :, :, ::-1]

    # Explicitly converting to big-endian has two advantages:
    # 1. It returns a contiguous array, which allows for a much faster path
    #    through writeto.
    # 2. If the image is little-endian, then astropy.io.fits will convert to
    #    big endian, write, and convert back again.
    # The disadvantage is an increase in memory usage.
    #
    image_be = np.require(image, image.dtype.newbyteorder('>'), 'C')
    hdu = fits.PrimaryHDU(image_be, header)
    hdu.writeto(filename, overwrite=True)
    return image, header
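
A standalone sketch of the CRVAL4 step above: the image's reference wavelength is written to the header as a frequency via the spectral equivalency (the 21 cm value is purely illustrative).

from astropy import units

wavelength = 0.21 * units.m
crval4 = wavelength.to(units.Hz, equivalencies=units.spectral()).value
print(crval4)                       # ~1.43e9 Hz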
Beispiel #45
0
class Conf():

    # acceptable field names for DataClass
    fieldnames_info = [
        # General
        {
            'description': 'Target Identifier',
            'fieldnames': ['targetname', 'id', 'Object'],
            'provenance': ['orbit', 'ephem', 'obs', 'phys'],
            'dimension': None
        },
        {
            'description': 'Target Designation',
            'fieldnames': ['desig', 'designation'],
            'provenance': ['orbit', 'ephem', 'obs', 'phys'],
            'dimension': None
        },
        {
            'description': 'Target Number',
            'fieldnames': ['number'],
            'provenance': ['orbit', 'ephem', 'obs', 'phys'],
            'dimension': None
        },
        {
            'description': 'Target Name',
            'fieldnames': ['name'],
            'provenance': ['orbit', 'ephem', 'obs', 'phys'],
            'dimension': None
        },
        {
            'description': 'Epoch',
            'fieldnames':
            ['epoch', 'datetime', 'Date', 'date', 'Time', 'time'],
            'provenance': ['orbit', 'ephem', 'obs'],
            'dimension': dimensions.time_object
        },

        # Orbital Elements
        {
            'description': 'Semi-Major Axis',
            'fieldnames': ['a', 'sma'],
            'provenance': ['orbit'],
            'dimension': dimensions.length
        },
        {
            'description': 'Eccentricity',
            'fieldnames': ['e', 'ecc'],
            'provenance': ['orbit'],
            'dimension': dimensions.dimensionless
        },
        {
            'description': 'Inclination',
            'fieldnames': ['i', 'inc', 'incl'],
            'provenance': ['orbit'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Perihelion Distance',
            'fieldnames': ['q', 'periheldist'],
            'provenance': ['orbit'],
            'dimension': dimensions.length
        },
        {
            'description': 'Aphelion Distance',
            'fieldnames': ['Q', 'apheldist'],
            'provenance': ['orbit'],
            'dimension': dimensions.length
        },
        {
            'description': 'Longitude of the Ascending Node',
            'fieldnames': ['Omega', 'longnode', 'node'],
            'provenance': ['orbit'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Argument of the Periapsis',
            'fieldnames': ['w', 'argper'],
            'provenance': ['orbit'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Mean Anomaly',
            'fieldnames': ['M', 'mean_anom'],
            'provenance': ['orbit'],
            'dimension': dimensions.angle
        },
        {
            'description': 'True Anomaly',
            'fieldnames': ['v', 'true_anom', 'true_anomaly'],
            'provenance': ['orbit', 'ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Arc Length',
            'fieldnames': ['arc', 'arc_length'],
            'provenance': ['orbit', 'ephem'],
            'dimension': dimensions.time
        },
        {
            'description': 'Delta-v',
            'fieldnames': ['delta_v', 'delta-v'],
            'provenance': ['orbit', 'phys'],
            'dimension': dimensions.length_per_time
        },
        {
            'description': 'Minimum Orbit Intersection Distance wrt Mercury',
            'fieldnames': ['moid_mercury'],
            'provenance': ['orbit', 'phys'],
            'dimension': dimensions.length
        },
        {
            'description': 'Minimum Orbit Intersection Distance wrt Earth',
            'fieldnames': ['moid_earth'],
            'provenance': ['orbit', 'phys'],
            'dimension': dimensions.length
        },
        {
            'description': 'Minimum Orbit Intersection Distance wrt Venus',
            'fieldnames': ['moid_venus'],
            'provenance': ['orbit', 'phys'],
            'dimension': dimensions.length
        },
        {
            'description': 'Minimum Orbit Intersection Distance wrt Mars',
            'fieldnames': ['moid_mars'],
            'provenance': ['orbit', 'phys'],
            'dimension': dimensions.length
        },
        {
            'description': 'Minimum Orbit Intersection Distance wrt Jupiter',
            'fieldnames': ['moid_jupiter'],
            'provenance': ['orbit', 'phys'],
            'dimension': dimensions.length
        },
        {
            'description': 'Minimum Orbit Intersection Distance wrt Saturn',
            'fieldnames': ['moid_saturn'],
            'provenance': ['orbit', 'phys'],
            'dimension': dimensions.length
        },
        {
            'description': 'Minimum Orbit Intersection Distance wrt Uranus',
            'fieldnames': ['moid_uranus'],
            'provenance': ['orbit', 'phys'],
            'dimension': dimensions.length
        },
        {
            'description': 'Minimum Orbit Intersection Distance wrt Neptune',
            'fieldnames': ['moid_neptune'],
            'provenance': ['orbit', 'phys'],
            'dimension': dimensions.length
        },
        {
            'description': 'Tisserand Parameter wrt Jupiter',
            'fieldnames': ['Tj', 'tj'],
            'provenance': ['orbit', 'phys'],
            'dimension': None
        },
        {
            'description': 'MPC Orbit Type',
            'fieldnames': ['mpc_orb_type'],
            'provenance': ['orbit', 'phys'],
            'dimension': None
        },
        {
            'description': 'Epoch of Perihelion Passage',
            'fieldnames': ['Tp'],
            'provenance': ['orbit'],
            'dimension': dimensions.time_object
        },
        {
            'description': 'Orbital Period',
            'fieldnames': ['P', 'period'],
            'provenance': ['orbit', 'phys'],
            'dimension': dimensions.time
        },

        # Ephemerides properties
        {
            'description': 'Heliocentric Distance',
            'fieldnames': ['r', 'rh', 'r_hel', 'heldist'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.length
        },
        {
            'description': 'Heliocentric Radial Velocity',
            'fieldnames':
            ['r_rate', 'rh_rate', 'rdot', 'r-dot', 'rhdot', 'rh-dot'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.length_per_time
        },
        {
            'description': 'Distance to the Observer',
            'fieldnames': ['delta', 'Delta', 'obsdist'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.length
        },
        {
            'description':
            'Observer-Target Radial Velocity',
            'fieldnames':
            ['delta_rate', 'deltadot', 'delta-dot', 'deldot', 'del-dot'],
            'provenance': ['ephem', 'obs'],
            'dimension':
            dimensions.length_per_time
        },
        {
            'description': 'Right Ascension',
            'fieldnames': ['ra', 'RA'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Declination',
            'fieldnames': ['dec', 'DEC', 'Dec'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description':
            'Right Ascension Rate',
            'fieldnames':
            ['ra_rate', 'RA_rate', 'ra_rates', 'RA_rates', 'dRA', 'dra'],
            'provenance': ['ephem', 'obs'],
            'dimension':
            dimensions.angle_per_time
        },
        {
            'description':
            'RA*cos(Dec) Rate',
            'fieldnames': [
                'RA*cos(Dec)_rate', 'dra cos(dec)', 'dRA cos(Dec)',
                'dra*cos(dec)', 'dRA*cos(Dec)'
            ],
            'provenance': ['ephem', 'obs'],
            'dimension':
            dimensions.angle_per_time
        },
        {
            'description':
            'Declination Rate',
            'fieldnames': [
                'dec_rate', 'DEC_rate', 'Dec_rate', 'dec_rates', 'DEC_rates',
                'Dec_rates', 'dDec', 'dDEC', 'ddec'
            ],
            'provenance': ['ephem', 'obs'],
            'dimension':
            dimensions.angle_per_time
        },
        {
            'description': 'Proper Motion',
            'fieldnames': ['mu', 'Proper motion'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle_per_time
        },
        {
            'description': 'Proper Motion Direction',
            'fieldnames': ['Direction', 'direction'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Solar Phase Angle',
            'fieldnames': ['alpha', 'phaseangle', 'Phase', 'phase'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description':
            'Solar Elongation Angle',
            'fieldnames': [
                'elong', 'solarelong', 'solarelongation', 'elongation',
                'Elongation'
            ],
            'provenance': ['ephem', 'obs'],
            'dimension':
            dimensions.angle
        },
        {
            'description': 'V-band Magnitude',
            'fieldnames': ['V', 'Vmag'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.magnitude
        },
        {
            'description': 'Heliocentric Ecliptic Longitude',
            'fieldnames':
            ['hlon', 'EclLon', 'ecllon', 'HelEclLon', 'helecllon'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Heliocentric Ecliptic Latitude',
            'fieldnames':
            ['hlat', 'EclLat', 'ecllat', 'HelEclLat', 'helecllat'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Horizontal Elevation',
            'fieldnames':
            ['el', 'EL', 'elevation', 'alt', 'altitude', 'Altitude'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Horizontal Azimuth',
            'fieldnames': ['az', 'AZ', 'azimuth'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description':
            'Lunar Elongation',
            'fieldnames': [
                'lunar_elong', 'elong_moon', 'elongation_moon',
                'lunar_elongation', 'lunarelong'
            ],
            'provenance': ['ephem', 'obs'],
            'dimension':
            dimensions.angle
        },
        {
            'description': 'X State Vector Component',
            'fieldnames': ['x', 'X', 'x_vec'],
            'provenance': ['orbit', 'ephem', 'obs'],
            'dimension': dimensions.length
        },
        {
            'description': 'Y State Vector Component',
            'fieldnames': ['y', 'Y', 'y_vec'],
            'provenance': ['orbit', 'ephem', 'obs'],
            'dimension': dimensions.length
        },
        {
            'description': 'Z State Vector Component',
            'fieldnames': ['z', 'Z', 'z_vec'],
            'provenance': ['orbit', 'ephem', 'obs'],
            'dimension': dimensions.length
        },
        {
            'description': 'X Velocity Vector Component',
            'fieldnames': ['vx', 'dx', 'dx/dt'],
            'provenance': ['orbit', 'ephem', 'obs'],
            'dimension': dimensions.length_per_time
        },
        {
            'description': 'Y Velocity Vector Component',
            'fieldnames': ['vy', 'dy', 'dy/dt'],
            'provenance': ['orbit', 'ephem', 'obs'],
            'dimension': dimensions.length_per_time
        },
        {
            'description': 'Z Velocity Vector Component',
            'fieldnames': ['vz', 'dz', 'dz/dt'],
            'provenance': ['orbit', 'ephem', 'obs'],
            'dimension': dimensions.length_per_time
        },
        {
            'description': 'X heliocentric position vector',
            'fieldnames': ['x_h', 'X_h'],
            'provenance': ['orbit', 'ephem', 'obs'],
            'dimension': dimensions.length
        },
        {
            'description': 'Y heliocentric position vector',
            'fieldnames': ['y_h', 'Y_h'],
            'provenance': ['orbit', 'ephem', 'obs'],
            'dimension': dimensions.length
        },
        {
            'description': 'Z heliocentric position vector',
            'fieldnames': ['z_h', 'Z_h'],
            'provenance': ['orbit', 'ephem', 'obs'],
            'dimension': dimensions.length
        },
        {
            'description': 'Comet Total Absolute Magnitude',
            'fieldnames': ['m1', 'M1'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.magnitude
        },
        {
            'description': 'Comet Nuclear Absolute Magnitude',
            'fieldnames': ['m2', 'M2'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.magnitude
        },
        {
            'description': 'Total Magnitude Scaling Factor',
            'fieldnames': ['k1', 'K1'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.dimensionless
        },
        {
            'description': 'Nuclear Magnitude Scaling Factor',
            'fieldnames': ['k2', 'K2'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.dimensionless
        },
        {
            'description': 'Phase Coefficient',
            'fieldnames': ['phase_coeff', 'Phase_coeff'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.dimensionless
        },
        {
            'description': 'Information on Solar Presence',
            'fieldnames': ['solar_presence', 'Solar_presence'],
            'provenance': ['ephem', 'obs'],
            'dimension': None
        },
        {
            'description': 'Information on Moon and target status',
            'fieldnames': ['status_flag', 'Status_flag'],
            'provenance': ['ephem', 'obs'],
            'dimension': None
        },
        {
            'description': 'Apparent Right Ascension',
            'fieldnames': ['RA_app', 'ra_app'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Apparent Declination',
            'fieldnames': ['DEC_app', 'dec_app'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Azimuth Rate (dAZ*cosE)',
            'fieldnames': ['az_rate', 'AZ_rate'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle_per_time
        },
        {
            'description': 'Elevation Rate (d(ELV)/dt)',
            'fieldnames': ['el_rate', 'EL_rate'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle_per_time
        },
        {
            'description': 'Satellite Position Angle',
            'fieldnames': ['sat_pang', 'Sat_pang'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Local Sidereal Time',
            'fieldnames': ['siderealtime', 'Siderealtime'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.time
        },
        {
            'description': 'Target Optical Airmass',
            'fieldnames': ['airmass', 'Airmass'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.dimensionless
        },
        {
            'description': 'V Magnitude Extinction',
            'fieldnames': ['vmagex', 'Vmagex'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.magnitude
        },
        {
            'description': 'Surface Brightness',
            'fieldnames': ['Surfbright', 'surfbright'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.magnitude_per_solid_angle
        },
        {
            'description': 'Fraction of Illumination',
            'fieldnames': ['frac_illum', 'Frac_illum'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.percent
        },
        {
            'description': 'Illumination Defect',
            'fieldnames': ['defect_illum', 'Defect_illum'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Target-primary angular separation',
            'fieldnames': ['targ_sep', 'Targ_sep'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Target-primary visibility',
            'fieldnames': ['targ_vis', 'Targ_vis'],
            'provenance': ['ephem', 'obs'],
            'dimension': None
        },
        {
            'description': 'Angular width of target',
            'fieldnames': ['targ_width', 'Targ_width'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Apparent planetodetic longitude',
            'fieldnames': ['pldetic_long', 'Pldetic_long'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Apparent planetodetic latitude',
            'fieldnames': ['pldetic_lat', 'Pldetic_lat'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Apparent planetodetic Solar longitude',
            'fieldnames': ['pltdeticSol_long', 'PltdeticSol_long'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Apparent planetodetic Solar latitude',
            'fieldnames': ['pltdeticSol_lat', 'PltdeticSol_lat'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Target sub-solar point position angle',
            'fieldnames': ['subsol_ang', 'Subsol_ang'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Target sub-solar point angle distance',
            'fieldnames': ['subsol_dist', 'Subsol_dist'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Target North pole position angle',
            'fieldnames': ['npole_angle', 'Npole_angle'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Target North pole position distance',
            'fieldnames': ['npole_dist', 'Npole_dist'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Observation centric ecliptic longitude',
            'fieldnames': ['obs_ecl_long', 'Obs_ecl_long'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Observation centric ecliptic latitude',
            'fieldnames': ['obs_ecl_lat', 'Obs_ecl_lat'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'One-way light time',
            'fieldnames': ['lighttime', 'Lighttime'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.time
        },
        {
            'description': 'Target center velocity wrt Sun',
            'fieldnames': ['vel_sun', 'Vel_sun'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.length_per_time
        },
        {
            'description': 'Target center velocity wrt Observer',
            'fieldnames': ['vel_obs', 'Vel_obs'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.length_per_time
        },
        {
            'description': 'Lunar illumination',
            'fieldnames': ['lun_illum', 'Lun_illum'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.percent
        },
        {
            'description': 'Apparent interfering body elongation wrt observer',
            'fieldnames': ['ib_elong', 'IB_elong'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Interfering body illumination',
            'fieldnames': ['ib_illum', 'IB_illum'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.percent
        },
        {
            'description': 'Observer primary target angle',
            'fieldnames': ['targ_angle_obs', 'Targ_angle_obs'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Orbital plane angle',
            'fieldnames': ['orbangle_plane', 'Orbangle_plane'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Constellation ID containing target',
            'fieldnames': ['constellation', 'Constellation'],
            'provenance': ['ephem', 'obs'],
            'dimension': None
        },
        {
            'description': 'Target North Pole RA',
            'fieldnames': ['targ_npole_ra', 'targ_npole_RA'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Target North Pole DEC',
            'fieldnames': ['targ_npole_dec', 'targ_npole_DEC'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Galactic Longitude',
            'fieldnames': ['glx_long', 'Glx_long'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Galactic Latitude',
            'fieldnames': ['glx_lat', 'Glx_lat'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Local apparent solar time',
            'fieldnames': ['solartime'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.time
        },
        {
            'description': 'Observer light time from Earth',
            'fieldnames': ['earthlighttime', 'Earthlighttime'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.time
        },
        {
            'description': '3 sigma positional uncertainty RA',
            'fieldnames': ['RA_3sigma', 'ra_3sigma'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': '3 sigma positional uncertainty DEC',
            'fieldnames': ['DEC_3sigma', 'dec_3sigma'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': '3 sigma positional uncertainty semi-major axis',
            'fieldnames': ['sma_3sigma'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': '3 sigma positional uncertainty semi-minor axis',
            'fieldnames': ['smi_3sigma'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': '3 sigma positional uncertainty position angle',
            'fieldnames': ['posangle_3sigma'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': '3 sigma positional uncertainty ellipse area',
            'fieldnames': ['area_3sigma'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.solid_angle
        },
        {
            'description': '3 sigma positional uncertainty root sum square',
            'fieldnames': ['rss_3sigma'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': '3 sigma range uncertainty',
            'fieldnames': ['r_3sigma'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.length
        },
        {
            'description': '3 sigma range rate uncertainty',
            'fieldnames': ['r_rate_3sigma'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.length_per_time
        },
        {
            'description': '3 sigma doppler radar uncertainty at S-band',
            'fieldnames': ['sband_3sigma'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.frequency
        },
        {
            'description': '3 sigma doppler radar uncertainty at X-band',
            'fieldnames': ['xband_3sigma'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.frequency
        },
        {
            'description': '3 sigma doppler round-trip delay uncertainty',
            'fieldnames': ['dopdelay_3sigma'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.time
        },
        {
            'description': 'Local apparent hour angle',
            'fieldnames': ['locapp_hourangle'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.time
        },
        {
            'description': 'True phase angle',
            'fieldnames': ['true_phaseangle'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Phase angle bisector longitude',
            'fieldnames': ['pab_long'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Phase angle bisector latitude',
            'fieldnames': ['pab_lat'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Absolute V-band Magnitude',
            'fieldnames': ['abs_V', 'abs_Vmag'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.magnitude
        },
        {
            'description': 'Satellite X-position',
            'fieldnames': ['sat_X', 'sat_x'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Satellite Y-position',
            'fieldnames': ['sat_y', 'sat_Y'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },
        {
            'description': 'Atmospheric Refraction',
            'fieldnames': ['atm_refraction', 'refraction'],
            'provenance': ['ephem', 'obs'],
            'dimension': dimensions.angle
        },

        # Physical properties (dependent on other properties)
        {
            'description': 'Infrared Beaming Parameter',
            'fieldnames': ['eta', 'Eta'],
            'provenance': ['ephem', 'obs'],
            'dimension': None
        },
        {
            'description': 'Temperature',
            'fieldnames': ['temp', 'Temp', 'temperature', 'Temperature'],
            'provenance': ['phys', 'ephem', 'obs'],
            'dimension': dimensions.temperature
        },

        # Physical properties (static)
        {
            'description': 'Effective Diameter',
            'fieldnames': ['d', 'D', 'diam', 'diameter', 'Diameter'],
            'provenance': ['phys'],
            'dimension': dimensions.length
        },
        {
            'description': 'Effective Radius',
            'fieldnames': ['R', 'radius'],
            'provenance': ['phys'],
            'dimension': dimensions.length
        },
        {
            'description': 'Geometric Albedo',
            'fieldnames': ['pv', 'pV', 'p_v', 'p_V', 'geomalb'],
            'provenance': ['phys'],
            'dimension': dimensions.dimensionless
        },
        {
            'description': 'Bond Albedo',
            'fieldnames': ['A', 'bondalbedo'],
            'provenance': ['phys'],
            'dimension': dimensions.dimensionless
        },
        {
            'description': 'Emissivity',
            'fieldnames': ['emissivity', 'Emissivity'],
            'provenance': ['phys'],
            'dimension': dimensions.dimensionless
        },
        {
            'description': 'Absolute Magnitude',
            'fieldnames': ['absmag', 'H'],
            'provenance': ['phys', 'ephem', 'orbit'],
            'dimension': dimensions.magnitude
        },
        {
            'description': 'Photometric Phase Slope Parameter',
            'fieldnames': ['G', 'slope'],
            'provenance': ['phys', 'ephem', 'orbit'],
            'dimension': dimensions.dimensionless
        },
        {
            'description': 'Molecule Identifier',
            'fieldnames': ['mol_tag', 'mol_name'],
            'provenance': ['phys'],
            'dimension': None
        },
        {
            'description': 'Transition frequency',
            'fieldnames': ['t_freq'],
            'provenance': ['phys'],
            'dimension': dimensions.frequency,
            'equivalencies': u.spectral()
        },
        {
            'description': 'Integrated line intensity at 300 K',
            'fieldnames': ['lgint300'],
            'provenance': ['phys'],
            'dimension': None
        },  # fix when intensity units are resolved
        {
            'description':
            'Integrated line intensity at designated Temperature',
            'fieldnames': ['intl', 'lgint'],
            'provenance': ['phys'],
            'dimension': None
        },  # fix when intensity units are resolved
        {
            'description': 'Partition function at 300 K',
            'fieldnames': ['partfn300'],
            'provenance': ['phys'],
            'dimension': dimensions.dimensionless
        },
        {
            'description': 'Partition function at designated temperature',
            'fieldnames': ['partfn'],
            'provenance': ['phys'],
            'dimension': dimensions.dimensionless
        },
        {
            'description': 'Upper state degeneracy',
            'fieldnames': ['dgup'],
            'provenance': ['phys'],
            'dimension': dimensions.dimensionless
        },
        {
            'description': 'Upper level energy in Joules',
            'fieldnames': ['eup_j', 'eup_J'],
            'provenance': ['phys'],
            'dimension': dimensions.energy
        },
        {
            'description': 'Lower level energy in Joules',
            'fieldnames': ['elo_j', 'elo_J'],
            'provenance': ['phys'],
            'dimension': dimensions.energy
        },
        {
            'description': 'Degrees of freedom',
            'fieldnames': ['degfr', 'ndf', 'degfreedom'],
            'provenance': ['phys'],
            'dimension': dimensions.dimensionless
        },
        {
            'description': 'Einstein Coefficient',
            'fieldnames': ['au', 'eincoeff'],
            'provenance': ['phys'],
            'dimension': dimensions.inverse_time
        },
        {
            'description': 'Timescale * r^2',
            'fieldnames': ['beta', 'beta_factor'],
            'provenance': ['phys'],
            'dimension': dimensions.time_area
        },
        {
            'description': 'Total Number',
            'fieldnames': ['totnum', 'total_number_nocd',
                           'total_number'],
            'provenance': ['phys'],
            'dimension': dimensions.dimensionless
        },
        {
            'description': 'Column Density from Bockelee Morvan et al. 2004',
            'fieldnames': ['cdensity', 'col_density'],
            'provenance': ['phys'],
            'dimension': dimensions.inverse_area
        },
        # {  # see module doc string
        #   'description': '',
        #   'fieldnames': [],
        #   'provenance': [],
        #   'dimension': None,
        #   'equivalencies': (astropy units equivalencies, e.g., u.spectral())
        # },
    ]

    # use this code snippet to identify duplicate field names:
    # from sbpy.data import Conf
    # import collections
    # a = sum(Conf.fieldnames, [])
    # print([item for item, count in collections.Counter(a).items()
    #        if count > 1])

    # list of fieldnames; each element a list of alternatives
    fieldnames = [prop['fieldnames'] for prop in fieldnames_info]

    fieldname_idx = {}
    for idx, field in enumerate(fieldnames):
        for alt in field:
            fieldname_idx[alt] = idx
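
    # use this code snippet to check that two alternative spellings resolve to
    # the same entry of fieldnames_info (hypothetical sanity check, not part
    # of sbpy):
    # from sbpy.data import Conf
    # assert Conf.fieldname_idx['pV'] == Conf.fieldname_idx['geomalb']
    # assert Conf.fieldname_idx['diam'] == Conf.fieldname_idx['Diameter']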

    # field equivalencies defining conversions
    # key defines target quantity; dict with source quantity and function
    # for conversion
    # conversions considered as part of DataClass._translate_columns
    field_eq = {
        'R': {
            'd': lambda d: d / 2  # diameter to radius
        },
        'd': {
            'R': lambda r: r * 2  # radius to diameter
        }
    }
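
    # hedged illustration of the conversions above (assuming the mapping is
    # applied as-is; illustrative only):
    # from sbpy.data import Conf
    # Conf.field_eq['R']['d'](10)   # -> 5.0 (radius from a diameter of 10)
    # Conf.field_eq['d']['R'](5)    # -> 10  (diameter from a radius of 5)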

    # definitions for use of pyoorb in Orbits
    oorb_timeScales = {'UTC': 1, 'UT1': 2, 'TT': 3, 'TAI': 4}
    oorb_elemType = {'CART': 1, 'COM': 2, 'KEP': 3, 'DEL': 4, 'EQX': 5}

    # field name, unit; in order as returned from oorb
    # However, in propagate, angular units are returned as deg.  This is
    # accounted for in Orbit.oo_propagate().
    oorb_orbit_fields = {
        'COM': (
            ('id', None),
            ('q', 'au'),
            ('e', ''),
            ('incl', 'rad'),
            ('Omega', 'rad'),
            ('w', 'rad'),
            ('Tp', 'd'),
            ('orbtype', None),
            ('epoch', 'd'),
            ('epoch_scale', None),
            ('H', 'mag'),
            ('G', ''),
        ),
        'KEP': (
            ('id', None),
            ('a', 'au'),
            ('e', ''),
            ('incl', 'rad'),
            ('Omega', 'rad'),
            ('w', 'rad'),
            ('M', 'rad'),
            ('orbtype', None),
            ('epoch', 'd'),
            ('epoch_scale', None),
            ('H', 'mag'),
            ('G', ''),
        ),
        'CART': (
            ('id', None),
            ('x', 'au'),
            ('y', 'au'),
            ('z', 'au'),
            ('vx', 'au/d'),
            ('vy', 'au/d'),
            ('vz', 'au/d'),
            ('orbtype', None),
            ('epoch', 'd'),
            ('epoch_scale', None),
            ('H', 'mag'),
            ('G', ''),
        )
    }
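
    # hedged sketch of turning the tuples above into a name -> unit lookup for
    # one parameterization (illustrative only, not part of sbpy):
    # from sbpy.data import Conf
    # kep_units = dict(Conf.oorb_orbit_fields['KEP'])
    # kep_units['a']     # -> 'au'
    # kep_units['incl']  # -> 'rad'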

    oorb_ephem_full_fields = [
        'MJD', 'RA', 'DEC', 'RA*cos(Dec)_rate', 'DEC_rate', 'alpha', 'elong',
        'r', 'Delta', 'V', 'pa', 'TopEclLon', 'TopEclLat', 'OppTopEclLon',
        'OppTopEclLat', 'HelEclLon', 'HelEclLat', 'OppHelEclLon',
        'OppHelEclLat', 'EL', 'ELsun', 'ELmoon', 'lunarphase', 'lunarelong',
        'x', 'y', 'z', 'vx', 'vy', 'vz', 'obsx', 'obsy', 'obsz', 'trueanom'
    ]

    oorb_ephem_full_units = [
        'd', 'deg', 'deg', 'deg/d', 'deg/d', 'deg', 'deg', 'au', 'au', 'mag',
        'deg', 'deg', 'deg', 'deg', 'deg', 'deg', 'deg', 'deg', 'deg', 'deg',
        'deg', 'deg', None, 'deg', 'au', 'au', 'au', 'au/d', 'au/d', 'au/d',
        'au', 'au', 'au', 'deg'
    ]

    oorb_ephem_basic_fields = [
        'MJD', 'RA', 'DEC', 'RA*cos(Dec)_rate', 'DEC_rate', 'alpha', 'elong',
        'r', 'Delta', 'V', 'trueanom'
    ]

    oorb_ephem_basic_units = [
        'd', 'deg', 'deg', 'deg/d', 'deg/d', 'deg', 'deg', 'au', 'au', 'mag',
        'deg'
    ]
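
    # hedged sketch: pair each basic ephemeris field with its unit string
    # (illustrative only, not part of sbpy):
    # from sbpy.data import Conf
    # basic_units = dict(zip(Conf.oorb_ephem_basic_fields,
    #                        Conf.oorb_ephem_basic_units))
    # basic_units['RA']     # -> 'deg'
    # basic_units['Delta']  # -> 'au'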

    # definitions for MPC orbits: MPC field name: [sbpy field name, unit]
    mpc_orbit_fields = {
        'absolute_magnitude': ['absmag', 'mag'],
        'aphelion_distance': ['Q', 'au'],
        'arc_length': ['arc', 'day'],
        'argument_of_perihelion': ['w', 'deg'],
        'ascending_node': ['Omega', 'deg'],
        'delta_v': ['delta_v', 'km/s'],
        'designation': ['desig', None],
        'earth_moid': ['moid_earth', 'au'],
        'eccentricity': ['e', ''],
        'epoch_jd': ['epoch', 'time_jd_utc'],
        'inclination': ['i', 'deg'],
        'jupiter_moid': ['moid_jupiter', 'au'],
        'mars_moid': ['moid_mars', 'au'],
        'mean_anomaly': ['M', 'deg'],
        'mercury_moid': ['moid_mercury', 'au'],
        'name': ['name', None],
        'number': ['number', None],
        'orbit_type': ['mpc_orbit_type', None],
        'perihelion_date_jd': ['Tp', 'time_jd_utc'],
        'perihelion_distance': ['q', 'au'],
        'period': ['P', 'year'],
        'phase_slope': ['G', ''],
        'saturn_moid': ['moid_saturn', 'au'],
        'semimajor_axis': ['a', 'au'],
        'tisserand_jupiter': ['Tj', ''],
        'uranus_moid': ['moid_uranus', 'au'],
        'venus_moid': ['moid_venus', 'au']
    }
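
    # hedged sketch: translate one raw MPC field into its sbpy name and unit
    # (illustrative only):
    # import astropy.units as u
    # from sbpy.data import Conf
    # sbpy_name, unit = Conf.mpc_orbit_fields['semimajor_axis']
    # a = 2.77 * u.Unit(unit)   # stored under the sbpy field name 'a', in au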
Beispiel #46
0
from __future__ import print_function, absolute_import, division, unicode_literals

import numpy as np
import os, imp
import warnings as warn
import pdb

from scipy.integrate import simps
from scipy.interpolate import interp1d

from astropy import units as u
from astropy import constants as const

# Path
pyigm_path = imp.find_module('pyigm')[1]
Ryd = const.Ryd.to('eV', equivalencies=u.spectral())


class CUBA(object):
    """
    Class for CUBA analysis

    JXP on 13 Oct 2015

    Attributes
    ----------
    fits_path : str, optional
      Path to the FITS data files for COS-Halos
    z : ndarray
      Array of z values from CUBA file
    energy : Quantity array
Beispiel #47
0
def test_wave_conversion(in_q, out_u, ans):
    """Full equivalencies test with direct conversion."""
    result = in_q.to(out_u, equivalencies=u.spectral())
    np.testing.assert_allclose(result.value, ans.value)
    assert result.unit == ans.unit
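
The test above expects parametrized inputs. A minimal, hypothetical parametrization (not the one from the original test module) that would exercise it with exact spectral conversions could look like this:

import astropy.units as u
import numpy as np
import pytest

@pytest.mark.parametrize(('in_q', 'out_u', 'ans'), [
    (1.0 * u.m, u.Hz, 299792458.0 * u.Hz),    # wavelength -> frequency (c is exact)
    (2.0 * u.m, u.m ** -1, 0.5 * u.m ** -1),  # wavelength -> wavenumber
    (299792458.0 * u.Hz, u.m, 1.0 * u.m),     # frequency -> wavelength
])
def test_wave_conversion_sketch(in_q, out_u, ans):
    """Hypothetical parametrized version of the direct-conversion test."""
    result = in_q.to(out_u, equivalencies=u.spectral())
    np.testing.assert_allclose(result.value, ans.value)
    assert result.unit == ans.unit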
Beispiel #48
0
def ghz_to_um(ghz_value):
     lam = (ghz_value * u.GHz).to(u.um, equivalencies=u.spectral())
     return lam.value
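
A quick, hedged usage check of the helper above (it relies on `astropy.units` being imported as `u`); the value is approximate:

import astropy.units as u   # required by ghz_to_um above

print(ghz_to_um(345.8))   # ~866.96 micron, roughly the CO J=3-2 wavelength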
Beispiel #49
0
    def __init__(self,fn,x_min=1e-10,x_max=1e10,unit='micron',\
                 min_strength=None,max_exc=None):
        '''
        Creating a LineList object. 
        
        @param fn: The full path and filename to the spectroscopy file. Must 
                   contain either the CDMS or the JPL string.
        @type fn: str
        
        @keyword x_min: Minimum value for reading data in frequency/wavelength
                        If default, the lowest frequency is the one in the file.
                        
                        (default: 1e-10)
        @type x_min: float
        @keyword x_max: Maximum value for reading data in frequency/wavelength
                        If default, the highest frequency is the one in the file
                      
                        (default: 1e10)
        @type x_max: float
        @keyword unit: Unit of x_min/x_max (micron,GHz,MHz,Hz). Any 
                       astropy.units unit works. This can also be a Unit()
                       class object.
                              
                       (default: 'micron')
        @type unit: string                      
        @keyword min_strength: if None all are included, else only lines with 
                               strengths above this value are included 
                               (log scale, nm2*Mhz)
                               
                               (default: None)
        @type min_strength: float
        @keyword max_exc: if None all are included, else only lines with 
                          excitation energies below this value are included 
                          (cm-1)
                          
                          (default: None)
        @type max_exc: float
        
        '''

        self.fn = fn
        self.min_strength = min_strength
        self.max_exc = max_exc

        #-- Figure out if it's a JPL or a CDMS catalog
        if self.fn.upper().find('JPL') != -1:
            self.catstring = 'JPL'
        else:
            self.catstring = 'CDMS'

        #-- Grab the unit
        if isinstance(unit,
                      str) and unit.lower() in ['1 / cm', 'cm-1', 'cm^-1']:
            unit = u.Unit("1 / cm")
        elif isinstance(unit, str):
            unit = getattr(u, unit)

        #-- Convert the units to the default MHz (unit of the files), but
        #   reverse min/max in case a wavelength is given. Wave number/Frequency
        #   are OK
        if unit.is_equivalent(u.micron):
            self.x_min = (x_max * unit).to(u.MHz, equivalencies=u.spectral())
            self.x_max = (x_min * unit).to(u.MHz, equivalencies=u.spectral())
        else:
            self.x_max = (x_max * unit).to(u.MHz, equivalencies=u.spectral())
            self.x_min = (x_min * unit).to(u.MHz, equivalencies=u.spectral())

        #-- Initialise the internal line list and read.
        self.line_list = []
        self.read()
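
As a hedged illustration of the min/max swap performed above for wavelength input: a 100-1000 micron window maps onto roughly 0.3-3 THz, with the smaller wavelength becoming the larger frequency.

import astropy.units as u

lo = (1000.0 * u.micron).to(u.MHz, equivalencies=u.spectral())  # ~3.0e5 MHz (0.3 THz)
hi = (100.0 * u.micron).to(u.MHz, equivalencies=u.spectral())   # ~3.0e6 MHz (3 THz)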
Beispiel #50
0
def um_to_ghz(um_value):
     freq = (um_value * u.um).to(u.GHz, equivalencies=u.spectral())
     return freq.value
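
Together with `ghz_to_um` above, this helper forms an inverse pair; a hedged round-trip check, assuming both helpers and `astropy.units` (as `u`) are available in the same session:

assert abs(um_to_ghz(ghz_to_um(230.538)) - 230.538) < 1e-9   # CO J=2-1 frequency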
Beispiel #51
0
 def energy(self):
     """
     The energy of the spectral axis as a `~astropy.units.Quantity` in units
     of eV.
     """
     return self.spectral_axis.to(u.eV, u.spectral())
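
The property above is equivalent to a direct `to()` call; for example (approximate value):

import astropy.units as u

print((500 * u.nm).to(u.eV, u.spectral()))   # ~2.4797 eV for a 500 nm photon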
Beispiel #52
0
    fh.write("""58
! NUMBER OF ENERGY LEVELS
{nlev}
!LEVEL + ENERGIES(cm^-1) + WEIGHT + J + V
""".format(nlev=(maxv + 1) * (maxj + 1)))

    ii = 1
    leveldict = {}
    levelenergy = {}
    for vv in range(0, maxv + 1):
        if ii in leveldict.values():
            raise ValueError((vv, ))
        leveldict[(vv, 0)] = ii  #1 + maxj*vv
        energy_K = groundstates[(vv, 0)]
        energy = (energy_K * u.K).to(u.eV, u.temperature_energy()).to(
            u.cm**-1, u.spectral()).value
        levelenergy[(vv, 0)] = energy
        degen = 16
        # manually write out the ground state, since it's not in the table
        fh.write("{0:5d} {1:15.9f} {2:5.1f} {4:5d}_{3:d}\n".format(
            ii, energy, degen, 0, vv))
        ii += 1
        for jj in range(1, maxj + 1):
            row = NaCl[(NaCl['vu'] == vv) & (NaCl['Ju'] == jj)]
            energy = row['E_U'].quantity[0].to(
                u.eV, u.temperature_energy()).to(u.cm**-1, u.spectral()).value
            degen = 16 + 32 * jj
            fh.write("{0:5d} {1:15.9f} {2:5.1f} {4:5d}_{3:d}\n".format(
                ii, energy, degen, jj, vv))
            if ii in leveldict.values():
                raise ValueError((vv, jj))
Beispiel #53
0
 def frequency(self):
     """
     The `spectral_axis` as a `~astropy.units.Quantity` in units of GHz
     """
     return self.spectral_axis.to(u.GHz, u.spectral())
Beispiel #54
0
    def from_jplspec(cls, temp_estimate, transition_freq, mol_tag):
        """Returns relevant constants from JPLSpec catalog and energy
        calculations

        Parameters
        ----------
        temp_estimate : `~astropy.units.Quantity`
            Estimated temperature in Kelvins

        transition_freq : `~astropy.units.Quantity`
            Transition frequency in MHz

        mol_tag : int or str
            Molecule identifier. Make sure it is an exclusive identifier,
            although this function can take a regex as your molecule tag,
            it will return an error if there is ambiguity on what the
            molecule of interest is. The function
            `~astroquery.jplspec.JPLSpec.query_lines_async`
            with the option `parse_name_locally=True` can be used to parse
            for the exclusive identifier of a molecule you might be
            interested in. For more information, visit
            `astroquery.jplspec` documentation.

        Returns
        -------
        data : `~sbpy.data.Phys`
            Quantities in the following order from JPL Spectral Molecular
            Catalog:

                * Transition frequency
                * Temperature
                * Integrated line intensity at 300 K
                * Partition function at 300 K
                * Partition function at designated temperature
                * Upper state degeneracy
                * Upper level energy in Joules
                * Lower level energy in Joules
                * Degrees of freedom

        """

        if isinstance(mol_tag, str):
            query = JPLSpec.query_lines_async(
                min_frequency=(transition_freq - (1 * u.GHz)),
                max_frequency=(transition_freq + (1 * u.GHz)),
                molecule=mol_tag,
                parse_name_locally=True,
                get_query_payload=True)

            res = dict(query)
            # python request payloads aren't stable (could be
            # dictionary or list)
            # depending on the version, so make
            # sure to check back from time to time
            if len(res['Mol']) > 1:
                raise JPLSpecQueryFailed(("Ambiguous choice for molecule,\
                    more than one molecule was found for \
                    the given mol_tag. Please refine \
                    your search to one of the following tags\
                    {} by using JPLSpec.get_species_table()\
                    (as shown in JPLSpec documentation)\
                    to parse their names and choose your \
                    molecule of interest, or refine your\
                    regex to be more specific (hint '^name$'\
                    will match 'name' exactly with no\
                    ambiguity).").format(res['Mol']))
            else:
                mol_tag = res['Mol'][0]

        query = JPLSpec.query_lines(
            min_frequency=(transition_freq - (1 * u.GHz)),
            max_frequency=(transition_freq + (1 * u.GHz)),
            molecule=mol_tag)

        freq_list = query['FREQ']

        # when no lines are found, JPLSpec returns a table whose FREQ column
        # starts with the truncated message 'Zero lines we(re found)'
        if freq_list[0] == 'Zero lines we':
            raise JPLSpecQueryFailed(
                ("Zero lines were found by JPLSpec in a +/- 1 GHz "
                 "range from your provided transition frequency for "
                 "molecule tag {}.").format(mol_tag))

        t_freq = min(list(freq_list.quantity),
                     key=lambda x: abs(x - transition_freq))

        data = query[query['FREQ'] == t_freq.value]

        df = int(data['DR'].data)

        lgint = float(data['LGINT'].data)

        lgint = 10**lgint * u.nm * u.nm * u.MHz

        elo = float(data['ELO'].data) / u.cm

        gu = float(data['GUP'].data)

        cat = JPLSpec.get_species_table()

        mol = cat[cat['TAG'] == mol_tag]

        temp_list = cat.meta['Temperature (K)'] * u.K

        part = list(mol['QLOG1', 'QLOG2', 'QLOG3', 'QLOG4', 'QLOG5', 'QLOG6',
                        'QLOG7'][0])

        temp = temp_estimate

        f = np.interp(np.log(temp.value), np.log(temp_list.value[::-1]),
                      np.log(part[::-1]))

        f = np.exp(f)

        partition = 10**(f)

        part300 = 10**(float(mol['QLOG1'].data))

        # yields in 1/cm
        energy = elo + (t_freq.to(1 / u.cm, equivalencies=u.spectral()))

        energy_J = energy.to(u.J, equivalencies=u.spectral())
        elo_J = elo.to(u.J, equivalencies=u.spectral())

        quantities = [
            t_freq, temp, lgint, part300, partition, gu, energy_J, elo_J, df,
            mol_tag
        ]

        names = [
            't_freq', 'temp', 'lgint300', 'partfn300', 'partfn', 'dgup',
            'eup_J', 'elo_J', 'degfreedom', 'mol_tag'
        ]

        # names = ('Transition frequency',
        #          'Temperature',
        #          'Integrated line intensity at 300 K',
        #          'Partition function at 300 K',
        #          'Partition function at designated temperature',
        #          'Upper state degeneracy',
        #          'Upper level energy in Joules',
        #          'Lower level energy in Joules',
        #          'Degrees of freedom', 'Molecule Identifier')

        result = cls.from_dict(dict(zip(names, quantities)))

        return result
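
A hedged usage sketch of the classmethod above (assuming it is exposed as `sbpy.data.Phys.from_jplspec`, as the docstring suggests; the call requires network access to the JPL catalog, and the HCN tag and frequency below are illustrative):

import astropy.units as u
from sbpy.data import Phys

# HCN J=3-2 lies near 265.886 GHz; 27001 is its JPL Spectral Catalog tag
phys = Phys.from_jplspec(47 * u.K, 265.886 * u.GHz, 27001)
print(phys['t_freq'], phys['partfn'])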
Beispiel #55
0
    def query_object_async(self,
                           wavelength_range=None,
                           wavelength_type='',
                           wavelength_accuracy=None,
                           element_spectrum=None,
                           minimal_abundance=None,
                           depl_factor=None,
                           lower_level_energy_range=None,
                           upper_level_energy_range=None,
                           nmax=None,
                           multiplet=None,
                           transitions=None,
                           show_fine_structure=None,
                           show_auto_ionizing_transitions=None,
                           output_columns=('spec', 'type', 'conf', 'term',
                                           'angm', 'prob', 'ener')):
        """
        Returns
        -------
        response : `requests.Response`
            The HTTP response returned from the service.
        """
        if self._default_form_values is None:
            response = self._request("GET",
                                     url=self.FORM_URL,
                                     data={},
                                     timeout=self.TIMEOUT)
            bs = BeautifulSoup(response.text)
            form = bs.find('form')
            self._default_form_values = self._get_default_form_values(form)
        default_values = self._default_form_values
        wltype = (wavelength_type or default_values.get('air', '')).lower()
        if wltype in ('air', 'vacuum'):
            air = wltype.capitalize()
        else:
            raise ValueError('parameter wavelength_type must be either "air" '
                             'or "vacuum".')
        wlrange = wavelength_range or []
        if len(wlrange) not in (0, 2):
            raise ValueError('Length of `wavelength_range` must be 2 or 0, '
                             'but is: {}'.format(len(wlrange)))
        if not is_valid_transitions_param(transitions):
            raise ValueError(
                'Invalid parameter "transitions": {0!r}'.format(transitions))
        if transitions is None:
            _type = self._default_form_values.get('type')
            type2 = self._default_form_values.get('type2')
        else:
            s = str(transitions)
            if len(s.split(',')) > 1:
                _type = 'Sel'
                type2 = s.split(',')
            else:
                _type = s
                type2 = ''
        # convert wavelengths in incoming wavelength range to Angstroms
        wlrange_in_angstroms = (wl.to(u.Angstrom,
                                      equivalencies=u.spectral()).value
                                for wl in wlrange)

        lower_level_erange = lower_level_energy_range
        if lower_level_erange is not None:
            lower_level_erange = lower_level_erange.to(
                u.cm**-1, equivalencies=u.spectral()).value
        upper_level_erange = upper_level_energy_range
        if upper_level_erange is not None:
            upper_level_erange = upper_level_erange.to(
                u.cm**-1, equivalencies=u.spectral()).value
        input = {
            'wavl': '-'.join(map(str, wlrange_in_angstroms)),
            'wave': 'Angstrom',
            'air': air,
            'wacc': wavelength_accuracy,
            'elmion': element_spectrum,
            'abun': minimal_abundance,
            'depl': depl_factor,
            'elo': lower_level_erange,
            'ehi': upper_level_erange,
            'ener': 'cm^-1',
            'nmax': nmax,
            'term': multiplet,
            'type': _type,
            'type2': type2,
            'hydr': show_fine_structure,
            'auto': show_auto_ionizing_transitions,
            'form': output_columns,
            'tptype': 'as_a'
        }
        response = self._submit_form(input)
        return response
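
A hedged calling sketch for the method above (assuming `line_list` is an instance of the class this method belongs to; the parameters mirror the signature shown):

import astropy.units as u

response = line_list.query_object_async(
    wavelength_range=(4000 * u.AA, 7000 * u.AA),
    wavelength_type='vacuum',
    element_spectrum='H I')
print(response.status_code)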
Beispiel #56
0
def test_equiv_compose():
    composed = u.m.compose(equivalencies=u.spectral())
    assert any([u.Hz] == x.bases for x in composed)
Beispiel #57
0
def blackbody_nu(in_x, temperature):
    """Calculate blackbody flux per steradian, :math:`B_{\\nu}(T)`.

    .. note::

        Use `numpy.errstate` to suppress Numpy warnings, if desired.

    .. warning::

        Output values might contain ``nan`` and ``inf``.

    Parameters
    ----------
    in_x : number, array-like, or `~astropy.units.Quantity`
        Frequency, wavelength, or wave number.
        If not a Quantity, it is assumed to be in Hz.

    temperature : number, array-like, or `~astropy.units.Quantity`
        Blackbody temperature.
        If not a Quantity, it is assumed to be in Kelvin.

    Returns
    -------
    flux : `~astropy.units.Quantity`
        Blackbody monochromatic flux in
        :math:`erg \\; cm^{-2} s^{-1} Hz^{-1} sr^{-1}`.

    Raises
    ------
    ValueError
        Invalid temperature.

    ZeroDivisionError
        Wavelength is zero (when converting to frequency).

    """
    # Convert to units for calculations, also force double precision
    with u.add_enabled_equivalencies(u.spectral() + u.temperature()):
        freq = u.Quantity(in_x, u.Hz, dtype=np.float64)
        temp = u.Quantity(temperature, u.K, dtype=np.float64)

    # Check if input values are physically possible
    if np.any(temp < 0):
        raise ValueError(f'Temperature should be positive: {temp}')
    if not np.all(np.isfinite(freq)) or np.any(freq <= 0):
        warnings.warn('Input contains invalid wavelength/frequency value(s)',
                      AstropyUserWarning)

    log_boltz = const.h * freq / (const.k_B * temp)
    boltzm1 = np.expm1(log_boltz)

    if _has_buggy_expm1:
        # Replace incorrect nan results with infs--any result of 'nan' is
        # incorrect unless the input (in log_boltz) happened to be nan to begin
        # with.  (As noted in #4393 ideally this would be replaced by a version
        # of expm1 that doesn't have this bug, rather than fixing incorrect
        # results after the fact...)
        boltzm1_nans = np.isnan(boltzm1)
        if np.any(boltzm1_nans):
            if boltzm1.isscalar and not np.isnan(log_boltz):
                boltzm1 = np.inf
            else:
                boltzm1[np.where(~np.isnan(log_boltz) & boltzm1_nans)] = np.inf

    # Calculate blackbody flux
    bb_nu = (2.0 * const.h * freq ** 3 / (const.c ** 2 * boltzm1))
    flux = bb_nu.to(FNU, u.spectral_density(freq))

    return flux / u.sr  # Add per steradian to output flux unit
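
A hedged usage sketch (the function also relies on module-level names such as `FNU` and `_has_buggy_expm1` that are not shown here); thanks to `u.spectral()`, wavelength input works as well as frequency:

import astropy.units as u

# Planck function for a 10000 K source evaluated at 1 micron
bnu = blackbody_nu(1.0 * u.micron, 1.0e4 * u.K)
print(bnu)   # Quantity in erg / (cm2 Hz s sr), roughly 1e-4 at this wavelength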
Beispiel #58
0
def _(attr, results):
    return set(
        it for it in results if it.wave.wavemax is not None and
        attr.min <= it.wave.wavemax.to(u.angstrom, equivalencies=u.spectral())
        and it.wave.wavemin is not None and
        attr.max >= it.wave.wavemin.to(u.angstrom, equivalencies=u.spectral()))
Beispiel #59
0
def make_obsdata_from_model(model_filename,
                            model_type='tlusty',
                            model_params=None,
                            output_filebase=None,
                            output_path=None,
                            show_plot=False):
    """
    Create the necessary data files (.dat and spectra) from a
    stellar atmosphere model to use as the unreddened
    comparison star in the measure_extinction package

    Parameters
    ----------
    model_filename: string
        name of the file with the stellar atmosphere model spectrum

    model_type: string [default = 'tlusty']
        model type

    model_params: dict of {type: value}
        model parameters
        e.g., {'Teff': 10000.0, 'logg': 4.0, 'Z': 1, 'vturb': 2.0}

    output_filebase: string
        base for the output files
        E.g., output_filebase.dat and output_filebase_stis.fits

    output_path: string
        path to use for output files

    show_plot: boolean
        show a plot of the original and rebinned spectra/photometry
    """

    if output_filebase is None:
        output_filebase = '%s_standard' % (model_filename)

    if output_path is None:
        output_path = '/home/kgordon/Python_git/extstar_data/'

    allowed_model_types = ['tlusty']
    if model_type not in allowed_model_types:
        raise ValueError("%s not an allowed model type" % (model_type))

    # read in the model spectrum
    mspec = ascii.read(model_filename,
                       format='no_header',
                       fast_reader={'exponent_style': 'D'},
                       names=['Freq', 'SFlux'])

    # error in file where the exponent 'D' is missing
    #   means that SFlux is read in as a string
    # solution is to remove the rows with the problem and replace
    #   the fortran 'D' with an 'E' and then convert to floats
    if mspec['SFlux'].dtype != float:
        indxs = [k for k in range(len(mspec)) if 'D' not in mspec['SFlux'][k]]
        if len(indxs) > 0:
            indxs = [k for k in range(len(mspec)) if 'D' in mspec['SFlux'][k]]
            mspec = mspec[indxs]
            new_strs = [cval.replace('D', 'E') for cval in mspec['SFlux'].data]
            mspec['SFlux'] = new_strs
            mspec['SFlux'] = mspec['SFlux'].astype(float)

    # set the units
    mspec['Freq'].unit = u.Hz
    mspec['SFlux'].unit = u.erg / (u.s * u.cm * u.cm * u.Hz)

    # now extract the wave and flux colums
    mfreq = mspec['Freq'].quantity
    mwave = mfreq.to(u.angstrom, equivalencies=u.spectral())
    mflux = mspec['SFlux'].quantity.to(u.erg /
                                       (u.s * u.cm * u.cm * u.angstrom),
                                       equivalencies=u.spectral_density(mfreq))

    # rebin to R=5000 for speed
    #   use a wavelength range that spans FUSE to Spitzer IRS
    wave_r5000, flux_r5000, npts_r5000 = rebin_spectrum(
        mwave.value, mflux.value, 5000, [912., 500000.])

    # save the full spectrum to a binary FITS table
    otable = Table()
    otable['WAVELENGTH'] = Column(wave_r5000, unit=u.angstrom)
    otable['FLUX'] = Column(flux_r5000,
                            unit=u.erg / (u.s * u.cm * u.cm * u.angstrom))
    otable['SIGMA'] = Column(flux_r5000 * 0.0,
                             unit=u.erg / (u.s * u.cm * u.cm * u.angstrom))
    otable['NPTS'] = Column(npts_r5000)
    otable.write("%s/Models/%s_full.fits" % (output_path, output_filebase),
                 overwrite=True)

    # dictionary to save names of spectroscopic filenames
    specinfo = {}

    # create the ultraviolet HST/STIS mock observation
    # first create the spectrum convolved to the STIS low resolution
    # Resolution approximately 1000
    stis_fwhm_pix = 5000. / 1000.
    g = Gaussian1DKernel(stddev=stis_fwhm_pix / 2.355)

    # Convolve data
    nflux = convolve(otable['FLUX'].data, g)

    stis_table = Table()
    stis_table['WAVELENGTH'] = otable['WAVELENGTH']
    stis_table['FLUX'] = nflux
    stis_table['NPTS'] = otable['NPTS']
    stis_table['STAT-ERROR'] = Column(np.full((len(stis_table)), 1.0))
    stis_table['SYS-ERROR'] = otable['SIGMA']
    # UV STIS obs
    rb_stis_uv = merge_stis_obsspec([stis_table], waveregion='UV')
    rb_stis_uv['SIGMA'] = rb_stis_uv['FLUX'] * 0.0
    stis_uv_file = "%s_stis_uv.fits" % (output_filebase)
    rb_stis_uv.write("%s/Models/%s" % (output_path, stis_uv_file),
                     overwrite=True)
    specinfo['STIS'] = stis_uv_file
    # Optical STIS obs
    rb_stis_opt = merge_stis_obsspec([stis_table], waveregion='Opt')
    rb_stis_opt['SIGMA'] = rb_stis_opt['FLUX'] * 0.0
    stis_opt_file = "%s_stis_opt.fits" % (output_filebase)
    rb_stis_opt.write("%s/Models/%s" % (output_path, stis_opt_file),
                      overwrite=True)
    specinfo['STIS_Opt'] = stis_opt_file

    # Spitzer IRS mock observation
    # Resolution approximately 100
    lrs_fwhm_pix = 5000. / 100.
    g = Gaussian1DKernel(stddev=lrs_fwhm_pix / 2.355)

    # Convolve data
    nflux = convolve(otable['FLUX'].data, g)

    lrs_table = Table()
    lrs_table['WAVELENGTH'] = otable['WAVELENGTH']
    lrs_table['FLUX'] = nflux
    lrs_table['NPTS'] = otable['NPTS']
    lrs_table['ERROR'] = Column(np.full((len(lrs_table)), 1.0))

    rb_lrs = merge_irs_obsspec([lrs_table])
    rb_lrs['SIGMA'] = rb_lrs['FLUX'] * 0.0
    lrs_file = "%s_irs.fits" % (output_filebase)
    rb_lrs.write("%s/Models/%s" % (output_path, lrs_file), overwrite=True)
    specinfo['IRS'] = lrs_file

    # compute photometry
    # band_path = "%s/Band_RespCurves/" % output_path
    john_bands = ['U', 'B', 'V', 'R', 'I', 'J', 'H', 'K']
    john_fnames = ["John%s.dat" % (cband) for cband in john_bands]
    hst_bands = [
        'HST_WFC3_UVIS1_F275W', 'HST_WFC3_UVIS1_F336W', 'HST_WFC3_UVIS1_F475W',
        'HST_WFC3_UVIS1_F814W', 'HST_WFC3_IR_F110W', 'HST_WFC3_IR_F160W',
        'HST_ACS_WFC1_F475W', 'HST_ACS_WFC1_F814W', 'HST_WFPC2_4_F170W'
    ]
    hst_fnames = ['']
    # spitzer_bands = ['IRAC1', 'IRAC2', 'IRAC3', 'IRAC4', 'IRS15', 'MIPS24']
    # spitzer_fnames = ["{}/{}.dat".format(band_path, cband)
    #                   for cband in spitzer_bands]
    bands = john_bands + hst_bands
    band_fnames = john_fnames + hst_fnames
    # bands = john_bands
    # band_fnames = john_fnames

    bandinfo = get_phot(wave_r5000, flux_r5000, bands, band_fnames)

    # create the DAT file
    dat_filename = "%s/Models/%s.dat" % (output_path, output_filebase)
    header_info = [
        "# obsdata created from %s model atmosphere" % model_type,
        "# %s" % (output_filebase),
        "# file created by make_obsdata_from_model.py",
        "model_type = %s" % model_type
    ]
    write_dat_file(dat_filename,
                   bandinfo,
                   specinfo,
                   modelparams=model_params,
                   header_info=header_info)

    if show_plot:
        fig, ax = plt.subplots(figsize=(13, 10))
        # indxs, = np.where(npts_r5000 > 0)
        ax.plot(wave_r5000 * 1e-4, flux_r5000, 'b-')
        ax.plot(bandinfo.waves, bandinfo.fluxes, 'ro')

        indxs, = np.where(rb_stis_uv['NPTS'] > 0)
        ax.plot(rb_stis_uv['WAVELENGTH'][indxs].to(u.micron),
                rb_stis_uv['FLUX'][indxs], 'm-')
        indxs, = np.where(rb_stis_opt['NPTS'] > 0)
        ax.plot(rb_stis_opt['WAVELENGTH'][indxs].to(u.micron),
                rb_stis_opt['FLUX'][indxs], 'g-')
        indxs, = np.where(rb_lrs['NPTS'] > 0)
        ax.plot(rb_lrs['WAVELENGTH'][indxs].to(u.micron),
                rb_lrs['FLUX'][indxs], 'c-')
        ax.set_xscale('log')
        ax.set_yscale('log')
        plt.show()
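
A hedged example call (all paths and parameter values below are illustrative only; they are not taken from the original script):

make_obsdata_from_model(
    'T10000g400v2.spec.gz',                  # hypothetical tlusty spectrum file
    model_type='tlusty',
    model_params={'Teff': 10000.0, 'logg': 4.0, 'Z': 1, 'vturb': 2.0},
    output_filebase='hd000000_standard',     # hypothetical
    output_path='./extstar_data',            # hypothetical
    show_plot=False)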
Beispiel #60
0
def download_cutouts(sbid, username, password, destination_dir, num_channels, data_product_sub_type):
    print ("\n\n** Finding images and image cubes for scheduling block {} ... \n\n".format(sbid))

    sbid_multi_channel_query = "SELECT TOP 1000 * FROM ivoa.obscore where obs_id='" + str(sbid) \
                               + "' and dataproduct_subtype='" + str(data_product_sub_type) \
                               + "' and em_xel > 1 and dataproduct_type = 'cube'"

    # create async TAP query and wait for query to complete
    result_file_path = casda.async_tap_query(sbid_multi_channel_query, username, password, destination_dir)
    image_cube_votable = parse(result_file_path, pedantic=False)
    results_array = image_cube_votable.get_table_by_id('results').array

    # 3) For each of the image cubes, query datalink to get the secure datalink details
    print ("\n\n** Retrieving datalink for each image and image cube...\n\n")
    authenticated_id_tokens = []
    for image_cube_result in results_array:
        image_cube_id = image_cube_result['obs_publisher_did'].decode('utf-8')
        async_url, authenticated_id_token = casda.get_service_link_and_id(image_cube_id, username,
                                                                          password,
                                                                          service='cutout_service',
                                                                          destination_dir=destination_dir)
        if authenticated_id_token is not None:
            authenticated_id_tokens.append([authenticated_id_token, image_cube_result])

    if len(authenticated_id_tokens) == 0:
        print ("No image cubes for scheduling_block_id " + str(sbid))
        return 1

    # For each image cube, slice by channels using num_channels specified by the user.
    band_list = []
    job_locations = []
    for entry in authenticated_id_tokens:
        auth_id_token = entry[0]
        ic = entry[1]

        em_xel = ic['em_xel']
        em_min = ic['em_min'] * u.m
        em_max = ic['em_max'] * u.m

        min_freq = em_max.to(u.Hz, equivalencies=u.spectral())
        max_freq = em_min.to(u.Hz, equivalencies=u.spectral())

        step_size = num_channels
        if step_size > em_xel:
            step_size = em_xel

        hz_per_channel = (max_freq - min_freq) / em_xel
        pos = em_xel

        channel_blocks = math.ceil(em_xel / num_channels)

        for b in range(int(channel_blocks)):
            f1 = get_freq_at_pos(pos, min_freq, hz_per_channel)
            pos -= step_size
            f2 = get_freq_at_pos(pos, min_freq, hz_per_channel)
            pos -= 1 # do not overlap channels between image cubes
            wavelength1 = f1.to(u.m, equivalencies=u.spectral())
            wavelength2 = f2.to(u.m, equivalencies=u.spectral())
            band = str(wavelength1.value) + " " + str(wavelength2.value)
            band_list.append(band)

        # create job for given band params
        job_location = casda.create_async_soda_job([auth_id_token])
        casda.add_params_to_async_job(job_location, 'BAND', band_list)
        job_locations.append(job_location)

    # run all jobs and download
    casda.run_async_jobs_and_download(job_locations, destination_dir)

    return 0
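
The script above calls a `get_freq_at_pos` helper that is not shown; a plausible sketch consistent with how it is used (the real CASDA example script may differ):

def get_freq_at_pos(pos, min_freq, hz_per_channel):
    # frequency corresponding to channel index 'pos', counted up from min_freq
    return min_freq + pos * hz_per_channel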