Example #1
def test_grid_to_spec():
    """Test creating spectrum from grid, and related cache."""
    sp = catalog.grid_to_spec('k93models', 6440, 0, 4.3)
    w = sp.waveset
    w_first_50 = w[:50]
    y_first_50 = units.convert_flux(w_first_50, sp(w_first_50), units.FLAM)
    w_last_50 = w[-50:]
    y_last_50 = units.convert_flux(w_last_50, sp(w_last_50), units.FLAM)

    assert 'k93' in sp.meta['expr']
    np.testing.assert_allclose(w_first_50.value, [
        90.90000153, 93.50000763, 96.09999847, 97.70000458, 99.59999847, 102,
        103.80000305, 105.6000061, 107.70000458, 110.40000153, 114,
        117.79999542, 121.30000305, 124.79999542, 127.09999847, 128.40000916,
        130.5, 132.3999939, 133.90000916, 136.6000061, 139.80000305,
        143.30000305, 147.19999695, 151, 155.20001221, 158.80000305,
        162.00001526, 166, 170.30000305, 173.40000916, 176.80000305,
        180.20001221, 181.69999695, 186.1000061, 191, 193.8999939,
        198.40000916, 201.80000305, 205, 210.5, 216.20001221, 219.80000305,
        223, 226.80000305, 230, 234, 240, 246.5, 252.3999939, 256.80001831
    ])
    np.testing.assert_array_equal(y_first_50.value, 0)
    np.testing.assert_allclose(w_last_50.value, [
        83800, 84200, 84600, 85000, 85400, 85800, 86200, 86600, 87000, 87400,
        87800, 88200, 88600, 89000, 89400, 89800, 90200, 90600, 91000, 91400,
        91800, 92200, 92600, 93000, 93400, 93800, 94200, 94600, 95000, 95400,
        95800, 96200, 96600, 97000, 97400, 97800, 98200, 98600, 99000, 99400,
        99800, 100200, 200000, 400000, 600000, 800000, 1000000, 1200000,
        1400000, 1600000
    ])
    np.testing.assert_allclose(y_last_50.value, [
        2.52510792e+03, 2.47883842e+03, 2.43311637e+03, 2.38843415e+03,
        2.34455095e+03, 2.30190141e+03, 2.25982266e+03, 2.21930715e+03,
        2.17950029e+03, 2.14031198e+03, 2.10216378e+03, 2.06411734e+03,
        2.02789000e+03, 1.99191291e+03, 1.95752853e+03, 1.92259620e+03,
        1.88976666e+03, 1.85768178e+03, 1.82475330e+03, 1.79369145e+03,
        1.76356796e+03, 1.73377904e+03, 1.70432192e+03, 1.67572220e+03,
        1.64739969e+03, 1.61997833e+03, 1.59299008e+03, 1.56657219e+03,
        1.54066436e+03, 1.51508799e+03, 1.49065412e+03, 1.46606232e+03,
        1.44255637e+03, 1.41922753e+03, 1.39555249e+03, 1.37360936e+03,
        1.35179525e+03, 1.33041182e+03, 1.30944458e+03, 1.28851215e+03,
        1.26828580e+03, 1.24841065e+03, 8.04744247e+01, 5.03657385e+00,
        9.88851448e-01, 3.10885179e-01, 1.26599425e-01, 6.07383728e-02,
        3.26344365e-02, 1.90505413e-02
    ])

    # Test cache
    key = list(catalog._CACHE.keys())[0]
    assert key.endswith('grid/k93models/catalog.fits')
    assert isinstance(catalog._CACHE[key], list)

    # Reset cache
    catalog.reset_cache()
    assert catalog._CACHE == {}
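A minimal standalone sketch of the API exercised by this test, assuming the usual import paths (stsynphot.catalog for the grid interpolation, synphot.units for flux conversion) and a locally installed 'k93models' grid:

import numpy as np
from synphot import units
from stsynphot import catalog

# Interpolate a Kurucz (1993) atmosphere: Teff = 6440 K, [M/H] = 0, log g = 4.3
sp = catalog.grid_to_spec('k93models', 6440, 0, 4.3)

# Evaluate the spectrum on its native waveset and convert to FLAM
w = sp.waveset
flux_flam = units.convert_flux(w, sp(w), units.FLAM)

# The catalog lookup is cached; the cache can be cleared explicitly
catalog.reset_cache()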
Example #3
    def add_abs_lines(self, center, ew, fwhm):
        """
        TODO: accept different profiles (Lorentz1D, Voigt1D, etc)
        Add an absorption line to a spectrum, with the center, FWHM and
        equivalent width specified by the user. Emission lines are also
        supported if ``ew`` is negative.

        Parameters
        ----------
        center : float, list, np.ndarray, u.Quantity
            The center of the line.

        fwhm : float, list, np.ndarray, u.Quantity
            The FWHM of the line.

        ew : float, list, np.ndarray, u.Quantity
            The equivalent width of the line.

        Returns
        -------
        Spextrum
        """
        if isinstance(center, u.Quantity):
            center = center.to(u.AA).value
        if isinstance(ew, u.Quantity):
            ew = ew.to(u.AA).value
        if isinstance(fwhm, u.Quantity):
            fwhm = fwhm.to(u.AA).value

        centers = np.array([center]).flatten()
        ews = np.array([ew]).flatten()
        fwhms = np.array([fwhm]).flatten()
        sp = self  #  .__class__(modelclass=self.model)

        sp.meta.update({"em_lines": {"center": list(centers),
                                     "ew": list(ews),
                                     "fwhm": list(fwhms)}})

        for c, e, f in zip(centers, ews, fwhms):
            sign = -1 * np.sign(e)  # to keep the convention that EL are negative and ABS are positive
            left, right = c - np.abs(e / 2), c + np.abs(e / 2)
            wavelengths = self.waveset[(self.waveset.value >= left) & (self.waveset.value <= right)]
            fluxes = units.convert_flux(wavelengths=wavelengths,
                                        fluxes=self(wavelengths),
                                        out_flux_unit=units.FLAM)
            flux = np.trapz(fluxes.value, wavelengths.value)
            line = GaussianFlux1D(mean=c, total_flux=sign * flux, fwhm=f)
            lam = line.sampleset(factor_step=0.35)  # bit better than Nyquist
            g_abs = SourceSpectrum(Empirical1D, points=lam, lookup_table=line(lam))

            sp = sp + g_abs

            if (sp(wavelengths).value < 0).any():
                warnings.warn("Warning: Flux<0 for specified EW and FHWM, setting it to Zero")
                waves = sp.waveset[sp(sp.waveset) < 0]
                zero_sp = SourceSpectrum(Empirical1D, points=waves, lookup_table=-1 * sp(waves).value)
                sp = sp + zero_sp  # Spextrum(modelclass=sp.model + zero_sp.model)

        sp = self._restore_attr(Spextrum(modelclass=sp))

        return sp
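A hedged usage sketch for the method above; it assumes an existing Spextrum instance named sp (how it is constructed is outside this excerpt):

import astropy.units as u

# Add a 1.5 Angstrom equivalent-width absorption line at 6563 Angstrom
# with a 5 Angstrom FWHM; a negative ew would produce an emission line.
sp_with_line = sp.add_abs_lines(center=6563 * u.AA, ew=1.5 * u.AA, fwhm=5 * u.AA)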
Example #4
    def __init__(self, mag, magsystem='VEGAMAG', filt_range=None, sed=None, temp=5778):

        # Define the magnitude system.
        if filt_range is None:
            filt_range = [5000, 6000]
        if magsystem.lower() == 'vegamag':
            sys = units.VEGAMAG
        elif magsystem.lower() == 'stmag':
            sys = u.STmag
        elif magsystem.lower() == 'abnu':
            sys = u.ABmag
        else:
            raise ValueError('Unknown magnitude system: {}'.format(magsystem))

        # Get Vega's spectrum.
        vega = SourceSpectrum.from_vega()

        # Set attributes.
        self.mag = mag
        self.SED = sed
        self.temp = temp
        self.inputFlux = units.convert_flux(filt_range, mag * sys, units.FLAM, vegaspec=vega)
        self.range = filt_range
        self.F_lambda = self.starF_lambda()
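The core of this constructor is the magnitude-to-flux conversion; a minimal sketch of that call alone, with illustrative values:

from synphot import SourceSpectrum, units

vega = SourceSpectrum.from_vega()

# Convert a Vega magnitude of 10 to FLAM at the edges of a 5000-6000 Angstrom range
flux = units.convert_flux([5000, 6000], 10 * units.VEGAMAG, units.FLAM,
                          vegaspec=vega)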
Example #5
    def synphot_calcs(self, bpfile):
        """
        Calculate zeropoints for a given bandpass in several
        photometric systems

        Arguments:
        ----------
        bpfile -- Text file containing the throughput table for
                  a single bandpass

        Returns:
        --------
        Bandpass zeropoint value in Vega mags, AB mags, ST mags,
        photflam, photfnu, and megajansky. Also returns pivot wavelength
        """
        # Define wavelength list to use
        wave_bins = np.arange(0.5, 5, 0.1) * u.micron

        # Use refactored synphot to calculate zeropoints
        orig_bp = SpectralElement.from_file(bpfile)

        # Now reduce the PCE curve by a factor equal to
        # the gain, in order to get the output to translate
        # from ADU/sec rather than e/sec
        bp = orig_bp / self.gain
        #bp.model.lookup_table = bp.model.lookup_table / self.gain

        photflam = bp.unit_response(self.telescope_area)
        photplam = bp.pivot()
        st_zpt = -2.5 * np.log10(photflam.value) - 21.1
        ab_zpt = (-2.5 * np.log10(photflam.value) - 21.1 -
                  5 * np.log10(photplam.value) + 18.6921)
        #mjy_zpt = units.convert_flux(photplam,photflam, 'MJy')
        mjy_zpt = photflam.to(u.MJy, u.spectral_density(photplam))
        obs = Observation(self.vega, bp, binset=wave_bins)
        vega_zpt = -obs.effstim(flux_unit='obmag', area=self.telescope_area)
        photfnu = units.convert_flux(photplam, photflam, units.FNU)
        return (vega_zpt.value, ab_zpt, st_zpt, photflam.value, photfnu.value,
                mjy_zpt.value, photplam.value)
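A condensed, self-contained sketch of the zeropoint arithmetic used above, assuming a Johnson V bandpass and an HST-like collecting area (both illustrative):

import numpy as np
import astropy.units as u
from synphot import SpectralElement, units

bp = SpectralElement.from_filter('johnson_v')
area = 45238.93416 * u.cm ** 2          # illustrative telescope area in cm^2

photflam = bp.unit_response(area)       # FLAM per count/s
photplam = bp.pivot()                   # pivot wavelength in Angstrom
st_zpt = -2.5 * np.log10(photflam.value) - 21.1
ab_zpt = st_zpt - 5 * np.log10(photplam.value) + 18.6921
photfnu = units.convert_flux(photplam, photflam, units.FNU)
mjy_zpt = photflam.to(u.MJy, u.spectral_density(photplam))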
Example #6
    def grism_cal(self, throughput_files):
        """
        Calculate flux cal outputs for grism mode. Input files should 
        contain the total system throughput for the grism plus crossing 
        filter. The input file list should contain throughputs for all 
        orders of just a single grism+crossing filter.

        Arguments:
        ----------
        throughput_files -- list of text files containing filter throughputs

        Returns:
        --------
        Astropy table containing flux calibration information for grism mode
        """
        print('need to update this for synphot')

        allrows = []
        filters = np.array([])
        pupils = np.array([])
        orders = np.array([])
        fluxes = np.array([])
        uncs = np.array([])
        nelems = np.array([])
        waves = np.array([])
        resps = np.array([])
        #waves = np.expand_dims(waves,axis=0)
        #resps = np.expand_dims(resps,axis=0)

        for file in throughput_files:
            # Read in necessary file info
            with fits.open(file) as h:
                cname = h[0].header['COMPNAME']

            junk, filter, mod = cname.split('_')
            mod = mod[-1].upper()
            pupil = 'GRISMR'
            print('Eventually need to be smarter about pupil value!!!!!')

            print("opening {}".format(file))
            bp = SpectralElement.from_file(file)

            # Now reduce the PCE curve by a factor equal to
            # the gain, in order to get the output to translate
            # from ADU/sec rather than from e/sec
            bp.model.lookup_table = bp.model.lookup_table / self.gain

            # Pivot wavelength in microns
            pwave = bp.pivot()
            #pwave = pwave.to(u.micron)

            # Need to find the order here, so that the proper
            # dispersion value is used.
            idx = file.find('order')
            order = file[idx + 5]
            dispersion = self.disp[order]

            #find pivot? mean? center? effective? wavelength
            #denom = self.h * self.c / eff_lambda
            #countratedensity = self.telescope_area * tp['Throughput'] * vegaflux / denom

            #countratedensityflux,countratedensitywave,pwave = self.getcountsflux(bp)
            #totalrate = self.integrate(countratedensity)

            #Is the variable below used at all?
            #grism_total_rate += totalrate

            obs = self.getcountsflux(bp)

            # From observation, get the flux in counts
            countratedensityflux = obs(obs.binset,
                                       flux_unit='count',
                                       area=self.telescope_area)

            print('countratedensityflux', countratedensityflux)

            # Multiply by dispersion
            countratedensityflux *= dispersion

            # Countrate density at the pivot wavelength
            print('pivot', pwave.value)
            print('countratedensityflux*dispersion', countratedensityflux)
            print('obs.binset', obs.binset)
            #cnorm = np.interp(pwave.value,obs.binset,countratedensityflux)
            cnorm = obs(pwave, flux_unit='count',
                        area=self.telescope_area) * dispersion
            print('cnorm', cnorm)

            # Vega flux value at pivot wavelength, convert to Jansky
            vega_pivot = self.vega(pwave)
            j0 = units.convert_flux(pwave, vega_pivot, 'MJy')

            # Now we need to divide by the area of a pixel in
            # sterradians, so we can eventually get MJy/str per ADU/sec
            j0 /= self.pixel_area_sr

            #vega_pivotorig = np.interp(pwave.value,self.vega.waveset,self.vega(self.vega.waveset))
            #print("units of vega flux are: {}".format(self.vega(self.vega.waveset).unit))
            #j0 = self.toJansky(vega_pivot.value,pwave.value) / 1.e6

            # Ratio of Vega flux to the countrate density at pivot wavelength
            ratio = j0 / cnorm

            print('')
            print('PHOTMJSR', ratio)
            print(
                'NIRISS values are 0.01 to 2.0. I would expect ours to be similar!'
            )
            print('')

            # Define a set of wavelengths to evaluate relative fluxcal
            goodpts = bp(bp.waveset) > 0.0001
            minwave = np.min(bp.waveset[goodpts])
            maxwave = np.max(bp.waveset[goodpts])
            w = minwave.value
            allwaves = np.array([w])
            while (w < maxwave.value):
                delt = w / (abs(int(order)) * self.resolving_power)
                allwaves = np.append(allwaves, w + delt)
                w += delt

            # Calculate Vega flux at each wavelength
            nelem = len(allwaves)
            allfluxes = self.vega(allwaves)
            alljansky = units.convert_flux(allwaves, allfluxes, 'MJy')
            allcounts = np.interp(allwaves, obs.binset, countratedensityflux)
            #allfluxes = np.interp(allwaves,self.vega.waveset,self.vega(self.vega.waveset))
            #alljansky = self.toJansky(allfluxes,allwaves) / 1.e6
            # Normalize the Vega counts at all wavelengths by the value at the pivot
            # wavelength
            allratio = alljansky.value / allcounts / ratio / self.pixel_area_sr

            #print(np.min(allwaves),np.max(allwaves),allwaves[0],allwaves[-1])
            #f,a = plt.subplots()
            #a.plot(allwaves,allratio,color='red')
            #a.set_xlabel('Wavelength ({})'.format(bp.waveset.unit))
            #a.set_ylabel('Normalized Ratio (MJy/str per count/sec)')
            #a.set_title("{}, Order {}".format(filter.upper(),order))
            #f.savefig(os.path.split(file)[-1]+'_allratios.pdf')

            #f,a = plt.subplots()
            #a.plot(allwaves,alljansky,color='red')
            #a.set_xlabel('Wavelength ({})'.format(bp.waveset.unit))
            #a.set_ylabel('Vega Flux (MJy)')
            #a.set_title("{}, Order {}".format(filter.upper(),order))
            #f.savefig(os.path.split(file)[-1]+'_alljansky.pdf')

            #f,a = plt.subplots()
            #a.plot(allwaves,allcounts,color='red')
            #a.set_xlabel('Wavelength ({})'.format(bp.waveset.unit))
            #a.set_ylabel('Counts (count/sec)')
            #a.set_title("{}, Order {}".format(filter.upper(),order))
            #f.savefig(os.path.split(file)[-1]+'_allcounts.pdf')

            if np.min(allcounts) < 0:
                print('WARNING: counts<0 for {},{}'.format(
                    filter.upper(), order))
                raise ValueError('Negative count rates encountered')

            if '444' in filter:
                print("")
                print('444!!!!!!')
                print(allratio.value)
                print(len(allratio.value))
                print(type(allratio.value))
                #bad = allratio.value > 1e5
                bad = np.where(allratio.value > 1e5)
                print(len(bad[0]))
                print(type(bad[0]))
                print(alljansky.value[bad[0][0:10]])
                print(allcounts[bad[0][0:10]])
                print(ratio)
                print(self.str_per_detector)
                print(allratio.value[bad[0][0:10]])
                print(allwaves)
                raise RuntimeError('Stopping: inspect the F444W ratio values printed above')

            # Pad allwaves and allratio to be the length needed by the
            # photom reference file. Also convert wavelengths to microns.
            allwaves = np.pad(allwaves / 1.e4, (0, self.maxlen - nelem),
                              'constant')
            allratio = np.pad(allratio, (0, self.maxlen - nelem), 'constant')

            print(allwaves[100:105])
            print(allcounts[100:105])
            print(alljansky[100:105])
            print(alljansky[100:105] / allcounts[100:105])
            print(ratio)
            print(allratio[100:105])

            print("******************")
            print("******************")
            print("******************")
            print("need real conversion factor and uncertainty!!!")
            conversion_factor = 1000.
            uncertainty = ratio * .1
            #row = [filter,pupil,np.int(order),ratio*conversion_factor,uncertainty,nelem,allwaves,allratio]

            # Populate lists that will be used to create the final table
            filters = np.append(filters, filter)
            pupils = np.append(pupils, pupil)
            orders = np.append(orders, int(order))
            fluxes = np.append(fluxes, ratio * conversion_factor)
            uncs = np.append(uncs, uncertainty)
            nelems = np.append(nelems, nelem)

            print(allwaves.shape)

            if len(waves) == 0:
                waves = allwaves
                waves = np.expand_dims(waves, axis=0)
                resps = allratio
                resps = np.expand_dims(resps, axis=0)
            else:
                waves = np.append(waves,
                                  np.expand_dims(allwaves, axis=0),
                                  axis=0)
                resps = np.append(resps,
                                  np.expand_dims(allratio, axis=0),
                                  axis=0)

        print('waves.shape', waves.shape)
        print('resps.shape', resps.shape)

        print(filters)
        print(pupils)
        print(orders)
        print(fluxes)
        print(uncs)
        print(nelems)
        print(waves[0, 40:45])
        print(resps[0, 40:45])

        # Zip all data together
        alldata = np.array(list(zip(np.array(filters), np.array(pupils),
                                    np.array(fluxes), np.array(uncs),
                                    np.array(orders), np.array(nelems),
                                    np.array(waves), np.array(resps))),
                           dtype=self.model.phot_table.dtype)

        return alldata
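Isolated from the loop above, the PHOTMJSR-style conversion (Vega flux at the pivot wavelength, per count-rate density, per pixel solid angle) can be sketched as follows; the numerical values are illustrative only:

from synphot import SourceSpectrum, units

vega = SourceSpectrum.from_vega()
pwave = 25000.0            # pivot wavelength in Angstrom, illustrative
pixel_area_sr = 2.29e-14   # pixel solid angle in steradian, illustrative
cnorm = 500.0              # count-rate density at the pivot, illustrative

vega_pivot = vega(pwave)                                  # PHOTLAM
j0 = units.convert_flux(pwave, vega_pivot, 'MJy') / pixel_area_sr
photmjsr = j0 / cnorm                                     # MJy/sr per count/s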
Example #7
    def p_functioncall(self, tree):
        # Where all the real interpreter action is.
        # Note that things that should only be done at the top level
        # are performed in :func:`interpret` defined below.
        """ V ::= function_call ( V LPAREN V RPAREN ) """
        if not isinstance(tree[2].value, list):
            args = [tree[2].value]
        else:
            args = tree[2].value

        fname = tree[0].value
        metadata = {'expr': '{0}{1}'.format(fname, tuple(args))}

        if fname not in _SYFUNCTIONS:
            log.error('Unknown function: {0}'.format(fname))
            self.error(fname)

        else:
            # Constant spectrum
            if fname == 'unit':
                if args[1] not in _SYFORMS:
                    log.error('Unrecognized unit: {0}'.format(args[1]))
                    self.error(fname)
                try:
                    fluxunit = units.validate_unit(args[1])
                    tree.value = SourceSpectrum(ConstFlux1D,
                                                amplitude=args[0] * fluxunit,
                                                meta=metadata)
                except NotImplementedError as e:
                    log.error(str(e))
                    self.error(fname)

            # Black body
            elif fname == 'bb':
                tree.value = SourceSpectrum(BlackBodyNorm1D,
                                            temperature=args[0])

            # Power law
            elif fname == 'pl':
                if args[2] not in _SYFORMS:
                    log.error('Unrecognized unit: {0}'.format(args[2]))
                    self.error(fname)
                try:
                    fluxunit = units.validate_unit(args[2])
                    tree.value = SourceSpectrum(PowerLawFlux1D,
                                                amplitude=1 * fluxunit,
                                                x_0=args[0],
                                                alpha=-args[1],
                                                meta=metadata)
                except (synexceptions.SynphotError, NotImplementedError) as e:
                    log.error(str(e))
                    self.error(fname)

            # Box throughput
            elif fname == 'box':
                tree.value = SpectralElement(Box1D,
                                             amplitude=1,
                                             x_0=args[0],
                                             width=args[1],
                                             meta=metadata)

            # Source spectrum from file
            elif fname == 'spec':
                tree.value = SourceSpectrum.from_file(irafconvert(args[0]))
                tree.value.meta.update(metadata)

            # Passband
            elif fname == 'band':
                tree.value = spectrum.band(tree[2].svalue)
                tree.value.meta.update(metadata)

            # Gaussian emission line
            elif fname == 'em':
                if args[3] not in _SYFORMS:
                    log.error('Unrecognized unit: {0}'.format(args[3]))
                    self.error(fname)
                x0 = args[0]
                fluxunit = units.validate_unit(args[3])
                totflux = units.convert_flux(x0, args[2] * fluxunit,
                                             units.PHOTLAM).value
                tree.value = SourceSpectrum(GaussianFlux1D,
                                            total_flux=totflux,
                                            mean=x0,
                                            fwhm=args[1])

            # Catalog interpolation
            elif fname == 'icat':
                tree.value = grid_to_spec(*args)

            # Renormalize source spectrum
            elif fname == 'rn':
                sp = args[0]
                bp = args[1]
                fluxunit = units.validate_unit(args[3])
                rnval = args[2] * fluxunit

                if not isinstance(sp, SourceSpectrum):
                    sp = SourceSpectrum.from_file(irafconvert(sp))

                if not isinstance(bp, SpectralElement):
                    bp = SpectralElement.from_file(irafconvert(bp))

                # Force the renormalization in the case of partial overlap,
                # but let the exception propagate if the spectrum and bandpass
                # are entirely disjoint. Less robust, but duplicates the
                # behavior of IRAF SYNPHOT.
                try:
                    tree.value = sp.normalize(rnval,
                                              band=bp,
                                              area=conf.area,
                                              vegaspec=spectrum.Vega)
                except synexceptions.PartialOverlap:
                    tree.value = sp.normalize(rnval,
                                              band=bp,
                                              area=conf.area,
                                              vegaspec=spectrum.Vega,
                                              force=True)
                    tree.value.warnings = {
                        'force_renorm': ('Renormalization exceeds the limit '
                                         'of the specified passband.')
                    }
                tree.value.meta.update(metadata)

            # Redshift source spectrum (flat spectrum if fails)
            elif fname == 'z':
                sp = args[0]

                # ETC generates junk (i.e., 'null') sometimes
                if isinstance(sp, str) and sp != 'null':
                    sp = SourceSpectrum.from_file(irafconvert(sp))

                if isinstance(sp, SourceSpectrum):
                    tree.value = sp
                    tree.value.z = args[1]
                else:
                    tree.value = SourceSpectrum(ConstFlux1D, amplitude=1)

                tree.value.meta.update(metadata)

            # Extinction
            elif fname == 'ebmvx':
                try:
                    tree.value = spectrum.ebmvx(args[1], args[0])
                except synexceptions.SynphotError as e:
                    log.error(str(e))
                    self.error(fname)
                tree.value.meta.update(metadata)

            # Default
            else:
                tree.value = ('would call {0} with the following args: '
                              '{1}'.format(fname, repr(args)))
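The helpers referenced here (grid_to_spec, spectrum.band, irafconvert) suggest this method is the interpreter behind stsynphot's classic-language parser; if so, a typical entry point looks like the sketch below (the expression string is illustrative):

from stsynphot import spparser

# 'rn', 'icat' and 'band' map onto branches of p_functioncall above
sp = spparser.parse_spec('rn(icat(k93models,6440,0,4.3),band(johnson,v),17,vegamag)')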
Example #8
def flux2ABmag(wave, flux, band, mag, plot=False):
    """
    Scale a spectrum to a given magnitude in a band and return it in AB mag.

    "band" should be either "sdss_g" or "V" for the moment.
    The input "wave" should be in nm.
    """

    # Define bandpass:
    if band == "sdss_g":
        bp = SpectralElement.from_file('../utility/photometry/g.dat')
        magvega = -0.08
    elif band == "V":
        bp = SpectralElement.from_filter('johnson_v')
        magvega = 0.03
    else:
        raise ValueError('Unsupported band: {}'.format(band))

    # SDSS g-band magnitude of Vega
    #gmagvega=-0.08

    # Read the spectrum of Vega
    sp_vega = SourceSpectrum.from_vega()
    wv_vega = sp_vega.waveset
    fl_vega = sp_vega(wv_vega, flux_unit=units.FLAM)

    ## Convolve with the bandpass
    obs_vega = Observation(sp_vega, bp)

    ## Integrated flux
    fluxtot_vega = obs_vega.integrate()

    # Read the synthetic spectrum
    sp = SourceSpectrum(Empirical1D, points=wave * 10.,
                        lookup_table=flux * units.FLAM)
    wv = sp.waveset
    fl = sp(wv, flux_unit=units.FLAM)

    ## Convolve with the bandpass
    obs = Observation(sp, bp, force='extrap')

    ## Integrated g-band flux
    fluxtot = obs.integrate()

    # Scaling factor to make the flux compatible with the desired magnitude
    dm = mag - magvega
    const = fluxtot_vega * 10**(-0.4 * dm) / fluxtot

    # Scale the original flux by const
    fl_scale = const * fl

    # Convert to ABmag
    fl_scale_mag = units.convert_flux(wv, fl_scale, out_flux_unit='abmag')

    sp_scaled_mag = SourceSpectrum(Empirical1D,
                                   points=wv,
                                   lookup_table=fl_scale_mag)

    # Plot
    if plot:
        fig, ax = plt.subplots(2, 1, sharex=True)
        ax[0].plot(wave * 10., flux, linestyle="-", marker="")
        ax[1].plot(wv[1:-1],
                   sp_scaled_mag(wv, flux_unit=u.ABmag)[1:-1],
                   linestyle="-",
                   marker="")
        ax[1].set_xlabel("Angstrom")
        ax[0].set_ylabel(r"$F_{\lambda}$")
        ax[1].set_ylabel("ABmag")
        ax[1].set_ylim(mag + 3., mag - 2.0)
        #plt.show()

    return (wv[1:-1] / 10., sp_scaled_mag(wv, flux_unit=u.ABmag)[1:-1])
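A brief usage sketch for flux2ABmag, with a synthetic flat spectrum (wavelengths in nm and flux in FLAM, as noted in the function above):

import numpy as np

wave_nm = np.linspace(400.0, 700.0, 1000)      # nm
flux = np.full_like(wave_nm, 1e-13)            # flat F_lambda, illustrative
wv_out, abmag = flux2ABmag(wave_nm, flux, band="V", mag=18.0, plot=False)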