Example #1
    def show_plot(self, output_file=None):
        """
        Creates the plot of the black body or multiple black bodies.
        Saves it under the specified name.
        """
        fig = plt.figure()
        ax = fig.add_subplot(111)
        adjustprops = dict(left=0.19,bottom=0.15,right=0.92,top=0.9,wspace=0.,hspace=0.2)
        fig.subplots_adjust(**adjustprops)

        # Setting the labels for the X and Y axis
        ax.set_xlabel(r'$Wavelength \, [\AA]$', size=15, labelpad=20)
        ax.set_ylabel(r'$Flux \, [\mathrm{erg\, \AA^{-1}\, cm^{-2}\, s^{-1}\, sr^{-1}}]$', size=15)

        ax.minorticks_on()
        ax.grid()

        # We will create the plot for wavelengths of 1nm up to 1um
        wavelength = np.arange(10, 10000, 10)

        with np.errstate(all='ignore'):
            for bb in self.bbs:
                ax.plot(wavelength, blackbody_lambda(wavelength, bb.T).value, color=np.random.rand(3), linewidth=3, linestyle="-", label=bb.T)
        plt.legend()

        plt.title('Flux vs Wavelength of BlackBody at %s K' % self.temps, y=1.04)
        fig.show()
        if output_file:
            plt.savefig("plots/"+output_file)
Example #2
    def evaluate(self, x, temp, norm):
        """
        Evaluate the blackbody for a given temperature over a wavelength range.

        Parameters
        ----------
        x: numpy.ndarray
            The wavelengths to evaluate over.

        temp: float
            The temperature to evaluate at.

        norm: float
            The normalization factor.

        Returns
        -------
        blackbody_flux: numpy.ndarray
            The blackbody flux.
        """
        # x is passed as a bare numpy array; must be
        # converted back to Quantity before calling
        # astropy's black body functions.
        _x_u = x * self.wave.unit

        # convert result of the Planck function to
        # flux density in the same units as the data.
        _flux = (blackbody_lambda(_x_u, temp) * u.sr).to(self.flux.unit)

        # normalize and return just the values,
        # to conform to the Model API.
        return (norm * _flux).value
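
The evaluate() above strips and restores units because astropy's fitters hand the model bare ndarrays. A self-contained sketch of the same round trip, using the older astropy.analytic_functions module these examples target and assumed wavelength/flux units:

import numpy as np
from astropy import units as u
from astropy.analytic_functions import blackbody_lambda

# hypothetical data axes standing in for self.wave / self.flux above
wave = np.linspace(3000, 9000, 100) * u.AA
flux_unit = u.erg / (u.AA * u.cm ** 2 * u.s)

def evaluate(x, temp, norm):
    x_u = x * wave.unit                                      # bare array -> Quantity
    f = (blackbody_lambda(x_u, temp) * u.sr).to(flux_unit)   # drop the per-steradian
    return (norm * f).value                                  # bare array back out

model_flux = evaluate(wave.value, 6000 * u.K, 1e-10)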
Example #3
    def show_plot(self, output_file=None):
        """
        Creates a plot of Flux vs Wavelength using matplotlib.
        Wavelength range is set from 1nm to 1um.
        If an output file name is passed, the figure is saved under the
        plots/ directory with that name.
        """
        fig = plt.figure()
        ax = fig.add_subplot(111)
        adjustprops = dict(left=0.19,bottom=0.15,right=0.92,top=0.9,wspace=0.,hspace=0.2)
        fig.subplots_adjust(**adjustprops)

        ax.set_xlabel(r'$Wavelength \, [\AA]$', size=15, labelpad=20)
        ax.set_ylabel(r'$Flux \, [\mathrm{erg\, \AA^{-1}\, cm^{-2}\, s^{-1}\, sr^{-1}}]$', size=15)

        ax.minorticks_on()
        ax.grid()

        # We will create the plot for wavelengths of 1nm up to 1um
        wavelength = np.arange(10, 10000, 10)

        spectrum = blackbody_lambda(wavelength, self.T).value

        ax.plot(wavelength, spectrum, color="red", linewidth=3, linestyle="-")
        plt.title('Flux vs Wavelength of BlackBody at %d K' % self.T.value)
        fig.show()
        if output_file:
            plt.savefig("plots/"+output_file)
Example #4
    def _generate_photosphere_part(self):
        """generate the photospheric input spectrum part of the Kromer plot"""

        Lph = (af.blackbody_lambda(self.mdl.spectrum_wave, self.mdl.t_inner) *
               4 * np.pi**2 * self.mdl.R_phot**2 * units.sr).to("erg / AA / s")

        self.ax.plot(self.mdl.spectrum_wave, Lph, color="red", ls="dashed")
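
A standalone sketch of the same conversion with hypothetical values in place of the model attributes; the factor 4*pi**2*R**2 turns the per-steradian Planck intensity into a photospheric luminosity density in erg / AA / s:

import numpy as np
from astropy import units
from astropy.analytic_functions import blackbody_lambda

spectrum_wave = np.linspace(500, 20000, 2000) * units.AA   # assumed wavelength grid
t_inner = 1.0e4 * units.K                                  # assumed photospheric temperature
R_phot = 1.0e15 * units.cm                                 # assumed photospheric radius

# B_lambda [erg / (AA cm2 s sr)] * sr * cm2  ->  erg / (AA s)
Lph = (blackbody_lambda(spectrum_wave, t_inner) *
       4 * np.pi ** 2 * R_phot ** 2 * units.sr).to("erg / AA / s")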
Example #5
 def obs_flux(self, wavelength, T=None):
     """
     Calculates the observed flux for a certain wavelength or wavelength range.
     """
     if T:
         if not isinstance(T, u.Quantity):
             T = u.Quantity(T, u.K)
         self.T = T
     return blackbody_lambda(wavelength, self.T) * (R_sun / (10 * u.parsec).to(u.m))**2
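
The (R_sun / 10 pc)**2 factor dilutes the surface intensity to the flux seen at the standard 10 pc distance. A standalone sketch with an assumed solar effective temperature:

from astropy import units as u
from astropy.constants import R_sun
from astropy.analytic_functions import blackbody_lambda

wavelength = 5500 * u.AA
T = 5778 * u.K                                       # assumed effective temperature
surface_intensity = blackbody_lambda(wavelength, T)  # erg / (AA cm2 s sr)
dilution = (R_sun / (10 * u.parsec).to(u.m)) ** 2    # dimensionless (R/d)**2
observed_flux = surface_intensity * dilution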
Example #6
def flux_filter(central_wavelength, bandwidth, temperature):

    domain = (np.logspace(np.log10(central_wavelength - bandwidth / 2),
                          np.log10(central_wavelength + bandwidth / 2),
                          num=100) * u.AA).to(u.meter)
    flux = blackbody_lambda(domain, temperature)
    flux = flux.to(SI)
    with np.errstate(all='ignore'):
        flux_earth = flux * (R / r) ** 2  # flux received on Earth
    area = trapz(flux_earth.value, domain.value) * np.pi * (u.W / u.meter ** 2)

    return area
Example #7
    def fluxes(self, start=0, wavemax=-1):
        if(wavemax < 0):
            wavemax = (const.b_wien / self.kelvin).to(u.AA)  # Wien's displacement law
        else:
            wavemax = wavemax * u.AA

        waveset = np.logspace(start, np.log10(wavemax.value + 10 * wavemax.value), num=1000) * u.AA
        with np.errstate(all='ignore'):
            flux = blackbody_lambda(waveset, self.kelvin)
        return waveset, flux
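
Wien's displacement law used above gives the wavelength of peak emission, lambda_max = b_wien / T; for example:

from astropy import constants as const
from astropy import units as u

print((const.b_wien / (6000 * u.K)).to(u.AA))   # ~4830 AA for a 6000 K source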
Example #8
    def fluxes(self, start=0, wavemax=-1):
        if (wavemax < 0):
            wavemax = (const.b_wien / self.kelvin).to(
                u.AA)  # Wien's displacement law
        else:
            wavemax = wavemax * u.AA

        waveset = np.logspace(start,
                              np.log10(wavemax.value + 10 * wavemax.value),
                              num=1000) * u.AA
        with np.errstate(all='ignore'):
            flux = blackbody_lambda(waveset, self.kelvin)
        return waveset, flux
Example #9
    def evaluate(self, x, temp, norm):
        # x is passed as a bare numpy array; must be
        # converted back to Quantity before calling
        # astropy's black body functions.
        _x_u = x * self.wave.unit

        # convert result of the Planck function to
        # flux density in the same units as the data.
        _flux = (blackbody_lambda(_x_u, temp) * u.sr).to(self.flux.unit)

        # normalize and return just the values,
        # to conform to the Model API.
        return (norm * _flux).value
Example #10
    def evaluate(self, x, temp, norm):
        # x is passed as a bare numpy array; must be
        # converted back to Quantity before calling
        # astropy's black body functions.
        _x_u = x * self.wave.unit

        # convert result of the Planck function to
        # flux density in the same units as the data.
        _flux = (blackbody_lambda(_x_u, temp) * u.sr).to(self.flux.unit)

        # normalize and return just the values,
        # to conform to the Model API.
        return (norm * _flux).value
Example #11
def light_curve(planet,
                temp_map,
                spectral_array,
                parameters,
                use_tidal_distortion=False,
                nbins=None):
    
    prot, trad_EQ, Tn, albedo = parameters
    prot, trad_EQ, Tn, albedo = N.meshgrid(prot, trad_EQ, Tn, albedo)

    if use_tidal_distortion:
        tidal_axes = planet.tidal_deformation_axes()
        ellipticity = N.sqrt(1-(tidal_axes['y']/tidal_axes['x'])**2)
    else:
        ellipticity = 0
    
    # The spectral array will have two columns, one for the wavelengths and one for the spectral response in units proportional to energy (i.e. electron count "weighted" by the wavelength).
    num_waves = N.shape(spectral_array)[0]
    if (nbins is None) or (nbins == num_waves):
        wavelengths = N.array(spectral_array['wavelength']) * U.um
        responses = N.array(spectral_array['weighted_spectrum']) / U.eV
    else:
        # Integer bin edges are required for slicing, hence the floor division.
        wavelengths = N.array([N.average(spectral_array['wavelength'][i*num_waves//nbins:(i+1)*num_waves//nbins]) for i in range(nbins)]) * U.um
        responses = N.array([N.average(spectral_array['weighted_spectrum'][i*num_waves//nbins:(i+1)*num_waves//nbins]) for i in range(nbins)]) / U.eV

    #Mask for the times where the planet is in transit.
    transit_flag = planet.calculate_occultation(planet.times)['transit flag']

    #Calculate the planet-star cross-sectional area ratio.
    area_ratio = (planet.rp/planet.R)**2
    
    #For each wavelength bin in the instrumental response curve, calculate the corresponding ratio of planet-star flux. FUN TIP: some Numpy methods like "einsum", while useful, will REMOVE Astropy units from your arrays, so be careful to preserve them using the .unit method to re-unit your data.
    for k, (l, E) in enumerate(zip(wavelengths, responses)):
        
        specific_luminosity = l * observable_luminosity(planet, temp_map, l, prot, ellipticity)['integrated'] * E
        if k == 0:
            planet_luminosity = N.einsum('t...->...t', specific_luminosity)
        else:
            planet_luminosity += N.einsum('t...->...t', specific_luminosity)
            
        specific_stellar_luminosity = l * blackbody_lambda(l, planet.Teff) * N.pi*U.sr*planet.R**2 * E
        if k == 0:
            stellar_luminosity = specific_stellar_luminosity
        else:
            stellar_luminosity += specific_stellar_luminosity

    observed_stellar_luminosity = stellar_luminosity * (1 - area_ratio*transit_flag)

    bandpass_ratio = (planet_luminosity*specific_luminosity.unit+observed_stellar_luminosity) / stellar_luminosity.to(specific_luminosity.unit)
    return bandpass_ratio
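
As the FUN TIP in the comments notes, some NumPy routines such as einsum can drop astropy units; a version-safe pattern is to operate on .value and re-attach the unit explicitly, for example:

import numpy as np
import astropy.units as u

lum = np.ones((4, 3)) * u.erg / u.s            # toy array with units
# sum over the first axis on the bare values, then restore the unit
summed = np.einsum('ij->j', lum.value) * lum.unit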
Example #12
def blackbody_filter(wave_centre, wave_bandwidth, temperature):
    # Function for calculating the flux of different filters received by detectors on Earth

    waveset_filter = np.linspace((wave_centre - wave_bandwidth / 2), (wave_centre + wave_bandwidth / 2), num=100) * u.AA
    waveset_filter_SI = waveset_filter.to(SI_convert_m)

    with np.errstate(all='ignore'):
        flux_filter = blackbody_lambda(waveset_filter, temperature)

    # Amount of flux received by a detector on Earth
    flux_earth = flux_filter * (R / r) ** 2

    flux_filter_SI = flux_filter.to(SI_convert)
    flux_earth_SI = flux_filter_SI * (R / r) ** 2

    # area under the curve gives the total radiation received per steradian
    area = trapz(flux_earth_SI.value, waveset_filter_SI.value) * np.pi * (u.W / u.meter ** 2)

    return area
Example #13
File: flux.py  Project: akshayah3/Flux
def fluxcalculator(temp):
    """Given a temperature this functions plots a graph between the flux emitted and a set of wavelengths"""

    temp = temp * u.K
    wavemax = (const.b_wien / temp).to(u.AA)  # Wien's displacement law
    waveset = np.logspace(
        0, np.log10(wavemax.value + 10 * wavemax.value), num=100) * u.AA
    with np.errstate(all='ignore'):
        flux = blackbody_lambda(waveset, temp)

    #Plotting

    fig, ax = plt.subplots(figsize=(8, 8))
    ax.plot(waveset.value, flux.value)
    ax.axvline(wavemax.value, ls='--')
    ax.get_yaxis().get_major_formatter().set_powerlimits((0, 1))
    ax.set_xlabel(r'$\lambda$ ({0})'.format(waveset.unit))
    ax.set_ylabel(r'$B_{\lambda}(T)$' + ' ' + '({0})'.format(flux.unit))
    ax.set_title('Blackbody, T = {0}'.format(temp))
    plt.show(block=True)
Example #14
File: flux.py  Project: akshayah3/Flux
def fluxcalculator(temp):
    """Given a temperature this functions plots a graph between the flux emitted and a set of wavelengths"""      

    temp = temp * u.K
    wavemax = (const.b_wien / temp).to(u.AA)  # Wien's displacement law
    waveset = np.logspace(
    0, np.log10(wavemax.value + 10*wavemax.value), num=100) * u.AA
    with np.errstate(all='ignore'):
        flux = blackbody_lambda(waveset, temp)

    #Plotting
    
    fig, ax = plt.subplots(figsize=(8, 8))
    ax.plot(waveset.value, flux.value)
    ax.axvline(wavemax.value, ls='--')
    ax.get_yaxis().get_major_formatter().set_powerlimits((0, 1))
    ax.set_xlabel(r'$\lambda$ ({0})'.format(waveset.unit))
    ax.set_ylabel(r'$B_{\lambda}(T)$' + ' ' + '({0})'.format(flux.unit))
    ax.set_title('Blackbody, T = {0}'.format(temp))
    plt.show(block=True)
Example #15
def blackbody_residual(params, x, data):
    """Using lmfit to fit black body.

    Blackbody_lambda function parameters are:
    Parameters
    ----------
    in_x : number, array-like, or Quantity
        Frequency, wavelength, or wave number. If not a Quantity, it is assumed to be in Angstrom.
    temperature : number, array-like, or Quantity
        Blackbody temperature. If not a Quantity, it is assumed to be in Kelvin.

    Returns
    -------
    flux: Quantity
        Blackbody monochromatic flux in erg cm^-2 s^-1 AA^-1 sr^-1.
    """
    temp = params["temp"]
    scale = params["scale"]
    model = blackbody_lambda(x * u_nm, temp * u_k) * scale
    return data - model.value
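
A sketch of how a residual like this is typically driven with lmfit; the parameter names match the function above, but the starting values, bounds, and the x/data arrays are assumptions:

from lmfit import Parameters, minimize

params = Parameters()
params.add('temp', value=5000, min=500, max=50000)   # blackbody temperature [K]
params.add('scale', value=1.0, min=0)                # multiplicative scale factor

# x: wavelengths in nm, data: observed flux on the same scale as the model
result = minimize(blackbody_residual, params, args=(x, data))
print(result.params['temp'].value, result.params['scale'].value)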
Example #16
def observable_luminosity(planet,
                          temp_map,
                          wavelength,
                          prot   = N.array([10])*U.h,
                          ellipticity = 0,
                          contrast_ratio=False):
    
    #Calculates an array of specific intensities. This is the Planck function.
    #intensity = blackbody_lambda(wavelength, temp_map)
    intensity = (2 * C.h * C.c**2) / (wavelength**5) / (N.exp((C.h * C.c)/(wavelength * C.k_B * temp_map)) - 1) / U.sr
    
    #Since we're calling the phi values directly they need to be in assumed radian values. Theta only goes into trig functions so it just needs some angle unit.
    theta_start = planet.theta_range[:-1]
    theta_end = planet.theta_range[1:]
    
    #Function to calculate the cell areas as a function of phi and theta, given the resolution specified.
    if ellipticity > 0:
        a2 = planet.rp**2 / (1-ellipticity**2)

        subsolar_longitude = planet.subsolar_longitude(planet.times, prot[...,N.newaxis])

        phi_start = (planet.phi_range[:-1] - subsolar_longitude[...,N.newaxis]).to(U.rad)
        phi_end = (planet.phi_range[1:] - subsolar_longitude[...,N.newaxis]).to(U.rad)

        #The phi array for the elliptical case has dimensions of ((parameters), time, lon), and theta is just an array of latitudes, so we need to add enough extra dimensions to theta to have them broadcast.
        theta_start_expand = theta_start.reshape((1,)*phi_start.ndim + N.shape(theta_start))
        theta_end_expand = theta_end.reshape((1,)*phi_end.ndim + N.shape(theta_end))

        #The ellipticity array has dimensions of the parameter array, so it needs 3 more for time (t), longitude (v), and latitude (u).
        ep = ellipticity.reshape(N.shape(ellipticity) + (1,)*3)

        #The phi array needs just one more dimension, for latitude (u).
        phi_start_expand = phi_start[...,N.newaxis]
        phi_end_expand = phi_end[...,N.newaxis]

        #Integrals for the surface area of a cell at a given lat/lon, based on the prolate geometry.
        def spheroid_integrand(phi, theta):
            cosp2 = N.cos(phi)**2
            sinp2 = N.sin(phi)**2
            cost2 = N.cos(theta)**2
            sint2 = N.sin(theta)**2
            zeta2 = 1 - ep**2
            ep2 = ep**2
            return N.sqrt( cost2*(1 - ep2*(2-sint2) + ep2**2 * (cost2 + sint2**2*sinp2*cosp2)) )

        def integrate_area(integrand, p1, t1, n):
            def integrate_p(integrand, t, p1, n):
                dp = 2*N.pi*U.rad / planet.longitude_resolution
                p_span = N.linspace(dp/(2*n), dp*(1-1/(2*n)), n)
                p = p1[...,N.newaxis,N.newaxis] + p_span
                integrand_p = integrand(p, t)
                return N.sum(integrand_p, axis=-1)*dp/n
            
            dt = N.pi*U.rad / planet.latitude_resolution
            t_span = N.linspace(dt/(2*n), dt*(1-1/(2*n)), n)
            t = (t1[...,N.newaxis] + t_span)[...,N.newaxis]
            integrand_t = integrate_p(integrand, t, p1, n)
            return N.sum(integrand_t, axis=-1)*dt/n
        
        solid_angle = integrate_area(spheroid_integrand, phi_start_expand, theta_start_expand, n=10)
        
        cell_area = a2 * N.einsum('...tvu->tuv...', solid_angle) * U.rad**2
        
    else:
        phi_start = planet.phi_range[:-1]
        phi_end = planet.phi_range[1:]
        phi_term = ((phi_end - phi_start)/U.rad).decompose()

        theta_term = N.sin(theta_end) - N.sin(theta_start)
        
        cell_area = (phi_term[...,N.newaxis] * theta_term).T * U.rad**2 * planet.rp**2

    phi_step = float(planet.phi_range[1]/U.rad - planet.phi_range[0]/U.rad)
    cell_area_spherical = N.array([phi_step * (N.sin(theta_end) - N.sin(theta_start))]*(planet.longitude_resolution)).T * U.rad**2 * planet.rp**2
    
    #Calculates the luminosity per cell at a given time and desired wavelength, and then an integrated luminosity over the visible area at the given time.

    if ellipticity > 0:
        luminosity = N.einsum('tuv...->...tuv', intensity[:,:-1,:-1,...] * visibility(planet, prot) * N.abs(cell_area)) * intensity.unit * cell_area.unit
        luminosity2 = N.einsum('tuv...->...tuv', intensity[:,:-1,:-1,...] * visibility(planet, prot)) * intensity.unit * N.abs(cell_area_spherical)
        
    else:
        luminosity = N.einsum('tuv...->...tuv', intensity[:,:-1,:-1,...] * visibility(planet, prot)) * intensity.unit * cell_area
        
    integrated_luminosity = N.nansum(N.einsum('...tuv->tuv...', luminosity), axis=(1,2)) * luminosity.unit
    
    #contrast_ratio sets the luminosities in ratios of the stellar luminosity at the desired wavelength.
    if contrast_ratio:
        stellar_luminosity = blackbody_lambda(wavelength, planet.Teff) * N.pi*U.sr*planet.R**2
        return {'map': luminosity/stellar_luminosity, 'integrated': integrated_luminosity/stellar_luminosity}
    else:
        return {'map': luminosity, 'integrated': integrated_luminosity}
Example #17
def Analyze(
    fileList,
    primary_vsini,
    badregions=[],
    interp_regions=[],
    extensions=True,
    resolution=None,
    trimsize=1,
    vsini_values=(10,),
    Tvalues=range(3000, 6100, 100),
    metal_values=(0.0,),
    logg_values=(4.5,),
    max_vsini=None,
    hdf5_file=StellarModel.HDF5_FILE,
    addmode="ML",
    output_mode="hdf5",
    output_file="Sensitivity.hdf5",
    vel_list=range(-400, 450, 50),
    tolerance=5.0,
    rerun=False,
    debug=False,
):
    """
    This function runs a sensitivity analysis using the same methodology as GenericSearch.companion_search.
    Most of the parameters are the same, with the exception of the ones listed below:

    Parameters:
    ===========
    - max_vsini:         float
                         The maximum vsini (in km/s) that we search. If it is given and less than
                         any of the vsini_values, then the model we correlate against has this vsini. For example,
                         if one of the vsini_values is 150 km/s and the max_vsini is 40 km/s, then a 150 km/s model
                         will be added to the data, but we use a 40 km/s model to correlate against the result.

    - vel_list:          list of floats
                         The list of radial velocities to add the model to the data with.
                         This provides for several independent(-ish) tests of the sensitivity

    - tolerance:         float
                         How close the highest CCF peak needs to be to the correct velocity
                         to count as a detection

    - rerun:             boolean
                         If output_mode=hdf5, check to see if the current parameters have
                         already been checked before running.
    """

    model_list = StellarModel.GetModelList(
        type="hdf5", hdf5_file=hdf5_file, temperature=Tvalues, metal=metal_values, logg=logg_values
    )
    modeldict, processed = StellarModel.MakeModelDicts(
        model_list, type="hdf5", hdf5_file=hdf5_file, vsini_values=vsini_values, vac2air=True, logspace=True
    )

    get_weights = True if addmode.lower() == "weighted" else False

    MS = SpectralTypeRelations.MainSequence()

    # Do the cross-correlation
    datadict = defaultdict(list)
    alpha = 0.0
    for temp in sorted(modeldict.keys()):
        for gravity in sorted(modeldict[temp].keys()):
            for metallicity in sorted(modeldict[temp][gravity].keys()):
                for vsini_sec in vsini_values:
                    if debug:
                        logging.info(
                            "T: {}, logg: {}, [Fe/H]: {}, vsini: {}".format(temp, gravity, metallicity, vsini_sec)
                        )
                    # broaden the model
                    model = modeldict[temp][gravity][metallicity][alpha][vsini_sec].copy()
                    broadened = Broaden.RotBroad(model, vsini_sec * units.km.to(units.cm), linear=True)
                    if resolution is not None:
                        broadened = FittingUtilities.ReduceResolutionFFT(broadened, resolution)
                    if max_vsini is not None and max_vsini < vsini_sec:
                        search_model = Broaden.RotBroad(model, max_vsini * units.km.to(units.cm), linear=True)
                        if resolution is not None:
                            search_model = FittingUtilities.ReduceResolutionFFT(search_model, resolution)
                    else:
                        search_model = broadened.copy()

                    # Make an interpolator function
                    bb_flux = blackbody_lambda(broadened.x * units.nm, temp)
                    idx = np.where(broadened.x > 700)[0]
                    s = np.median(broadened.y[idx] / bb_flux[idx])
                    broadened.cont = bb_flux * s
                    modelfcn = interp(broadened.x, broadened.y / broadened.cont)

                    for i, (fname, vsini_prim) in enumerate(zip(fileList, primary_vsini)):
                        # Read in data
                        process_data = False if fname in datadict else True
                        if process_data:
                            orders_original = HelperFunctions.ReadExtensionFits(fname)
                            orders_original = GenericSearch.Process_Data(
                                orders_original,
                                badregions=badregions,
                                interp_regions=[],
                                trimsize=trimsize,
                                vsini=None,
                                reject_outliers=False,
                                logspacing=False,
                            )

                            datadict[fname] = orders_original
                        else:
                            orders_original = datadict[fname]

                        header = fits.getheader(fname)
                        starname = header["OBJECT"]
                        date = header["DATE-OBS"].split("T")[0]

                        components = get_companions(starname)
                        print(components)
                        primary_temp = components["temperature"]
                        primary_radius = components["radius"]
                        primary_mass = components["mass"]
                        secondary_spt = MS.GetSpectralType("temperature", temp)[0]
                        secondary_radius = MS.Interpolate("radius", secondary_spt)
                        secondary_mass = MS.Interpolate("mass", secondary_spt)

                        for rv in vel_list:
                            # Check if these parameters already exist
                            params = {
                                "velocity": rv,
                                "primary_temps": primary_temp,
                                "secondary_temp": temp,
                                "object": starname,
                                "date": date,
                                "primary_vsini": vsini_prim,
                                "secondary_vsini": vsini_sec,
                                "primary_masses": primary_mass,
                                "secondary_mass": secondary_mass,
                                "logg": gravity,
                                "[Fe/H]": metallicity,
                                "addmode": addmode,
                            }
                            if output_mode == "hdf5" and not rerun and check_existence(output_file, params):
                                continue

                            # Make a copy of the data orders
                            orders = [order.copy() for order in orders_original]

                            for ordernum, order in enumerate(orders):
                                # Get the flux ratio
                                prim_flux = 0.0
                                for ptemp, pR in zip(primary_temp, primary_radius):
                                    prim_flux += blackbody_lambda(order.x * units.nm, ptemp).cgs.value * pR
                                sec_flux = blackbody_lambda(order.x * units.nm, temp).cgs.value * secondary_radius
                                scale = sec_flux / prim_flux

                                # Add the model to the data
                                model_segment = (modelfcn(order.x * (1.0 - rv / lightspeed)) - 1.0) * scale
                                order.y += model_segment * order.cont

                                orders[ordernum] = order

                            # Process the data and model
                            orders = GenericSearch.Process_Data(
                                orders,
                                badregions=[],
                                interp_regions=interp_regions,
                                extensions=extensions,
                                trimsize=0,
                                vsini=vsini_prim,
                                logspacing=True,
                                reject_outliers=True,
                            )
                            model_orders = GenericSearch.process_model(
                                search_model.copy(),
                                orders,
                                vsini_model=vsini_sec,
                                vsini_primary=vsini_prim,
                                debug=debug,
                                logspace=False,
                            )

                            # Do the correlation
                            corr = Correlate.Correlate(
                                orders,
                                model_orders,
                                addmode=addmode,
                                outputdir="Sensitivity/",
                                get_weights=get_weights,
                                prim_teff=max(primary_temp),
                                debug=debug,
                            )
                            if debug:
                                corr, ccf_orders = corr

                            # Determine if we found the companion, and output
                            check_detection(corr, params, mode="hdf5", tol=tolerance, hdf5_file=output_file)

                    # Delete the model. We don't need it anymore and it just takes up ram.
                    modeldict[temp][gravity][metallicity][alpha][vsini_sec] = []

    return
    "lte{:05d}-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits".format(
        comp_temp))

unnorm_host_spec = Spectrum(flux=fits.getdata(host_phoenix), xaxis=phoenix_wl)
unnorm_comp_spec = Spectrum(flux=fits.getdata(comp_phoenix), xaxis=phoenix_wl)

# Wavelength limits. The result is sensitive to these limits.
min_wav = 500
max_wav = 3500

unnorm_host_spec.wav_select(min_wav, max_wav)
unnorm_comp_spec.wav_select(min_wav, max_wav)

# Black body from spectrum temp.
norm_host_spec = Spectrum(flux=unnorm_host_spec.flux /
                          blackbody_lambda(phoenix_wl * u_nm, host_temp),
                          xaxis=phoenix_wl)
norm_comp_spec = Spectrum(flux=unnorm_comp_spec.flux /
                          blackbody_lambda(phoenix_wl * u_nm, host_temp),
                          xaxis=phoenix_wl)

norm_host_spec.wav_select(min_wav, max_wav)
norm_comp_spec.wav_select(min_wav, max_wav)

plt.subplot(311)
plt.plot(norm_host_spec.xaxis, norm_host_spec.flux, label="Host")
plt.plot(norm_comp_spec.xaxis, norm_comp_spec.flux, label="Comp")
plt.title("Unnormalized")
plt.legend()

plt.subplot(312)
Example #19
 def flux(self, wl_in):
     energy_in_flat = blackbody_lambda(wl_in, self.temp)
     return self.factor * energy_in_flat
Example #20
import numpy as np
import matplotlib.pyplot as plt
from astropy import constants as const
from astropy import units as u
from astropy.analytic_functions import blackbody_lambda
from scipy.integrate import quad
from numpy import trapz

'''Plot of flux vs. wavelength for a black body of a given temperature'''

temperature = 10000.0 * u.K

# Wien's displacement law
wavemax = (const.b_wien / temperature).to(u.AA) 

waveset = np.logspace(0, np.log10(10 * wavemax.value), num=1000) * u.AA
with np.errstate(all='ignore'):
    flux = blackbody_lambda(waveset, temperature)

# Converting waveset and flux to SI units
SI_convert_m = u.meter
waveset_SI = waveset.to(SI_convert_m)
wavemax_SI = wavemax.to(SI_convert_m)

SI_convert = u.J / (u.second * (u.meter ** 2) * u.meter * u.sr)
flux_SI = flux.to(SI_convert)

print "We have considered a black body at a temperature of %s" %temperature
print "Wavelength at the maximum value of flux is %s or %s" %(wavemax_SI, wavemax)
print "Total radiated flux = %s" %(const.sigma_sb * (temperature ** 4))

# Plotting flux vs wavelength
fig, ax = plt.subplots(figsize=(8, 5))
Example #21
 def __init__(self, wl, flux, temp):
     super(BlackBody, self).__init__(wl, flux)
     self.temp = temp
     self.scale = flux / blackbody_lambda(wl, self.temp)
Example #22
temperature = 6000 * u.K

#     Plots B as a function of lambda
#                        2 * h * c^2                     1
#     B ( lambda , T ) = ---------------  *  ------------------------------
#                           lambda^5          e^(h*c / (lambda*k_b*T)) - 1


# Wien's constant
b_wein = (2.8977729e-3) * u.m * u.K

# finding the maximum wavelength using Wien's displacement law
wavelength_max = (b_wein / temperature).to(u.AA)

# setting up the x axis up to the maximum wavelength
wavelengths = np.logspace(0,
                          np.log10(wavelength_max.value + 10 * wavelength_max.value),
                          num=1000) * u.AA

#using the function blackbody_lambda from the astropy library
flux = blackbody_lambda(wavelengths, temperature)

# plot flux vs the wavelength
plt.plot(wavelengths.value, flux.value)

plt.xlabel('wavelength ( Angstroms )')

plt.ylabel('Flux ( ' + str(flux.unit) +' )' )

#show the plot
plt.show()
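
The commented formula above can be checked directly against astropy's blackbody_lambda; a small verification sketch (the manual expression mirrors the Planck function written out in Example #16):

import numpy as np
from astropy import units as u
from astropy import constants as const
from astropy.analytic_functions import blackbody_lambda

T = 6000 * u.K
lam = 5000 * u.AA

# manual Planck function B(lambda, T), per steradian
manual = (2 * const.h * const.c ** 2 / lam ** 5 /
          (np.exp((const.h * const.c / (lam * const.k_B * T)).decompose()) - 1)) / u.sr
print(manual.to(u.erg / (u.AA * u.cm ** 2 * u.s * u.sr)))
print(blackbody_lambda(lam, T))   # should agree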
Example #23
    def test_flare_magnitudes_mixed_with_dummy(self):
        """
        Test that we get the expected magnitudes out
        """
        db = MLT_test_DB(database=self.db_name, driver='sqlite')

        # load the quiescent SEDs of the objects in our catalog
        sed_list = SedList(['lte028-5.0+0.5a+0.0.BT-Settl.spec.gz'] * 4,
                           [17.1, 17.2, 17.3, 17.4],
                           galacticAvList=[2.432, 1.876, 2.654, 2.364])

        bp_dict = BandpassDict.loadTotalBandpassesFromFiles()

        # calculate the quiescent fluxes of the objects in our catalog
        baseline_fluxes = bp_dict.fluxListForSedList(sed_list)

        bb_wavelen = np.arange(100.0, 1600.0, 0.1)
        bb_flambda = blackbody_lambda(bb_wavelen * 10.0, 9000.0)

        # this data is taken from the setUpClass() classmethod above
        t0_list = [456.2, 41006.2, 117.2, 10456.2]
        av_list = [2.432, 1.876, 2.654, 2.364]
        parallax_list = np.array([0.25, 0.15, 0.3, 0.22])
        distance_list = 1.0 / (206265.0 *
                               radiansFromArcsec(0.001 * parallax_list))
        distance_list *= 3.0857e16  # convert to cm

        dtype = np.dtype([('id', int), ('u', float), ('g', float)])

        photParams = PhotometricParameters()

        ss = Sed()

        quiet_cat_name = os.path.join(self.scratch_dir,
                                      'mlt_mixed_with_dummy_quiet_cat.txt')
        flare_cat_name = os.path.join(self.scratch_dir,
                                      'mlt_mixed_with_dummy_flaring_cat.txt')

        # loop over several MJDs and verify that, to within a
        # milli-mag, our flaring model gives us the magnitudes
        # expected, given the light curves specified in
        # setUpClass()
        for mjd in (59580.0, 60000.0, 70000.0, 80000.0):

            obs = ObservationMetaData(mjd=mjd)

            quiet_cat = QuiescentCatalog(db, obs_metadata=obs)
            quiet_cat.write_catalog(quiet_cat_name)

            flare_cat = FlaringCatalogDummy(db, obs_metadata=obs)
            flare_cat._mlt_lc_file = self.mlt_lc_name
            flare_cat.write_catalog(flare_cat_name)

            quiescent_data = np.genfromtxt(quiet_cat_name,
                                           dtype=dtype,
                                           delimiter=',')
            flaring_data = np.genfromtxt(flare_cat_name,
                                         dtype=dtype,
                                         delimiter=',')

            for ix in range(len(flaring_data)):
                obj_id = flaring_data['id'][ix]
                self.assertEqual(obj_id, ix)

                msg = (
                    'failed on object %d; mjd %.2f\n u_quiet %e u_flare %e\n g_quiet %e g_flare %e'
                    % (obj_id, mjd, quiescent_data['u'][obj_id],
                       flaring_data['u'][obj_id], quiescent_data['g'][obj_id],
                       flaring_data['g'][obj_id]))

                self.assertEqual(quiescent_data['id'][obj_id],
                                 flaring_data['id'][obj_id],
                                 msg=msg)
                self.assertAlmostEqual(ss.magFromFlux(
                    baseline_fluxes[obj_id][0]),
                                       quiescent_data['u'][obj_id],
                                       3,
                                       msg=msg)
                self.assertAlmostEqual(ss.magFromFlux(
                    baseline_fluxes[obj_id][1]),
                                       quiescent_data['g'][obj_id],
                                       3,
                                       msg=msg)
                if obj_id != 3:

                    # the models below are as specified in the
                    # setUpClass() method
                    if obj_id == 0 or obj_id == 1:
                        amp = 1.0e32
                        dt = 3652.5
                        t_min = flare_cat._survey_start - t0_list[obj_id]

                        tt = mjd - t_min
                        while tt > dt:
                            tt -= dt

                        u_flux = amp * (1.0 + np.power(np.sin(tt / 100.0), 2))
                        g_flux = amp * (1.0 + np.power(np.cos(tt / 100.0), 2))
                    elif obj_id == 2:
                        amp = 2.0e31
                        dt = 365.25
                        t_min = flare_cat._survey_start - t0_list[obj_id]

                        tt = mjd - t_min
                        while tt > dt:
                            tt -= dt
                        u_flux = amp * (1.0 + np.power(np.sin(tt / 50.0), 2))
                        g_flux = amp * (1.0 + np.power(np.cos(tt / 50.0), 2))

                    # calculate the multiplicative effect of dust on a 9000K
                    # black body
                    bb_sed = Sed(wavelen=bb_wavelen, flambda=bb_flambda)
                    u_bb_flux = bb_sed.calcFlux(bp_dict['u'])
                    g_bb_flux = bb_sed.calcFlux(bp_dict['g'])
                    a_x, b_x = bb_sed.setupCCMab()
                    bb_sed.addCCMDust(a_x, b_x, A_v=av_list[obj_id])
                    u_bb_dusty_flux = bb_sed.calcFlux(bp_dict['u'])
                    g_bb_dusty_flux = bb_sed.calcFlux(bp_dict['g'])

                    dust_u = u_bb_dusty_flux / u_bb_flux
                    dust_g = g_bb_dusty_flux / g_bb_flux

                    area = 4.0 * np.pi * np.power(distance_list[obj_id], 2)
                    tot_u_flux = baseline_fluxes[obj_id][
                        0] + u_flux * dust_u * photParams.effarea / area
                    tot_g_flux = baseline_fluxes[obj_id][
                        1] + g_flux * dust_g * photParams.effarea / area

                    self.assertAlmostEqual(ss.magFromFlux(tot_u_flux),
                                           flaring_data['u'][obj_id],
                                           3,
                                           msg=msg)
                    self.assertAlmostEqual(ss.magFromFlux(tot_g_flux),
                                           flaring_data['g'][obj_id],
                                           3,
                                           msg=msg)

                    self.assertGreater(np.abs(flaring_data['g'][obj_id] -
                                              quiescent_data['g'][obj_id]),
                                       0.001,
                                       msg=msg)
                    self.assertGreater(np.abs(flaring_data['u'][obj_id] -
                                              quiescent_data['u'][obj_id]),
                                       0.001,
                                       msg=msg)
                else:
                    self.assertAlmostEqual(flaring_data['g'][obj_id],
                                           quiescent_data['g'][obj_id] + 3 *
                                           (mjd - 59580.0) / 10000.0,
                                           3,
                                           msg=msg)
                    self.assertAlmostEqual(flaring_data['u'][obj_id],
                                           quiescent_data['u'][obj_id] + 2 *
                                           (mjd - 59580.0) / 10000.0,
                                           3,
                                           msg=msg)

        if os.path.exists(quiet_cat_name):
            os.unlink(quiet_cat_name)
        if os.path.exists(flare_cat_name):
            os.unlink(flare_cat_name)
Example #24
def run():
    arguments = docopt.docopt(__doc__)
    parameter_file = arguments['<parameter_file>']

    with open(parameter_file, 'r') as ymlfile:
        cfg = yaml.load(ymlfile)

    logger.info('WFC3Sim Started, parsing config file')

    threads = cfg['general']['threads']

    outdir = cfg['general']['outdir']
    params.outdir = outdir

    oec_path = cfg['general']['oec_location']
    if oec_path:
        exodb = exodata.OECDatabase(oec_path, stream=True)
    else:
        exodb = plc.oec_catalogue()

    if not os.path.exists(outdir):
        os.mkdir(outdir)

    # copy parfile to output
    shutil.copy2(parameter_file,
                 os.path.join(outdir, os.path.basename(parameter_file)))

    try:
        seed = cfg['general']['seed']
    except KeyError:
        seed = None
    if not seed or seed is None:
        np.random.seed(seed)
        params.seed = np.random.get_state()[1][
            0]  # tell params what the seed is for exp header
    else:
        np.random.seed(seed)

    grisms = {
        'G141': grism.G141(),
        'G102': grism.G102()
    }
    chosen_grism = grisms[cfg['observation']['grism']]
    det = detector.WFC3_IR()

    rebin_resolution = cfg['target']['rebin_resolution']

    # Check for transmission spectroscopy mode
    try:
        planet_spectrum = cfg['target']['planet_spectrum_file']
        shutil.copy2(planet_spectrum,
                     os.path.join(outdir, os.path.basename(planet_spectrum)))

        transmission_spectroscopy = True
    except KeyError:
        transmission_spectroscopy = False

    if transmission_spectroscopy:
        try:
            planet = exodb.planetDict[cfg['target']['name']]
        except KeyError:
            planet = cfg['target']['name']

        # modify planet params if given Note that exodata uses a different unit
        # package at present
        try:
            transittime = cfg['target']['transit_time'] * aq.JD
        except KeyError:
            transittime = None

        try:
            period = cfg['target']['period'] * aq.day
        except KeyError:
            period = None

        try:
            rp = cfg['target']['rp'] * aq.R_j
        except KeyError:
            rp = None

        try:
            sma = cfg['target']['sma'] * aq.au
        except KeyError:
            sma = None

        try:
            stellar_radius = cfg['target']['stellar_radius'] * aq.R_s
        except KeyError:
            stellar_radius = None

        try:
            inclination = cfg['target']['inclination'] * aq.deg
        except KeyError:
            inclination = None

        try:
            eccentricity = cfg['target']['eccentricity']
        except KeyError:
            eccentricity = None

        try:
            ldcoeffs = cfg['target']['ldcoeffs']
        except KeyError:
            ldcoeffs = None

        try:
            periastron = cfg['target']['periastron']
        except KeyError:
            periastron = None

        wl_planet, depth_planet = tools.load_and_sort_spectrum(planet_spectrum)
        wl_planet, depth_planet = np.array(
            tools.crop_spectrum(0.9, 1.8, wl_planet, depth_planet))

        wl_planet = wl_planet * u.micron

        if rebin_resolution:
            new_wl = tools.wl_at_resolution(
                rebin_resolution, chosen_grism.wl_limits[0].value,
                chosen_grism.wl_limits[1].value)

            depth_planet = tools.rebin_spec(wl_planet.value, depth_planet,
                                            new_wl)
            wl_planet = new_wl * u.micron

    else:
        depth_planet = None
        planet = None
        transittime = None
        period = None
        rp = None
        sma = None
        stellar_radius = None
        inclination = None
        eccentricity = None
        ldcoeffs = None
        periastron = None
        try:
            planet = exodb.planetDict[cfg['target']['name']]
        except KeyError:
            planet = cfg['target']['name']

    stellar_spec_file = cfg['target']['stellar_spectrum_file']
    if stellar_spec_file:
        wl_star, flux_star = tools.load_pheonix_stellar_grid_fits(
            stellar_spec_file)

        if transmission_spectroscopy:
            flux_star = tools.rebin_spec(wl_star, flux_star,
                                         np.array(wl_planet))
        elif rebin_resolution:  # not transmission spectro mode
            new_wl = tools.wl_at_resolution(
                rebin_resolution, chosen_grism.wl_limits[0].value,
                chosen_grism.wl_limits[1].value)

            flux_star = tools.rebin_spec(wl_star, flux_star, new_wl)
            wl_star = new_wl

        flux_units = u.erg / (u.angstrom * u.s * u.sr * u.cm ** 2)
        flux_star = flux_star * flux_units
    else:  # use blackbody
        if transmission_spectroscopy:
            flux_star = blackbody_lambda(wl_planet, planet.star.T)
        else:
            raise WFC3SimConfigError(
                "Must give the stellar spectrum if not using transmission spectroscopy")

    stellar_flux_scaled = flux_star * cfg['target']['flux_scale'] * u.sr

    if transmission_spectroscopy:
        wl = wl_planet
    else:
        wl = wl_star * u.micron

    x_ref = cfg['observation']['x_ref']
    y_ref = cfg['observation']['y_ref']
    NSAMP = cfg['observation']['NSAMP']
    SAMPSEQ = cfg['observation']['SAMPSEQ']
    SUBARRAY = cfg['observation']['SUBARRAY']

    # convert pq to u
    start_JD = cfg['observation']['start_JD'] * u.day
    num_orbits = cfg['observation']['num_orbits']

    spatial_scan = cfg['observation']['spatial_scan']

    if spatial_scan:
        sample_rate = cfg['observation']['sample_rate'] * u.ms
        scan_speed = cfg['observation']['scan_speed'] * (u.pixel / u.s)
    else:
        sample_rate = False
        scan_speed = False

    ssv_classes = {
        'sine': scan_speed_varations.SSVSine,
        'mod-sine': scan_speed_varations.SSVModulatedSine,
    }
    ssv_type = cfg['observation']['ssv_type']
    if ssv_type:
        try:
            ssv_class = ssv_classes[ssv_type]
        except KeyError:
            raise WFC3SimConfigError("Invalid ssv_type given")

        ssv_coeffs = cfg['observation']['ssv_coeffs']
        ssv_gen = ssv_class(*ssv_coeffs)
    else:
        ssv_gen = None

    x_shifts = cfg['observation']['x_shifts']
    x_jitter = cfg['observation']['x_jitter']
    y_shifts = cfg['observation']['y_shifts']
    y_jitter = cfg['observation']['y_jitter']

    noise_mean = cfg['observation']['noise_mean']
    noise_std = cfg['observation']['noise_std']

    add_dark = cfg['observation']['add_dark']
    add_flat = cfg['observation']['add_flat']
    add_gain_variations = cfg['observation']['add_gain_variations']
    add_non_linear = cfg['observation']['add_non_linear']
    add_read_noise = cfg['observation']['add_read_noise']
    add_stellar_noise = cfg['observation']['add_stellar_noise']
    add_initial_bias = cfg['observation']['add_initial_bias']

    sky_background = cfg['observation']['sky_background']
    cosmic_rate = cfg['observation']['cosmic_rate']

    clip_values_det_limits = cfg['observation']['clip_values_det_limits']

    try:
        exp_start_times = cfg['observation']['exp_start_times']
    except KeyError:
        exp_start_times = False

    if exp_start_times:  # otherwise we use the visit planner
        logger.info('Visit planner disabled: using start times from {}'.format(
            exp_start_times))
        exp_start_times = np.loadtxt(exp_start_times) * u.day

    # check to see if we have numbers of file paths, and load accordingly
    if isinstance(x_ref, str):
        x_ref = np.loadtxt(x_ref)

    if isinstance(y_ref, str):
        y_ref = np.loadtxt(y_ref)

    if isinstance(sky_background, str):
        sky_background = np.loadtxt(sky_background)
    sky_background = sky_background * u.count / u.s

    obs = observation.Observation(outdir)

    obs.setup_detector(det, NSAMP, SAMPSEQ, SUBARRAY)
    obs.setup_grism(chosen_grism)
    obs.setup_target(planet, wl, depth_planet, stellar_flux_scaled,
                     transittime,
                     ldcoeffs, period, rp, sma, inclination, eccentricity,
                     periastron,
                     stellar_radius)
    obs.setup_visit(start_JD, num_orbits, exp_start_times)
    obs.setup_reductions(add_dark, add_flat, add_gain_variations,
                         add_non_linear,
                         add_initial_bias)
    obs.setup_observation(x_ref, y_ref, spatial_scan, scan_speed)
    obs.setup_simulator(sample_rate, clip_values_det_limits, threads)
    obs.setup_trends(ssv_gen, x_shifts, x_jitter, y_shifts, y_jitter)
    obs.setup_noise_sources(sky_background, cosmic_rate, add_read_noise, add_stellar_noise)
    obs.setup_gaussian_noise(noise_mean, noise_std)

    visit_trend_coeffs = cfg['trends']['visit_trend_coeffs']

    if visit_trend_coeffs:
        obs.setup_visit_trend(visit_trend_coeffs)

    obs.show_lightcurve()
    plt.savefig(os.path.join(outdir, 'visit_plan.png'))
    plt.close()

    obs.run_observation()
Example #25
# V band
# Angstroms, Angstroms
V_start, V_end = 5070, 5950

# R band
# Angstroms, Angstroms
R_start, R_end = 5890, 7270


# temperature of Vega
temperature = 9602 * u.K #Kelvin

# radius of Vega
radius = 2.818 # solar radii

# distance of Vega from the earth
distance_from_earth = 25.05 # light years

flux_Vega = lambda x: (
   float(blackbody_lambda(x,temperature).value) *
   float( (float(SR*radius)/float(LY*distance_from_earth))**2 )
)

print('Flux in U band of Vega: ' +
      str(integrate.quad(flux_Vega, U_start, U_end)[0]))
print('Flux in B band of Vega: ' +
      str(integrate.quad(flux_Vega, B_start, B_end)[0]))
print('Flux in V band of Vega: ' +
      str(integrate.quad(flux_Vega, V_start, V_end)[0]))
print('Flux in R band of Vega: ' +
      str(integrate.quad(flux_Vega, R_start, R_end)[0]))
Example #26
def plot_expected(orders, prim_spt, Tsec, instrument, vsini=None, rv=0.0, twoaxes=False):
    """
    Plot the data orders, with a model spectrum added at appropriate flux ratio

    Parameters
    ==========
    - orders:        A list of kglib.utils.Datastructures.xypoint instances
                     The observed spectra, split into echelle orders

    - prim_spt:      string
                     The primary star spectral type

    - Tsec:          float
                     The secondary temperature

    - instrument:    string
                     The name of the instrument the observation came from

    - vsini:         float
                     The vsini of the companion, in km/s

    - rv:            float
                     The rv shift of the companion
    """

    sns.set_context("paper", font_scale=2.0)
    sns.set_style("white")
    sns.set_style("ticks")

    # First, get the model
    dir_prefix = "/media/ExtraSpace"
    if not os.path.exists(dir_prefix):
        dir_prefix = "/Volumes/DATADRIVE"
    inst2hdf5 = {
        "TS23": "{}/PhoenixGrid/TS23_Grid.hdf5".format(dir_prefix),
        "HRS": "{}/PhoenixGrid/HRS_Grid.hdf5".format(dir_prefix),
        "CHIRON": "{}/PhoenixGrid/CHIRON_Grid.hdf5".format(dir_prefix),
        "IGRINS": "{}/PhoenixGrid/IGRINS_Grid.hdf5".format(dir_prefix),
    }
    hdf5_int = StellarModel.HDF5Interface(inst2hdf5[instrument])
    wl = hdf5_int.wl
    pars = {"temp": Tsec, "logg": 4.5, "Z": 0.0, "alpha": 0.0}
    fl = hdf5_int.load_flux(pars)

    # Broaden, if requested
    if vsini is not None:
        m = DataStructures.xypoint(x=wl, y=fl)
        m = Broaden.RotBroad(m, vsini * units.km.to(units.cm))
        wl, fl = m.x, m.y

    # get model continuum
    c = FittingUtilities.Continuum(wl, fl, fitorder=5, lowreject=2, highreject=10)

    # Interpolate the model
    x = wl * units.angstrom
    plt.plot(wl, fl)
    plt.plot(wl, c)
    plt.show()
    modelfcn = interp(x.to(units.nm), fl / c)

    # Get the wavelength-specific flux ratio between the primary and secondary star
    MS = SpectralTypeRelations.MainSequence()
    Tprim = MS.Interpolate("temperature", prim_spt)
    Rprim = MS.Interpolate("radius", prim_spt)
    sec_spt = MS.GetSpectralType("temperature", Tsec, prec=1e-3)
    Rsec = MS.Interpolate("radius", sec_spt)
    flux_ratio = blackbody_lambda(x, Tprim) / blackbody_lambda(x, Tsec) * (Rprim / Rsec) ** 2
    fluxratio_fcn = interp(x.to(units.nm), 1.0 / flux_ratio)

    # Loop over the orders:
    if twoaxes:
        fig, axes = plt.subplots(2, 1, sharex=True)
        top, bottom = axes
        for order in orders:
            order.cont = FittingUtilities.Continuum(order.x, order.y, fitorder=3, lowreject=2, highreject=5)
            top.plot(order.x, order.y, "k-", alpha=0.4)
            top.plot(order.x, order.cont, "r--")

            total = order.copy()

            xarr = total.x * (1 + rv / constants.c.to(units.km / units.s).value)
            model = (modelfcn(xarr) - 1.0) * fluxratio_fcn(xarr)
            total.y += total.cont * model
            top.plot(total.x, total.y, "g-", alpha=0.4)

            bottom.plot(total.x, total.y - order.y, "k-", alpha=0.4)

        return fig, [top, bottom], orders

    fig, ax = plt.subplots(1, 1)
    for order in orders:
        order.cont = FittingUtilities.Continuum(order.x, order.y, fitorder=3, lowreject=2, highreject=5)
        ax.plot(order.x, order.y, "k-", alpha=0.4)

        total = order.copy()

        xarr = total.x * (1 + rv / constants.c.to(units.km / units.s).value)
        model = (modelfcn(xarr) - 1.0) * fluxratio_fcn(xarr)
        total.y += total.cont * model
        ax.plot(total.x, total.y, "g-", alpha=0.4)

    # Label
    ax.set_xlabel("Wavelength (nm)")
    ax.set_ylabel("Flux (arbitrary units)")
    xlim = ax.get_xlim()
    ylim = ax.get_ylim()
    ax.plot([9e9], [9e9], "k-", label="Actual data")
    ax.plot([9e9], [9e9], "g-", label="Expected data")
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    leg = ax.legend(loc="best", fancybox=True)

    return fig, ax, orders
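
The wavelength-dependent flux ratio used above is just the ratio of two Planck functions scaled by the radius ratio squared; a minimal standalone version with assumed temperatures and radii:

import astropy.units as u
from astropy.analytic_functions import blackbody_lambda

x = 5500 * u.AA
Tprim, Tsec = 6200., 3800.    # K (bare numbers are interpreted as Kelvin)
Rprim, Rsec = 1.2, 0.5        # stellar radii; only the ratio matters
flux_ratio = blackbody_lambda(x, Tprim) / blackbody_lambda(x, Tsec) * (Rprim / Rsec) ** 2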
Example #27
 def generate(self, wl):
     # WL in AA, temp in K
     return self.scale * blackbody_lambda(wl, self.temp)
Example #28
h = (wv2 - wv1) / 1000 
for i in range(1, 1000, 2):
    total += 4 * curve(wv1 + i * h , temp)
for i in range(2, 999, 2):
    total += 2 * curve(wv1 + i * h , temp)
total = total * h * u.m / 3

# rectangular integration
total2 = 0
for i in np.linspace(wv1, wv2, 1000):
    total2 += curve(i, temp)*h*u.m

# this is to test the validity of the curve function
first = 2e-8
one = curve(1e-8, temp)
two = curve(2e-8, temp)
while one < two:
    first += 1e-8
    one, two = two, curve(first, temp)
 

print("Simpson's Integration: ", total)
print("Rectagle Integration: ", total2)
print("Stefan-Boltzmann law: ", cst.sigma_sb*(temp*u.K)**4)
print("Peak at ", first)
print("Peak should be at ", cst.b_wien/(temp*u.K))
print("curve value 502nm", curve (502e-9, temp)/1e12, "Trillion")
print("Exact: ", fxn.blackbody_lambda(5020, temp).to(u.W/(u.m**3*u.sr)))
print("curve value 150nm", curve (150e-6, temp)/1e12, "Trillion")
print("Exact: ", fxn.blackbody_lambda(150*u.um, temp).to(u.W/(u.m**3*u.sr)))