Example 1
def downsample_to_cassis_spectres(df):
    """Downsample to match the wavelength grid of CASSIS."""
    def spline(x, y, new_x):
        spline_model = splrep(x=x, y=y)
        new_y = splev(x=new_x, tck=spline_model)
        return new_y

    wave = df['wavelength']
    flux = df['flux']
    spec_error = df['spec_error']
    norm_error = df['norm_error']

    wave = wave.values
    flux = flux.values
    spec_error = spec_error.values
    norm_error = norm_error.values

    new_wave = cassis_wave
    new_flux, new_spec_error = spectres(new_spec_wavs=new_wave,
                                        old_spec_wavs=wave,
                                        spec_fluxes=flux,
                                        spec_errs=spec_error)
    _, new_norm_error = spectres(new_spec_wavs=new_wave,
                                 old_spec_wavs=wave,
                                 spec_fluxes=flux,
                                 spec_errs=norm_error)

    col_stack = np.column_stack(
        [new_wave, new_flux, new_spec_error, new_norm_error])
    col_names = ['wavelength', 'flux', 'spec_error', 'norm_error']

    df2 = pd.DataFrame(col_stack, columns=col_names)

    return df2
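A minimal usage sketch for the function above; the imports, the CASSIS wavelength grid, and the toy DataFrame are assumptions rather than part of the original source, and the `new_spec_wavs`/`old_spec_wavs` keywords above imply an older spectres release.

import numpy as np
import pandas as pd
from scipy.interpolate import splev, splrep
from spectres import spectres

# Hypothetical CASSIS low-resolution grid (um); the real grid is not shown in the snippet.
cassis_wave = np.linspace(5.2, 38.0, 350)

wave = np.linspace(5.0, 40.0, 2000)
df = pd.DataFrame({'wavelength': wave,
                   'flux': 1.0 + 0.1 * np.sin(wave),
                   'spec_error': np.full(wave.size, 0.05),
                   'norm_error': np.full(wave.size, 0.02)})
df_lowres = downsample_to_cassis_spectres(df)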
Example 2
    def _prepare_CvD18_respfun(self):
        """ Prepare response functions from CvD models. """
        # Read one spectrum to get name of columns
        with open(self.rf_infiles[0]) as f:
            header = f.readline().replace("#", "")
        fields = [_.strip() for _ in header.split(",")]
        fields[fields.index("C+")] = "C+0.15"
        fields[fields.index("C-")] = "C-0.15"
        fields[fields.index("T+")] = "T+50"
        fields[fields.index("T-")] = "T-50"
        fields = ["{}0.3".format(_) if _.endswith("+") else _ for _ in fields ]
        fields = ["{}0.3".format(_) if _.endswith("-") else _ for _ in fields]
        elements = set([_.split("+")[0].split("-")[0] for _ in fields if
                        any(c in _ for c in ["+", "-"])])
        signal = ["+", "-"]
        velscale = int(self.sigma / 4)
        kernel_sigma = np.sqrt(self.sigma**2 - 100**2) / velscale
        rfsout = dict([(element, []) for element in elements])
        parsout = dict([(element, []) for element in elements])
        desc = "Preparing response functions"
        for i, fname in enumerate(tqdm(self.rf_infiles, desc=desc)):
            spec = os.path.split(fname)[1]
            T = float(spec.split("_")[2][1:])
            Z = float(spec.split("_")[3].split(".abun")[0][1:].replace(
                "p", "+").replace("m", "-"))
            data = np.loadtxt(fname)
            w = data[:, 0]
            data = data.T
            if self.sigma > 100:
                wvel = disp2vel(w, velscale)
                rebin = spectres(wvel, w, data)
                broad = gaussian_filter1d(rebin, kernel_sigma,
                                          mode="constant", cval=0.0)
                data = spectres(self.wave, wvel, broad).T

            else:
                data = spectres(self.wave, w, data).T
            fsun = data[:, 1]
            for element in elements:
                # Adding solar response
                p = Table([[Z], [T], [0.]], names=["Z", "Age", element])
                rfsout[element].append(np.ones(len(self.wave)))
                parsout[element].append(p)
                # Adding non-solar responses
                for sign in signal:
                    name = "{}{}".format(element, sign)
                    cols = [(i,f) for i, f in enumerate(fields) if
                            f.startswith(name)]
                    for i, col in cols:
                        val = float("{}1".format(sign)) * \
                              float(col.split(sign)[1])
                        t = Table([[Z], [T], [val]],
                                  names=["Z", "Age", element])
                        parsout[element].append(t)
                        rf = data[:, i] / fsun
                        rfsout[element].append(rf)
        rfs = dict([(e, np.array(rfsout[e])) for e in elements])
        rfpars = dict([(e, vstack(parsout[e])) for e in elements])
        return rfs, rfpars
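The quadrature broadening used above (kernel_sigma = sqrt(sigma**2 - 100**2) / velscale) degrades spectra from the models' native 100 km/s dispersion to the target dispersion before resampling; a standalone sketch with assumed inputs:

import numpy as np
from scipy.ndimage import gaussian_filter1d

sigma = 300.0                    # target velocity dispersion (km/s), assumed
velscale = int(sigma / 4)        # km/s per pixel of the log-rebinned spectrum
kernel_sigma = np.sqrt(sigma**2 - 100.0**2) / velscale   # kernel width in pixels

flux = np.ones(1000)             # toy spectrum sampled at constant velocity steps
broadened = gaussian_filter1d(flux, kernel_sigma, mode="constant", cval=0.0)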
Example 3
def prepare_spectrum(spec_file, outfile, overwrite=False):
    """ Preparing the spectrum of a single galaxy for the fitting. """
    if os.path.exists(outfile) and not overwrite:
        return
    wave, flux, fluxerr, mask, res_kms = np.loadtxt(spec_file, unpack=True)
    mask = mask.astype(bool).astype(int)
    idx = np.where(mask > 0)[0]
    f_interp = interp1d(wave[idx], flux[idx], fill_value="extrapolate")
    flux = f_interp(wave)
    ferr_interp = interp1d(wave[idx], fluxerr[idx], fill_value="extrapolate")
    fluxerr = ferr_interp(wave)
    # Calculating resolution in FWHM
    c = const.c.to("km/s").value
    fwhms = res_kms / c * wave * 2.355
    # Homogenize the resolution
    target_res = np.array([200, 100])  # Rounding up the ideal resolution
    velscale = (target_res / 3).astype(int)
    # Splitting the data to work with different resolutions
    wave_ranges = [[4200, 6680], [8200, 8900]]
    names = ["wave", "flux", "fluxerr", "mask"]
    hdulist = [fits.PrimaryHDU()]
    for i, (w1, w2) in enumerate(wave_ranges):
        idx = np.where((wave >= w1) & (wave < w2))[0]
        w = wave[idx]
        f = flux[idx]
        ferr = fluxerr[idx]
        m = mask[idx]
        # res = res_kms[idx] # This was used to check a good target_res
        fwhm = fwhms[idx]
        target_fwhm = target_res[i] / c * w * 2.355
        fbroad, fbroaderr = pb.broad2res(w, f, fwhm, target_fwhm, fluxerr=ferr)
        # Resampling data
        owave = pb.disp2vel([w[0], w[-1]], velscale[i])
        oflux, ofluxerr = spectres(owave,
                                   w,
                                   fbroad,
                                   spec_errs=fbroaderr,
                                   fill=0,
                                   verbose=False)
        # Smooth the resampled error to suppress its high pixel-to-pixel variance
        ofluxerr = gaussian_filter1d(ofluxerr, 3)
        omask = spectres(owave, w, m, fill=0,
                         verbose=False).astype(int).astype(bool)
        ########################################################################
        # Include mask for borders of spectrum
        wmin = owave[omask].min()
        wmax = owave[omask].max()
        omask[owave < wmin + 5] = False
        omask[owave > wmax - 5] = False
        ########################################################################
        obsmask = -1 * (omask.astype(np.int) - 1)
        table = Table([owave, oflux, ofluxerr, obsmask], names=names)
        hdu = fits.BinTableHDU(table)
        hdulist.append(hdu)
    hdulist = fits.HDUList(hdulist)
    hdulist.writeto(outfile, overwrite=True)
    return
Example 4
def cal_partial_f_rv(teff,
                     logg,
                     m_h,
                     wav_start,
                     wav_end,
                     vbroad_in,
                     vmicro_in,
                     rv_in,
                     wav_in,
                     abun_change=None,
                     diff_rv=0.1,
                     line_list='vald_winered'):

    # convert rv to wavelength
    del_wav = rv_in / 3e5 * private.np.mean(wav_in)

    s = synth.synth(teff,
                    logg,
                    m_h,
                    wav_start - 0.75 - del_wav,
                    wav_end + 0.75 + del_wav,
                    20000,
                    line_list=line_list,
                    weedout=True)
    s.prepare_file(vmicro=vmicro_in,
                   smooth_para=['g', vbroad_in, 0, 0, 0, 0],
                   abun_change=abun_change)
    s.run_moog()
    s.read_spectra()
    s.wav = s.wav * (1 + (rv_in + diff_rv) / 3e5)
    flux_p = spectres.spectres(wav_in, s.wav, s.flux)

    s_ = synth.synth(teff,
                     logg,
                     m_h,
                     wav_start - 0.75 - del_wav,
                     wav_end + 0.75 + del_wav,
                     20000,
                     line_list=line_list,
                     weedout=True)
    s_.prepare_file(vmicro=vmicro_in,
                    smooth_para=['g', vbroad_in, 0, 0, 0, 0],
                    abun_change=abun_change)
    s_.run_moog()
    s_.read_spectra()
    s_.wav = s_.wav * (1 + (rv_in - diff_rv) / 3e5)
    flux_m = spectres.spectres(wav_in, s_.wav, s_.flux)

    partial_flux = (flux_p - flux_m) / (2 * diff_rv)

    return partial_flux
Example 5
def read_pycoco_template_outdir(pycoco_out_dir, canonical_lamb):
    time, fluxlst, fluxerrlst = [], [], []

    def path2t(pth):
        return float(basename(pth).split('_')[0])

    for path in sorted(glob(join(pycoco_out_dir, '*.txt')), key=path2t):
        t = path2t(path)
        lamb, flux, fluxerr = zip(np.loadtxt(path, comments='#', delimiter='\t', unpack=True))
        if isinstance(fluxerr, tuple):
            fluxerr = fluxerr[0]
        if isinstance(lamb, tuple):
            lamb = lamb[0]
        if isinstance(flux, tuple):
            flux = flux[0]

        lamb, flux, fluxerr = zip(*sorted(zip(lamb, flux, fluxerr), key=lambda tup: tup[0]))
        lamb, flux, fluxerr = np.array(lamb), np.array(flux), np.array(fluxerr)

        flux, fluxerr = spectres(canonical_lamb, lamb, flux, fluxerr, np.nan, False)

        time.append(t)
        fluxlst.append(flux)
        fluxerrlst.append(fluxerr)

    flux_matrix = np.row_stack(fluxlst)
    fluxerr_matrix = np.row_stack(fluxerrlst)
    time = np.array(time)
    return time, canonical_lamb, flux_matrix, fluxerr_matrix
Example 6
def calibrated_radiance(spectra, spectra_info, dark_spectra, cal_per_wl,
                        sensor_area):

    # we have no saturated spectra due to adaptive measurement

    # convert integration time from us to s
    spectra_info['integration_time'] = (spectra_info['integration_time'] /
                                        (1000 * 1000))

    cal_per_wl.index = spectra.columns
    dark_spectra.columns = spectra.columns
    uj_per_pixel = (spectra - dark_spectra) * cal_per_wl.T.values[0]
    wls = uj_per_pixel.columns.to_numpy(dtype='float')
    nm_per_pixel = np.hstack([(wls[1] - wls[0]), (wls[2:] - wls[:-2]) / 2,
                              (wls[-1] - wls[-2])])
    uj_per_nm = uj_per_pixel / nm_per_pixel
    uj_per_cm2_per_nm = uj_per_nm / sensor_area.loc[0, 0]
    uw_per_cm2_per_nm = uj_per_cm2_per_nm.div(spectra_info['integration_time'],
                                              axis='rows')

    # Resample
    wls = np.arange(380, 781)
    uw_per_cm2_per_nm = spectres.spectres(
        wls, spectra.columns.to_numpy(dtype='float'),
        uw_per_cm2_per_nm.to_numpy())
    uw_per_cm2_per_nm = np.where(uw_per_cm2_per_nm < 0, 0, uw_per_cm2_per_nm)
    w_per_m2_per_nm = pd.DataFrame(uw_per_cm2_per_nm * 0.01)
    w_per_m2_per_nm.columns = pd.Index(wls)
    return w_per_m2_per_nm
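The per-pixel bandwidth above (one-sided differences at the ends, central differences inside) is exactly what np.gradient returns for the wavelength axis; a small check with an assumed, unevenly spaced axis:

import numpy as np

wls = np.array([380.0, 380.5, 381.1, 381.6, 382.2])   # toy, unevenly spaced pixel centres (nm)
nm_per_pixel = np.hstack([wls[1] - wls[0],
                          (wls[2:] - wls[:-2]) / 2,
                          wls[-1] - wls[-2]])
assert np.allclose(nm_per_pixel, np.gradient(wls))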
Example 7
    def zp_shift_correction( self, shift ):
        '''
        Correct zero-point shifts by resampling each row of the frames onto a grid offset by `shift`.
        '''

        for i, ( img, err, hdr ) in enumerate( zip( self.imgs, self.errs, self.hdrs ) ):

            print( F'[Zeropoint correction] Correction for {i+1} of { self.num } images' )
            if self.slit_along == 'row': data = img.T; erro = err.T
            if self.slit_along == 'col': data = img*1; erro = err*1

            # Resample
            # --------
            new_data = np.zeros( data.shape )
            new_erro = np.zeros( erro.shape )
            x = np.arange( data.shape[1] )
            for j in range( data.shape[0] ):
                print( F'\r[Zeropoint correction] Resampling ({j+1}/{data.shape[0]})-th row', end = '', flush = True )
                if shift[j] <= 0: fill = data[j][ 0]
                if shift[j] >  0: fill = data[j][-1]
                new_data[j], new_erro[j] = spectres( x, x - shift[j], data[j], erro[j], fill, verbose = False )
            print( '' )

            if self.slit_along == 'row': new_data = new_data.T; new_erro = new_erro.T
            
            self.imgs[i], self.errs[i] = new_data, new_erro
            self.hdrs[i]['COMMENT'] = 'Zeropoint shifted'
        
        return deepcopy( self.imgs ), deepcopy( self.errs ), deepcopy( self.hdrs )
Example 8
    def resample_spectrum(self, wavel_resample: np.ndarray) -> None:
        """
        Method for resampling the spectrum with ``spectres`` to a new
        wavelength grid.

        Parameters
        ----------
        wavel_resample : np.ndarray
            Wavelength points (um) to which the spectrum will be
            resampled.

        Returns
        -------
        NoneType
            None
        """

        self.flux = spectres.spectres(
            wavel_resample,
            self.wavelength,
            self.flux,
            spec_errs=None,
            fill=np.nan,
            verbose=True,
        )

        self.wavelength = wavel_resample
Example 9
def read_pycoco_template_sed(sedpath, canonical_lamb):
    times, lambs, fluxes = zip(np.loadtxt(sedpath, comments='#', delimiter=' ', unpack=True))

    if isinstance(times, tuple):
        times = times[0]
    if isinstance(lambs, tuple):
        lambs = lambs[0]
    if isinstance(fluxes, tuple):
        fluxes = fluxes[0]

    dct = {}
    for t, lm, fl in zip(times, lambs, fluxes):
        if t in dct:
            dct[t].append((lm, fl))
        else:
            dct[t] = [(lm, fl)]

    fluxlst = []
    time = np.array(sorted(dct.keys()))
    for t in time:
        lsttup = sorted(dct[t], key=lambda tup: tup[0])
        lamb, flux = zip(*lsttup)

        flux = spectres(canonical_lamb, np.array(lamb), np.array(flux), None, np.nan, False)

        fluxlst.append(flux)

    flux_matrix = np.row_stack(fluxlst)
    return time, canonical_lamb, flux_matrix, flux_matrix * np.nan
Example 10
def resample(wav, wav_band, R, flux):
    wav_min, wav_max = wav_band
    wav_central = (wav_min + wav_max) / 2
    wav_delta = wav_central / R
    wav_resampled = np.arange(wav_min, wav_max, wav_delta)
    flux_resampled = spectres(wav_resampled, wav, flux)
    return wav_resampled, flux_resampled
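A usage sketch for the helper above; the wavelength grid and flux are toy values (not from the original source), and the output grid has a constant step of (band centre)/R:

import numpy as np
from spectres import spectres

wav = np.linspace(9000.0, 13500.0, 20000)      # native grid (assumed units: Angstrom)
flux = 1.0 + 0.01 * np.sin(wav / 50.0)          # toy spectrum

wav_new, flux_new = resample(wav, wav_band=(11000.0, 13400.0), R=3000, flux=flux)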
Example 11
    def lnlike(self, theta, model, **kwargs):
        r'''Compute the likelihood of the photometry given the model.

        The likelihood is computed as:

        .. math:: \ln L = -\frac{1}{2} N \ln\left(2\pi\right) - \frac{1}{2}\ln\left(\mathrm{det}\,C\right) - \frac{1}{2} \left(\alpha F_\mathrm{obs} - F_\mathrm{mod}\right)^T C^{-1} \left(\alpha F_\mathrm{obs} - F_\mathrm{mod}\right)

        where N is the number of points in the spectrum, C is the covariance matrix, and F_obs and F_mod are the observed and predicted photometry, respectively.

        Parameters
        ----------
        theta: empty, included for compatibility reasons
        model: an instance of Model or a subclass

        Returns
        -------
        probFlux: float
            The natural logarithm of the likelihood of the data given the model
        '''
        ''' First take the model values (passed in) and compute synthetic Spectrum '''

        try:
            scaleFac = theta[0]
        except IndexError:  #Only possible if theta is scalar or can't be indexed
            scaleFac = theta

        #print(self)
        #wavelength = self.wavelength
        #modSpec = model.modelFlux #
        # Resample the model onto the (masked) observed wavelength grid
        modSpec = spectres(
            self.wavelength[self.mask], model.wavelength, model.modelFlux
        )
        ''' then update the covariance matrix for the parameters passed in '''
        #skip this for now
        #self.covMat =
        self.cov(theta[1:])
        #import matplotlib.pyplot as plt
        #plt.imshow(self.covMat)
        #plt.show()
        ''' then compute the likelihood for each photometric point in a vectorised statement '''
        a = scaleFac * self.value[self.mask] - modSpec
        #if not np.all(np.isfinite(a)):
        #    print(a)
        #    print(modSpec)

        #make this a try: except OverflowError to protect against large spectra (which will be most astronomical ones...)?
        b = 0  #np.log10(1./((np.float128(2.)*np.pi)**(len(self.value)) * np.linalg.det(self.covMat))
        #)

        b = -0.5 * len(self.value[self.mask]) * np.log(2 * np.pi) - (
            0.5 * self.logDetCovMat
        )  #less computationally intensive version of above
        #pass
        probFlux = b + (
            -0.5 *
            (np.matmul(a.T, np.matmul(inv(self.covMat[self.cov_mask]), a))))
        #print(((np.float128(2.)*np.pi)**(len(self.value))), np.linalg.det(self.covMat))
        #print(((np.float128(2.)*np.pi)**(len(self.value)) * np.linalg.det(self.covMat)))
        #print(b, probFlux)
        #exit()
        return probFlux
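A standalone numpy version of the log-likelihood in the docstring, independent of the class internals (the helper name and inputs are illustrative):

import numpy as np

def gaussian_lnlike(obs_flux, model_flux, cov, scale=1.0):
    """-0.5*N*ln(2*pi) - 0.5*ln(det C) - 0.5 * r^T C^-1 r, with r = scale*F_obs - F_mod."""
    resid = scale * obs_flux - model_flux
    _, logdet = np.linalg.slogdet(cov)            # numerically stable log-determinant
    chi2 = resid @ np.linalg.solve(cov, resid)    # r^T C^-1 r without forming the inverse
    return -0.5 * resid.size * np.log(2.0 * np.pi) - 0.5 * logdet - 0.5 * chi2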
Example 12
    def rebinData(self, new_wlen_bins):
        for name in self.observation_files.keys():
            print(name + " before " + str(len(self.data_wlen[name])))
            self.data_flux_nu[name], self.data_flux_nu_error[name] = \
                spectres(new_wlen_bins, self.data_wlen[name],
                         self.data_flux_nu[name], self.data_flux_nu_error[name])
            self.data_wlen[name] = new_wlen_bins
            print(name + " after " + str(len(self.data_wlen[name])))
Example 13
    def fit(self):

        # Set up zvals - grid points to be distributed among cores
        zvals = np.linspace(0., self.max_redshift, self.n_grid)

        core_zvals = mpi_split_array(zvals)

        core_all_chisq = np.zeros(
            (self.spec_cube.shape[0], core_zvals.shape[0]))

        for i in range(core_zvals.shape[0]):

            # Redshift at which to fit
            z = core_zvals[i]
            print(z)
            # Resample (redshifted) model grid to desired wavelengths
            spec_grid_res = spectres(self.spec_wavs, self.pc_wavs * (1. + z),
                                     self.spec_grid)

            # Do PCA on model grid at the chosen redshift
            pca = PCA(n_components=self.n_components)
            pca.fit(spec_grid_res)

            # Do principal component decomposition of observed spectra
            coefs = np.dot(pca.components_, self.spec_cube.T)

            # Calculate chi-squared values for best PCA decomposition
            # Could be made into an array operation, not limiting step
            for j in range(self.spec_cube.shape[0]):
                best_model = np.sum(coefs[:, j] * pca.components_.T, axis=1)
                resid = (best_model -
                         self.spec_cube[j, :]) / self.err_cube[j, :]
                chisq = np.sum(resid**2)
                dof = float(self.spec_wavs.shape[0] - self.n_components)
                core_all_chisq[j, i] = chisq / dof

                if self.make_plots and ((z - self.z_input[j])**2 < 0.01**2):
                    best_model = np.sum(coefs[:, j] * pca.components_.T,
                                        axis=1)
                    spec = np.c_[self.spec_wavs, best_model,
                                 self.spec_cube[j, :], self.err_cube[j, :]]

                    np.savetxt("best_model/" + self.IDs[j] + ".txt", spec)

        all_chisq = mpi_combine_array(core_all_chisq.T, self.n_grid).T

        if rank == 0:
            dd.io.save("all_chisq_" + self.run + ".h5", all_chisq)

            best_z = np.argmin(all_chisq, axis=1) * self.redshift_interval
            cat = pd.DataFrame(np.c_[self.IDs, best_z],
                               columns=["#ID", "z_best"])

            cat.to_csv("best_z_" + self.run + ".txt", sep="\t", index=False)
Example 14
def zp_shift_corr(imglist, hdrlist, imagetype, slit_along, shift, work_path):
    '''
    '''

    for i, (img, hdr) in enumerate(zip(imglist, hdrlist)):

        print(
            F'[Zeropoint Shift] Correction for {i+1} of {len(imglist)} {imagetype} images'
        )
        if slit_along == 'row':
            data = img[0].T
            erro = img[1].T
        else:
            data = img[0]
            erro = img[1]

        # Resample
        # --------
        new_data = np.zeros(data.shape)
        new_erro = np.zeros(erro.shape)
        x = np.arange(data.shape[1])
        for j in range(data.shape[0]):
            print(
                F'\r[Zeropoint Shift] Resampling ({j+1}/{data.shape[0]})-th row',
                end='',
                flush=True)
            if shift[j] <= 0: fill = data[j][0]
            if shift[j] > 0: fill = data[j][-1]
            new_data[j], new_erro[j] = spectres(x,
                                                x - shift[j],
                                                data[j],
                                                erro[j],
                                                fill,
                                                verbose=False)
        print('')

        if slit_along == 'row':
            new_data = new_data.T
            new_erro = new_erro.T
        new_data = (new_data.astype(np.float32), new_erro.astype(np.float32))

        # Write to file
        # -------------
        print(
            F'[Zeropoint Shift] Write to `{os.path.join( work_path, F"corr/{imagetype}.{str(i+1).zfill(len(str(len(imglist))))}.fits" )}`\n'
        )
        hdr['COMMENT'] = 'Zeropoint shifted'
        fits.writeto(os.path.join(
            work_path,
            F'corr/{imagetype}.{str(i+1).zfill(len(str(len(imglist))))}.fits'),
                     data=new_data,
                     header=hdr,
                     overwrite=True)
Example 15
	def convertSpectrum(self,redshift):
		"""
		Shifts the spectrum in the rest-frame and creates a spectrum with the sampling desired.
		Uses the spectres package from A.C. Carnall
		:param redshift: redshift of the spectrum
		returns the new flux and flux error arrays
		"""	
		nwave=self.wavelength/(1+redshift)
		
		#inL=(self.wave>nwave.min())&(self.wave<nwave.max())
		#outL=(inL==False)

		#points=interp1d(nwave,nwave * self.fluxl)
		#pts=points(self.wave[inL]) / self.wave[inL]
		#res=n.ones_like(self.wave)*self.dV
		#res[inL]=pts

		#pointsErr=interp1d(nwave,nwave * self.fluxlErr)
		#ptsErr=pointsErr(self.wave[inL]) / self.wave[inL]
		#resErr=n.ones_like(self.wave)*self.dV
		#resErr[inL]=ptsErr

		#return res, resErr
		wavelength_spectrum = n.hstack(( 
			self.wave[0]-10, 
			self.wave[0]-5,
			n.min(nwave)-10,
			n.min(nwave)-5,
			nwave,
			n.max(nwave)+5,
			n.max(nwave)+10,
			self.wave[-1]+5, 
			self.wave[-1]+10
			))
		#
		flux_spectrum = n.hstack(( 
			self.dV,self.dV,self.dV,self.fluxl[0],
			self.fluxl,
			self.fluxl[-1],self.dV,self.dV,self.dV
			))
		#
		flux_error_spectrum = n.hstack(( 
			self.dV,self.dV,self.dV,self.dV,
			self.fluxlErr,
			self.dV,self.dV,self.dV,self.dV
			))	
		#
		final_spectrum, final_spectrum_err = sp.spectres(
			self.wave, 
			wavelength_spectrum, 
			flux_spectrum, 
			flux_error_spectrum )
		return final_spectrum, final_spectrum_err
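The padding with self.dV above guards against new wavelength points that fall outside the rest-frame coverage; recent spectres releases expose a fill argument for the same purpose. A sketch with assumed inputs (not the class's own method):

import numpy as np
from spectres import spectres

obs_wave = np.linspace(3800.0, 9200.0, 4000)     # observed-frame grid (assumed)
flux = np.ones_like(obs_wave)
flux_err = np.full_like(obs_wave, 0.1)
redshift = 0.3

rest_wave = obs_wave / (1.0 + redshift)
new_flux, new_err = spectres(obs_wave, rest_wave, flux, flux_err,
                             fill=0.0, verbose=False)   # uncovered pixels are set to 0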
Example 16
def mtrdr_color_matching(wave_list):
    """Adjusts the CIE color matching function to span the given wavelength range."""
    #Import CIE color matching function
    #Index 0 - wavelengths, Index 1 - red matching function
    #Index 2 - green matching function, Index 3 - blue matching function
    cie_matrix = np.genfromtxt("matching_functions/cie-cmf.txt")

    #Import tab-delimited file of wavelength axis
    mtrdr_axis = modify_mtrdr_axis()

    #Find mtrdr axis indices with closest values to user-specified values.
    short = find_band(mtrdr_axis, wave_list[0])
    long = find_band(mtrdr_axis, wave_list[1])

    ##Now use normalization to rescale wavelength axis of CIE color matching functions
    #to user-specified wavelength range...
    cie_matrix[:, 0] = (mtrdr_axis[long] - mtrdr_axis[short]) / (
        cie_matrix[-1, 0] - cie_matrix[0, 0]) * (
            cie_matrix[:, 0] - cie_matrix[-1, 0]) + mtrdr_axis[long]

    #..then resample CIE function values using MTRDR axis values
    red = spec.spectres(mtrdr_axis[short:long],
                        cie_matrix[:, 0],
                        cie_matrix[:, 1],
                        fill=0,
                        verbose=False)
    green = spec.spectres(mtrdr_axis[short:long],
                          cie_matrix[:, 0],
                          cie_matrix[:, 2],
                          fill=0,
                          verbose=False)
    blue = spec.spectres(mtrdr_axis[short:long],
                         cie_matrix[:, 0],
                         cie_matrix[:, 3],
                         fill=0,
                         verbose=False)

    #Stack the resampled matching functions into a single array
    new_mat = np.stack([red, green, blue], axis=-1)
    return new_mat
Example 17
    def resample_spectrum(self,
                          wavel_points: np.ndarray,
                          model_param: Optional[Dict[str, float]] = None,
                          apply_mask: bool = False) -> box.SpectrumBox:
        """
        Function for resampling of a spectrum and uncertainties onto a new wavelength grid.

        Parameters
        ----------
        wavel_points : np.ndarray
            Wavelength points (um).
        model_param : dict, None
            Model parameters. Should contain the 'scaling' value. Not used if set to ``None``.
        apply_mask : bool
            Exclude negative values and NaN values.

        Returns
        -------
        species.core.box.SpectrumBox
            Box with the resampled spectrum.
        """

        calibbox = self.get_spectrum()

        if apply_mask:
            indices = np.where(calibbox.flux > 0.)[0]

            calibbox.wavelength = calibbox.wavelength[indices]
            calibbox.flux = calibbox.flux[indices]
            calibbox.error = calibbox.error[indices]

        flux_new, error_new = spectres.spectres(wavel_points,
                                                calibbox.wavelength,
                                                calibbox.flux,
                                                spec_errs=calibbox.error,
                                                fill=0.,
                                                verbose=False)

        if model_param is not None:
            flux_new = model_param['scaling'] * flux_new
            error_new = model_param['scaling'] * error_new

        return box.create_box(boxtype='spectrum',
                              spectrum='calibration',
                              wavelength=wavel_points,
                              flux=flux_new,
                              error=error_new,
                              name=self.tag,
                              simbad=None,
                              sptype=None,
                              distance=None)
Example 18
    def _prepare_CvD18_ssps(self):
        """ Process SSP models. """
        ssp_files = glob.glob(os.path.join(self.libpath, "VCJ*.s100"))
        if len(ssp_files) == 0:
            raise ValueError(f"Stellar populations not found in libpath: "
                             f"{self.libpath}")
        nimf = 16
        imfs = 0.5 + np.arange(nimf) / 5
        x2s, x1s = np.stack(np.meshgrid(imfs, imfs)).reshape(2, -1)
        velscale = int(self.sigma / 4)
        kernel_sigma = np.sqrt(self.sigma ** 2 - 100 ** 2) / velscale
        ssps, params = [], []
        for fname in tqdm(ssp_files, desc="Processing SSP files"):
            spec = os.path.split(fname)[1]
            T = float(spec.split("_")[3][1:])
            Z = float(spec.split("_")[4][1:-8].replace("p", "+").replace(
                        "m", "-"))
            for i, (x1, x2) in enumerate(zip(x1s, x2s)):
                params.append(Table([[Z], [T], [x1], [x2]],
                                    names=["Z", "Age", "x1", "x2"]))
            data = np.loadtxt(fname)
            w = data[:,0]
            if self.sigma > 100:
                wvel = disp2vel(w, velscale)
            ssp = data[:, 1:].T
            if self.sigma <= 100:
                newssp = spectres(self.wave, w, ssp)
            else:
                ssp_rebin = spectres(wvel, w, ssp)
                ssp_broad = gaussian_filter1d(ssp_rebin, kernel_sigma,
                                              mode="constant", cval=0.0)
                newssp = spectres(self.wave, wvel, ssp_broad)

            ssps.append(newssp)
        ssps = np.vstack(ssps)
        params = vstack(params)
        return ssps, params
Example 19
def rebin(wlen, flux, wlen_data, flux_err=None, method='linear'):
    # wlen is assumed to span a wider range than wlen_data

    # 'linear': extend wlen_data linearly on each side, using the edge spacing
    if method == 'linear':
        stepsize_left = abs(wlen_data[1] - wlen_data[0])

        N_left = int((wlen_data[0] - wlen[0]) / stepsize_left) - 1
        wlen_left = np.linspace(wlen_data[0] - N_left * stepsize_left,
                                wlen_data[0],
                                N_left,
                                endpoint=False)

        stepsize_right = wlen_data[-1] - wlen_data[-2]

        N_right = int((wlen[-1] - wlen_data[-1]) / stepsize_right) - 1
        wlen_right = np.linspace(wlen_data[-1] + stepsize_right,
                                 wlen_data[-1] +
                                 (N_right + 1) * stepsize_right,
                                 N_right,
                                 endpoint=False)

        wlen_temp = np.concatenate((wlen_left, wlen_data, wlen_right))
    elif method == 'datalike':
        wlen_temp = wlen_data
    if flux_err is not None:
        assert (np.shape(flux_err) == np.shape(flux))
        flux_temp, flux_new_err = spectres(wlen_temp,
                                           wlen,
                                           flux,
                                           spec_errs=flux_err)
        return wlen_temp, flux_temp, flux_new_err
    else:
        flux_temp = spectres(wlen_temp, wlen, flux)
        return wlen_temp, flux_temp
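A usage sketch for rebin with both methods; the grids and flux are toy values (the model grid must span a wider range than the data grid, as the first comment notes):

import numpy as np
from spectres import spectres

wlen = np.linspace(0.9, 2.6, 6000)                       # model grid, wider than the data grid
flux = np.exp(-0.5 * ((wlen - 1.6) / 0.1) ** 2)          # toy Gaussian feature
wlen_data = np.linspace(1.0, 2.5, 300)

w_ext, f_ext = rebin(wlen, flux, wlen_data, method='linear')     # extended data-like grid
w_dat, f_dat = rebin(wlen, flux, wlen_data, method='datalike')   # exactly the data grid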
Example 20
    def rebin_spectra(self, spec, wl, resample_wl):
        """
        Resample spectra onto a new wavelength grid using the `spectres` package

        Args:
            spec: (array[N, l]) spectra
            wl: (array[l]) input wavelength grid
            resample_wl: (array[L]) output wavelength grid

        Returns:
            resamp_spec (array[N, L])
        """
        resamp_spec = spectres.spectres(resampling=resample_wl,
                                        spec_fluxes=spec.T,
                                        spec_wavs=wl).T

        return resamp_spec
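The keyword names in the call above (resampling, spec_wavs, spec_fluxes) appear to belong to an older spectres release; with the current API the equivalent call would look roughly like the sketch below (toy inputs, and note that current spectres resamples along the last axis):

import numpy as np
from spectres import spectres

wl = np.linspace(4000.0, 7000.0, 3000)           # native grid (l points, assumed)
spec = np.random.rand(10, wl.size)               # N x l block of spectra
resample_wl = np.linspace(4100.0, 6900.0, 500)   # target grid (L points)

resamp_spec = spectres(new_wavs=resample_wl, spec_wavs=wl, spec_fluxes=spec)   # -> (N, L)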
Example 21
    def __init__(self, velscale, sigma, _noread=False):
        """ Load Miles templates for simulations. """
        self.velscale = velscale
        self.sigma = sigma
        miles_path = os.path.join(context.basedir, "ppxf/miles_models")
        # Search for spectra and their properties
        fitsfiles = [_ for _ in os.listdir(miles_path) if _.endswith(".fits")]
        # Define the different values of metallicities and ages of templates
        self.metals = np.unique([
            float(
                _.split("Z")[1].split("T")[0].replace("m",
                                                      "-").replace("p", "+"))
            for _ in fitsfiles
        ])
        self.ages = np.unique([
            float(
                _.split("T")[1].split("_iP")[0].replace("m",
                                                        "-").replace("p", "+"))
            for _ in fitsfiles
        ])
        # Defining arrays
        self.ages2D, self.metals2D = np.meshgrid(self.ages, self.metals)
        self.metals1D = self.metals2D.reshape(-1)
        self.ages1D = self.ages2D.reshape(-1)
        if _noread:
            return
        templates, norms = [], []
        for metal, age in zip(self.metals1D, self.ages1D):
            template_file = os.path.join(miles_path,
                                         self.miles_filename(metal, age))
            spec = read_fits_spectrum1d(template_file)
            wave = spec.dispersion
            flux = spec.flux
            speclog, logwave, _ = util.log_rebin([wave[0], wave[-1]],
                                                 flux,
                                                 velscale=self.velscale)
            speclog = gaussian_filter1d(speclog, sigma / velscale)
            wave = wave[1:-2]
            flux = spectres(wave, np.exp(logwave), speclog)
            norm = np.sum(flux)
            templates.append(flux / norm)
            norms.append(norm)  # keep the normalisation so self.norms is not left empty
        self.templates = np.array(templates)
        self.norms = np.array(norms)
        self.wave = wave
        return
Example 22
def add_drift_phoenix(input_path: str,
                      database: h5py._hl.files.File,
                      wavel_range: Optional[Tuple[float, float]] = None,
                      teff_range: Optional[Tuple[float, float]] = None,
                      spec_res: Optional[float] = None) -> None:
    """
    Function for adding the DRIFT-PHOENIX atmospheric models to the database. The original spectra
    were downloaded from http://svo2.cab.inta-csic.es/theory/newov2/index.php?models=drift and have
    been resampled to a spectral resolution of R = 2000 from 0.1 to 50 um.

    Parameters
    ----------
    input_path : str
        Folder where the data is located.
    database : h5py._hl.files.File
        Database.
    wavel_range : tuple(float, float), None
        Wavelength range (um). The full wavelength range (0.1-50 um) is stored if set to ``None``.
        Only used in combination with ``spec_res``.
    teff_range : tuple(float, float), None
        Effective temperature range (K). All available temperatures are stored if set to ``None``.
    spec_res : float, None
        Spectral resolution. The data is stored with the spectral resolution of the input spectra
        (R = 2000) if set to ``None``. Only used in combination with ``wavel_range``.

    Returns
    -------
    NoneType
        None
    """

    if not os.path.exists(input_path):
        os.makedirs(input_path)

    input_file = 'drift-phoenix.tgz'
    url = 'https://people.phys.ethz.ch/~ipa/tstolker/drift-phoenix.tgz'

    data_folder = os.path.join(input_path, 'drift-phoenix/')
    data_file = os.path.join(input_path, input_file)

    if not os.path.exists(data_folder):
        os.makedirs(data_folder)

    if not os.path.isfile(data_file):
        print('Downloading DRIFT-PHOENIX model spectra (229 MB)...',
              end='',
              flush=True)
        urllib.request.urlretrieve(url, data_file)
        print(' [DONE]')

    print('Unpacking DRIFT-PHOENIX model spectra (229 MB)...',
          end='',
          flush=True)
    tar = tarfile.open(data_file)
    tar.extractall(data_folder)
    tar.close()
    print(' [DONE]')

    teff = []
    logg = []
    feh = []
    flux = []

    if wavel_range is not None and spec_res is not None:
        wavelength = read_util.create_wavelengths(wavel_range, spec_res)
    else:
        wavelength = None

    for _, _, files in os.walk(data_folder):
        for filename in files:
            if filename[:14] == 'drift-phoenix_':
                file_split = filename.split('_')

                teff_val = float(file_split[2])
                logg_val = float(file_split[4])
                feh_val = float(file_split[6])

                if teff_range is not None:
                    if teff_val < teff_range[0] or teff_val > teff_range[1]:
                        continue

                print_message = f'Adding DRIFT-PHOENIX model spectra... {filename}'
                print(f'\r{print_message:<88}', end='')

                data_wavel, data_flux = np.loadtxt(os.path.join(
                    data_folder, filename),
                                                   unpack=True)

                teff.append(teff_val)
                logg.append(logg_val)
                feh.append(feh_val)

                if wavel_range is None or spec_res is None:
                    if wavelength is None:
                        wavelength = np.copy(data_wavel)  # (um)

                    if np.all(np.diff(wavelength) < 0):
                        raise ValueError(
                            'The wavelengths are not all sorted by increasing value.'
                        )

                    flux.append(data_flux)  # (W m-2 um-1)

                else:
                    flux_resample = spectres.spectres(wavelength,
                                                      data_wavel,
                                                      data_flux,
                                                      spec_errs=None,
                                                      fill=np.nan,
                                                      verbose=False)

                    if np.isnan(np.sum(flux_resample)):
                        raise ValueError(
                            f'Resampling is only possible if the new wavelength '
                            f'range ({wavelength[0]} - {wavelength[-1]} um) falls '
                            f'sufficiently far within the wavelength range '
                            f'({data_wavel[0]} - {data_wavel[-1]} um) of the input '
                            f'spectra.')

                    flux.append(flux_resample)  # (W m-2 um-1)

    print_message = 'Adding DRIFT-PHOENIX model spectra... [DONE]'
    print(f'\r{print_message:<88}')

    data_sorted = data_util.sort_data(np.asarray(teff), np.asarray(logg),
                                      np.asarray(feh), None, None, wavelength,
                                      np.asarray(flux))

    data_util.write_data('drift-phoenix', ['teff', 'logg', 'feh'], database,
                         data_sorted)
Example 23
def add_btsettl(input_path: str, database: h5py._hl.files.File,
                wavel_range: Optional[Tuple[float, float]],
                teff_range: Optional[Tuple[float, float]],
                spec_res: Optional[float]) -> None:
    """
    Function for adding the BT-Settl atmospheric models (solar metallicity) to the database.
    The spectra had been downloaded from the Theoretical spectra web server
    (http://svo2.cab.inta-csic.es/svo/theory/newov2/index.php?models=bt-settl) and resampled
    to a spectral resolution of 5000 from 0.1 to 100 um.

    Parameters
    ----------
    input_path : str
        Folder where the data is located.
    database : h5py._hl.files.File
        Database.
    wavel_range : tuple(float, float), None
        Wavelength range (um). The original wavelength points are used if set to ``None``.
    teff_range : tuple(float, float), None
        Effective temperature range (K). All temperatures are selected if set to ``None``.
    spec_res : float, None
        Spectral resolution. Not used if ``wavel_range`` is set to ``None``.

    Returns
    -------
    NoneType
        None
    """

    if not os.path.exists(input_path):
        os.makedirs(input_path)

    input_file = 'bt-settl.tgz'

    data_folder = os.path.join(input_path, 'bt-settl/')
    data_file = os.path.join(input_path, input_file)

    if not os.path.exists(data_folder):
        os.makedirs(data_folder)

    url = 'https://people.phys.ethz.ch/~ipa/tstolker/bt-settl.tgz'

    if not os.path.isfile(data_file):
        print('Downloading BT-Settl model spectra (130 MB)...',
              end='',
              flush=True)
        urllib.request.urlretrieve(url, data_file)
        print(' [DONE]')

    print('Unpacking BT-Settl model spectra (130 MB)...', end='', flush=True)
    tar = tarfile.open(data_file)
    tar.extractall(data_folder)
    tar.close()
    print(' [DONE]')

    teff = []
    logg = []
    flux = []

    if wavel_range is not None and spec_res is not None:
        wavelength = read_util.create_wavelengths(wavel_range, spec_res)
    else:
        wavelength = None

    for _, _, file_list in os.walk(data_folder):
        for filename in sorted(file_list):
            if filename[:9] == 'bt-settl_':
                file_split = filename.split('_')

                teff_val = float(file_split[2])
                logg_val = float(file_split[4])

                if teff_range is not None:
                    if teff_val < teff_range[0] or teff_val > teff_range[1]:
                        continue

                print_message = f'Adding BT-Settl model spectra... {filename}'
                print(f'\r{print_message:<69}', end='')

                data_wavel, data_flux = np.loadtxt(os.path.join(
                    data_folder, filename),
                                                   unpack=True)

                teff.append(teff_val)
                logg.append(logg_val)

                if wavel_range is None or spec_res is None:
                    if wavelength is None:
                        wavelength = np.copy(data_wavel)  # (um)

                    if np.all(np.diff(wavelength) < 0):
                        raise ValueError(
                            'The wavelengths are not all sorted by increasing value.'
                        )

                    flux.append(data_flux)  # (W m-2 um-1)

                else:
                    flux_resample = spectres.spectres(wavelength,
                                                      data_wavel,
                                                      data_flux,
                                                      spec_errs=None,
                                                      fill=np.nan,
                                                      verbose=False)

                    if np.isnan(np.sum(flux_resample)):
                        raise ValueError(
                            f'Resampling is only possible if the new wavelength '
                            f'range ({wavelength[0]} - {wavelength[-1]} um) falls '
                            f'sufficiently far within the wavelength range '
                            f'({data_wavel[0]} - {data_wavel[-1]} um) of the input '
                            f'spectra.')

                    flux.append(flux_resample)  # (W m-2 um-1)

    print_message = 'Adding BT-Settl model spectra... [DONE]'
    print(f'\r{print_message:<69}')

    data_sorted = data_util.sort_data(np.asarray(teff), np.asarray(logg), None,
                                      None, None, wavelength, np.asarray(flux))

    data_util.write_data('bt-settl', ['teff', 'logg'], database, data_sorted)
Example 24
def get_residuals(datatype: str,
                  spectrum: str,
                  parameters: Dict[str, float],
                  objectbox: box.ObjectBox,
                  inc_phot: Union[bool, List[str]] = True,
                  inc_spec: Union[bool, List[str]] = True,
                  **kwargs_radtrans: Optional[dict]) -> box.ResidualsBox:
    """
    Parameters
    ----------
    datatype : str
        Data type ('model' or 'calibration').
    spectrum : str
        Name of the atmospheric model or calibration spectrum.
    parameters : dict
        Parameters and values for the spectrum
    objectbox : species.core.box.ObjectBox
        Box with the photometry and/or spectra of an object. A scaling and/or error inflation of
        the spectra should be applied with :func:`~species.util.read_util.update_spectra`
        beforehand.
    inc_phot : bool, list(str)
        Include photometric data in the fit. If a boolean, either all (``True``) or none
        (``False``) of the data are selected. If a list, a subset of filter names (as stored in
        the database) can be provided.
    inc_spec : bool, list(str)
        Include spectroscopic data in the fit. If a boolean, either all (``True``) or none
        (``False``) of the data are selected. If a list, a subset of spectrum names (as stored
        in the database with :func:`~species.data.database.Database.add_object`) can be
        provided.

    Keyword arguments
    -----------------
    kwargs_radtrans : dict
        Dictionary with the keyword arguments for the ``ReadRadtrans`` object, containing
        ``line_species``, ``cloud_species``, and ``scattering``.

    Returns
    -------
    species.core.box.ResidualsBox
        Box with the residuals.
    """

    if 'filters' in kwargs_radtrans:
        warnings.warn('The \'filters\' parameter has been deprecated. Please use the \'inc_phot\' '
                      'parameter instead. The \'filters\' parameter is ignored.')

    if isinstance(inc_phot, bool) and inc_phot:
        inc_phot = objectbox.filters

    if inc_phot:
        model_phot = multi_photometry(datatype=datatype,
                                      spectrum=spectrum,
                                      filters=inc_phot,
                                      parameters=parameters)

        res_phot = {}

        for item in inc_phot:
            transmission = read_filter.ReadFilter(item)
            res_phot[item] = np.zeros(objectbox.flux[item].shape)

            if objectbox.flux[item].ndim == 1:
                res_phot[item][0] = transmission.mean_wavelength()
                res_phot[item][1] = (objectbox.flux[item][0]-model_phot.flux[item]) / \
                    objectbox.flux[item][1]

            elif objectbox.flux[item].ndim == 2:
                for j in range(objectbox.flux[item].shape[1]):
                    res_phot[item][0, j] = transmission.mean_wavelength()
                    res_phot[item][1, j] = (objectbox.flux[item][0, j]-model_phot.flux[item]) / \
                        objectbox.flux[item][1, j]

    else:
        res_phot = None

    if inc_spec:
        res_spec = {}

        readmodel = None

        for key in objectbox.spectrum:
            if isinstance(inc_spec, bool) or key in inc_spec:
                wavel_range = (0.9*objectbox.spectrum[key][0][0, 0],
                               1.1*objectbox.spectrum[key][0][-1, 0])

                wl_new = objectbox.spectrum[key][0][:, 0]
                spec_res = objectbox.spectrum[key][3]

                if spectrum == 'planck':
                    readmodel = read_planck.ReadPlanck(wavel_range=wavel_range)

                    model = readmodel.get_spectrum(model_param=parameters, spec_res=1000.)

                    flux_new = spectres.spectres(wl_new,
                                                 model.wavelength,
                                                 model.flux,
                                                 spec_errs=None,
                                                 fill=0.,
                                                 verbose=True)

                else:
                    if spectrum == 'petitradtrans':
                        # TODO change back
                        pass

                        # radtrans = read_radtrans.ReadRadtrans(line_species=kwargs_radtrans['line_species'],
                        #                                       cloud_species=kwargs_radtrans['cloud_species'],
                        #                                       scattering=kwargs_radtrans['scattering'],
                        #                                       wavel_range=wavel_range)
                        #
                        # model = radtrans.get_model(parameters, spec_res=None)
                        #
                        # # separate resampling to the new wavelength points
                        #
                        # flux_new = spectres.spectres(wl_new,
                        #                              model.wavelength,
                        #                              model.flux,
                        #                              spec_errs=None,
                        #                              fill=0.,
                        #                              verbose=True)

                    else:
                        readmodel = read_model.ReadModel(spectrum, wavel_range=wavel_range)

                        # resampling to the new wavelength points is done in the get_model function

                        model_spec = readmodel.get_model(parameters,
                                                         spec_res=spec_res,
                                                         wavel_resample=wl_new,
                                                         smooth=True)

                        flux_new = model_spec.flux

                data_spec = objectbox.spectrum[key][0]
                res_tmp = (data_spec[:, 1]-flux_new) / data_spec[:, 2]

                res_spec[key] = np.column_stack([wl_new, res_tmp])

    else:
        res_spec = None

    print('Calculating residuals... [DONE]')

    print('Residuals (sigma):')

    if res_phot is not None:
        for item in inc_phot:
            if res_phot[item].ndim == 1:
                print(f'   - {item}: {res_phot[item][1]:.2f}')

            elif res_phot[item].ndim == 2:
                for j in range(res_phot[item].shape[1]):
                    print(f'   - {item}: {res_phot[item][1, j]:.2f}')

    if res_spec is not None:
        for key in objectbox.spectrum:
            if isinstance(inc_spec, bool) or key in inc_spec:
                print(f'   - {key}: min: {np.nanmin(res_spec[key]):.2f}, '
                      f'max: {np.nanmax(res_spec[key]):.2f}')

    return box.create_box(boxtype='residuals',
                          name=objectbox.name,
                          photometry=res_phot,
                          spectrum=res_spec)
Example 25
def add_exo_rem(input_path: str,
                database: h5py._hl.files.File,
                wavel_range: Optional[Tuple[float, float]] = None,
                teff_range: Optional[Tuple[float, float]] = None,
                spec_res: Optional[float] = None) -> None:
    """
    Function for adding the Exo-REM atmospheric models to the database.

    Parameters
    ----------
    input_path : str
        Folder where the data is located.
    database : h5py._hl.files.File
        Database.
    wavel_range : tuple(float, float), None
        Wavelength range (um). The original wavelength points with a spectral resolution of 5000
        are used if set to ``None``.
    teff_range : tuple(float, float), None
        Effective temperature range (K). All temperatures are selected if set to ``None``.
    spec_res : float, None
        Spectral resolution. Not used if ``wavel_range`` is set to ``None``.

    Returns
    -------
    NoneType
        None
    """

    if not os.path.exists(input_path):
        os.makedirs(input_path)

    input_file = 'exo-rem.tgz'
    url = 'https://people.phys.ethz.ch/~ipa/tstolker/exo-rem.tgz'

    data_folder = os.path.join(input_path, 'exo-rem/')
    data_file = os.path.join(data_folder, input_file)

    if not os.path.exists(data_folder):
        os.makedirs(data_folder)

    if not os.path.isfile(data_file):
        print('Downloading Exo-REM model spectra (790 MB)...',
              end='',
              flush=True)
        urllib.request.urlretrieve(url, data_file)
        print(' [DONE]')

    print('Unpacking Exo-REM model spectra (790 MB)...', end='', flush=True)
    tar = tarfile.open(data_file)
    tar.extractall(data_folder)
    tar.close()
    print(' [DONE]')

    teff = []
    logg = []
    feh = []
    co_ratio = []
    flux = []

    if wavel_range is not None and spec_res is not None:
        wavelength = read_util.create_wavelengths(wavel_range, spec_res)
    else:
        wavelength = None

    for _, _, files in os.walk(data_folder):
        for filename in files:
            if filename[:8] == 'exo-rem_':
                file_split = filename.split('_')

                teff_val = float(file_split[2])
                logg_val = float(file_split[4])
                feh_val = float(file_split[6])
                co_val = float(file_split[8])

                if logg_val == 5.:
                    continue

                if co_val in [0.8, 0.85]:
                    continue

                if teff_range is not None:
                    if teff_val < teff_range[0] or teff_val > teff_range[1]:
                        continue

                print_message = f'Adding Exo-REM model spectra... {filename}'
                print(f'\r{print_message:<84}', end='')

                data_wavel, data_flux = np.loadtxt(os.path.join(
                    data_folder, filename),
                                                   unpack=True)

                teff.append(teff_val)
                logg.append(logg_val)
                feh.append(feh_val)
                co_ratio.append(co_val)

                if wavel_range is None or spec_res is None:
                    if wavelength is None:
                        wavelength = np.copy(data_wavel)  # (um)

                    if np.all(np.diff(wavelength) < 0):
                        raise ValueError(
                            'The wavelengths are not all sorted by increasing value.'
                        )

                    flux.append(data_flux)  # (W m-2 um-1)

                else:
                    flux_resample = spectres.spectres(wavelength,
                                                      data_wavel,
                                                      data_flux,
                                                      spec_errs=None,
                                                      fill=np.nan,
                                                      verbose=False)

                    if np.isnan(np.sum(flux_resample)):
                        raise ValueError(
                            f'Resampling is only possible if the new wavelength '
                            f'range ({wavelength[0]} - {wavelength[-1]} um) falls '
                            f'sufficiently far within the wavelength range '
                            f'({data_wavel[0]} - {data_wavel[-1]} um) of the input '
                            f'spectra.')

                    flux.append(flux_resample)  # (W m-2 um-1)

    print_message = 'Adding Exo-REM model spectra... [DONE]'
    print(f'\r{print_message:<84}')

    print('Grid points with the following parameters have been excluded:')
    print('   - log(g) = 5')
    print('   - C/O = 0.8')
    print('   - C/O = 0.85')

    data_sorted = data_util.sort_data(np.asarray(teff), np.asarray(logg),
                                      np.asarray(feh), np.asarray(co_ratio),
                                      None, wavelength, np.asarray(flux))

    data_util.write_data('exo-rem', ['teff', 'logg', 'feh', 'co'], database,
                         data_sorted)
Example 26
    n = len(X)
    ids = np.zeros(n, dtype=ID_DTYPE)
    ids["lmjd"], ids["planid"], ids["spid"], ids[
        "fiberid"] = lmjds, planids, spids, fiberids

    # add labels
    y = np.isin(ids, qso_ids, assume_unique=True)

    # resample
    N_WAVES = 2048
    LOGLAMMIN, LOGLAMMAX = 3.5843, 3.9501
    EPS = 0.00005
    # original and new wavelengths (EPS not to get NaNs)
    lam = np.logspace(LOGLAMMIN, LOGLAMMAX, 3659)
    new_lam = np.logspace(LOGLAMMIN + EPS, LOGLAMMAX - EPS, N_WAVES)
    X = spectres(new_lam, lam, X, verbose=True).astype(np.float32, copy=False)

    # minmax scale each spectrum
    X = minmax_scale(X, feature_range=(-1, 1), axis=1, copy=False)

    # split into training, validation and test set (sizes according to ILSVRC)
    # size according to ILSVRC
    N_VAL, N_TEST = 50000, 100000
    n_tr = n - N_VAL - N_TEST
    # seed from random.org
    rng = np.random.default_rng(seed=26)
    rnd_idx = rng.permutation(n)
    idx_tr, idx_va, idx_te = rnd_idx[:n_tr], rnd_idx[n_tr:n_tr +
                                                     N_VAL], rnd_idx[n_tr +
                                                                     N_VAL:]
Example 27
flux_blue = onedspec_blue.science_spectrum_list[0].flux_resampled

flux_red_err = onedspec_red.science_spectrum_list[0].flux_err_resampled
flux_blue_err = onedspec_blue.science_spectrum_list[0].flux_err_resampled

# trim the last ~100A from the blue and the first ~100A from the red
# in the combined spectrum
red_limit = 5000
blue_limit = 5800

blue_mask = (wave_blue >= red_limit) & (wave_blue <= blue_limit)
red_mask = (wave_red >= red_limit) & (wave_red <= blue_limit)

# resample the red to match blue resolution
flux_red_resampled, flux_red_resampled_err = spectres(wave_blue[blue_mask],
                                                      wave_red[red_mask],
                                                      flux_red[red_mask],
                                                      flux_red_err[red_mask])

flux_weighted_combine = (flux_red_resampled / flux_red_resampled_err +
                         flux_blue[blue_mask] / flux_blue_err[blue_mask]) / (
                             1 / flux_red_resampled_err +
                             1 / flux_blue_err[blue_mask])

plt.figure(1, figsize=(16, 8))
plt.clf()
plt.plot(wave_blue, flux_blue, color='blue', label='Blue arm (ASPIRED)')
plt.plot(wave_red, flux_red, color='red', label='Red arm (ASPIRED)')
plt.xlim(3300., 10500.)
# the upper limit's second term is an assumed completion (the original call is
# truncated here)
plt.ylim(
    0,
    max(np.nanpercentile(flux_red_resampled, 99.5),
        np.nanpercentile(flux_blue, 99.5)))
Esempio n. 28
0
def get_residuals(
    datatype: str,
    spectrum: str,
    parameters: Dict[str, float],
    objectbox: box.ObjectBox,
    inc_phot: Union[bool, List[str]] = True,
    inc_spec: Union[bool, List[str]] = True,
    radtrans: Optional[read_radtrans.ReadRadtrans] = None,
) -> box.ResidualsBox:
    """
    Function for calculating the residuals from fitting model or
    calibration spectra to a set of spectra and/or photometry.

    Parameters
    ----------
    datatype : str
        Data type ('model' or 'calibration').
    spectrum : str
        Name of the atmospheric model or calibration spectrum.
    parameters : dict
        Parameters and values for the spectrum.
    objectbox : species.core.box.ObjectBox
        Box with the photometry and/or spectra of an object. A scaling
        and/or error inflation of the spectra should be applied with
        :func:`~species.util.read_util.update_spectra` beforehand.
    inc_phot : bool, list(str)
        Include photometric data in the fit. If a boolean, either all
        (``True``) or none (``False``) of the data are selected. If a
        list, a subset of filter names (as stored in the database) can
        be provided.
    inc_spec : bool, list(str)
        Include spectroscopic data in the fit. If a boolean, either all
        (``True``) or none (``False``) of the data are selected. If a
        list, a subset of spectrum names (as stored in the database
        with :func:`~species.data.database.Database.add_object`) can be
        provided.
    radtrans : read_radtrans.ReadRadtrans, None
        Instance of :class:`~species.read.read_radtrans.ReadRadtrans`.
        Only required with ``spectrum='petitradtrans'``. Make sure that
        the ``wavel_range`` of the ``ReadRadtrans`` instance is
        sufficiently broad to cover all the photometric and
        spectroscopic data of ``inc_phot`` and ``inc_spec``. Not used
        if set to ``None``.

    Returns
    -------
    species.core.box.ResidualsBox
        Box with the residuals.
    """

    if isinstance(inc_phot, bool) and inc_phot:
        inc_phot = objectbox.filters

    if inc_phot:
        model_phot = multi_photometry(
            datatype=datatype,
            spectrum=spectrum,
            filters=inc_phot,
            parameters=parameters,
            radtrans=radtrans,
        )

        res_phot = {}

        for item in inc_phot:
            transmission = read_filter.ReadFilter(item)
            res_phot[item] = np.zeros(objectbox.flux[item].shape)

            if objectbox.flux[item].ndim == 1:
                res_phot[item][0] = transmission.mean_wavelength()
                res_phot[item][1] = (
                    objectbox.flux[item][0] -
                    model_phot.flux[item]) / objectbox.flux[item][1]

            elif objectbox.flux[item].ndim == 2:
                for j in range(objectbox.flux[item].shape[1]):
                    res_phot[item][0, j] = transmission.mean_wavelength()
                    res_phot[item][1, j] = (
                        objectbox.flux[item][0, j] -
                        model_phot.flux[item]) / objectbox.flux[item][1, j]

    else:
        res_phot = None

    if inc_spec:
        res_spec = {}

        if spectrum == "petitradtrans":
            # Calculate the petitRADTRANS spectrum only once
            model = radtrans.get_model(parameters)

        for key in objectbox.spectrum:

            if isinstance(inc_spec, bool) or key in inc_spec:
                wavel_range = (
                    0.9 * objectbox.spectrum[key][0][0, 0],
                    1.1 * objectbox.spectrum[key][0][-1, 0],
                )

                wl_new = objectbox.spectrum[key][0][:, 0]
                spec_res = objectbox.spectrum[key][3]

                if spectrum == "planck":
                    readmodel = read_planck.ReadPlanck(wavel_range=wavel_range)

                    model = readmodel.get_spectrum(model_param=parameters,
                                                   spec_res=1000.0)

                    # Separate resampling to the new wavelength points

                    flux_new = spectres.spectres(
                        wl_new,
                        model.wavelength,
                        model.flux,
                        spec_errs=None,
                        fill=0.0,
                        verbose=True,
                    )

                elif spectrum == "petitradtrans":
                    # Separate resampling to the new wavelength points
                    flux_new = spectres.spectres(
                        wl_new,
                        model.wavelength,
                        model.flux,
                        spec_errs=None,
                        fill=0.0,
                        verbose=True,
                    )

                else:
                    # Resampling to the new wavelength points
                    # is done by the get_model method

                    readmodel = read_model.ReadModel(spectrum,
                                                     wavel_range=wavel_range)

                    if "teff_0" in parameters and "teff_1" in parameters:
                        # Binary system

                        param_0 = read_util.binary_to_single(parameters, 0)

                        model_spec_0 = readmodel.get_model(
                            param_0,
                            spec_res=spec_res,
                            wavel_resample=wl_new,
                            smooth=True,
                        )

                        param_1 = read_util.binary_to_single(parameters, 1)

                        model_spec_1 = readmodel.get_model(
                            param_1,
                            spec_res=spec_res,
                            wavel_resample=wl_new,
                            smooth=True,
                        )

                        flux_comb = (
                            parameters["spec_weight"] * model_spec_0.flux +
                            (1.0 - parameters["spec_weight"]) *
                            model_spec_1.flux)

                        model_spec = box.create_box(
                            boxtype="model",
                            model=spectrum,
                            wavelength=wl_new,
                            flux=flux_comb,
                            parameters=parameters,
                            quantity="flux",
                        )

                    else:
                        # Single object

                        model_spec = readmodel.get_model(
                            parameters,
                            spec_res=spec_res,
                            wavel_resample=wl_new,
                            smooth=True,
                        )

                    flux_new = model_spec.flux

                data_spec = objectbox.spectrum[key][0]
                res_tmp = (data_spec[:, 1] - flux_new) / data_spec[:, 2]

                res_spec[key] = np.column_stack([wl_new, res_tmp])

    else:
        res_spec = None

    print("Calculating residuals... [DONE]")

    print("Residuals (sigma):")

    if res_phot is not None:
        for item in inc_phot:
            if res_phot[item].ndim == 1:
                print(f"   - {item}: {res_phot[item][1]:.2f}")

            elif res_phot[item].ndim == 2:
                for j in range(res_phot[item].shape[1]):
                    print(f"   - {item}: {res_phot[item][1, j]:.2f}")

    if res_spec is not None:
        for key in objectbox.spectrum:
            if isinstance(inc_spec, bool) or key in inc_spec:
                print(f"   - {key}: min: {np.nanmin(res_spec[key]):.2f}, "
                      f"max: {np.nanmax(res_spec[key]):.2f}")

    chi2_stat = 0
    n_dof = 0

    if res_phot is not None:
        for key, value in res_phot.items():
            chi2_stat += value[1]**2
            n_dof += 1

    if res_spec is not None:
        for key, value in res_spec.items():
            chi2_stat += np.sum(value[:, 1]**2)
            n_dof += value.shape[0]

    for item in parameters:
        if item not in ["mass", "luminosity", "distance"]:
            n_dof -= 1

    chi2_red = chi2_stat / n_dof

    print(f"Reduced chi2 = {chi2_red:.2f}")
    print(f"Number of degrees of freedom = {n_dof}")

    return box.create_box(
        boxtype="residuals",
        name=objectbox.name,
        photometry=res_phot,
        spectrum=res_spec,
        chi2_red=chi2_red,
    )
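# A hypothetical usage sketch of get_residuals; the object name, model name and
# parameter values below are illustrative assumptions (as is the availability
# of a species Database instance called `database`), not taken from the code
# above.
objectbox = database.get_object("companion b")
best_params = {"teff": 1800.0, "logg": 4.0, "radius": 1.2, "distance": 20.0}

residuals = get_residuals(datatype="model",
                          spectrum="drift-phoenix",
                          parameters=best_params,
                          objectbox=objectbox)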
Esempio n. 29
0
def resample_test():
    root = os.path.join(os.environ['MANGA_SPECTRO_REDUX'],
                        os.environ['MANGADRP_VER'])

    pltifu = '7815-1901'
    hdu = fits.open(
        os.path.join(root,
                     pltifu.split('-')[0], 'stack',
                     'manga-{0}-LOGCUBE.fits.gz'.format(pltifu)))

    drpall_file = os.path.join(
        root, 'drpall-{0}.fits'.format(os.environ['MANGADRP_VER']))
    drpall = fits.open(drpall_file)[1].data

    indx = drpall['PLATEIFU'] == pltifu
    z = drpall['NSA_Z'][indx][0]
    print(z)

    old_wave = hdu['WAVE'].data
    old_flux = numpy.ma.MaskedArray(
        (hdu['FLUX'].data[:, 10:12, 10]).T,
        mask=(hdu['MASK'].data[:, 10:12, 10]).T > 0)
    # mask pixels around the bright 5577A sky line
    old_flux[:, (old_wave > 5570) & (old_wave < 5586)] = numpy.ma.masked
    old_ferr = numpy.ma.power((hdu['IVAR'].data[:, 10:12, 10]).T, -0.5)
    indx = (old_wave > old_wave[0] / (1 + z)) & (old_wave < old_wave[-2] /
                                                 (1 + z))

    t = time.perf_counter()
    new_flux_spectres = numpy.empty((old_flux.shape[0], numpy.sum(indx)),
                                    dtype=float)
    new_ferr_spectres = numpy.empty((old_flux.shape[0], numpy.sum(indx)),
                                    dtype=float)
    for i in range(old_flux.shape[0]):
        new_flux_spectres[i,:], new_ferr_spectres[i,:] \
                = spectres.spectres(old_wave[indx], old_wave/(1+z), old_flux[i,:].filled(0.0),
                                    spec_errs=old_ferr[i,:].filled(0.0))
    print('SpectRes Time: ', time.perf_counter() - t)

    t = time.perf_counter()
    borders = _pixel_borders(numpy.array([old_wave[0], old_wave[-1]]),
                             old_wave.size,
                             log=True)[0]
    _p = numpy.repeat(borders, 2)[1:-1].reshape(-1, 2)
    new_flux_brute = numpy.array([
        passband_integral(old_wave / (1 + z), f, passband=_p, log=True)
        for f in old_flux.filled(0.0)
    ])
    new_flux_brute /= (_p[:, 1] - _p[:, 0])[None, :]
    print('Brute Force Time: ', time.perf_counter() - t)

    t = time.perf_counter()
    r = Resample(old_flux,
                 e=old_ferr,
                 x=old_wave / (1 + z),
                 newRange=[old_wave[0], old_wave[-1]],
                 inLog=True,
                 newLog=True)
    print('Resample Time: ', time.perf_counter() - t)

    print('Mean diff:')
    print('    spectres - brute    = {0:.5e}'.format(
        numpy.mean(numpy.absolute(new_flux_spectres -
                                  new_flux_brute[:, indx]))))
    print('    spectres - resample = {0:.5e}'.format(
        numpy.mean(numpy.absolute(new_flux_spectres - r.outy[:, indx]))))
    print('    brute - resample    = {0:.5e}'.format(
        numpy.mean(numpy.absolute(new_flux_brute - r.outy))))

    for i in range(old_flux.shape[0]):
        pyplot.plot(old_wave / (1 + z), old_flux[i, :])
        pyplot.plot(old_wave[indx], new_flux_spectres[i, :])
        pyplot.plot(old_wave, new_flux_brute[i, :])
        pyplot.plot(r.outx, r.outy[i, :])
        pyplot.plot(r.outx, r.outf[i, :])
        pyplot.show()
Esempio n. 30
0
model_wavs = np.genfromtxt("bc2003_hr_stelib_m62_chab_ssp.ised_ASCII",
                           skip_header=6,
                           skip_footer=233,
                           usecols=np.arange(1, 6918, dtype="int"))

# Load up the model grid; the last axis must run over wavelength, earlier axes
# may contain the different spectra to be resampled
model_grid = np.genfromtxt("bc2003_hr_stelib_m62_chab_ssp.ised_ASCII",
                           skip_header=7,
                           skip_footer=12,
                           usecols=np.arange(1, 6918, dtype="int"))

# Specify the wavelength sampling to be applied to the spectrum or spectra
regrid = np.arange(3000., 5000., 5.)

# Call the spectres function to resample the input spectrum or spectra to the new wavelength grid
model_resampled_5A = spectres(regrid, model_wavs, model_grid)

# Resample onto a coarser 10A wavelength grid
model_resampled_10A = spectres(np.arange(3000., 5000., 10.), model_wavs,
                               model_grid)

# Load up the age values for each of the models we have resampled
model_ages = np.genfromtxt("bc2003_hr_stelib_m62_chab_ssp.ised_ASCII",
                           skip_header=0,
                           skip_footer=239)[1:]

# Find the index of the model spectrum most closely corresponding to a 1Gyr old burst of star formation
col_1gyr = np.argmin(np.abs(model_ages - 10**9))

# Plotting code:
plt.figure(figsize=(12, 6))
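# The original snippet is truncated at this point; a plausible continuation
# (assumed, not from the source) would plot the resampled 1 Gyr model on the
# new wavelength grid.
plt.plot(regrid, model_resampled_5A[col_1gyr, :], color="blue",
         label="1 Gyr model, 5A sampling")
plt.xlabel("Wavelength (A)")
plt.ylabel("Flux")
plt.legend()
plt.show()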