def test_normalize():
    w = np.arange(4000., 5000., 10.)
    coeff = [1., 0.1, 0.1]
    pol = Polynomial(coeff)
    obs_spectrum = Spectrum1D.from_array(w, pol(w), dispersion_unit=u.AA)
    norm = Normalize(obs_spectrum, npol=3)
    model = Spectrum1D.from_array(w, np.ones_like(w), dispersion_unit=u.AA)
    fit = norm(model)
    npt.assert_allclose(fit.flux, obs_spectrum.flux)
    npt.assert_allclose(norm.polynomial(obs_spectrum.wavelength.value),
                        obs_spectrum.flux)
    npt.assert_allclose(norm.polynomial.convert().coef,
                        np.array(coeff + [0.]), rtol=1e-5, atol=1.e-10)
def test_spectrum1d_fromarray_quantity():
    test_spec = Spectrum1D.from_array(np.arange(3000, 9000) * u.angstrom,
                                      np.random.random(6000))
    assert hasattr(test_spec, 'wavelength')
    assert test_spec.dispersion_unit == u.angstrom
def test_spectrum1d_flux1():
    test_spec = Spectrum1D.from_array(np.arange(3000, 9000) * u.angstrom,
                                      np.random.random(6000) * u.erg / u.s)
    assert not hasattr(test_spec.data, 'unit')
    assert test_spec.flux.unit == u.erg / u.s
    assert test_spec.unit == u.erg / u.s
def test_interpolate():
    obs_spectrum = Spectrum1D.from_array(np.arange(1, 26), np.zeros(25),
                                         dispersion_unit='micron',
                                         unit='erg/(cm^2 s Angstrom)')
    test_spectrum = Spectrum1D.from_array(np.arange(0.5, 26.5, 1),
                                          np.arange(0.5, 26.5, 1),
                                          dispersion_unit='micron',
                                          unit='erg/(cm^2 s Angstrom)')
    interpolate_plugin = Interpolate(obs_spectrum)
    interpolated_test_spectrum = interpolate_plugin(test_spectrum)
    expected_interpolated_flux = np.arange(1, 26)
    interpolated_test_flux = interpolated_test_spectrum.flux
    npt.assert_array_almost_equal(interpolated_test_flux,
                                  expected_interpolated_flux, decimal=6)
def read_spec(ispec, second_file=None):
    '''Parse spectrum out of the input

    Parameters
    ----------
    ispec : Spectrum1D, str, or tuple

    Returns
    -------
    spec : XSpectrum1D
    spec_file : str
    '''
    from specutils import Spectrum1D
    from linetools.spectra.utils import XSpectrum1D
    #
    if isinstance(ispec, basestring):
        spec_fil = ispec
        spec = lsi.readspec(spec_fil)
        # Second file?
        if second_file is not None:
            spec2 = lsi.readspec(second_file)
            if spec2.sig is None:
                spec2.sig = np.zeros(spec.flux.size)
            # Scale for convenience of plotting
            xper1 = xstats.basic.perc(spec.flux, per=0.9)
            xper2 = xstats.basic.perc(spec2.flux, per=0.9)
            scl = xper1[1] / xper2[1]
            # Stitch together
            wave3 = np.append(spec.dispersion, spec2.dispersion)
            flux3 = np.append(spec.flux, spec2.flux * scl)
            sig3 = np.append(spec.sig, spec2.sig * scl)
            spec3 = Spectrum1D.from_array(wave3, flux3,
                                          uncertainty=StdDevUncertainty(sig3))
            # Overwrite
            spec = spec3
            spec.filename = spec_fil
    elif isinstance(ispec, Spectrum1D):
        spec = ispec  # Assuming Spectrum1D
        spec_fil = spec.filename  # Grab from Spectrum1D
    elif isinstance(ispec, tuple):
        # Units
        try:
            wv_unit = ispec[0].unit
        except AttributeError:
            wv_unit = u.AA
        uwave = u.Quantity(ispec[0], unit=wv_unit)
        # Generate
        if len(ispec) == 2:  # wave, flux
            spec = XSpectrum1D.from_array(uwave, u.Quantity(ispec[1]))
        else:
            spec = XSpectrum1D.from_array(
                uwave, u.Quantity(ispec[1]),
                uncertainty=StdDevUncertainty(ispec[2]))
        #
        spec_fil = 'none'
        spec.filename = spec_fil
    else:
        raise ValueError('Bad input to read_spec')
    # Return
    return spec, spec_fil
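# A minimal usage sketch for read_spec on the tuple branch, assuming numpy
# and astropy.units are imported as np and u; the arrays here are made up
# for illustration.
wave = np.linspace(4000., 5000., 100) * u.AA
flux = np.ones(100)
sig = 0.1 * np.ones(100)
spec, spec_fil = read_spec((wave, flux, sig))  # spec_fil will be 'none'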
def __call__(self, spectrum):
    wavelength, flux = spectrum.wavelength.value, spectrum.flux
    interpolated_flux = np.interp(self.observed.wavelength.value,
                                  wavelength, flux)
    return Spectrum1D.from_array(
        self.observed.wavelength,
        interpolated_flux,
        dispersion_unit=self.observed.wavelength.unit,
        unit=self.observed.unit)
def test_spectrum1d_fromarray_quantity2():
    test_spec = Spectrum1D.from_array(np.arange(3000, 9000) * u.angstrom,
                                      np.random.random(6000),
                                      dispersion_unit='nm')
    assert hasattr(test_spec, 'wavelength')
    assert test_spec.dispersion_unit == u.nm
    nptesting.assert_allclose(test_spec.wavelength.value,
                              np.arange(3000, 9000) / 10.)
def __call__(self, spectrum):
    from specutils import extinction
    extinction_factor = 10**(-0.4 * extinction.extinction_ccm89(
        spectrum.wavelength, a_v=self.a_v, r_v=self.r_v))
    return Spectrum1D.from_array(
        spectrum.wavelength.value,
        extinction_factor * spectrum.flux,
        dispersion_unit=spectrum.wavelength.unit,
        unit=spectrum.unit)
def __call__(self, spectrum):
    wavelength, flux = spectrum.wavelength.value, spectrum.flux
    log_grid_log_wavelength = np.arange(np.log(wavelength.min()),
                                        np.log(wavelength.max()),
                                        self.resolution.to(1).value)
    log_grid_wavelength = np.exp(log_grid_log_wavelength)
    log_grid_flux = np.interp(log_grid_wavelength, wavelength, flux)
    profile = self.rotational_profile()
    log_grid_convolved = nd.convolve1d(log_grid_flux, profile)
    convolved_flux = np.interp(wavelength, log_grid_wavelength,
                               log_grid_convolved)
    return Spectrum1D.from_array(spectrum.wavelength, convolved_flux,
                                 dispersion_unit=spectrum.wavelength.unit,
                                 unit=spectrum.unit)
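# Why the method above resamples onto a log-wavelength grid before
# convolving: a fixed step in ln(lambda) is a fixed velocity step
# (dv ~ c * d(ln lambda)), so one rotational profile is valid across the
# whole spectrum. A self-contained numeric check of that equivalence (the
# step size is illustrative):
import numpy as np

step = 1e-5  # step in ln(lambda)
grid = np.exp(np.arange(np.log(4000.), np.log(4000.) + 5 * step, step))
dv = 3e5 * np.diff(grid) / grid[:-1]  # km/s per pixel
print(dv)  # ~constant, ~3 km/s for this step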
def test_spectrum1d_flux2():
    test_spec = Spectrum1D.from_array(np.arange(3000, 9000) * u.angstrom,
                                      np.random.random(6000) * u.erg / u.s)
    assert not hasattr(test_spec.data, 'unit')
    assert test_spec.flux.unit == u.erg / u.s
    assert test_spec.unit == u.erg / u.s
    new_flux = np.random.random(6000) * u.W
    test_spec.flux = new_flux
    assert test_spec.flux.unit == u.erg / u.s
    nptesting.assert_allclose(new_flux.to(u.erg / u.s).value, test_spec.data)
def to_spectrum(self):
    try:
        from specutils import Spectrum1D
    except ImportError:
        raise ImportError('specutils needed for this functionality')
    from xtool.fix_spectrum1d import Spectrum1D

    if getattr(self, 'amplitude_uncertainty', None) is None:
        uncertainty = None
    else:
        uncertainty = self.amplitude_uncertainty

    spec = Spectrum1D.from_array(self.wavelength_pixels * u.nm,
                                 self.amplitude.value,
                                 uncertainty=uncertainty)
    return spec
def get_query_data(self, filter_tuple, plugin,
                   warning_threshold=1 * u.gigabyte):
    """
    Query the database for spectra, apply the plugin to each one, and
    return the sample spectrum, the parameter table and the flux array

    :param filter_tuple:
    :param plugin:
    :return:
    """
    query = self.get_spectrum_query(filter_tuple)
    sample_spectrum_row = query.first()
    sample_spectrum_flux = plugin(sample_spectrum_row.get_spectrum1d().flux)

    no_spectra = query.count()
    size_of_spectra = (query.count() * len(sample_spectrum_flux)) * 8 * u.byte

    if size_of_spectra > warning_threshold:
        continue_query = raw_input('The size of the spectra is {0:.2f}. '
                                   'Continue [y/N]'.format(
                                       size_of_spectra.to(warning_threshold.unit)))
        if continue_query.strip().lower() != 'y':
            raise ValueError('Size of requested grid ({:.2f}) too '
                             'large for user ... aborting'.format(
                                 size_of_spectra.to(warning_threshold.unit)))

    fluxes = np.empty((query.count(), len(sample_spectrum_flux)))
    parameters = []
    param_names = [item.name for item in
                   sample_spectrum_row.parameter_set.parameters]
    for i, spectrum_row in enumerate(query):
        print "{0}/{1}".format(i, no_spectra)
        spectrum = spectrum_row.get_spectrum1d()
        fluxes[i] = plugin(spectrum.flux)
        parameters.append([getattr(spectrum_row.parameter_set, key)
                           for key in param_names])

    parameters = pd.DataFrame(parameters, columns=param_names)

    output_sample_spectrum = Spectrum1D.from_array(
        plugin.output_wavelength * u.angstrom, sample_spectrum_flux)

    return output_sample_spectrum, parameters, fluxes
def __call__(self, spectrum):
    R = self.resolution
    Lambda = self.central_wavelength.value
    wavelength = spectrum.dispersion.value
    conversionfactor = 2 * np.sqrt(2 * np.log(2))  # FWHM -> sigma
    deltax = np.mean(wavelength[1:] - wavelength[0:-1])
    FWHM = Lambda / R
    sigma = (FWHM / deltax) / conversionfactor
    flux = spectrum.flux
    convolved_flux = gaussian_filter1d(flux, sigma, axis=0, order=0)
    return Spectrum1D.from_array(
        spectrum.dispersion,
        convolved_flux,
        dispersion_unit=spectrum.dispersion.unit,
        unit=spectrum.unit)
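# Worked numbers for the sigma computation above, using illustrative values
# that match the test_convolution example elsewhere in this section
# (R = 5000, central wavelength 7000 A, ~1 A pixels). Note that
# gaussian_filter1d works in pixel units, hence the division by deltax.
R, Lambda, deltax = 5000., 7000., 1.0
sigma = (Lambda / R / deltax) / (2 * np.sqrt(2 * np.log(2)))  # ~0.59 pixels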
def test_export_data(specviz_gui, tmpdir):
    fname = str(tmpdir.join('export.ecsv'))
    workspace = specviz_gui._workspaces[0]
    data_item = workspace.current_item
    workspace.export_data_item(data_item, fname, '*.ecsv')

    assert os.path.isfile(fname)

    exported = Spectrum1D.read(fname, format='ECSV')
    original = data_item.data_item.spectrum

    assert_quantity_allclose(exported.flux, original.flux)
    assert_quantity_allclose(exported.spectral_axis, original.spectral_axis)
    if original.uncertainty is None:
        assert exported.uncertainty is None
    else:
        assert_quantity_allclose(exported.uncertainty, original.uncertainty)
def test_convolution():
    my_spec = Spectrum1D.from_array(np.linspace(6000, 8000, 2000),
                                    np.ones(2000),
                                    dispersion_unit='Angstrom',
                                    unit='erg/(cm^2 s Angstrom)')
    my_spec.flux[1000] = 0.0
    R = 5000.
    central_wavelength = 7000. * u.Angstrom
    convolve_plugin = Convolve(R, central_wavelength)
    conv_spectrum = convolve_plugin(my_spec)
    assert conv_spectrum.flux[999] < 1.
    assert conv_spectrum.flux[1000] > 0
    integral_pre_convolved = simps(my_spec.flux, my_spec.wavelength.value)
    integral_convolved = simps(conv_spectrum.flux,
                               conv_spectrum.wavelength.value)
    npt.assert_allclose(integral_pre_convolved, integral_convolved,
                        rtol=1.0, atol=0.0002)
def eval(self, teff, logg, feh):
    """
    Interpolating on the grid to the necessary parameters

    Parameters
    ----------
    teff : float
        effective temperature
    logg : float
        base ten logarithm of surface gravity in cgs
    feh : float
        [Fe/H]
    """
    flux = self.interpolate_grid(teff, logg, feh)
    return Spectrum1D.from_array(self.wavelength, flux,
                                 dispersion_unit=u.angstrom,
                                 unit=u.Unit('erg / (cm2 s Angstrom)'))
def eval(self, *args):
    """
    Interpolating on the grid to the necessary parameters

    Parameters
    ----------
    teff : float
        effective temperature
    logg : float
        base ten logarithm of surface gravity in cgs
    feh : float
        [Fe/H]
    """
    flux = self.interpolate_grid(*args)
    return Spectrum1D.from_array(self.wavelength.value, flux,
                                 dispersion_unit=self.wavelength.unit,
                                 unit=self.flux_unit)
def extract_spectrum(self):
    """Extract 1D spectrum from the information provided so far and
    create a `~specutils.Spectrum1D` object
    """
    try:
        from specutils import Spectrum1D
    except ImportError:
        from .spectrum1d import Spectrum1D

    if self.wavelength is None:
        raise ValueError('wavelength is None')
    if self.wavelength_unit is None:
        raise ValueError('wavelength_unit is None')
    if self.flux is None:
        raise ValueError('flux is None')
    if self.flux_unit is None:
        raise ValueError('flux_unit is None')

    wave = self.wavelength * self.wavelength_unit
    flux = self.flux * self.flux_unit
    return Spectrum1D.from_array(wave, flux)
def __call__(self, model):
    rcond = (len(self.observed.flux) *
             np.finfo(self.observed.flux.dtype).eps)
    # V[:,0] = mfi/e, V[:,1] = mfi/e*w, ..., V[:,npol] = mfi/e*w**npol
    V = self._Vp * (model.flux / self.uncertainty)[:, np.newaxis]
    # normalize the different powers
    scl = np.sqrt((V * V).sum(0))
    sol, resids, rank, s = np.linalg.lstsq(V / scl, self.signal_to_noise,
                                           rcond)
    sol = (sol.T / scl).T
    if rank != self._Vp.shape[-1] - 1:
        msg = "The fit may be poorly conditioned"
        warnings.warn(msg)
    fit = np.dot(V, sol) * self.uncertainty
    # keep coefficients in case the outside wants to look at it
    self.polynomial = Polynomial(sol, domain=self.domain.value,
                                 window=self.window.value)
    return Spectrum1D.from_array(
        self.observed.wavelength.value,
        fit,
        unit=self.observed.unit,
        dispersion_unit=self.observed.wavelength.unit)
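# The column scaling in the method above is a standard conditioning trick:
# divide each Vandermonde column by its norm before np.linalg.lstsq, then
# undo the scaling on the solution. A self-contained sketch of just that
# step (numpy only; the data here are arbitrary):
import numpy as np

w = np.linspace(-1., 1., 50)
V = np.vander(w, 4, increasing=True)  # columns 1, w, w**2, w**3
y = 1. + 0.1 * w + 0.1 * w**2
scl = np.sqrt((V * V).sum(0))
sol, _, rank, _ = np.linalg.lstsq(V / scl, y, rcond=None)
coef = sol / scl  # recovers ~[1, 0.1, 0.1, 0]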
def __call__(self, img, trace_object):
    self.last_trace = trace_object
    self.last_img = img

    if self.apwidth < 1:
        raise ValueError('apwidth must be >= 1')
    if self.skysep < 1:
        raise ValueError('skysep must be >= 1')
    if self.skywidth < 1:
        raise ValueError('skywidth must be >= 1')

    trace_line = trace_object.line

    onedspec = np.zeros_like(trace_line)
    skysubflux = np.zeros_like(trace_line)
    fluxerr = np.zeros_like(trace_line)

    for i in range(0, len(trace_line)):
        # first do the aperture flux
        # juuuust in case the trace gets too close to an edge
        widthup = self.apwidth / 2
        widthdn = self.apwidth / 2
        if (trace_line[i] + widthup > img.shape[0]):
            widthup = img.shape[0] - trace_line[i] - 1
        if (trace_line[i] - widthdn < 0):
            widthdn = trace_line[i] - 1

        # simply add up the total flux around the trace_line +/- width
        onedspec[i] = np.nansum(
            img[int(trace_line[i] - widthdn):
                int(trace_line[i] + widthup + 1), i]
        )

        # now do the sky fit
        itrace_line = int(trace_line[i])
        sky_y = np.append(
            np.arange(itrace_line - self.apwidth - self.skysep - self.skywidth,
                      itrace_line - self.apwidth - self.skysep),
            np.arange(itrace_line + self.apwidth + self.skysep + 1,
                      itrace_line + self.apwidth + self.skysep +
                      self.skywidth + 1)
        )

        sky_flux = img[sky_y, i]
        if (self.skydeg > 0):
            # fit a polynomial to the sky in this column
            pfit = np.polyfit(sky_y, sky_flux, self.skydeg)
            # define the aperture in this column
            ap = np.arange(trace_line[i] - self.apwidth,
                           trace_line[i] + self.apwidth + 1)
            # evaluate the polynomial across the aperture, and sum
            skysubflux[i] = np.nansum(np.polyval(pfit, ap))
        elif (self.skydeg == 0):
            skysubflux[i] = np.nanmean(sky_flux) * (self.apwidth * 2.0 + 1)

        # finally, compute the error in this pixel
        sigma_bkg = np.nanstd(sky_flux)  # stddev in the background data
        n_bkg = float(len(sky_y))        # number of bkgd pixels
        n_ap = self.apwidth * 2. + 1     # number of aperture pixels

        # based on aperture phot err description by F. Masci, Caltech:
        # http://wise2.ipac.caltech.edu/staff/fmasci/ApPhotUncert.pdf
        fluxerr[i] = np.sqrt(
            np.nansum(onedspec[i] - skysubflux[i]) +
            (n_ap + n_ap**2 / n_bkg) * (sigma_bkg**2)
        )

    spec = Spectrum1D(spectral_axis=np.arange(len(onedspec)) * u.pixel,
                      flux=onedspec * img.unit,
                      uncertainty=StdDevUncertainty(fluxerr))
    skyspec = Spectrum1D(spectral_axis=np.arange(len(onedspec)) * u.pixel,
                         flux=skysubflux * img.unit)

    return spec, skyspec
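# The per-pixel uncertainty above follows the aperture-photometry error
# model from the Masci note cited in the code:
#   sigma_F**2 = sum(F_ap - B) + (n_ap + n_ap**2 / n_bkg) * sigma_bkg**2
# i.e. Poisson variance of the background-subtracted counts plus the
# variance contributed by the background estimate itself. A quick
# illustrative evaluation with made-up numbers (numpy assumed as np):
n_ap, n_bkg, sigma_bkg, net_counts = 11., 20., 5., 1000.
fluxerr = np.sqrt(net_counts + (n_ap + n_ap**2 / n_bkg) * sigma_bkg**2)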
def get_spectrum1d(self):
    flux = self._read_flux()
    return Spectrum1D.from_array(self.wavelength, flux,
                                 unit=u.Unit(self.flux_unit))
                     max(mplot2.y[right:]), num=len(mplot2.x))
y_model = mplot2.y
y_cn = y_model / y_cont

dcontplot = DataPlot()
dcont = Data1D("normalized", mplot.x, y_cn)
dcontplot.prepare(dcont)
dcontplot.plot()

os.chdir(r"/home/dtyler/Desktop/DocumentsDT/outputs/standard_" +
         fits_image_filename)
plt.savefig(fits_image_filename[:-4] + "norm_continuum.png")
os.chdir(r"/home/dtyler/Desktop/DocumentsDT/Programs")

###################################
### calculate equivalent width

spectrum = Spectrum1D(flux=y_cn * u.dimensionless_unscaled,
                      spectral_axis=mplot.x * u.AA)
eqw = equivalent_width(spectrum)

"""print("EW", eqw.value)
print('norm factor', norm_factor)
for par in bmdl.pars:
    print(par.fullname, par.val)"""

### print output to file
os.chdir(r"/home/dtyler/Desktop/DocumentsDT/Analysis/standard_eq_widths")
fil = open(fits_image_filename[:-4] + ".txt", 'w')
fil.write('eqw' + '\t' + str(eqw.value) + '\n')
fil.write('norm' + '\t' + str(norm_factor) + '\n')
for par in bmdl.pars:
from specutils import Spectrum1D
from astropy import units
import numpy as np

dispersion = np.arange(4000, 5000, 0.12)
flux = np.random.randn(len(dispersion))
mySpectrum = Spectrum1D.from_array(dispersion, flux,
                                   dispersion_unit=units.angstrom)

hBeta = mySpectrum.slice_dispersion(4851.0, 4871.0)
hBeta
def compprep(spectrum, sn_name, z, source):
    old_wave = spectrum[:, 0]  # wavelengths
    old_flux = spectrum[:, 1]  # fluxes
    try:
        old_error = spectrum[:, 2]  # check if supernova has error array
    except IndexError:
        old_error = np.array([0])  # if not, set default
    old_ivar = df.genivar(old_wave, old_flux, old_error)  # generate inverse variance
    snr = prep.getsnr(old_flux, old_ivar)

    if source == 'cfa':  # choosing source dataset
        # z = ReadParam()
        sne = prep.ReadExtin('extinction.dat')
    if source == 'bsnip':
        sne = prep.ReadExtin('extinctionbsnip.dat')
    if source == 'csp':
        sne = prep.ReadExtin('extinctioncsp.dat')
        old_wave *= 1 + float(z)  # Redshift back
    if source == 'uv':
        sne = prep.ReadExtin('extinctionuv.dat')
    if source == 'other':
        sne = prep.ReadExtin('extinctionother.dat')

    # host_reddened = ReadExtin('../data/info_files/ryan_av.txt')
    newdata = []

    old_wave = old_wave * u.Angstrom  # wavelengths
    old_flux = old_flux * u.Unit('W m-2 angstrom-1 sr-1')
    spec1d = Spectrum1D.from_array(old_wave, old_flux)
    test_flux = test_dered.dered(
        sne, sn_name, spec1d.wavelength, spec1d.flux
    )  # Dereddening (see if sne in extinction files match the SN name)
    # new_flux = host_correction(sne, sn_name, old_wave, new_flux)

    # new_flux = old_flux
    new_flux = test_flux.value
    old_wave = old_wave.value
    old_wave = old_wave / (1. + z)

    old_flux = np.asarray(old_flux)
    new_flux = np.asarray(new_flux)
    s = scale_composites_in_range(old_flux, new_flux)
    old_flux = s * old_flux

    # plt.rc('font', family='serif')
    # fig, ax = plt.subplots(1,1)
    # fig.set_size_inches(10, 8, forward = True)
    # ax.get_yaxis().set_ticks([])
    # plt.plot(old_wave, old_flux, linewidth = 2, color = 'r')
    # plt.plot(old_wave, new_flux, linewidth = 2, color = '#3F5D7D')
    # plt.ylabel('Relative Flux')
    # plt.xlabel('Wavelength ' + "($\mathrm{\AA}$)")
    # # plt.savefig('../../Paper_Drafts/MW_corr.png', dpi = 300, bbox_inches = 'tight')
    # plt.show()

    av = .1294  # 2006sr
    # av = 2.9711  # 2005a
    name = '2006sr'
    # name = '2005a'
    host_wave = old_wave * u.Angstrom  # wavelengths
    host_flux = new_flux * u.Unit('W m-2 angstrom-1 sr-1')
    spec1d = Spectrum1D.from_array(host_wave, host_flux)
    new_flux_host, new_ivar_host = test_dered.host_correction(
        av, 2.5, name, spec1d.wavelength, spec1d.flux, [0])

    new_flux = np.asarray(new_flux)
    new_flux_host = np.asarray(new_flux_host.value)
    s = scale_composites_in_range(new_flux_host, new_flux)
    new_flux_host = s * new_flux_host

    norm = 1. / np.amax(new_flux_host)
    new_flux_host = new_flux_host * norm
    new_flux = new_flux * norm
    old_flux = old_flux * norm

    plt.rc('font', family='serif')
    fig, ax = plt.subplots(1, 1)
    fig.set_size_inches(10, 8, forward=True)
    plt.minorticks_on()
    plt.xticks(fontsize=20)
    ax.xaxis.set_ticks(np.arange(np.round(old_wave[0], -3),
                                 np.round(old_wave[-1], -3), 1000))
    plt.yticks(fontsize=20)
    plt.tick_params(which='major', bottom='on', top='on', left='on',
                    right='on', length=10)
    plt.tick_params(which='minor', bottom='on', top='on', left='on',
                    right='on', length=5)
    plt.plot(old_wave, old_flux, linewidth=2, color='#d95f02')
    plt.plot(old_wave, new_flux, linewidth=2, color='#1b9e77')
    plt.plot(host_wave.value, new_flux_host, linewidth=2, color='#7570b3')
    plt.ylabel('Relative Flux', fontsize=30)
    plt.xlabel('Rest Wavelength ' + "($\mathrm{\AA}$)", fontsize=30)
    plt.savefig('../../Paper_Drafts/red_corr.pdf', dpi=300,
                bbox_inches='tight')
    # plt.ylim([-.2,1.01])
    # plt.savefig('../../Paper_Drafts/red_corr_large.pdf', dpi = 300, bbox_inches = 'tight')
    plt.show()

    # new_wave = old_wave/(1.+z)  # Deredshifting
    new_wave = old_wave
    new_error = old_error  # Placeholder if it needs to be changed
    norm = 1. / np.amax(new_flux)
    new_flux = new_flux * norm
    new_ivar = df.genivar(new_wave, new_flux, new_error)  # generate new inverse variance
    # var = new_flux*0+1
    newdata = prep.Interpo(new_wave, new_flux, new_ivar)  # Do the interpolation

    plt.rc('font', family='serif')
    fig, ax = plt.subplots(1, 1)
    fig.set_size_inches(10, 8, forward=True)
    plt.minorticks_on()
    plt.xticks(fontsize=20)
    ax.xaxis.set_ticks(np.arange(np.round(old_wave[0], -3),
                                 np.round(old_wave[-1], -3), 1000))
    plt.yticks(fontsize=20)
    plt.tick_params(which='major', bottom='on', top='on', left='on',
                    right='on', length=10)
    plt.tick_params(which='minor', bottom='on', top='on', left='on',
                    right='on', length=5)
    plt.plot(old_wave, new_flux, linewidth=2, color='r')
    plt.plot(newdata[0], newdata[1], linewidth=2, color='#3F5D7D')
    plt.ylabel('Relative Flux', fontsize=30)
    plt.xlabel('Rest Wavelength ' + "($\mathrm{\AA}$)", fontsize=30)
    plt.savefig('../../Paper_Drafts/interp.pdf', dpi=300,
                bbox_inches='tight')
    # plt.ylim([-.3,1.])
    # plt.savefig('../../Paper_Drafts/interp_large.pdf', dpi = 300, bbox_inches = 'tight')
    plt.show()

    # print 'new spectra', newdata
    return newdata, snr
def mos_niriss_parser(app, data_dir, obs_label=""): """ Attempts to parse all data for a NIRISS dataset in the specified directory, which should include: - *_direct_*_cal.fits : Direct 2D image - *_direct_*_cat.ecsv : Source catalog - *_WFSSR_*_cal.fits : 2D spectra in first orientation - *_WFSSC_*_cal.fits : 2D spectra in second orientation - *_WFSSR_*_x1d.fits : 1D spectra in first orientatiom - *_WFSSC_*_x1d.fits : 1D spectra in second orientatiom The spectra from the "C" files (horizontal orientation) are showed in the viewers by default. """ p = Path(data_dir) if not p.is_dir(): raise ValueError("{} is not a valid directory path".format(data_dir)) source_cat = sorted(list(p.glob("{}*_direct_*_cat.ecsv".format(obs_label)))) direct_image = sorted(list(p.glob("{}*_direct_dit1*_i2d.fits".format(obs_label)))) spec2d_r = sorted(list(p.glob("{}*_WFSSR_*_cal.fits".format(obs_label)))) spec2d_c = sorted(list(p.glob("{}*_WFSSC_*_cal.fits".format(obs_label)))) spec1d_r = sorted(list(p.glob("{}*_WFSSR_*_x1d.fits".format(obs_label)))) spec1d_c = sorted(list(p.glob("{}*_WFSSC_*_x1d.fits".format(obs_label)))) file_lists = { "Source Catalog": source_cat, "Direct Image": direct_image, "2D Spectra C": spec2d_c, "2D Spectra R": spec2d_r, "1D Spectra C": spec1d_c, "1D Spectra R": spec1d_r } # Convert from pathlib Paths back to strings for key in file_lists: file_lists[key] = [str(x) for x in file_lists[key]] _warn_if_not_found(app, file_lists) # Parse relevant information from source catalog cat_fields = ["id", "sky_centroid.ra", "sky_centroid.dec"] source_ids = [] ras = [] decs = [] image_add = [] pupil_id_dict = {} # Retrieve source information for source_catalog_num in range(0, len(file_lists["Source Catalog"])): cat_file = file_lists["Source Catalog"][source_catalog_num] parsed_cat_fields = _fields_from_ecsv(cat_file, cat_fields, delimiter=" ") pupil = [x for x in cat_file.split("/")[-1].split("_") if x[0] == "F" or x[0] == "f"][0] pupil_id_dict[pupil] = {} for row in parsed_cat_fields: pupil_id_dict[pupil][int(row[0])] = (row[1], row[2]) # Read in direct image filters image_dict = {} filter_wcs = {} # Set up a dictionary of datasets to add to glue add_to_glue = {} print("Loading: Images") for image_file in file_lists["Direct Image"]: im_split = image_file.split("/")[-1].split("_") pupil = [x for x in image_file.split("/")[-1].split("_") if x[0] == "F" or x[0] == "f"][0] image_label = "Image {} {}".format(im_split[0], pupil) with fits.open(image_file) as file_obj: data_iter = get_image_data_iterator(app, file_obj, "Image", ext=None) data_obj = [d[0] for d in data_iter] # We do not use the generated labels image_data = data_obj[0] # Grab the first one. TODO: Error if multiple found? 
        with fits.open(image_file) as temp:
            filter_wcs[pupil] = temp[1].header

        image_data.label = image_label
        add_to_glue[image_label] = image_data

        image_dict[pupil] = image_label

    # Parse 2D spectra
    spec_labels_2d = []
    for f in ["2D Spectra C", "2D Spectra R"]:
        for fname in file_lists[f]:
            print(f"Loading: {f} sources")

            orientation = f[-1]
            filter_name = [x for x in fname.split("/")[-1].split("_")
                           if x[0] == "F" or x[0] == "f"][0]

            with fits.open(fname, memmap=False) as temp:
                sci_hdus = []
                wav_hdus = {}
                for i in range(len(temp)):
                    if "EXTNAME" in temp[i].header:
                        if temp[i].header["EXTNAME"] == "SCI":
                            sci_hdus.append(i)
                            wav_hdus[i] = ('WAVELENGTH',
                                           temp[i].header['EXTVER'])

                # Now get a Spectrum1D object for each SCI HDU
                for sci in sci_hdus:
                    if temp[sci].header["SPORDER"] == 1:
                        data = temp[sci].data
                        meta = temp[sci].header

                        # The wavelength is stored in a WAVELENGTH HDU. This is
                        # a 2D array, but in order to be able to use Spectrum1D
                        # we use the average wavelength for all image rows
                        wav = temp[wav_hdus[sci]].data.mean(axis=0) * u.micron

                        spec2d = Spectrum1D(data * u.one, spectral_axis=wav,
                                            meta=meta)
                        spec2d.meta['INSTRUME'] = 'NIRISS'

                        label = "{} Source {} spec2d {}".format(
                            filter_name,
                            temp[sci].header["SOURCEID"],
                            orientation)

                        ra, dec = pupil_id_dict[filter_name][
                            temp[sci].header["SOURCEID"]]
                        source_ids.append("Source Catalog: {} Source ID: {}".
                                          format(filter_name,
                                                 temp[sci].header["SOURCEID"]))
                        ras.append(ra)
                        decs.append(dec)
                        image_add.append(image_dict[filter_name])
                        spec_labels_2d.append(label)

                        add_to_glue[label] = spec2d

    spec_labels_1d = []
    for f in ["1D Spectra C", "1D Spectra R"]:
        for fname in file_lists[f]:
            print(f"Loading: {f} sources")

            with fits.open(fname, memmap=False) as temp:
                # TODO: Remove this once valid SRCTYPE values are present in
                # all headers
                for hdu in temp:
                    if "SRCTYPE" in hdu.header and \
                            (hdu.header["SRCTYPE"] in ["POINT", "EXTENDED"]):
                        pass
                    else:
                        hdu.header["SRCTYPE"] = "EXTENDED"

                specs = SpectrumList.read(temp, format="JWST x1d multi")
                filter_name = [x for x in fname.split("/")[-1].split("_")
                               if x[0] == "F" or x[0] == "f"][0]

                # Orientation denoted by "C" or "R"
                orientation = f[-1]

                for spec in specs:
                    if spec.meta['header']['SPORDER'] == 1 and \
                            spec.meta['header']['EXTNAME'] == "EXTRACT1D":
                        label = "{} Source {} spec1d {}".format(
                            filter_name,
                            spec.meta['header']['SOURCEID'],
                            orientation)

                        spec_labels_1d.append(label)
                        add_to_glue[label] = spec

    # Add the datasets to glue - we do this in one step so that we can easily
    # optimize by avoiding recomputing the full link graph at every add
    with app.data_collection.delay_link_manager_update():

        for label, data in add_to_glue.items():
            app.add_data(data, label, notify_done=False)

        # We then populate the table inside this context manager as
        # _add_to_table does operations that also trigger link manager
        # updates.
        _add_to_table(app, source_ids, "Source ID")
        _add_to_table(app, ras, "Right Ascension")
        _add_to_table(app, decs, "Declination")
        _add_to_table(app, image_add, "Images")
        _add_to_table(app, spec_labels_1d, "1D Spectra")
        _add_to_table(app, spec_labels_2d, "2D Spectra")

    app.get_viewer('table-viewer')._shared_image = True
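# A minimal usage sketch for mos_niriss_parser, assuming a running Mosviz
# application object `app`; the directory path and observation label are
# hypothetical.
mos_niriss_parser(app, "/data/niriss_obs", obs_label="jw0123")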
def compprep(spectrum, sn_name, z, source, use_old_error=True, testing=False,
             filename=None, mjd=None, mjd_max=None):
    """
    Performs clipping, deredshifting, variance spectrum generation, MW
    extinction correction, and interpolation. If testing is True, several
    plots will be made to assess the quality of this processing.
    """
    old_wave = spectrum[:, 0]  # wavelengths
    old_flux = spectrum[:, 1]  # fluxes
    try:
        old_error = spectrum[:, 2]  # check if supernova has error array
    except IndexError:
        old_error = None  # if not, set default

    if sn_name == '2011fe' and source == 'other':
        old_error = np.sqrt(old_error)
    if old_error is not None:
        old_var = old_error**2.
    else:
        old_var = None

    if old_var is not None:
        num_non_zeros = np.count_nonzero(old_var)
        if len(old_var) - num_non_zeros > 100:
            old_var = None
        elif old_var[-1] == 0.:
            old_var[-1] = old_var[-2]
        elif True in np.isnan(old_var):
            nan_inds = np.transpose(np.argwhere(np.isnan(old_var)))[0]
            for ind in nan_inds:
                if ind != 0:
                    old_var[ind] = old_var[ind - 1]
                else:
                    old_var[ind] = old_var[ind + 1]

    # if testing:
    #     plt.plot(old_wave, old_flux)
    #     plt.plot(old_wave/(1.+z), old_flux)
    #     plt.plot(old_wave*(1.+z), old_flux)
    #     plt.xlim(5800,6000)
    #     plt.show()
    #     if old_var is not None:
    #         plt.plot(old_wave, old_var)
    #         plt.show()

    # old_var = None
    vexp, SNR = df.find_vexp(old_wave, old_flux, var_y=old_var)
    if testing:
        print vexp, SNR

    if source != 'csp':  # already deredshifted
        old_wave = old_wave / (1. + z)  # deredshift for clipping

    old_wave, old_flux, old_var = df.clip(
        old_wave, old_flux, old_var, vexp, testing=testing,
        filename=filename)  # clip emission/absorption lines
    old_wave = old_wave * (1. + z)  # reredshift for MW extinction correction

    temp_ivar, SNR = df.genivar(old_wave, old_flux, old_var, vexp=vexp,
                                testing=testing,
                                source=source)  # generate inverse variance

    # code to save foundation spec for david
    # print filename
    # plt.plot(old_wave, old_flux)
    # plt.show()
    # plt.plot(old_wave, temp_ivar)
    # plt.show()
    # file_path = '../../Foundation/mod_TNS_spec/' + filename.split('.')[0] + '_modified.flm'
    # print file_path
    # with open(file_path, 'w') as file:
    #     file.write('# Orginal file name = ' + filename + '\n')
    #     file.write('# z = ' + str(z) + '\n')
    #     # file.write('# MJD = ' + str(mjd) + '\n')
    #     # file.write('# MJD_max = ' + str(mjd_max) + '\n')
    #     file.write('\n')
    #     err = np.sqrt(1./np.asarray(temp_ivar))
    #     data = np.c_[old_wave,old_flux,err]
    #     table = tabulate(data, headers=['Wavelength', 'Flux', 'Error'],
    #                      tablefmt = 'ascii')
    #     file.write(table)

    if testing:
        print SNR

    if old_var is not None:
        old_ivar = 1. / old_var
    else:
        old_ivar = temp_ivar
    # snr = getsnr(old_flux, old_ivar)

    if source == 'cfa':  # choosing source dataset
        # z = ReadParam()
        sne = ReadExtin('extinction.dat')
    if source == 'bsnip':
        sne = ReadExtin('extinctionbsnip.dat')
    if source == 'csp':
        sne = ReadExtin('extinctioncsp.dat')
    if source == 'uv':
        sne = ReadExtin('extinctionuv.dat')
    if source == 'other':
        sne = ReadExtin('extinctionother.dat')
    if source == 'swift_uv':
        sne = ReadExtin('extinctionswiftuv.dat')
    if source == 'foley_hst':
        sne = ReadExtin('extinctionhst.dat')
    if source == 'foundation':
        sne = ReadExtin('extinctionfoundation.dat')

    # host_reddened = ReadExtin('../data/info_files/ryan_av.txt')
    newdata = []

    old_wave = old_wave * u.Angstrom  # wavelengths
    old_flux = old_flux * u.Unit('W m-2 angstrom-1 sr-1')
    spec1d = Spectrum1D.from_array(old_wave, old_flux)
    spec1d_ivar = Spectrum1D.from_array(old_wave, old_ivar)
    dered_flux, dered_ivar = test_dered.dered(
        sne, sn_name, spec1d.wavelength, spec1d.flux, spec1d_ivar.flux,
        source=source)  # Dereddening (see if sne in extinction files match the SN name)
    # new_flux = host_correction(sne, sn_name, old_wave, new_flux)
    # new_flux = old_flux

    if testing:
        new_flux_plot = copy.deepcopy(dered_flux)
        new_ivar_plot = copy.deepcopy(dered_ivar)
        old_wave_plot = copy.deepcopy(old_wave)

    new_flux = dered_flux.value
    new_ivar = dered_ivar.value
    old_wave = old_wave.value

    if testing:
        av_specific = 0.2384  # 2005lz
        av_specific = 0.4089  # 2007af
        r_v = 2.5
        new_flux_host, new_ivar_host = test_dered.host_correction(
            av_specific, r_v, sn_name, old_wave_plot, new_flux_plot,
            new_ivar_plot)
        new_flux_host = new_flux_host.value
        old_flux = old_flux.value

        s = scale_composites_in_range(new_flux, old_flux)
        new_flux_scaled = s * new_flux
        s = scale_composites_in_range(new_flux_host, old_flux)
        new_flux_host_scaled = s * new_flux_host

        valid_data = np.where(old_wave > 4000)
        norm = 10. / np.nanmax(new_flux_host_scaled[valid_data])
        old_flux_norm = old_flux * norm
        new_flux_norm = new_flux_scaled * norm
        new_flux_host_norm = new_flux_host_scaled * norm

        plt.rc('font', family='serif')
        fig, ax = plt.subplots(1, 1)
        fig.set_size_inches(10, 8, forward=True)
        plt.minorticks_on()
        plt.xticks(fontsize=20)
        # ax.xaxis.set_ticks(np.arange(np.round(wave[0],-3),np.round(wave[-1],-3),1000))
        plt.yticks(fontsize=20)
        plt.tick_params(which='major', bottom='on', top='on', left='on',
                        right='on', length=10)
        plt.tick_params(which='minor', bottom='on', top='on', left='on',
                        right='on', length=5)
        plt.plot(old_wave, old_flux_norm, linewidth=2, color='#000080',
                 label='Before Dereddening')
        plt.plot(old_wave, new_flux_norm, linewidth=2, color='gold',
                 label='Milky Way Corrected')
        # plt.plot(old_wave, new_flux_host_norm, linewidth = 2, color = '#d95f02', label='Host Corrected')
        plt.ylabel('Relative Flux', fontsize=30)
        plt.xlabel('Rest Wavelength ' + "($\mathrm{\AA}$)", fontsize=30)
        plt.xlim([old_wave[0] - 200, old_wave[-1] + 200])
        plt.legend(loc=1, fontsize=20)
        # plt.savefig('../../../Paper_Drafts/reprocessing_updated/red_corr.pdf', dpi = 300, bbox_inches = 'tight')
        plt.show()

        # plt.plot(old_wave, old_ivar)
        # plt.plot(old_wave, new_ivar)
        # plt.show()

    new_wave = old_wave / (1. + z)  # Deredshifting
    if not use_old_error:
        new_var = None
    else:
        new_var = old_var  # Placeholder if it needs to be changed
    # var = new_flux*0+1

    # newdata = Interpo(new_wave, new_flux, new_ivar)  # Do the interpolation
    newdata, scale, var_final = Interpo_flux_conserving(new_wave, new_flux,
                                                        new_ivar,
                                                        testing=testing)

    if testing:
        # newdata_test = Interpo(new_wave, new_flux_host_norm, new_ivar)
        newdata_test, scale, var_final = Interpo_flux_conserving(
            new_wave, new_flux_host_norm, new_ivar)
        interp_wave = newdata_test[0, :]
        interp_flux = newdata_test[1, :]
        plt.rc('font', family='serif')
        fig, ax = plt.subplots(1, 1)
        fig.set_size_inches(10, 8, forward=True)
        plt.minorticks_on()
        plt.xticks(fontsize=20)
        # ax.xaxis.set_ticks(np.arange(np.round(wave[0],-3),np.round(wave[-1],-3),1000))
        plt.yticks(fontsize=20)
        plt.tick_params(which='major', bottom='on', top='on', left='on',
                        right='on', length=10)
        plt.tick_params(which='minor', bottom='on', top='on', left='on',
                        right='on', length=5)
        plt.plot(new_wave, 2. * new_flux_host_norm, linewidth=2,
                 color='#d95f02', label='Before Interpolation')
        plt.plot(interp_wave, interp_flux, linewidth=2, color='darkgreen',
                 label='After Interpolation')
        plt.ylabel('Relative Flux', fontsize=30)
        plt.xlabel('Rest Wavelength ' + "($\mathrm{\AA}$)", fontsize=30)
        plt.xlim([new_wave[0] - 200, new_wave[-1] + 200])
        plt.legend(loc=1, fontsize=20)
        # plt.savefig('../../../Paper_Drafts/reprocessing_updated/interp_deredshift.pdf', dpi = 300, bbox_inches = 'tight')
        plt.show()

    return newdata, SNR
def combine(spectra, rv, snr, wave_range=None,
            desired_wavelength_units='angstrom',
            flux_unit='W / (m^2 micron)', fill_value=1.0):
    '''Read in some spectra, shift them to rest wavelengths, then combine
    them in a weighted average.

    Parameters
    ----------
    spectra : list of spectra to input
    rv : array of RV values to shift the spectra
    snr : signal-to-noise ratio of the spectra

    Keywords
    --------
    wave_range : the range of wavelengths to have in the final spectrum

    Returns
    -------
    Combined Spectrum1D object
    '''
    flux_arr = None
    # print(len(spectra))
    for i in np.arange(len(spectra)):
        if os.path.exists(spectra[i]):
            # print(spectra[i])
            spectrum = read_fits_file.read_fits_file(
                spectra[i],
                desired_wavelength_units=desired_wavelength_units)
            wave = spectrum.wavelength.value
            flux = spectrum.flux.value
            # print(flux)
            if flux_arr is None:
                if wave_range is not None:
                    good = np.where((wave >= wave_range[0]) &
                                    (wave <= wave_range[1]))[0]
                    wave_arr = wave[good]
                else:
                    wave_arr = wave
                flux_arr = np.ones((len(wave_arr), len(spectra)))

            # shift and interpolate wavelengths
            shift_wave = wave / (rv[i] / 3e5 + 1.0)
            f = interpolate.interp1d(shift_wave, flux, fill_value=fill_value,
                                     bounds_error=False)
            flux_arr[:, i] = f(wave_arr)
            # print(np.shape(flux_arr))
        else:
            print('file not found:' + spectra[i])

    average_flux = np.average(flux_arr, axis=1, weights=snr**2)
    ret_spec = Spectrum1D.from_array(
        wave_arr, average_flux,
        dispersion_unit=desired_wavelength_units, unit=flux_unit)
    return ret_spec
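# A minimal usage sketch for combine; the file names, RV values and S/N
# weights here are hypothetical.
spectra = ['star_epoch1.fits', 'star_epoch2.fits']
rv = np.array([15.3, -4.1])   # km/s shifts to rest frame
snr = np.array([40., 25.])    # per-epoch S/N, squared to form the weights
combined = combine(spectra, rv, snr, wave_range=(20000., 24000.))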
def vue_fit_model_to_cube(self, *args, **kwargs):

    if self._warn_if_no_equation():
        return

    if self.selected_data in self.app.data_collection.labels:
        data = self.app.data_collection[self.selected_data]
    else:  # User selected some subset from spectrum viewer, just use original cube
        data = self.app.data_collection[0]

    # First, ensure that the selected data is cube-like. It is possible
    # that the user has selected a pre-existing 1d data object.
    if data.ndim != 3:
        snackbar_message = SnackbarMessage(
            f"Selected data {self.selected_data} is not cube-like",
            color='error', sender=self)
        self.hub.broadcast(snackbar_message)
        return

    # Get the primary data component
    attribute = data.main_components[0]
    component = data.get_component(attribute)
    temp_values = data.get_data(attribute)

    # Transpose the axis order
    values = np.moveaxis(temp_values, 0, -1) * u.Unit(component.units)

    # We manually create a Spectrum1D object from the flux information
    # in the cube we select
    wcs = data.coords.sub([WCSSUB_SPECTRAL])
    spec = Spectrum1D(flux=values, wcs=wcs)

    # TODO: in vuetify >2.3, timeout should be set to -1 to keep open
    # indefinitely
    snackbar_message = SnackbarMessage("Fitting model to cube...",
                                       loading=True, timeout=0, sender=self)
    self.hub.broadcast(snackbar_message)

    # Retrieve copy of the models with proper "fixed" dictionaries
    models_to_fit = self._reinitialize_with_fixed()

    try:
        fitted_model, fitted_spectrum = fit_model_to_spectrum(
            spec,
            models_to_fit,
            self.model_equation,
            run_fitter=True,
            window=self._window)
    except ValueError:
        snackbar_message = SnackbarMessage("Cube fitting failed",
                                           color='error', loading=False,
                                           sender=self)
        self.hub.broadcast(snackbar_message)
        raise

    # Save fitted 3D model in a way that the cubeviz helper can access it.
    for m in fitted_model:
        temp_label = "{} ({}, {})".format(self.model_label, m["x"], m["y"])
        self.app.fitted_models[temp_label] = m["model"]

    # Transpose the axis order back
    values = np.moveaxis(fitted_spectrum.flux.value, -1, 0)

    count = max(map(lambda s: int(next(iter(re.findall(r"\d$", s)), 0)),
                    self.data_collection.labels)) + 1
    label = f"{self.model_label} [Cube] {count}"

    # Create new glue data object
    output_cube = Data(label=label, coords=data.coords)
    output_cube['flux'] = values
    output_cube.get_component('flux').units = \
        fitted_spectrum.flux.unit.to_string()

    # Add to data collection
    self.app.add_data(output_cube, label)
    if self.selected_viewer != 'None':
        # replace the contents in the selected viewer with the results
        # from this plugin
        self.app.add_data_to_viewer(
            self.viewer_to_id.get(self.selected_viewer),
            label, clear_other_data=True)

    snackbar_message = SnackbarMessage("Finished cube fitting",
                                       color='success', loading=False,
                                       sender=self)
    self.hub.broadcast(snackbar_message)
sne_name = folder_path.split('/')[-1]

# input command for redshift
redshift = float(input('Input Redshift of SNe:'))
z = 1 + redshift

# turn each file name into array
# apply obs to rest wavelength and flux conversion
# create list of spectrum files, and continuum files
cont_list = []
data = []
for epoch in file_list:
    file = np.genfromtxt(fname=epoch)
    lamb = (file[:, 0] / z) * u.AA
    flux = file[:, 1] * 10 ** -15 * u.Unit('erg cm-2 s-1 AA-1')
    spec = Spectrum1D(spectral_axis=lamb, flux=flux)
    data.append(spec)
    cont_list.append((spec / spec) *
                     fit_generic_continuum(spec)(spec.spectral_axis))

# plot to find the lines to calculate EW
spcplt = data[0]
cntplt = cont_list[0]
f, ax = plt.subplots()
ax.step(spcplt.wavelength, spcplt.flux)
ax.step(cntplt.wavelength, cntplt.flux)
ax.set_xlim(6000 * u.AA, 7500 * u.AA)
ax.grid(True)

wave = [6563, 6678, 7065, 7155]
for line in wave:
    plt.axvline(x=line)
starspectrum_uncert = np.concatenate(
    (starspectrum_uncert, single_order_spec.uncertainty.value[::-1]))
starspectrum_flux = np.concatenate(
    (starspectrum_flux, single_order_spec.flux.value[::-1]))
starspectrum_wavelength = np.concatenate(
    (starspectrum_wavelength, single_order_spec.wavelength.value[::-1]))

# print allorders_path
# starspectrum = read_fits_file.read_nirspec_dat(allorders_path,
#                                                desired_wavelength_units='Angstrom',
#                                                wave_range=waveranges)
starspectrum = Spectrum1D.from_array(dispersion=starspectrum_wavelength,
                                     flux=starspectrum_flux,
                                     dispersion_unit=u.angstrom,
                                     uncertainty=starspectrum_uncert)

g = load_grid(
    '/u/rbentley/metallicity/grids/phoenix_t2500_6000_w20000_24000_R40000.h5')

w, f = g()
print len(starspectrum.flux.value)
interp1 = Interpolate(starspectrum)
convolve1 = InstrumentConvolveGrating.from_grid(g, R=24000)
rot1 = RotationalBroadening.from_grid(g, vrot=np.array([10.0]))
norm1 = Normalize(starspectrum, 2)

# concatenate the spectral grid (which will have the stellar parameters)
# with other model components that you want to fit
model = g | rot1 | DopplerShift(vrad=0.0) | convolve1 | interp1 | norm1
def vue_fit_model_to_cube(self, *args, **kwargs):

    if self._warn_if_no_equation():
        return
    data = self.app.data_collection[self._selected_data_label]

    # First, ensure that the selected data is cube-like. It is possible
    # that the user has selected a pre-existing 1d data object.
    if data.ndim != 3:
        snackbar_message = SnackbarMessage(
            f"Selected data {self._selected_data_label} is not cube-like",
            color='error', sender=self)
        self.hub.broadcast(snackbar_message)
        return

    # Get the primary data component
    attribute = data.main_components[0]
    component = data.get_component(attribute)
    temp_values = data.get_data(attribute)

    # Transpose the axis order
    values = np.moveaxis(temp_values, 0, -1) * u.Unit(component.units)

    # We manually create a Spectrum1D object from the flux information
    # in the cube we select
    wcs = data.coords.sub([WCSSUB_SPECTRAL])
    spec = Spectrum1D(flux=values, wcs=wcs)

    # TODO: in vuetify >2.3, timeout should be set to -1 to keep open
    # indefinitely
    snackbar_message = SnackbarMessage("Fitting model to cube...",
                                       loading=True, timeout=0, sender=self)
    self.hub.broadcast(snackbar_message)

    # Retrieve copy of the models with proper "fixed" dictionaries
    # TODO: figure out why this was causing the parallel fitting to fail
    # models_to_fit = self._reinitialize_with_fixed()
    models_to_fit = self._initialized_models.values()

    fitted_model, fitted_spectrum = fit_model_to_spectrum(
        spec,
        models_to_fit,
        self.model_equation,
        run_fitter=True)

    # Save fitted 3D model in a way that the cubeviz helper can access it.
    self.app._fitted_3d_model = fitted_model

    # Transpose the axis order back
    values = np.moveaxis(fitted_spectrum.flux.value, -1, 0)

    count = max(map(lambda s: int(next(iter(re.findall(r"\d$", s)), 0)),
                    self.data_collection.labels)) + 1
    label = f"{self.model_label} [Cube] {count}"

    # Create new glue data object
    output_cube = Data(label=label, coords=data.coords)
    output_cube['flux'] = values
    output_cube.get_component('flux').units = \
        fitted_spectrum.flux.unit.to_string()

    # Add to data collection
    self.app.data_collection.append(output_cube)

    snackbar_message = SnackbarMessage("Finished cube fitting",
                                       color='success', loading=False,
                                       sender=self)
    self.hub.broadcast(snackbar_message)
masked_data_sl_f = np.delete(starspectrum35.flux.value, sl_mask_indices)
masked_data_sl_w = np.delete(starspectrum35.wavelength.value, sl_mask_indices)
masked_data_sl_u = np.delete(starspectrum35.uncertainty.value, sl_mask_indices)

plt.figure(figsize=(12, 10))
# plt.plot(masked_data_sl_w, masked_data_sl_f)

# masked_data_sl = SKSpectrum1D.from_array(wavelength=masked_data_sl_w*u.angstrom,
#                                          flux=masked_data_sl_f*u.Unit('erg/s/cm^2/angstrom'),
#                                          uncertainty=masked_data_sl_f*u.Unit('erg/s/cm^2/angstrom'))
masked_data_sl = Spectrum1D.from_array(dispersion=masked_data_sl_w,
                                       flux=masked_data_sl_f,
                                       dispersion_unit=u.angstrom,
                                       uncertainty=masked_data_sl_u)

interp_sl = Interpolate(masked_data_sl)
convolve_sl = InstrumentConvolveGrating.from_grid(g, R=24000)
rot_sl = RotationalBroadening.from_grid(g, vrot=np.array([10.0]))
norm_sl = Normalize(masked_data_sl, 2)
like_sl = Chi2Likelihood(masked_data_sl)

model = g | rot_sl | DopplerShift(vrad=0.0) | convolve_sl | interp_sl | norm_sl
masked_model_sl = model | like_sl
tw, tf = model()
masked_model_sl

# Fits S_lambda masked model
print type(mask_sl_flux), type(masked_data_sl.wavelength.value)
def dered_corr(data_path, template_path, wave_corr, z_lit, z_bound, spec_type):
    """Function to check remaining flexure after flexure correction has been
    applied.

    Parameters
    ----------
    data_path : str
        Path to data spectrum file
    template_path : str
        Path to template spectrum file
    wave_corr : tuple
        Flexure-corrected wavelength array
    z_lit : float
        Literature redshift for target object
    z_bound : float
        Amount to add and subtract from z_lit for redshifts to test
    spec_type : str
        Indicates if looking at coadded spectrum or single 1D spectrum

    Returns
    -------
    tm_result_corr : float
        Best-fitting redshift post flexure-correction
    dered_wave : tuple
        De-redshifted wavelength array
    """
    # Get data
    data_wave_full, data_cut_wave, data_flux_full, data_cut_flux, \
        data_noise_full, data_cut_noise = prep_data(data_path, spec_type)

    # Get template
    template_wave, smoothed_template_flux, smoothed_template_noise = \
        prep_template(template_path)

    # Find redshift of new, corrected spectrum and de-redshift it to match
    # the template. Continuum-normalize over the whole blue range.
    norm_wave_corr, norm_corr_flux, norm_corr_noise = continuum_normalize(
        np.min(wave_corr), np.max(wave_corr), data_cut_flux, wave_corr,
        data_cut_noise)
    norm_template_wave, norm_template_flux, norm_template_noise = \
        continuum_normalize(np.min(template_wave), np.max(template_wave),
                            smoothed_template_flux, template_wave,
                            smoothed_template_noise)

    # Plot before
    plt.figure(figsize=(12, 4))
    plt.plot(norm_wave_corr, norm_corr_flux, label='observed')
    plt.plot(norm_template_wave, norm_template_flux, label='template')
    plt.legend()

    # Find new redshift of whole spectrum
    corr_spec = Spectrum1D(
        spectral_axis=norm_wave_corr * u.Angstrom,
        flux=norm_corr_flux * (u.erg / u.s / u.cm**2 / u.Angstrom),
        uncertainty=StdDevUncertainty(norm_corr_noise))
    template_spec = Spectrum1D(
        spectral_axis=norm_template_wave * u.Angstrom,
        flux=norm_template_flux * (u.Lsun / u.micron))
    pre_redshifts = np.linspace(z_lit - z_bound, z_lit + z_bound, 1000)
    tm_result_corr = template_redshift(observed_spectrum=corr_spec,
                                       template_spectrum=template_spec,
                                       redshift=pre_redshifts)

    # Plot after
    plt.figure(figsize=(12, 4))
    plt.plot(corr_spec.spectral_axis, corr_spec.flux, label='observed')
    plt.plot(tm_result_corr[2].spectral_axis, tm_result_corr[2].flux,
             label='redshifted template')
    plt.legend()

    plt.figure(figsize=(12, 4))
    plt.plot(template_spec.spectral_axis, template_spec.flux,
             label='template')
    plt.plot(tm_result_corr[2].spectral_axis, tm_result_corr[2].flux,
             label='redshifted template')
    plt.legend()

    # De-redshift data
    dered_wave = norm_wave_corr / (1 + z_lit)

    plt.figure(figsize=(12, 4))
    plt.plot(dered_wave, norm_corr_flux, label='de-redshifted data')
    plt.plot(norm_template_wave, norm_template_flux, label='template')
    plt.legend()

    return tm_result_corr, dered_wave
def centroid_offsets(targ_bounds, data_wave, data_flux, sky_cents):
    """Returns amount by which extracted skylines are offset from model and
    the nearest wavelength value to each.

    Parameters
    ----------
    targ_bounds : tuple
        List of tuples defining bounds of region around each skyline to
        examine
    data_wave : tuple
        Wavelength array
    data_flux : tuple
        Flux array
    sky_cents : tuple
        Skymodel centroids

    Returns
    -------
    nearest_waves : tuple
        Nearest wavelength value to centroid
    offsets : tuple
        Offset between data and skymodel
    """
    regions = SpectralRegion(targ_bounds[0][0] * u.Angstrom,
                             targ_bounds[0][-1] * u.Angstrom)
    for i in range(1, len(targ_bounds)):
        regions += SpectralRegion(targ_bounds[i][0] * u.Angstrom,
                                  targ_bounds[i][-1] * u.Angstrom)

    # Normalize data
    targ_norm_wave, targ_norm_flux, targ_norm_noise = continuum_normalize(
        np.min(data_wave), np.max(data_wave), data_flux, data_wave,
        np.zeros(len(data_flux)))

    # Find offsets
    target = Spectrum1D(spectral_axis=targ_norm_wave * u.Angstrom,
                        flux=targ_norm_flux * u.ct)
    sub_spec = extract_region(target, regions)
    offsets = np.zeros(len(sky_cents))
    nearest_waves = np.zeros(len(sky_cents))
    for i, sub in enumerate(sub_spec):
        an_disp = sub.flux.max()
        an_ampl = sub.flux.min()
        an_mean = sub.spectral_axis[sub.flux.argmax()]
        nearest_waves[i] = an_mean.value
        an_stdv = np.sqrt(np.sum((sub.spectral_axis - an_mean)**2) /
                          (len(sub.spectral_axis) - 1))

        plt.figure()
        plt.scatter(an_mean.value, an_disp.value, marker='o',
                    color='#e41a1c', s=100, label='data')
        plt.scatter(sky_cents[i], an_disp.value, marker='o', color='k',
                    s=100, label='archive')
        plt.vlines([an_mean.value - an_stdv.value,
                    an_mean.value + an_stdv.value],
                   sub.flux.min().value, sub.flux.max().value,
                   color='#377eb8', ls='--', lw=2)

        g_init = (models.Const1D(an_disp) +
                  models.Gaussian1D(amplitude=(an_ampl - an_disp),
                                    mean=an_mean, stddev=an_stdv))
        g_fit = fit_lines(sub, g_init)
        line_fit = g_fit(sub.spectral_axis)
        plt.plot(sub.spectral_axis, sub.flux, color='#e41a1c', lw=2)
        plt.plot(sub.spectral_axis, line_fit, color='#377eb8', lw=2)
        plt.axvline(an_mean.value, color='#e41a1c', ls='--', lw=2)
        plt.legend()

        offsets[i] = an_mean.value - sky_cents[i].value

    return nearest_waves, offsets
def chunk_redshift(data_wave, data_flux, data_noise, template_path, z_lit,
                   targ_delta, overhang, z_test, z_bound, position):
    """Returns the best-fit redshift of each chunk.

    Parameters
    ----------
    data_wave : tuple
        Data wavelength array
    data_flux : tuple
        Data flux array
    data_noise : tuple
        Data noise array
    template_path : str
        Path to template spectrum file
    z_lit : float
        Literature redshift of target object
    targ_delta : float
        Wavelength chunk size in Angstroms
    overhang : float
        Amount of wavelength overhang template chunks should have in
        Angstroms
    z_test : float
        Starting redshift for chunks (measured by eye in 1 chunk)
    z_bound : float
        Amount to add and subtract from z_lit for redshifts to test
    position : str
        'before' or 'after' to indicate if pre- or post-flexure correction

    Returns
    -------
    bestfit_redshift : tuple
        Best fitting redshift for each chunk
    best_chi2 : tuple
        Minimum chi squared for each chunk
    redshifted_spectra : tuple
        Redshifted chunks
    chi2 : tuple
        All chi2
    """
    # Get data chunks
    data_wave_chunks, data_flux_chunks, data_noise_chunks = data_chunks(
        data_wave, data_flux, data_noise, targ_delta)

    # Get template chunks
    temp_wave_chunks, temp_flux_chunks, temp_noise_chunks, \
        temp_central_wavelengths, central_waves = template_chunks(
            data_wave, data_flux, data_noise, template_path, z_lit,
            targ_delta, overhang, position)

    # Find redshifts of each chunk
    observed_chunks = []
    temp_chunks = []
    for i in range(len(data_wave_chunks)):
        observed_chunks.append(Spectrum1D(
            spectral_axis=data_wave_chunks[i] * u.Angstrom,
            flux=data_flux_chunks[i] * (u.erg / u.s / u.cm**2 / u.Angstrom),
            uncertainty=InverseVariance(data_noise_chunks[i])))
        temp_chunks.append(Spectrum1D(
            spectral_axis=temp_wave_chunks[i] * u.Angstrom,
            flux=temp_flux_chunks[i] * (u.Lsun / u.micron),
            uncertainty=StdDevUncertainty(temp_noise_chunks[i])))

    redshifts_chunks = np.linspace(z_test - z_bound, z_test + z_bound, 1000)

    fitted_redshift_results = []
    bestfit_redshift = np.zeros(len(data_wave_chunks))
    best_chi2 = np.zeros(len(data_wave_chunks))
    redshifted_spectra = []
    chi2 = []
    for i in range(len(data_wave_chunks)):
        fitted_redshift_results.append(
            template_redshift(observed_spectrum=observed_chunks[i],
                              template_spectrum=temp_chunks[i],
                              redshift=redshifts_chunks))
        bestfit_redshift[i] = fitted_redshift_results[i][0]
        best_chi2[i] = fitted_redshift_results[i][1]
        redshifted_spectra.append(fitted_redshift_results[i][2])
        chi2.append(fitted_redshift_results[i][3])

    return bestfit_redshift, best_chi2, redshifted_spectra, chi2
def sum_order(self, order):
    wavelengths = self.es_list[0].get_order(order).wavelength
    total_flux = np.sum([spectrum.get_order(order).flux
                         for spectrum in self.es_list], axis=0)
    return Spectrum1D.from_array(wavelengths, total_flux)
def __call__(self, image, trace_object,
             disp_axis=1, crossdisp_axis=0,
             bkgrd_prof=models.Polynomial1D(2),
             variance=None, mask=None, unit=None):
    """
    Run the Horne calculation on a region of an image and extract a
    1D spectrum.

    Parameters
    ----------
    image : `~astropy.nddata.NDData` or array-like, required
        The input 2D spectrum from which to extract a source. An
        NDData object must specify uncertainty and a mask. An array
        requires use of the `variance`, `mask`, & `unit` arguments.
    trace_object : `~specreduce.tracing.Trace`, required
        The associated 1D trace object created for the 2D image.
    disp_axis : int, optional
        The index of the image's dispersion axis. [default: 1]
    crossdisp_axis : int, optional
        The index of the image's cross-dispersion axis. [default: 0]
    bkgrd_prof : `~astropy.modeling.Model`, optional
        A model for the image's background flux.
        [default: models.Polynomial1D(2)]
    variance : `~numpy.ndarray`, optional
        (Only used if `image` is not an NDData object.)
        The associated variances for each pixel in the image. Must
        have the same dimensions as `image`. [default: None]
    mask : `~numpy.ndarray`, optional
        (Only used if `image` is not an NDData object.)
        Whether to mask each pixel in the image. Must have the same
        dimensions as `image`. If blank, all non-NaN pixels are
        unmasked. [default: None]
    unit : `~astropy.units.core.Unit` or str, optional
        (Only used if `image` is not an NDData object.)
        The associated unit for the data in `image`. If blank,
        fluxes are interpreted as unitless. [default: None]

    Returns
    -------
    spec_1d : `~specutils.Spectrum1D`
        The final, Horne extracted 1D spectrum.
    """
    # handle image and associated data based on image's type
    if isinstance(image, NDData):
        img = np.ma.array(image.data, mask=image.mask)
        unit = image.unit if image.unit is not None else u.Unit()

        if image.uncertainty is not None:
            # prioritize NDData's uncertainty over variance argument
            if image.uncertainty.uncertainty_type == 'var':
                variance = image.uncertainty.array
            elif image.uncertainty.uncertainty_type == 'std':
                # NOTE: CCDData defaults uncertainties given as pure arrays
                # to std and logs a warning saying so upon object creation.
                # should we remind users again here?
                warnings.warn("image NDData object's uncertainty "
                              "interpreted as standard deviation. if "
                              "incorrect, use VarianceUncertainty when "
                              "assigning image object's uncertainty.")
                variance = image.uncertainty.array**2
            elif image.uncertainty.uncertainty_type == 'ivar':
                variance = 1 / image.uncertainty.array
            else:
                # other options are InverseVariance and UnknownVariance
                raise ValueError("image NDData object has unexpected "
                                 "uncertainty type. instead, try "
                                 "VarianceUncertainty or StdDevUncertainty.")
        else:
            # ignore variance arg to focus on updating NDData object
            raise ValueError('image NDData object lacks uncertainty')

    else:
        if any(arg is None for arg in (variance, mask, unit)):
            raise ValueError('if image is a numpy array, the variance, '
                             'mask, and unit arguments must be specified. '
                             'consider wrapping that information into one '
                             'object by instead passing an NDData image.')
        if image.shape != variance.shape:
            raise ValueError('image and variance shapes must match')
        if image.shape != mask.shape:
            raise ValueError('image and mask shapes must match')

        # fill in non-required arguments if empty
        if mask is None:
            mask = np.ma.masked_invalid(image)
        if isinstance(unit, str):
            unit = u.Unit(unit)
        else:
            unit = unit if unit is not None else u.Unit()

        # create image
        img = np.ma.array(image, mask=mask)

    # co-add signal in each image column
    ncols = img.shape[crossdisp_axis]
    xd_pixels = np.arange(ncols)  # y plot dir / x spec dir
    coadd = img.sum(axis=disp_axis) / ncols

    # fit source profile, using Gaussian model as a template
    # NOTE: could add argument for users to provide their own model
    gauss_prof = models.Gaussian1D(amplitude=coadd.max(),
                                   mean=coadd.argmax(), stddev=2)

    # Fit extraction kernel to column with combined gaussian/bkgrd model
    ext_prof = gauss_prof + bkgrd_prof
    fitter = fitting.LevMarLSQFitter()
    fit_ext_kernel = fitter(ext_prof, xd_pixels, coadd)

    # use compound model to fit a kernel to each image column
    # NOTE: infers Gaussian1D source profile; needs generalization for others
    kernel_vals = []
    norms = []
    for col_pix in range(img.shape[disp_axis]):
        # set gaussian model's mean as column's corresponding trace value
        fit_ext_kernel.mean_0 = trace_object.trace[col_pix]
        # NOTE: support for variable FWHMs forthcoming and would be here

        # fit compound model to column
        fitted_col = fit_ext_kernel(xd_pixels)

        # save result and normalization
        kernel_vals.append(fitted_col)
        norms.append(fit_ext_kernel.amplitude_0 *
                     fit_ext_kernel.stddev_0 * np.sqrt(2 * np.pi))

    # transform fit-specific information
    kernel_vals = np.array(kernel_vals).T
    norms = np.array(norms)

    # calculate kernel normalization, masking NaNs
    g_x = np.ma.sum(kernel_vals**2 / variance, axis=crossdisp_axis)

    # sum by column weights
    weighted_img = np.ma.divide(img * kernel_vals, variance)
    result = np.ma.sum(weighted_img, axis=crossdisp_axis) / g_x

    # multiply kernel normalization into the extracted signal
    extraction = result * norms

    # convert the extraction to a Spectrum1D object
    pixels = np.arange(img.shape[disp_axis]) * u.pix
    spec_1d = Spectrum1D(spectral_axis=pixels, flux=extraction * unit)

    return spec_1d
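# A minimal usage sketch for the Horne extraction above, assuming the
# enclosing class has been instantiated as `extract` (the class name is not
# shown in this snippet) and `trace` is a specreduce.tracing.Trace built
# for the image. Array sizes and values are arbitrary.
img = np.random.rand(50, 200)         # (cross-dispersion, dispersion)
var = np.full_like(img, 0.01)         # per-pixel variance
msk = np.zeros_like(img, dtype=bool)  # nothing masked
spec = extract(img, trace, variance=var, mask=msk, unit='adu')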
def __call__(self, spectrum):
    doppler_factor = 1. + self.vrad / const.c
    return Spectrum1D.from_array(spectrum.wavelength * doppler_factor,
                                 spectrum.flux,
                                 dispersion_unit=spectrum.wavelength.unit)
if __name__ == "__main__": wline = [185.999] band = 'R1' order = int(band[1]) np.random.seed(0) x = np.linspace(180., 190., 100) y = 3 * np.exp(-0.5 * (x - 185.999)**2 / 0.1**2) y += np.random.normal(0., 0.2, x.shape) y_continuum = 3.2 * np.exp(-0.5 * (x - 5.6)**2 / 4.8**2) y += y_continuum #create spectrum to fit spectrum = Spectrum1D(flux=y * u.Jy, spectral_axis=x * u.um) noise_region = SpectralRegion(180. * u.um, 184. * u.um) spectrum = noise_region_uncertainty(spectrum, noise_region) #line_region = [(185.52059807*u.um, 186.47740193*u.um)] g1_fit = fit_generic_continuum(spectrum, model=models.Polynomial1D(1)) y_continuum_fitted = g1_fit(x * u.um) plt.plot(x, y, label='spectrum') plt.errorbar(x, y, yerr=spectrum.uncertainty.array, color='b') plt.plot(x, y_continuum_fitted, label='cont_0') plt.title('Continuum+line Fitting') plt.grid(True) line = LineFitterMult(spectrum, wline,
def mos_spec2d_parser(app, data_obj, data_labels=None, add_to_table=True,
                      show_in_viewer=False):
    """
    Attempts to parse a 2D spectrum object.

    Notes
    -----
    This currently only works with JWST-type data in which the data is in
    the second hdu of the fits file.

    Parameters
    ----------
    app : `~jdaviz.app.Application`
        The application-level object used to reference the viewers.
    data_obj : str or list or spectrum-like
        File path, list, or spectrum-like object to be read as a new row
        in the mosviz table.
    data_labels : str, optional
        The label applied to the glue data component.
    """
    def _parse_as_spectrum1d(path):
        # Parse as a FITS file and assume the WCS is correct
        with fits.open(path) as hdulist:
            data = hdulist[1].data
            header = hdulist[1].header
            wcs = WCS(header)
        return Spectrum1D(data, wcs=wcs)

    # Coerce into list-like object
    if not isinstance(data_obj, (list, tuple, SpectrumCollection)):
        data_obj = [data_obj]

    # If we're given a string, repeat it for each object
    if isinstance(data_labels, str):
        if len(data_obj) > 1:
            data_labels = [f"{data_labels} {i}" for i in range(len(data_obj))]
        else:
            data_labels = [data_labels]
    elif data_labels is None:
        if len(data_obj) > 1:
            data_labels = [f"2D Spectrum {i}" for i in range(len(data_obj))]
        else:
            data_labels = ['2D Spectrum']

    with app.data_collection.delay_link_manager_update():

        for index, data in enumerate(data_obj):
            # If we got a filepath, first try to parse it with the
            # Spectrum1D reader, then fall back to parsing it as a generic
            # FITS file.
            if _check_is_file(data):
                try:
                    data = Spectrum1D.read(data)
                except IORegistryError:
                    data = _parse_as_spectrum1d(data)

            # Copy (if present) region to top-level meta object
            if ('header' in data.meta and
                    'S_REGION' in data.meta['header'] and
                    'S_REGION' not in data.meta):
                data.meta['S_REGION'] = data.meta['header']['S_REGION']

            # Set the instrument
            # TODO: this should not be set to nirspec for all datasets
            data.meta['INSTRUME'] = 'nirspec'

            # Get the corresponding label for this data product
            label = data_labels[index]
            app.data_collection[label] = data

    if add_to_table:
        _add_to_table(app, data_labels, '2D Spectra')

    if show_in_viewer:
        if len(data_labels) > 1:
            raise ValueError("More than one data label provided, unclear " +
                             "which to show in viewer")
        app.add_data_to_viewer("spectrum-2d-viewer", data_labels[0])
from astropy.convolution import Gaussian1DKernel, convolve

INPUT_spec = 'CS31_CNO_0n.spec'
fwhm = 0.20
# SIGMA=8
# FWHM -> Gaussian sigma (2*sqrt(2*ln 2) ~ 2.35482); the factor 100
# presumably rescales to pixel units for this spectrum's sampling
SIGMA = fwhm / 2.35482 * 100

wl, fl = np.genfromtxt(INPUT_spec, skip_header=2, unpack=True)
wl2, fl2 = np.genfromtxt('fluxCS31_0.norm.nulbad.0.200', skip_header=2,
                         unpack=True)

# u.AA is angstrom (u.A would be ampere)
spec1 = Spectrum1D(spectral_axis=wl * u.AA, flux=fl * u.Jy)

# spec1_tsmooth = trapezoid_smooth(spec1, width=3)
# spec1_bsmooth = box_smooth(spec1, width=3)
# spec1_msmooth = median_smooth(spec1, width=3)
spec1_gsmooth = gaussian_smooth(spec1, stddev=SIGMA)

g = Gaussian1DKernel(stddev=SIGMA)

# Convolve data
z = convolve(fl, g)

# plt.plot(spec1.spectral_axis, spec1.flux)
plt.plot(wl2, fl2)
plt.plot(wl, z)
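# Sanity check (an assumption worth verifying: specutils' gaussian_smooth
# convolves with an astropy Gaussian1DKernel of the same stddev, so the two
# smoothing paths above should agree to numerical precision):
np.testing.assert_allclose(spec1_gsmooth.flux.value, z, rtol=1e-6)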
def load_aaomega_file(filename, *args, **kwargs):
    with read_fileobj_or_hdulist(filename, *args, **kwargs) as fits_file:
        # fits_file is the hdulist
        fits_header = fits_file[AAOMEGA_SCIENCE_INDEX].header

        var_idx = None
        rwss_idx = None
        for idx, extn in enumerate(fits_file):
            if extn.name == "VARIANCE":
                var_idx = idx
            if extn.name == "RWSS":
                rwss_idx = idx

        # science data
        fits_data = fits_file[AAOMEGA_SCIENCE_INDEX].data
        # read in Fibre table data....
        ftable = Table(fits_file[AAOMEGA_FIBRE_INDEX].data)

        # A SpectrumList to hold all the Spectrum1D objects
        sl = SpectrumList()

        # the row var contains the pixel data from the science frame
        for i, row in enumerate(fits_data):
            # Definitely need deepcopy here, otherwise it does *NOT* work!
            fib_header = deepcopy(fits_header)
            # Adjusting some values from primary header so individual fibre
            # spectra have meaningful headers
            fib_header["FLDNAME"] = (fits_header["OBJECT"],
                                     "Name of 2dF .fld file")
            fib_header["FLDRA"] = (fits_header["MEANRA"],
                                   "Right Ascension of 2dF field")
            fib_header["FLDDEC"] = (fits_header["MEANDEC"],
                                    "Declination of 2dF field")

            # Now for the fibre specific information from the Fibre Table
            # (extension 2)
            # Caution: RA and DEC are stored in RADIANS in the FIBRE TABLE!
            fib_header["RA"] = (
                ftable["RA"][i] * 180.0 / np.pi,
                "Right Ascension of fibre from configure .fld file",
            )
            fib_header["DEC"] = (
                ftable["DEC"][i] * 180.0 / np.pi,
                "Declination of fibre from configure .fld file",
            )
            fib_header["OBJECT"] = (ftable["NAME"][i],
                                    "Name of target observed by fibre")
            fib_header["OBJCOM"] = (
                ftable["COMMENT"][i],
                "Comment from configure .fld file for target",
            )
            fib_header["OBJMAG"] = (ftable["MAGNITUDE"][i],
                                    "Magnitude of target observed by fibre")
            fib_header["OBJTYPE"] = (ftable["TYPE"][i],
                                     "Type of target observed by fibre")
            fib_header["OBJPIV"] = (ftable["PIVOT"][i],
                                    "Pivot number used to observe target")
            fib_header["OBJPID"] = (ftable["PID"][i],
                                    "Program ID from configure .fld file")
            fib_header["OBJX"] = (
                ftable["X"][i],
                "X coord of target observed by fibre (microns)",
            )
            fib_header["OBJY"] = (
                ftable["Y"][i],
                "Y coord of target observed by fibre (microns)",
            )
            fib_header["OBJXERR"] = (
                ftable["XERR"][i],
                "X coord error of target observed by fibre (microns)",
            )
            fib_header["OBJYERR"] = (
                ftable["YERR"][i],
                "Y coord error of target observed by fibre (microns)",
            )
            fib_header["OBJTHETA"] = (ftable["THETA"][i],
                                      "Angle of fibre used to observe target")
            fib_header["OBJRETR"] = (ftable["RETRACTOR"][i],
                                     "Retractor number used to observe target")
            # WLEN added around 2005 according to AAOmega obs manual...
            # so not always available
            if "WLEN" in ftable.colnames:
                fib_header["OBJWLEN"] = (ftable["WLEN"][i],
                                         "Retractor of target observed by fibre")

            # ftable['TYPE'][i]:
            # P == program (science)
            # S == sky
            # U == unallocated or unused
            # F == fiducial (guide) fibre
            # N == broken, dead or no fibre
            meta = {"header": fib_header}
            if ftable["TYPE"][i] == "P":
                meta["purpose"] = "reduced"
            elif ftable["TYPE"][i] == "S":
                meta["purpose"] = "sky"
            else:
                # Don't include other fibres that are not science or sky
                continue

            wcs = compute_wcs_from_keys_and_values(fib_header,
                                                   **AAOMEGA_2DF_WCS_SETTINGS)
            flux = row * AAOMEGA_2DF_FLUX_UNIT
            meta["fibre_index"] = i

            # Our science spectrum
            spectrum = Spectrum1D(wcs=wcs, flux=flux, meta=meta)
            # If the VARIANCE spectrum exists, attach it as the uncertainty
            if var_idx is not None:
                var_data = fits_file[var_idx].data
                var_flux = var_data[i] * AAOMEGA_2DF_FLUX_UNIT**2
                spectrum.uncertainty = VarianceUncertainty(var_flux)
            # If the RWSS spectrum exists, add it as an additional spectrum in
            # the meta dict with key 'science_sky'
            # This is an optional extension produced by 2dfdr on request: all
            # spectra without the average/median sky subtraction
            # Useful in case users want to do their own sky subtraction.
            if rwss_idx is not None:
                rwss_data = fits_file[rwss_idx].data
                rwss_flux = rwss_data[i] * AAOMEGA_2DF_FLUX_UNIT
                rwss_meta = {"header": fib_header, "purpose": "science_sky"}
                spectrum.meta["science_sky"] = Spectrum1D(wcs=wcs,
                                                          flux=rwss_flux,
                                                          meta=rwss_meta)

            # Add our spectrum to the list; the additional spectrum is
            # accessed using spectrum.meta['science_sky']
            sl.append(spectrum)

    add_labels(sl)

    return sl
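# Usage sketch ('run1.fits' is a hypothetical 2dF/AAOmega reduced frame):
spectra = load_aaomega_file('run1.fits')
science = [s for s in spectra if s.meta['purpose'] == 'reduced']
sky = [s for s in spectra if s.meta['purpose'] == 'sky']
print(len(science), 'science fibres;', len(sky), 'sky fibres')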
def line_fit(spec, spec_err, wave_obj, dwave=10. * u.AA,
             dwave_cont=100. * u.AA, sigmamax=14. * u.AA):
    """
    Function to fit a 1D gaussian to a HETDEX spectrum from get_spec.py

    Parameters
    ----------
    spec
        1D spectrum from a row in the table provided by get_spec.py.
        Will assume unit of 10**-17*u.Unit('erg cm-2 s-1 AA-1') if no
        units are provided.
    spec_err
        1D spectral uncertainty from table provided by get_spec.py.
        Will assume unit of 10**-17*u.Unit('erg cm-2 s-1 AA-1') if no
        units are provided.
    wave_obj
        wavelength you want to fit, an astropy quantity
    dwave
        spectral region above and below wave_obj to fit a line, an astropy
        quantity. Default is 10.*u.AA
    dwave_cont
        spectral region to fit continuum. Default is +/- 100.*u.AA
    sigmamax
        Maximum linewidth (this is sigma/stdev of the gaussian fit) to allow
        for a fit. Assumes unit of u.AA if not given

    Returns
    -------
    line_param, sn, chi2, sigma, line_flux_data, line_flux_model,
    line_flux_data_err, g_fit, cont -- or None if the line fit fails
    """
    try:
        spectrum = Spectrum1D(flux=spec,
                              spectral_axis=(2.0 * np.arange(1036) + 3470.) * u.AA,
                              uncertainty=StdDevUncertainty(spec_err),
                              velocity_convention=None)
    except ValueError:
        spectrum = Spectrum1D(
            flux=spec * 10**-17 * u.Unit('erg cm-2 s-1 AA-1'),
            spectral_axis=(2.0 * np.arange(1036) + 3470.) * u.AA,
            uncertainty=StdDevUncertainty(spec_err * 10**-17 *
                                          u.Unit('erg cm-2 s-1 AA-1')),
            velocity_convention=None)

    # measure continuum over 2*dwave_cont wide window first:
    cont_region = SpectralRegion((wave_obj - dwave_cont),
                                 (wave_obj + dwave_cont))
    cont_spectrum = extract_region(spectrum, cont_region)
    cont = np.median(cont_spectrum.flux)

    if np.isnan(cont):
        # set continuum if its NaN
        print('Continuum fit is NaN. Setting to 0.0')
        cont = 0.0 * cont_spectrum.unit

    # now get region to fit the continuum subtracted line
    sub_region = SpectralRegion((wave_obj - dwave), (wave_obj + dwave))
    sub_spectrum = extract_region(spectrum, sub_region)

    try:
        line_param = estimate_line_parameters(sub_spectrum - cont,
                                              models.Gaussian1D())
    except Exception:
        return None

    if np.isnan(line_param.amplitude.value):
        print('Line fit yields NaN result. Exiting.')
        return None

    try:
        sigma = np.minimum(line_param.stddev, sigmamax)
    except ValueError:
        sigma = np.minimum(line_param.stddev, sigmamax * u.AA)

    if np.isnan(sigma):
        sigma = sigmamax

    g_init = models.Gaussian1D(amplitude=line_param.amplitude,
                               mean=line_param.mean, stddev=sigma)

    # lineregion = SpectralRegion((wave_obj-2*sigma), (wave_obj+2*sigma))
    # cont = fit_generic_continuum(sub_spectrum, exclude_regions=lineregion,
    #                              model=models.Linear1D(slope=0))

    # r1 = SpectralRegion((wave_obj-dwave), (wave_obj-2*sigma))
    # r2 = SpectralRegion((wave_obj+2*sigma), (wave_obj+dwave))
    # fitcontregion = r1 + r2

    # fit_cont_spectrum = extract_region(sub_spectrum, fitcontregion)
    # cont = np.mean(np.hstack([fit_cont_spectrum[0].flux,
    #                           fit_cont_spectrum[1].flux]))

    # contspec = cont(sub_spectrum.spectral_axis)

    g_fit = fit_lines(sub_spectrum - cont, g_init)

    x = np.arange(wave_obj.value - dwave.value,
                  wave_obj.value + dwave.value, 0.5) * u.AA
    y_fit = g_fit(x)

    line_flux_model = np.sum(y_fit * 0.5 * u.AA)

    chi2 = calc_chi2(sub_spectrum - cont, g_fit)

    sn = np.sum(np.array(sub_spectrum.flux)) / np.sqrt(
        np.sum(sub_spectrum.uncertainty.array**2))

    line_flux_data = line_flux(sub_spectrum - cont).to(u.erg * u.cm**-2 *
                                                       u.s**-1)

    line_flux_data_err = np.sqrt(np.sum(sub_spectrum.uncertainty.array**2))

    # fitted_region = SpectralRegion((line_param.mean - 2*sigma),
    #                                (line_param.mean + 2*sigma))
    # fitted_spectrum = extract_region(spectrum, fitted_region)
    # line_param = estimate_line_parameters(fitted_spectrum, models.Gaussian1D())
    # sn = np.sum(np.array(fitted_spectrum.flux)) / np.sqrt(np.sum(
    #     fitted_spectrum.uncertainty.array**2))
    # line_flux_data = line_flux(fitted_spectrum).to(u.erg * u.cm**-2 * u.s**-1)
    # line_flux_data_err = np.sqrt(np.sum(fitted_spectrum.uncertainty.array**2))

    return (line_param, sn, chi2, sigma, line_flux_data, line_flux_model,
            line_flux_data_err, g_fit, cont)
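# Usage sketch (synthetic inputs; real spec/spec_err rows come from
# get_spec.py, and 4861 AA is just an arbitrary example wavelength):
import numpy as np
import astropy.units as u

spec = np.random.random(1036) * 1e-17 * u.Unit('erg cm-2 s-1 AA-1')
spec_err = 0.1 * spec
result = line_fit(spec, spec_err, wave_obj=4861. * u.AA)
if result is not None:
    line_param, sn, chi2, sigma, f_data, f_model, f_err, g_fit, cont = result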
def test_from_spectrum1d(mode):

    if mode == 'wcs3d':
        # This test is intended to be run with the version of Spectrum1D based
        # on NDCube 2.0
        pytest.importorskip("ndcube", minversion="1.99")

        # Set up simple spatial+spectral WCS
        wcs = WCS(naxis=3)
        wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'FREQ']
        wcs.wcs.set()
        flux = np.ones((4, 4, 5)) * u.Unit('Jy')
        uncertainty = VarianceUncertainty(np.square(flux * 0.1))
        mask = np.zeros((4, 4, 5))
        kwargs = {'wcs': wcs, 'uncertainty': uncertainty, 'mask': mask}
    else:
        flux = [2, 3, 4, 5] * u.Jy
        uncertainty = VarianceUncertainty([0.1, 0.1, 0.1, 0.1] * u.Jy**2)
        mask = [False, False, False, False]
        if mode == 'wcs1d':
            wcs = WCS(naxis=1)
            wcs.wcs.ctype = ['FREQ']
            wcs.wcs.set()
            kwargs = {'wcs': wcs, 'uncertainty': uncertainty, 'mask': mask}
        else:
            kwargs = {'spectral_axis': [1, 2, 3, 4] * u.Hz,
                      'uncertainty': uncertainty, 'mask': mask}

    spec = Spectrum1D(flux, **kwargs)

    data_collection = DataCollection()

    data_collection['spectrum'] = spec

    data = data_collection['spectrum']

    assert isinstance(data, Data)
    assert len(data.main_components) == 3
    assert data.main_components[0].label == 'flux'
    assert_allclose(data['flux'], flux.value)
    component = data.get_component('flux')
    assert component.units == 'Jy'

    # Check uncertainty parsing within glue data object
    assert data.main_components[1].label == 'uncertainty'
    assert_allclose(data['uncertainty'], uncertainty.array)
    component = data.get_component('uncertainty')
    assert component.units == 'Jy2'

    # Check round-tripping via single attribute reference
    spec_new = data.get_object(attribute='flux', statistic=None)
    assert isinstance(spec_new, Spectrum1D)
    assert_quantity_allclose(spec_new.spectral_axis, [1, 2, 3, 4] * u.Hz)
    if mode == 'wcs3d':
        assert_quantity_allclose(spec_new.flux,
                                 np.ones((5, 4, 4)) * u.Unit('Jy'))
    else:
        assert_quantity_allclose(spec_new.flux, [2, 3, 4, 5] * u.Jy)
    assert spec_new.uncertainty is None

    # Check complete round-tripping, including uncertainties
    spec_new = data.get_object(statistic=None)
    assert isinstance(spec_new, Spectrum1D)
    assert_quantity_allclose(spec_new.spectral_axis, [1, 2, 3, 4] * u.Hz)
    if mode == 'wcs3d':
        assert_quantity_allclose(spec_new.flux,
                                 np.ones((5, 4, 4)) * u.Unit('Jy'))
        assert spec_new.uncertainty is not None
        assert_quantity_allclose(spec_new.uncertainty.quantity,
                                 np.ones((5, 4, 4)) * 0.01 * u.Jy**2)
    else:
        assert_quantity_allclose(spec_new.flux, [2, 3, 4, 5] * u.Jy)
        assert spec_new.uncertainty is not None
        assert_quantity_allclose(spec_new.uncertainty.quantity,
                                 [0.1, 0.1, 0.1, 0.1] * u.Jy**2)
def test_spectrum1d_2d_data():

    # This test makes sure that 2D spectra represented as Spectrum1D round-trip
    # Note that Spectrum1D will typically have a 1D spectral WCS even if the
    # data is N-dimensional, so we need to pad the WCS before passing it to
    # glue and un-pad it when translating back.

    # We test both the case where the WCS is 2D and the case where it is 1D

    wcs = WCS(naxis=1)
    wcs.wcs.ctype = ['FREQ']
    wcs.wcs.cdelt = [10]
    wcs.wcs.set()

    flux = np.ones((3, 2)) * u.Unit('Jy')

    spec = Spectrum1D(flux, wcs=wcs, meta={'instrument': 'spamcam'})

    assert spec.data.ndim == 2
    assert spec.wcs.naxis == 1

    data_collection = DataCollection()

    data_collection['spectrum'] = spec

    data = data_collection['spectrum']

    assert isinstance(data, Data)
    assert len(data.main_components) == 1
    assert data.main_components[0].label == 'flux'
    assert_allclose(data['flux'], flux.value)

    assert data.coords.pixel_n_dim == 2
    assert data.coords.world_n_dim == 2

    assert len(data.pixel_component_ids) == 2
    assert len(data.world_component_ids) == 2

    assert data.coordinate_components[0].label == 'Pixel Axis 0 [y]'
    assert data.coordinate_components[1].label == 'Pixel Axis 1 [x]'
    assert data.coordinate_components[2].label == 'Offset'
    assert data.coordinate_components[3].label == 'Frequency'

    assert_equal(data['Offset'], [[0, 0], [1, 1], [2, 2]])
    assert_equal(data['Frequency'], [[10, 20], [10, 20], [10, 20]])

    s, o = data.coords.pixel_to_world(1, 2)

    assert isinstance(s, SpectralCoord)

    # Check round-tripping of coordinates
    with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
        px, py = data.coords.world_to_pixel(s, o)

    assert_allclose(px, 1)
    assert_allclose(py, 2)

    # Check round-tripping of translation
    spec_new = data.get_object(statistic=None)
    assert isinstance(spec_new, Spectrum1D)

    # The WCS object should be the same
    assert spec_new.wcs.pixel_n_dim == 1
    assert spec_new.wcs.world_n_dim == 1
    assert spec_new.wcs is spec.wcs

    # The metadata should still be present
    assert spec_new.meta['instrument'] == 'spamcam'
def EW(specname, name):
    lamb, flux = np.genfromtxt(specname, skip_header=1, unpack=True)
    flux = flux * u.Unit('J cm-2 s-1 AA-1')
    # flux = flux * u.Unit('erg cm-2 s-1 AA-1')
    lamb = lamb * u.AA
    spec = Spectrum1D(spectral_axis=lamb, flux=flux)

    # normalization is not so good
    cont_norm_spec = spec / fit_generic_continuum(spec)(spec.spectral_axis)

    print('-----------' + name + '------------')
    # line A
    EWa = equivalent_width(cont_norm_spec,
                           regions=SpectralRegion(8493 * u.AA, 8502 * u.AA))
    # FWHMa = fwhm(cont_norm_spec, regions=SpectralRegion(8493*u.AA, 8502*u.AA))
    print('EW A line: ' + str(EWa))
    # line B
    EWb = equivalent_width(cont_norm_spec,
                           regions=SpectralRegion(8533 * u.AA, 8551 * u.AA))
    print('EW B line: ' + str(EWb))
    # line C
    EWc = equivalent_width(cont_norm_spec,
                           regions=SpectralRegion(8655 * u.AA, 8670 * u.AA))
    print('EW C line: ' + str(EWc))

    # nonlinear to metal-poor
    V_VHB = -2.0
    EWbc = (EWb + EWc)
    EWbc = float(EWbc / (1. * u.AA))
    EWp = (EWbc)**(-1.5)

    # nonlinear to metal-poor
    # Wl = float(EWb / (1. * u.AA)) + float(EWc / (1. * u.AA)) + (0.64 * V_VHB)
    # FeH = -2.81 + 0.44*Wl

    # FeH constants to V-VHB
    a = -2.87
    b = 0.195
    c = 0.458
    d = -0.913
    e = 0.0155

    # float all
    FeH = a + b * V_VHB + c * EWbc + d * EWp + e * EWbc * V_VHB
    print('[Fe/H]: ' + str(FeH))

    # open log file
    # change resampled spectrum to noise spectrum
    LOG = open('./EWs/EWfile-' + name + '.txt', 'w')
    # LOG = open('./EWs/EWfileRE-' + name + '.txt', 'w')
    LOG.write('Log file of ' + name + ' \n \n')
    LOG.write('Input Spectrum: ' + specname + ' \n \n')
    LOG.write('EW A line: ' + str(EWa) + ' \n')
    LOG.write('EW B line: ' + str(EWb) + ' \n')
    LOG.write('EW C line: ' + str(EWc) + ' \n')
    LOG.write('[Fe/H]_CaT: ' + str(FeH) + ' \n')
    LOG.close()

    f1 = plt.figure(figsize=(16, 9))
    ax = f1.add_subplot(111)
    ax.plot(cont_norm_spec.spectral_axis, cont_norm_spec.flux)
    ax.set_xlim([8480, 8690])
    ax.set_ylabel('Flux (J cm-2 s-1 AA-1)')
    ax.set_xlabel(r'Wavelength ( $\AA$ )')
    ax.axvspan(8498 - float(EWa / (2. * u.AA)), 8498 + float(EWa / (2. * u.AA)),
               alpha=0.2, color='red')
    ax.axvspan(8542 - float(EWb / (2. * u.AA)), 8542 + float(EWb / (2. * u.AA)),
               alpha=0.2, color='red')
    ax.axvspan(8662 - float(EWc / (2. * u.AA)), 8662 + float(EWc / (2. * u.AA)),
               alpha=0.2, color='red')
    # change resampled spectrum to noise spectrum
    plt.savefig('./EWs/EW-figs/EW' + name + '.pdf')
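# Usage sketch ('star1.spec' is a hypothetical two-column text spectrum;
# writes ./EWs/EWfile-star1.txt and a PDF figure, as coded above):
EW('star1.spec', 'star1')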
xlims = [4835, 4885]
text_x_offset = 2
text_y_offset = 0.10
text_fontsize = 11
label_fontsize = 10

fig = plt.figure(figsize=(12, 6))
fig.subplots_adjust(left=0.05, bottom=0.10, right=0.95, top=0.95)
ax = fig.add_subplot(111)

for i, (accompaning_text, spectrum_filename) in enumerate(
        zip(accompaning_texts, spectrum_filenames)):
    spectrum = Spectrum1D.load(spectrum_filename)
    ax.plot(spectrum.disp, spectrum.flux + i, 'k')

    # lhs_text, rhs_text = accompaning_text.split('\t')
    # ax.text(xlims[0] + text_x_offset, text_y_offset + 1 + i, lhs_text,
    #         fontsize=text_fontsize, horizontalalignment='left')
    # ax.text(xlims[1] - text_x_offset, text_y_offset + 1 + i, rhs_text,
    #         fontsize=text_fontsize, horizontalalignment='right')
    ax.text(xlims[0] + text_x_offset, text_y_offset + i + 1, accompaning_text,
            fontsize=text_fontsize, horizontalalignment='left')

ax.set_xlabel(r'Wavelength, $\lambda$ (${\AA}$)', fontsize=label_fontsize)
ax.set_ylabel(r'Flux, $F_\lambda$', fontsize=label_fontsize)
ax.get_yticklabels()[0].set_visible(False)

ax.set_xlim(*xlims)
ax.set_ylim(0, i + 1.5)
for i in range(len(points)):
    fit = np.polyfit(wavelengths, values[i], 1)
    values[i] = values[i] / (fit[0] * wavelengths + fit[1])

fs = []

for i in vals:  # range(len(points)):
    removed_points = points[i]
    removed_flux = values[i]
    grid.fluxes = np.delete(values, i, axis=0)
    grid.index = np.delete(points, i, axis=0)

    starspectrum = Spectrum1D.from_array(dispersion=wavelengths,
                                         flux=removed_flux,
                                         dispersion_unit=u.angstrom,
                                         uncertainty=removed_flux * (1 / 100.))
    interp1 = Interpolate(starspectrum)
    norm1 = Normalize(starspectrum, 2)

    model = grid | interp1 | norm1

    setattr(model, 'teff_0', removed_points[0])
    setattr(model, 'logg_0', removed_points[1])
    setattr(model, 'mh_0', removed_points[2])
    setattr(model, 'alpha_0', removed_points[3])

    # result = mtf.fit_array(starspectrum, model, R_fixed=25000.)
    # print(result.median)
def specviz_spectrum1d_parser(app, data, data_label=None, format=None,
                              show_in_viewer=True):
    """
    Loads a data file or `~specutils.Spectrum1D` object into Specviz.

    Parameters
    ----------
    data : str, `~specutils.Spectrum1D`, or `~specutils.SpectrumList`
        Spectrum1D, SpectrumList, or path to compatible data file.
    data_label : str
        The Glue data label found in the ``DataCollection``.
    format : str
        Loader format specification used to indicate data format in
        `~specutils.Spectrum1D.read` io method.
    show_in_viewer : bool
        Whether to load the first spectrum into the spectrum viewer.
    """
    # If no data label is assigned, give it a unique identifier
    if not data_label:
        data_label = "specviz_data|" + str(
            base64.b85encode(uuid.uuid4().bytes), "utf-8")

    if isinstance(data, SpectrumCollection):
        raise TypeError("SpectrumCollection detected."
                        " Please provide a Spectrum1D or SpectrumList")
    elif isinstance(data, Spectrum1D):
        data = [data]
        data_label = [data_label]
    elif isinstance(data, SpectrumList):
        pass
    else:
        path = pathlib.Path(data)

        if path.is_file():
            try:
                data = [Spectrum1D.read(str(path), format=format)]
                data_label = [data_label]
            except IORegistryError:
                # Multi-extension files may throw a registry error
                data = SpectrumList.read(str(path), format=format)
        else:
            raise FileNotFoundError("No such file: " + str(path))

    if isinstance(data, SpectrumList):
        if not isinstance(data_label, (list, tuple)):
            temp_labels = []
            for i in range(len(data)):
                temp_labels.append(f"{data_label} {i}")
            data_label = temp_labels
        elif len(data_label) != len(data):
            raise ValueError(
                f"Length of data labels list ({len(data_label)}) is different"
                f" than length of list of data ({len(data)})")

    # If there's already data in the viewer, convert units if needed
    current_unit = None
    current_spec = app.get_data_from_viewer("spectrum-viewer")
    if current_spec != {} and current_spec is not None:
        spec_key = list(current_spec.keys())[0]
        current_unit = current_spec[spec_key].spectral_axis.unit
    with app.data_collection.delay_link_manager_update():
        for i in range(len(data)):
            spec = data[i]
            if current_unit is not None and spec.spectral_axis.unit != current_unit:
                spec = Spectrum1D(
                    flux=spec.flux,
                    spectral_axis=spec.spectral_axis.to(current_unit))

            app.add_data(spec, data_label[i])

            # Only auto-show the first spectrum in a list
            if i == 0 and show_in_viewer:
                app.add_data_to_viewer("spectrum-viewer", data_label[i])
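# Usage sketch (assumes a running jdaviz Specviz session; the filename is a
# hypothetical placeholder):
from jdaviz import Specviz

specviz = Specviz()
specviz_spectrum1d_parser(specviz.app, 'my_spectrum.fits',
                          data_label='my spectrum')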
    if abs(apogee_res[i]) > 0.05:
        apogee_mask += [apogee_res[i]]
        apogee_mask_w += [resampled_apogeew[i]]
        apogee_ind += [i]

apogee_ind_s = sorted(apogee_ind, reverse=True)

print('trimming')

starspectrum_fmasked = np.delete(starspectrum35.flux, apogee_ind_s)
starspectrum_wmasked = np.delete(starspectrum35.wavelength, apogee_ind_s)
starspectrum_umasked = np.delete(starspectrum35.uncertainty, apogee_ind_s)

print(len(starspectrum_fmasked), len(apogee_ind))

spectrum_masked = Spectrum1D.from_array(starspectrum_wmasked,
                                        starspectrum_fmasked,
                                        dispersion_unit=starspectrum35.wavelength.unit,
                                        uncertainty=starspectrum_umasked)
# flux_unit=starspectrum35.flux.unit, wavelength_unit=starspectrum35.wavelength.unit)

print(starspectrum_fmasked)

interp1 = Interpolate(spectrum_masked)
print('interpolated 2')
convolve1 = InstrumentConvolveGrating.from_grid(g, R=24000)
print('convolved 2')
rot1 = RotationalBroadening.from_grid(g, vrot=np.array([10.0]))
print('rot broadened 2')
norm1 = Normalize(spectrum_masked, 2)
print('normalized 2')

# concatenate the spectral grid (which will have the stellar parameters) with
# other model components that you want to fit
model = g | rot1 | DopplerShift(vrad=radv) | convolve1 | interp1 | norm1
print('model concatenated 2')
def read_fits_spectrum1d(filename, dispersion_unit=None, flux_unit=None):
    """
    1D reader for spectra in FITS format. This function determines what format
    the FITS file is in, and attempts to read the Spectrum. This reader just
    uses the primary extension in a FITS file and reads the data and header
    from that. It will return a Spectrum1D object if the data is linear, or a
    list of Spectrum1D objects if the data format is multi-spec.

    Parameters
    ----------
    filename : str
        FITS filename
    dispersion_unit : ~astropy.units.Unit, optional
        unit of the dispersion axis - will overwrite possible information
        given in the FITS keywords
        default = None
    flux_unit : ~astropy.units.Unit, optional
        unit of the flux

    Raises
    ------
    NotImplementedError
        If the format can't be read currently
    """
    if dispersion_unit:
        dispersion_unit = u.Unit(dispersion_unit)

    data = fits.getdata(filename)
    header = fits.getheader(filename)

    wcs_info = FITSWCSSpectrum(header)

    if wcs_info.naxis == 1:
        wcs = read_fits_wcs_linear1d(wcs_info, dispersion_unit=dispersion_unit)
        return Spectrum1D(data, wcs=wcs, unit=flux_unit)
    elif wcs_info.naxis == 2 and \
            wcs_info.affine_transform_dict['ctype'] == ["MULTISPE", "MULTISPE"]:
        multi_wcs = multispec_wcs_reader(wcs_info,
                                         dispersion_unit=dispersion_unit)
        multispec = []
        for spectrum_data, spectrum_wcs in zip(data, multi_wcs.values()):
            multispec.append(
                Spectrum1D(spectrum_data, wcs=spectrum_wcs, unit=flux_unit))
        return multispec
    elif wcs_info.naxis == 3 and \
            wcs_info.affine_transform_dict['ctype'] == ["LINEAR", "LINEAR", "LINEAR"]:
        wcs = read_fits_wcs_linear1d(wcs_info, dispersion_unit=dispersion_unit)
        equispec = []
        for i in range(data.shape[0]):
            equispec.append(
                Spectrum1D(data[i][0], wcs=wcs, unit=flux_unit))
        return equispec
    elif wcs_info.naxis == 3 and \
            wcs_info.affine_transform_dict['ctype'] == ["MULTISPE", "MULTISPE", "LINEAR"]:
        multi_wcs = multispec_wcs_reader(wcs_info,
                                         dispersion_unit=dispersion_unit)
        multispec = []
        for j in range(data.shape[1]):
            equispec = []
            for i in range(data.shape[0]):
                equispec.append(
                    Spectrum1D(data[i][j], wcs=list(multi_wcs.values())[j],
                               unit=flux_unit))
            multispec.append(equispec)
        return multispec
    else:
        raise NotImplementedError("Either the FITS file does not represent a 1D"
                                  " spectrum or the format isn't supported yet")
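# Usage sketch ('spec.fits' is a hypothetical file; multispec inputs return a
# list of Spectrum1D objects, linear inputs a single one):
result = read_fits_spectrum1d('spec.fits', dispersion_unit='angstrom')
if isinstance(result, list):
    print(len(result), 'spectra read')
else:
    print('single spectrum with', len(result.flux), 'points')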
def add_single_spectra_to_map(
    spectra_map,
    *,
    header,
    data,
    spec_info=None,
    wcs_info=None,
    units_info=None,
    purpose_prefix=None,
    all_standard_units,
    all_keywords,
    valid_wcs,
    index=None,
):
    spec_wcs_info = {}
    spec_units_info = {}
    if wcs_info is not None:
        spec_wcs_info.update(wcs_info)
    if units_info is not None:
        spec_units_info.update(units_info)

    if spec_info is not None:
        spec_wcs_info.update(spec_info.get("wcs", {}))
        spec_units_info.update(spec_info.get("units", {}))
        purpose = spec_info.get("purpose")
    else:
        purpose = None

    purpose = get_purpose(
        header,
        purpose=purpose,
        purpose_prefix=purpose_prefix,
        all_keywords=all_keywords,
        index=index,
    )

    if purpose == Purpose.SKIP:
        return None

    if valid_wcs or not spec_wcs_info:
        wcs = WCS(header)
    else:
        wcs = compute_wcs_from_keys_and_values(header, **spec_wcs_info)

    if all_standard_units:
        spec_units_info = {"flux_unit_keyword": "BUNIT"}
    flux_unit = get_flux_units_from_keys_and_values(header, **spec_units_info)
    flux = data * flux_unit

    meta = {"header": header, "purpose": PURPOSE_SPECTRA_MAP[purpose]}

    if purpose in CREATE_SPECTRA:
        spectrum = Spectrum1D(wcs=wcs, flux=flux, meta=meta)
        spectra_map[PURPOSE_SPECTRA_MAP[purpose]].append(spectrum)
    elif purpose in ERROR_PURPOSES:
        try:
            spectrum = spectra_map[PURPOSE_SPECTRA_MAP[purpose]][-1]
        except IndexError:
            raise ValueError(f"No spectra to associate with {purpose}")
        aligned_flux = pixel_to_pixel(wcs, spectrum.wcs, flux)
        spectrum.uncertainty = UNCERTAINTY_MAP[purpose](aligned_flux)
        spectrum.meta["uncertainty_header"] = header

    # We never actually want to return something, this just flags it to pylint
    # that we know we're breaking out of the function when skip is selected
    return None
def to_object(self, data_or_subset, attribute=None, statistic='mean'):
    """
    Convert a glue Data object to a Spectrum1D object.

    Parameters
    ----------
    data_or_subset : `glue.core.data.Data` or `glue.core.subset.Subset`
        The data to convert to a Spectrum1D object
    attribute : `glue.core.component_id.ComponentID`
        The attribute to use for the Spectrum1D data
    statistic : {'minimum', 'maximum', 'mean', 'median', 'sum', 'percentile'}
        The statistic to use to collapse the dataset
    """

    if isinstance(data_or_subset, Subset):
        data = data_or_subset.data
        subset_state = data_or_subset.subset_state
    else:
        data = data_or_subset
        subset_state = None

    if isinstance(data.coords, WCSCoordinates):

        # Find spectral axis
        spec_axis = data.coords.wcs.naxis - 1 - data.coords.wcs.wcs.spec

        # Find non-spectral axes
        axes = tuple(i for i in range(data.ndim) if i != spec_axis)

        kwargs = {'wcs': data.coords.wcs.sub([WCSSUB_SPECTRAL])}

    elif isinstance(data.coords, SpectralCoordinates):

        kwargs = {'spectral_axis': data.coords.spectral_axis}

    else:

        raise TypeError('data.coords should be an instance of '
                        'WCSCoordinates or SpectralCoordinates')

    if isinstance(attribute, str):
        attribute = data.id[attribute]
    elif len(data.main_components) == 0:
        raise ValueError('Data object has no attributes.')
    elif attribute is None:
        if len(data.main_components) == 1:
            attribute = data.main_components[0]
        else:
            raise ValueError("Data object has more than one attribute, so "
                             "you will need to specify which one to use as "
                             "the flux for the spectrum using the "
                             "attribute= keyword argument.")

    component = data.get_component(attribute)

    # Collapse values to profile
    if data.ndim > 1:
        # Get units and attach to value
        values = data.compute_statistic(statistic, attribute, axis=axes,
                                        subset_state=subset_state)
        mask = None
    else:
        values = data.get_data(attribute)
        if subset_state is None:
            mask = None
        else:
            mask = data.get_mask(subset_state=subset_state)
            values = values.copy()
            values[~mask] = np.nan

    values = values * u.Unit(component.units)

    return Spectrum1D(values, mask=mask, **kwargs)
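# Usage sketch (mirrors the glue round-trip exercised in the tests above;
# assumes glue-astronomy's Spectrum1D translator is registered):
from glue.core import DataCollection

dc = DataCollection()
dc['spectrum'] = Spectrum1D(spectral_axis=[1, 2, 3, 4] * u.Hz,
                            flux=[2, 3, 4, 5] * u.Jy)
spec_again = dc['spectrum'].get_object(statistic=None)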
def test_pre_full(data_path, template_path, z_lit, z_bound, spec_type):
    """Returns the full spectrum best-fit redshift and min chi2.

    Tests the redshift array on the entire spectrum to ensure bounds are
    appropriate.

    Parameters
    ----------
    data_path : str
        Path to data spectrum file
    template_path : str
        Path to template spectrum file
    z_lit : float
        Literature redshift for target object
    z_bound : float
        Amount to add and subtract from z_lit for redshifts to test
    spec_type : str
        Indicates if looking at coadded spectrum or single 1D spectrum

    Returns
    -------
    tm_result[0] : float
        Best-fit redshift
    tm_result[1] : float
        Minimum chi-squared
    """
    # Import data
    (data_wave_full, data_cut_wave, data_flux_full, data_cut_flux,
     data_noise_full, data_cut_noise) = prep_data(data_path, spec_type)

    # Import smoothed template
    template_wave, smoothed_template_flux, smoothed_template_noise = \
        prep_template(template_path)

    # Continuum normalize over whole wavelength range
    norm_data_wave, norm_data_flux, norm_data_noise = continuum_normalize(
        np.min(data_cut_wave), np.max(data_cut_wave), data_cut_flux,
        data_cut_wave, data_cut_noise)
    norm_template_wave, norm_template_flux, norm_template_noise = \
        continuum_normalize(np.min(template_wave), np.max(template_wave),
                            smoothed_template_flux, template_wave,
                            smoothed_template_noise)

    # Put spectra into Spectrum1D objects
    data_spec = Spectrum1D(
        spectral_axis=norm_data_wave * u.Angstrom,
        flux=norm_data_flux * (u.erg / u.s / u.cm**2 / u.Angstrom),
        uncertainty=StdDevUncertainty(norm_data_noise))
    template_spec = Spectrum1D(
        spectral_axis=norm_template_wave * u.Angstrom,
        flux=norm_template_flux * (u.Lsun / u.micron))

    # Plot before
    plt.figure(figsize=(12, 4))
    plt.plot(data_spec.spectral_axis, data_spec.flux, label='observed')
    plt.plot(template_spec.spectral_axis, template_spec.flux, label='template')
    plt.legend()
    plt.show()

    # Fit redshifts
    redshifts = np.linspace(z_lit - z_bound, z_lit + z_bound, 1000)
    tm_result = template_redshift(observed_spectrum=data_spec,
                                  template_spectrum=template_spec,
                                  redshift=redshifts)

    # Plot after
    plt.figure(figsize=(12, 4))
    plt.plot(data_spec.spectral_axis, data_spec.flux, label='observed')
    plt.plot(tm_result[2].spectral_axis, tm_result[2].flux,
             label='redshifted template')
    plt.legend()
    plt.show()

    plt.figure(figsize=(12, 4))
    plt.plot(template_spec.spectral_axis, template_spec.flux, label='template')
    plt.plot(tm_result[2].spectral_axis, tm_result[2].flux,
             label='redshifted template')
    plt.legend()
    plt.show()

    return tm_result[0], tm_result[1]
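# Usage sketch (paths and redshift values are hypothetical placeholders):
z_best, chi2_min = test_pre_full('target_coadd.fits', 'template.fits',
                                 z_lit=0.023, z_bound=0.005,
                                 spec_type='coadd')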