def load_and_convert_spectra(globname):
    """Load every spectrum matching *globname*, subtract a per-spectrum
    baseline level, and return them merged into one Spectra object in K.

    Relies on module-level ``glob``, ``paths``, ``pyspeckit``, ``np``,
    ``radio_beam`` and ``u`` (astropy.units).
    """
    filenames = glob.glob(paths.spath(globname))
    speclist = [pyspeckit.Spectrum(filename) for filename in filenames]

    # Crude per-spectrum "baseline": remove the 25th-percentile level.
    for spectrum in speclist:
        spectrum.data -= np.nanpercentile(spectrum.data, 25)

    merged = pyspeckit.Spectra(speclist)
    beam = radio_beam.Beam.from_fits_header(merged.header)

    # Convert Jy/beam -> K using the beam from the merged header.
    merged.data *= beam.jtok(merged.xarr)
    merged.unit = 'K'
    merged.xarr.refX = 220 * u.GHz  # hackalack
    return merged
def class_to_spectra(filename, datatuple=None, **kwargs):
    """
    Load each individual spectrum within a CLASS file into a list of
    Spectrum objects and return them merged as a single Spectra object.

    If *datatuple* is given it must be the ``(spectra, header, indexes)``
    triple that ``read_class`` would have produced, and the file is not
    re-read.
    """
    if datatuple is not None:
        spectra, header, indexes = datatuple
    else:
        spectra, header, indexes = read_class(filename, **kwargs)

    spectrum_objects = []
    for data, hdr, index in zip(spectra, header, indexes):
        # Fold the index entries into the header before building the axis.
        hdr.update(index)
        spectrum_objects.append(
            pyspeckit.Spectrum(xarr=make_axis(hdr), header=hdr, data=data))

    return pyspeckit.Spectra(spectrum_objects)
def loadplot(fn, axis=None, norm=False, midorder=33, scale=None, offset=0.0):
    """Load an IRAF multispec file and plot order *midorder* with its two
    neighbors overlaid (blue = next order, red = previous order).

    Returns the orders merged into a Spectra object whose plotter axis is
    that of the central order.  If *norm* is set, each plotted order is
    divided by its own peak first.
    """
    orders = pyspeckit.wrappers.load_IRAF_multispec(fn)

    if scale is not None:
        for order in orders:
            order.data *= scale

    # Only clear the axes when the caller did not supply one.
    clear = axis is None

    if norm:
        central = orders[midorder] / orders[midorder].data.max()
        central.plotter(axis=axis, clear=clear, offset=offset)
        orders[midorder].plotter.axis = central.plotter.axis
        target_axis = central.plotter.axis
        (orders[midorder + 1] / orders[midorder + 1].data.max()).plotter(
            axis=target_axis, clear=False, color='blue', offset=offset)
        (orders[midorder - 1] / orders[midorder - 1].data.max()).plotter(
            axis=target_axis, clear=False, color='red', offset=offset)
    else:
        orders[midorder].plotter(axis=axis, clear=clear, offset=offset)
        target_axis = orders[midorder].plotter.axis
        orders[midorder + 1].plotter(axis=target_axis, clear=False,
                                     color='blue', offset=offset)
        orders[midorder - 1].plotter(axis=target_axis, clear=False,
                                     color='red', offset=offset)

    merged = pyspeckit.Spectra(orders, xunits='angstroms')
    merged.plotter.axis = orders[midorder].plotter.axis
    return merged
    # NOTE(review): this `return` closes a function whose `def` line is
    # above this excerpt.
    return spectra


if __name__ == "__main__":
    import glob
    import pyspeckit
    from astropy import wcs
    from astropy import coordinates
    import pyregion
    import paths
    import radio_beam

    # NOTE(review): `np`, `ch3oh_fitter`, and `slaim` are used below but
    # not imported/defined in this visible block — presumably defined at
    # module level elsewhere in this file; confirm.
    target = 'e2e'
    spectra = pyspeckit.Spectra(
        glob.glob(paths.spath("*{0}_spw[0-9]_mean.fits".format(target))))
    beam = radio_beam.Beam.from_fits_header(spectra.header)

    # "baseline": remove the 10th-percentile level across all spectra.
    spectra.data -= np.nanpercentile(spectra.data, 10)
    # Convert Jy/beam -> K.
    spectra.data *= beam.jtok(spectra.xarr)
    spectra.plotter()

    # Fit a 4-parameter CH3OH LTE model (vcen, width, temperature, column).
    spectra.specfit.Registry.add_fitter('ch3oh', ch3oh_fitter(), 4)
    spectra.specfit(fittype='ch3oh', guesses=[55, 4, 200, 5e15],
                    limitedmin=[True] * 4, limitedmax=[True] * 4,
                    limits=[(50, 70), (1, 4), (20, 1000), (1e13, 1e18)])

    # Select the vibrational-ground-state methanol rows of the line table.
    ok = slaim['Species'] == 'CH3OHvt=0'
# Script fragment: per-core spectral-line overlay plotting.  Uses
# `pyregion`, `paths`, `pyspeckit`, `np`, `pl`, and `u` (astropy.units)
# from elsewhere in this file.
from line_parameters import frequencies, freq_name_mapping, yoffset

cores = pyregion.open(paths.rpath('cores.reg'))

# Accepted LSR-velocity window for identifying lines in this source.
minvelo = 45 * u.km / u.s
maxvelo = 90 * u.km / u.s

data = {}

for corereg in cores:
    name = corereg.attr[1]['text']
    data[name] = {}

    fn = "{name}_spw{ii}_mean.fits"
    spectra = pyspeckit.Spectra(
        [paths.spath(fn.format(name=name, ii=ii)) for ii in range(4)])

    # Blank known-bad frequency regions.
    # NOTE(review): the second comparison in each mask uses `>` where a
    # window mask would use `<` (e.g. the 233.84-234.036 GHz window); as
    # written the mask selects everything *above* the upper edge.  The
    # corrected form appears in spectral_overlays (bad_1/bad_2) elsewhere
    # in this file — confirm and fix.
    spectra.data[(233.84 * u.GHz < spectra.xarr) &
                 (spectra.xarr > 234.036 * u.GHz)] = np.nan
    spectra.data[(230.00 * u.GHz < spectra.xarr) &
                 (spectra.xarr > 230.523 * u.GHz)] = np.nan

    # Vertical separation between overlaid spectra: peak minus a
    # 20th-percentile "floor".
    scaling = np.nanmax(spectra.data) - np.nanpercentile(spectra.data, 20)
    assert not np.isnan(scaling)
    print("Scaling for {fn} = {scaling}".format(fn=fn.format(name=name, ii=0),
                                                scaling=scaling))

    fig = pl.figure(1)
    fig.clf()

    for spwnum, sp in enumerate(spectra):
        if spwnum == 3:
            # flag out the middle section where apparently many antennae
            # have been flagged
# Script fragment: the dict opened above this excerpt (per-source y-axis
# floor values, presumably `snu_min`) closes here.
    'ALMAmm14': 0.0,
    'north': 0.3,
    'e2nw': 0.1,
}

# Assumed LSR velocity per source; used downstream as the line-ID offset.
velo = {
    'e8mm': 61 * u.km / u.s,
    'e2e': 56 * u.km / u.s,
    'e2nw': 55.626 * u.km / u.s,
    'ALMAmm14': 62 * u.km / u.s,
    'north': 55 * u.km / u.s,
}

pl.figure(1).clf()

# `spectra_to_species`, `Splatalogue`, `snu_min`, `paths`, `glob`, and
# `pyspeckit` come from elsewhere in this file.
for target, species_list in spectra_to_species.items():
    spectra = pyspeckit.Spectra(
        glob.glob(paths.spath("*{0}*fits".format(target))))

    for species_tuple in species_list:
        species_name, chemid, tmax = species_tuple

        for ii in range(4):
            # Query Splatalogue (SLAIM list) for lines of this species
            # within the frequency range of each spectral window.
            cat = Splatalogue.query_lines(spectra[ii].xarr.min(),
                                          spectra[ii].xarr.max(),
                                          chemical_name=chemid,
                                          energy_max=tmax,
                                          energy_type='eu_k',
                                          noHFS=True,
                                          line_lists=['SLAIM'])
            spectra[ii].plotter(figure=pl.figure(1))
            # Clamp the y-axis floor to the per-source minimum.
            spectra[ii].plotter.axis.set_ylim(
                snu_min[target],
                spectra[ii].plotter.axis.get_ylim()[1])
def fitnh3tkin(input_dict, dobaseline=True, baselinekwargs={}, crop=False,
               guessline='twotwo', tex=15, tkin=20, column=15.0, fortho=0.66,
               tau=None, thin=False, quiet=False, doplot=True, fignum=1,
               guessfignum=2, smooth=False, scale_keyword=None, rebase=False,
               npeaks=1, guesses=None, **kwargs):
    """
    Given a dictionary of filenames and lines, fit them together
    e.g. {'oneone':'G000.000+00.000_nh3_11.fits'}

    Fits a Gaussian to *guessline* to seed the amplitude/velocity/width,
    then fits an ammonia (or ammonia_tau, when *tau* is given) model to
    all lines simultaneously.  Returns ``(spdict, spectra)``.

    NOTE(review): Python 2 only — uses the print statement, dict.iteritems,
    and xrange.  Also note the mutable default ``baselinekwargs={}``.
    """
    # Accept either filenames (str) or ready-made Spectrum objects.
    spdict = dict([ (linename,pyspeckit.Spectrum(value, scale_keyword=scale_keyword))
                    if type(value) is str else (linename,value)
                    for linename, value in input_dict.iteritems() ])
    splist = spdict.values()

    for sp in splist: # required for plotting, cropping
        sp.xarr.convert_to_unit('km/s')
    if crop and len(crop) == 2:
        for sp in splist:
            sp.crop(*crop)
    if dobaseline:
        for sp in splist:
            sp.baseline(**baselinekwargs)
    if smooth and type(smooth) is int:
        for sp in splist:
            sp.smooth(smooth)

    # Single-Gaussian fit to the guess line to seed the ammonia fit.
    spdict[guessline].specfit(fittype='gaussian', negamp=False,
                              vheight=False, guesses='moments')
    ampguess,vguess,widthguess = spdict[guessline].specfit.modelpars
    if widthguess < 0:
        raise ValueError("Width guess was < 0. This is impossible.")
    print "RMS guess (errspec): ",spdict[guessline].specfit.errspec.mean()
    print "RMS guess (residuals): ",spdict[guessline].specfit.residuals.std()
    errguess = spdict[guessline].specfit.residuals.std()

    if rebase:
        # redo baseline subtraction excluding the centroid +/- about 20 km/s
        # (19.8 km/s plus 2.35*sigma, i.e. roughly the FWHM, on each side)
        vlow = spdict[guessline].specfit.modelpars[1]-(19.8+spdict[guessline].specfit.modelpars[2]*2.35)
        vhigh = spdict[guessline].specfit.modelpars[1]+(19.8+spdict[guessline].specfit.modelpars[2]*2.35)
        for sp in splist:
            sp.baseline(exclude=[vlow,vhigh], **baselinekwargs)

    # Propagate the residual-based RMS as the per-channel error everywhere.
    for sp in splist:
        sp.error[:] = errguess

    spdict[guessline].plotter(figure=guessfignum)
    spdict[guessline].specfit.plot_fit()

    spectra = pyspeckit.Spectra(splist)
    spectra.specfit.npeaks = npeaks

    # Guess order per peak: (tkin, tex, tau-or-column, width, vcen, fortho);
    # small random jitter separates the initial guesses of multiple peaks.
    if tau is not None:
        if guesses is None:
            guesses = [a for i in xrange(npeaks)
                       for a in (tkin+random.random()*i, tex,
                                 tau+random.random()*i,
                                 widthguess+random.random()*i,
                                 vguess+random.random()*i, fortho)]
        spectra.specfit(fittype='ammonia_tau',quiet=quiet,multifit=None,
                        guesses=guesses, thin=thin, **kwargs)
    else:
        if guesses is None:
            guesses = [a for i in xrange(npeaks)
                       for a in (tkin+random.random()*i, tex,
                                 column+random.random()*i,
                                 widthguess+random.random()*i,
                                 vguess+random.random()*i, fortho)]
        spectra.specfit(fittype='ammonia',quiet=quiet,multifit=None,
                        guesses=guesses, thin=thin, **kwargs)

    if doplot:
        plot_nh3(spdict,spectra,fignum=fignum)

    return spdict,spectra
# Script fragment: the dict opened above this excerpt (per-source LSR
# velocities, presumably `velo`) closes here.
    'ALMAmm14': 62 * u.km / u.s,
    'ALMAmm41': 55 * u.km / u.s,
    'e2nw': 55.626 * u.km / u.s,
    'e2se': 54 * u.km / u.s,
    'north': 58 * u.km / u.s,
}

pl.figure(1).clf()

# `snu_min`, `line_to_image_list`, `plot_kwargs`, `annotate_kwargs`,
# `paths`, `glob`, and `pyspeckit` come from elsewhere in this file.
for target in snu_min:
    files = glob.glob(paths.spath("*{0}*fits".format(target)))
    if len(files) == 0:
        print("No matches for {0}".format(target))
        continue

    spectra = pyspeckit.Spectra(files)

    for ii in range(4):
        spectra[ii].plotter(figure=pl.figure(1))
        # Build the line-ID lists from the (name, "NNN GHz", ...) tuples.
        species_names = [x[0] for x in line_to_image_list]
        frequencies = u.Quantity(
            [float(x[1].strip("GHz")) for x in line_to_image_list],
            unit=u.GHz)
        spectra[ii].plotter.axis.set_ylim(
            snu_min[target],
            spectra[ii].plotter.axis.get_ylim()[1])
        # Annotate each line at the source's assumed velocity.
        spectra[ii].plotter.line_ids(species_names,
                                     u.Quantity(frequencies),
                                     velocity_offset=velo[target],
                                     plot_kwargs=plot_kwargs,
                                     annotate_kwargs=annotate_kwargs)
def mergespec(fn):
    """Load an IRAF multispec file, crop every order to pixels 250-1150,
    and return the cropped orders merged into one Spectra object."""
    orders = pyspeckit.wrappers.load_IRAF_multispec(fn)
    for order in orders:
        order.crop(250, 1150, units='pixels')
    # `order` is the last cropped order; all orders share the same units.
    return pyspeckit.Spectra(orders, xunits=order.xarr.units)
def spectral_overlays(fn, name, freq_name_mapping, frequencies, yoffset,
                      minvelo, maxvelo, suffix="", background_fn=None,
                      return_spectra=False, plot_fullspec=True):
    """
    Make overlay plots of each line in *frequencies* (velocity frame) and,
    optionally, full per-spectral-window spectra with line IDs.

    Loads four spectral windows from *fn* (a template with ``{name}`` and
    ``{ii}`` fields), blanks known-bad frequency ranges, records per-window
    peak statistics in a dict, and saves figures via ``paths.fpath``.
    Returns the stats dict, plus the Spectra object if *return_spectra*.

    Relies on module-level `pyspeckit`, `np`, `pl`, `u`, `radio_beam`,
    `paths`, `log`, and `quick_analyze`.
    """
    object_data_dict = {}

    spectra = pyspeckit.Spectra(
        [fn.format(name=name, ii=ii) for ii in range(4)])

    # Known-bad frequency windows (flagged data), plus everything below
    # 218.11 GHz.
    bad_1 = (233.74 * u.GHz < spectra.xarr.to(u.GHz)) & (spectra.xarr.to(u.GHz) < 234.036 * u.GHz)
    bad_2 = (230.00 * u.GHz < spectra.xarr.to(u.GHz)) & (spectra.xarr.to(u.GHz) < 230.523 * u.GHz)
    bad_3 = (spectra.xarr.to(u.GHz) < 218.11 * u.GHz)
    spectra.data[bad_1 | bad_2 | bad_3] = np.nan

    beams = [radio_beam.Beam.from_fits_header(sp.header) for sp in spectra]

    # scaling: determine how much to separate spectra by vertically
    scaling = np.nanmax(spectra.data) - np.nanpercentile(spectra.data, 20)
    print("Scaling for {fn} = {scaling}".format(fn=fn.format(name=name, ii=0),
                                                scaling=scaling))
    if np.isnan(scaling):
        raise ValueError(
            "All-nan slice encountered. There is apparently no data in this file?"
        )

    # Optional background spectra, blanked with the same masks.
    if background_fn is not None:
        bgspectra = pyspeckit.Spectra(
            [background_fn.format(name=name, ii=ii) for ii in range(4)])
        bgspectra.data[bad_1 | bad_2 | bad_3] = np.nan
        bg = True
    else:
        bg = False

    fig = pl.figure(0)
    fig.clf()

    for spwnum, sp in enumerate(spectra):
        # Re-blank per-window (same masks as above, on this window's axis).
        bad_1 = (233.74 * u.GHz < sp.xarr.to(u.GHz)) & (sp.xarr.to(u.GHz) < 234.036 * u.GHz)
        bad_2 = (230.00 * u.GHz < sp.xarr.to(u.GHz)) & (sp.xarr.to(u.GHz) < 230.523 * u.GHz)
        bad_3 = (sp.xarr.to(u.GHz) < 218.11 * u.GHz)
        sp.data[bad_1 | bad_2 | bad_3] = np.nan

        # temporary hack for bad data
        if bg and all(bgspectra[spwnum].data == 0):
            bg = False

        # Per-window continuum/peak/velocity/species statistics.
        (cont, peak, peakfreq, peakfreq_shifted, bestmatch, peakvelo,
         velo_OK, peakspecies, argmax) = quick_analyze(sp, freq_name_mapping,
                                                       minvelo, maxvelo)
        object_data_dict['continuum20pct{0}'.format(spwnum)] = cont
        object_data_dict['peak{0}freq'.format(spwnum)] = peakfreq
        # Velocity (and peak below) only kept when it falls in the
        # accepted [minvelo, maxvelo] window.
        object_data_dict['peak{0}velo'.format(
            spwnum)] = peakvelo if velo_OK else np.nan * u.km / u.s
        object_data_dict['peak{0}species'.format(spwnum)] = peakspecies
        object_data_dict['peak{0}'.format(spwnum)] = (peak if velo_OK
                                                      else np.nan) * u.Jy / u.beam
        object_data_dict['beam{0}area'.format(spwnum)] = beams[spwnum].sr.value

        if bg:
            # Same statistics for the background spectrum.
            (bgcont, bgpeak, bgpeakfreq, bgpeakfreq_shifted, bgbestmatch,
             bgpeakvelo, bgvelo_OK, bgpeakspecies,
             bgargmax) = quick_analyze(bgspectra[spwnum], freq_name_mapping,
                                       minvelo, maxvelo)
            object_data_dict['bgpeak{0}freq'.format(spwnum)] = bgpeakfreq
            object_data_dict['bgpeak{0}velo'.format(
                spwnum)] = bgpeakvelo if bgvelo_OK else np.nan * u.km / u.s
            object_data_dict['bgpeak{0}species'.format(spwnum)] = bgpeakspecies
            object_data_dict['bgpeak{0}'.format(
                spwnum)] = (bgpeak if bgvelo_OK else np.nan) * u.Jy / u.beam

        log.debug("spw{0} peak{0}={1} line={2}".format(spwnum, peak,
                                                       peakspecies))

        for linename, freq in frequencies.items():
            if sp.xarr.in_range(freq):
                print("Plotting {0} for {1} from spw {2}. Peakspecies={3}".
                      format(linename, name, spwnum, peakspecies))
                # Remember the frequency axis so we can verify the
                # round-trip unit conversion below.
                temp = np.array(sp.xarr)
                sp.xarr.convert_to_unit(u.km / u.s, refX=freq)
                if np.isnan(yoffset[linename]) or np.isnan(scaling):
                    raise ValueError("NAN scaling is stupid.")
                # Only plot if there is finite data in the 0-120 km/s window.
                if sp.slice(0 * u.km / u.s,
                            120 * u.km / u.s).data.max() > sp.slice(
                                0 * u.km / u.s, 120 * u.km / u.s).data.min():
                    sp.plotter(figure=fig, clear=False,
                               offset=yoffset[linename] * scaling,
                               xmin=0, xmax=120)
                    sp.plotter.axis.text(122, yoffset[linename] * scaling,
                                         "{0}: {1}".format(spwnum, linename))
                sp.xarr.convert_to_unit(u.GHz, refX=freq)
                # Round-trip sanity check: the axis must be unchanged.
                np.testing.assert_allclose(temp, np.array(sp.xarr))

                if bg:
                    bgs = bgspectra[spwnum]
                    bgs.xarr.convert_to_unit(u.km / u.s, refX=freq)
                    if bgs.slice(0 * u.km / u.s,
                                 120 * u.km / u.s).data.max() > bgs.slice(
                                     0 * u.km / u.s,
                                     120 * u.km / u.s).data.min():
                        bgs.plotter(
                            axis=sp.plotter.axis,
                            clear=False,
                            color='b',
                            offset=yoffset[linename] * scaling,
                            zorder=-100,
                            xmin=0,
                            xmax=120,
                        )
                    bgs.xarr.convert_to_unit(u.GHz, refX=freq)

                if sp.plotter.axis is not None:
                    sp.plotter.axis.set_ylim(
                        -0.1, max(yoffset.values()) * scaling + scaling)

                if linename == peakspecies:
                    # Mark the peak channel of the identified species.
                    print('peakvelo, offset, maxdata: ', peakvelo,
                          yoffset[linename] * scaling, sp.data[argmax])
                    sp.plotter.axis.plot(
                        peakvelo,
                        yoffset[linename] * scaling + sp.data[argmax], 'rx')

    # Median of the per-window velocities that fell in the accepted range.
    okvelos = [
        object_data_dict['peak{0}velo'.format(ii)] for ii in range(4)
        if (minvelo < object_data_dict['peak{0}velo'.format(ii)]) and (
            object_data_dict['peak{0}velo'.format(ii)] < maxvelo)
    ]
    if okvelos:
        velo = np.median(u.Quantity(okvelos))
        object_data_dict['mean_velo'] = velo
        # `sp` is the last spectral window's spectrum here.
        sp.plotter.axis.vlines(velo.to(u.km / u.s).value,
                               sp.plotter.axis.get_ylim()[0],
                               sp.plotter.axis.get_ylim()[1],
                               linestyle='--', linewidth=2, color='r',
                               zorder=-50, alpha=0.5)
    else:
        object_data_dict['mean_velo'] = np.nan * u.km / u.s
        velo = 60 * u.km / u.s

    fig.savefig(paths.fpath(
        "spectral_overlays/{name}_overlaid_spectra{suffix}.png".format(
            name=name, suffix=suffix)),
                bbox_inches='tight', bbox_extra_artists=[])

    # plot type #2: full spectrum, with lines ID'd
    if plot_fullspec:
        linenames = list(frequencies.keys())
        freqs_ghz = list(frequencies.values())
        plot_kwargs = {'color': 'r', 'linestyle': '--'}
        annotate_kwargs = {'color': 'r'}

        for spwnum, sp in enumerate(spectra):
            fig = pl.figure(spwnum + 1)
            fig.clf()
            sp.xarr.convert_to_unit(u.GHz)
            sp.plotter(figure=fig, axis=fig.gca())
            # labels don't update. still don't know why
            sp.plotter(figure=fig, axis=fig.gca())
            assert sp.plotter._xunit == u.GHz
            assert sp.plotter.xlabel == 'Frequency (GHz)'
            sp.plotter.line_ids(linenames, u.Quantity(freqs_ghz),
                                velocity_offset=velo,
                                plot_kwargs=plot_kwargs,
                                annotate_kwargs=annotate_kwargs)
            assert sp.plotter.xlabel == 'Frequency (GHz)'

            if bg:
                bgs = bgspectra[spwnum]
                bgs.xarr.convert_to_unit(u.GHz)
                bgs.plotter(
                    axis=sp.plotter.axis,
                    clear=False,
                    color='b',
                    zorder=-100,
                )
                assert bgs.plotter.xlabel == 'Frequency (GHz)'
                assert sp.plotter.xlabel == 'Frequency (GHz)'

            fig.savefig(paths.fpath(
                "spectral_overlays/{name}_spw{spw}_fullspec{suffix}.png".
                format(name=name, spw=spwnum, suffix=suffix)),
                        bbox_inches='tight', bbox_extra_artists=[])

    if return_spectra:
        return object_data_dict, spectra
    else:
        return object_data_dict
def spec_curve_fit(bin_num, map_name=map_column_dens):
    """
    Average NH3 (1,1)/(2,2)/(3,3) spectra over the pixels of bin *bin_num*,
    fit a cold-ammonia model to all three lines jointly, save a plot, and
    return ``(parinfo, parinfo.errors)`` from the fit.

    NOTE(review): relies on module-level `files`, `bin_width`, `binning`,
    `averaging_over_dopplervel`, `SpectroscopicAxis`, `freq_dict`, `psk`
    (pyspeckit), `ammonia`, `plt`, and `u` — none are visible here.
    """
    # following loop has not good style. One should build in some break
    # statements or error messages if files repeatedly appear in loop.
    for one_file in files:
        if 'NH3_11' in one_file:
            file_name_NH3_11 = one_file
        if 'NH3_22' in one_file:
            file_name_NH3_22 = one_file
        if 'NH3_33' in one_file:
            file_name_NH3_33 = one_file

    # Pixel coordinates (and median) for this bin.
    y, x, med = binning(bin_width, bin_num)

    # Doppler-averaged spectrum for each NH3 line.
    s11, _, offset_velocity11, sp_av11 = averaging_over_dopplervel(
        file_name_NH3_11, y, x)
    s22, _, offset_velocity22, sp_av22 = averaging_over_dopplervel(
        file_name_NH3_22, y, x)
    s33, _, offset_velocity33, sp_av33 = averaging_over_dopplervel(
        file_name_NH3_33, y, x)

    # Velocity axes referenced to each line's rest frequency, in GHz.
    xarr11 = SpectroscopicAxis(offset_velocity11 * u.km / u.s,
                               velocity_convention='radio',
                               refX=freq_dict['oneone']).as_unit(u.GHz)
    xarr22 = SpectroscopicAxis(offset_velocity22 * u.km / u.s,
                               velocity_convention='radio',
                               refX=freq_dict['twotwo']).as_unit(u.GHz)
    xarr33 = SpectroscopicAxis(offset_velocity33 * u.km / u.s,
                               velocity_convention='radio',
                               refX=freq_dict['threethree']).as_unit(u.GHz)

    sp11 = psk.Spectrum(data=s11, xarr=xarr11,
                        xarrkwargs={'unit': 'km/s'}, unit='K')
    sp22 = psk.Spectrum(data=s22, xarr=xarr22,
                        xarrkwargs={'unit': 'km/s'}, unit='K')
    sp33 = psk.Spectrum(data=s33, xarr=xarr33,
                        xarrkwargs={'unit': 'km/s'}, unit='K')

    # This joins all the spectra together into one object.
    allspec = psk.Spectra([sp11, sp22, sp33])
    allspec.xarr.as_unit('Hz', velocity_convention='radio')

    # This add the cold_ammonia model to the list of things we can use for
    # fitting
    allspec.specfit.Registry.add_fitter('cold_ammonia',
                                        ammonia.cold_ammonia_model(), 6)

    # This does the fit. The values of the guess are
    # Kinetic Temperature (usually about 15 to 25 K)
    # Excitation Temperature (between 2.73 K and the excitation temperature)
    # Log Column Density of ammonia
    # Line width (~1 km/s)
    # Offset velocity (you will usually use 0 instead of 8.5)
    # Ortho fraction (leave at 0)
    allspec.specfit(fittype='cold_ammonia', guesses=[23, 5, 13.1, 1, 0, 0])

    # You can make a plot here.
    fig = plt.figure()
    allspec.plotter()
    allspec.specfit.plot_fit(lw=1, components=True)
    plt.xlim((23.692, 23.697))
    # plt.xlim((23.72,23.725))
    # plt.xlim((23.8692, 23.8708))
    # plt.savefig("OrionA:pyspeckit_fit_bin_width=%rthis_bin=%r.ps" %(bin_width, this_bin))
    plt.savefig("Map=%r:bin_num=%r_pyspeckit_fit_NH3_11TEST.ps" %
                (map_name, bin_num))
    plt.show()

    # returns the values of the fitted parameters: T_K, T_ex, N, sigma, v, F_0
    return allspec.specfit.parinfo, allspec.specfit.parinfo.errors
# Script: plot the full concatenated "max" spectra, once in native units
# and once converted to K via spectral_cube.
import glob
import pyspeckit
import pylab as pl
from spectral_cube import OneDSpectrum
from astropy.io import fits
from astropy import units as u

pl.close(1)
pl.close(2)

fig = pl.figure(1, figsize=(25, 10))
spectra = pyspeckit.Spectra(
    pyspeckit.Spectrum(x) for x in glob.glob("spectra/*max.fits"))
spectra.plotter(figure=fig)
spectra.plotter.figure.savefig('full_spectrum_max.png', dpi=200,
                               bbox_inches='tight')

fig2 = pl.figure(2, figsize=(25, 10))
# Convert to K with spectral_cube, then hand back to pyspeckit via HDUs.
kspectra = [
    OneDSpectrum.from_hdu(fits.open(x)).to(u.K)
    for x in glob.glob("spectra/*max.fits")
]
kspectra_ps = [pyspeckit.Spectrum.from_hdu(kspec.hdu) for kspec in kspectra]
spectra_K = pyspeckit.Spectra(kspectra_ps)
spectra_K.plotter(figure=fig2)
# NOTE(review): this excerpt is cut off mid-call; the remaining savefig
# arguments continue beyond this chunk.
spectra_K.plotter.figure.savefig('full_spectrum_max_K.png', dpi=200,
# Summary plots of the CH3CN fit results versus upper-state energy,
# followed by loading the per-region long-baseline spectra.
# NOTE(review): `all_ch3cn`, `save_prefix`, `vkms`, `paths`, `pl`,
# `pyspeckit`, and `glob` come from elsewhere in this file.

pl.figure(2).clf()
pl.plot(all_ch3cn['E_U (K)'], all_ch3cn['FittedWidth'], 'o')
pl.xlabel("E$_U$ (K)")
# Raw string: "\s" is an invalid escape sequence in a normal string
# literal (DeprecationWarning, a future SyntaxError); r"..." preserves
# the LaTeX source verbatim.
pl.ylabel(r"$\sigma$ (km/s)")
pl.ylim(0, 3.5)
pl.savefig(save_prefix + "_sigma_vs_eupper.png")

pl.figure(3).clf()
pl.plot(all_ch3cn['E_U (K)'], all_ch3cn['FittedCenter'], 'o')
pl.xlabel("E$_U$ (K)")
pl.ylabel(r"$v_{lsr}$ (km/s)")
pl.ylim(vkms - 3, vkms + 3)
pl.savefig(save_prefix + "_vcen_vs_eupper.png")

# Spectra extracted at several positions around e2e / W51n.
spectra_center = pyspeckit.Spectra(
    glob.glob(paths.dpath('longbaseline/spectra/e2e_center_W51e2_spw*fits')))
spectra_left = pyspeckit.Spectra(
    glob.glob(paths.dpath('longbaseline/spectra/e2e_left_W51e2_spw*fits')))
spectra_right = pyspeckit.Spectra(
    glob.glob(paths.dpath('longbaseline/spectra/e2e_right_W51e2_spw*fits')))
spectra_se_emission = pyspeckit.Spectra(
    glob.glob(
        paths.dpath('longbaseline/spectra/e2e_se_emission_W51e2_spw*fits')))
spectra_ALMAmm24 = pyspeckit.Spectra(
    glob.glob(paths.dpath('longbaseline/spectra/ALMAmm24_W51n_spw*.fits')))
spectra_d2 = pyspeckit.Spectra(
    glob.glob(paths.dpath('longbaseline/spectra/d2_W51n_spw*.fits')))
spectra_e2e = pyspeckit.Spectra(
    glob.glob(paths.dpath('longbaseline/spectra/e2e_W51e2_spw*.fits')))
spectra_e2nw = pyspeckit.Spectra(
    glob.glob(paths.dpath('longbaseline/spectra/e2nw_W51e2_spw*.fits')))
"{0}_{1}".format(row['Species'], row['Resolved QNs']): (row['Freq-GHz'] if row['Freq-GHz'] else row['Meas Freq-GHz']) * u.GHz for row in methanol_lines } figname = 'fullspectra/methanol_{0}.png'.format(row['source']) plot_whole_spectrum( speclist, title=row['source'], line_id=methanol_line_ids, figname=figname, velocity=row['velocity'] * u.km / u.s, ) spectra = pyspeckit.Spectra(speclist) spectra.xarr.convert_to_unit(u.GHz) mod = methanolmodel_osu.lte_model(spectra.xarr, row['velocity'] * u.km / u.s, 5 * u.km / u.s, 300 * u.K, 5e17 * u.cm**-2) median = np.nanmedian(spectra.data) for ii in range(1, 8): pl.subplot(7, 1, ii) pl.plot(spectra.xarr, mod + median, color='r', linewidth=1, alpha=0.5)
# Script: sanity-check the extraction regions against the continuum image,
# then overplot center/left/right spectra of e2e.
# Uses `aplpy`, `paths`, `pyregion`, `pyspeckit`, `glob`, `pl`, and `u`
# from elsewhere in this file.
pl.figure(1).clf()
pl.figure(2).clf()

# sanity check: are the regions in the right place?
F = aplpy.FITSFigure(
    paths.dpath('longbaseline/W51e2cax.cont.image.pbcor.fits'),
    figure=pl.figure(1))
F.show_grayscale(vmax=0.015)
region_list = pyregion.open(
    paths.rpath("cores_longbaseline_spectralextractionregions.reg"))
F.show_regions(region_list)
F.recenter(290.9332, 14.509589, 0.5 / 3600.)

spectra_se_emission = pyspeckit.Spectra(
    glob.glob(
        paths.dpath('longbaseline/spectra/e2e_se_emission_W51e2_spw*fits')))
spectra_center = pyspeckit.Spectra(
    glob.glob(paths.dpath('longbaseline/spectra/e2e_center_W51e2_spw*fits')))
spectra_left = pyspeckit.Spectra(
    glob.glob(paths.dpath('longbaseline/spectra/e2e_left_W51e2_spw*fits')))
spectra_right = pyspeckit.Spectra(
    glob.glob(paths.dpath('longbaseline/spectra/e2e_right_W51e2_spw*fits')))

spectra_center.xarr.convert_to_unit(u.GHz)
spectra_right.xarr.convert_to_unit(u.GHz)
spectra_left.xarr.convert_to_unit(u.GHz)

# Overlay the three position spectra on one axis: center (default color),
# left (red), right (green).
spectra_center.plotter(figure=pl.figure(2))
spectra_left.plotter(axis=spectra_center.plotter.axis, clear=False,
                     color='r')
spectra_right.plotter(axis=spectra_center.plotter.axis, clear=False,
                      color='g')
# Script fragment: per-region line fitting; result columns are appended to
# the full line table.  Uses `table` (astropy), `paths`, `pyregion`,
# `glob`, `pyspeckit`, and `np` from elsewhere in this file.
import warnings
from numpy.ma.core import MaskedArrayFutureWarning

warnings.filterwarnings('ignore', category=MaskedArrayFutureWarning)

line_table = table.Table.read(paths.apath('full_line_table.csv'))
line_table.sort('Species')

regions = (pyregion.open(paths.rpath("cores.reg")))

for region in regions:
    name = region.attr[1]['text']
    spectral_files = glob.glob(
        paths.spath('{0}_spw[0123]_mean.fits'.format(name)))
    #background_spectral_files = glob.glob(paths.spath('{0}_spw[0123]_background_mean.fits'.format(name)))
    assert len(spectral_files) == 4#len(background_spectral_files) == 4
    spectra = pyspeckit.Spectra(spectral_files)
    #bgspectra = pyspeckit.Spectra(background_spectral_files)

    stats = spectra.stats()
    err = stats['std'] # overly conservative guess

    # Per-spectrum "baseline": remove the 25th-percentile level.
    for sp in spectra:
        sp.data -= np.nanpercentile(sp.data, 25)
    #med = stats['median']
    #if med < 0:
    #    med = 0

    # One set of fit-result columns per region.
    # NOTE(review): this excerpt is cut off; further add_column calls
    # (e.g. the WidthError column) presumably follow beyond this chunk.
    line_table.add_column(table.Column(name='{0}FittedAmplitude'.format(name),
                                       data=np.zeros(len(line_table))))
    line_table.add_column(table.Column(name='{0}FittedCenter'.format(name),
                                       data=np.zeros(len(line_table))))
    line_table.add_column(table.Column(name='{0}FittedWidth'.format(name),
                                       data=np.zeros(len(line_table))))
    line_table.add_column(table.Column(name='{0}FittedAmplitudeError'.format(name),
                                       data=np.zeros(len(line_table))))
    line_table.add_column(table.Column(name='{0}FittedCenterError'.format(name),
                                       data=np.zeros(len(line_table))))
# sp1.plotter() # sp1.specfit(fittype='formaldehyde_radex',multifit=True,guesses=[4,12,3.75,0.43],quiet=False) sp1.crop(options.vmin, options.vmax) sp1.smooth(options.smooth6cm) # match to GBT resolution sp2.crop(options.vmin, options.vmax) sp1.xarr.convert_to_unit('GHz') sp1.specfit() # determine errors sp1.error = np.ones(sp1.data.shape) * sp1.specfit.residuals.std() sp1.baseline(excludefit=True) sp2.xarr.convert_to_unit('GHz') sp2.specfit() # determine errors sp2.error = np.ones(sp2.data.shape) * sp2.specfit.residuals.std() sp2.baseline(excludefit=True) sp = pyspeckit.Spectra([sp1, sp2]) sp.Registry.add_fitter( 'formaldehyde_radex', formaldehyde_radex_fitter, 4, multisingle='multi', ) sp.Registry.add_fitter( 'formaldehyde_radex_sphere', formaldehyde_radex_fitter_sphere, 4, multisingle='multi', ) sp.plotter()
# Script fragment: baseline and plot the NH3 (2,2) and (3,3) spectra
# (`sp1`, `sp2`, `savedir`, and `pyspeckit` come from above this excerpt),
# then merge all three lines for a joint ammonia fit.
guesses = sp2.specfit.moments(fittype='gaussian')[1:]
sp2.specfit(fittype='gaussian', guesses=guesses)
sp2.baseline(exclude=[34,42])
sp2.plotter()
sp2.plotter.savefig(savedir+'nh3_22_baselined.png')

sp3 = pyspeckit.Spectrum('G032.751-00.071_nh3_33_Tastar.fits')
sp3.crop(0,80)
sp3.smooth(4)
sp3.plotter()
# Moment-based initial guesses (drop the zeroth moment / height term).
guesses = sp3.specfit.moments(fittype='gaussian')[1:]
sp3.specfit(fittype='gaussian', guesses=guesses)
sp3.baseline(exclude=[30,42])
sp3.plotter()
sp3.plotter.savefig(savedir+'nh3_33_baselined.png')
#sp4.crop(2.3868e10,2.3871e10)

spectra = pyspeckit.Spectra([sp1,sp2,sp3])
sp = spectra
sp.plotter()
#sp.plotter(xmin=2.36875e10,xmax=2.36924e10)
#sp.plotter(xmin=-100,xmax=300)
#if interactive: raw_input("Plotter")
from pylab import *
draw()
#if interactive: raw_input('wait for plotter')

ammonia_model = pyspeckit.models.ammonia_model()
# Literature model for comparison (parameters from a published fit).
published_model = pyspeckit.models.ammonia.ammonia(sp1.xarr,tkin=21.57,tex=0.24+2.73,width=1.11,xoff_v=37.88,tau=3.06)

# set the baseline to zero to prevent variable-height fitting
# (if you don't do this, the best fit to the spectrum is dominated by the
# Script fragment: load the Vega echelle spectrum and plot every order,
# color-coded by wavelength.  Uses `pyspeckit`, and pylab names
# (`figure`, `clf`, `linspace`) from a star-import elsewhere in this file.
if 'wav2rgb' not in globals():
    # this is to deal with python3 not being able to execfile
    import wav2rgb

# Defaults when not provided by a driving script.
if not 'interactive' in globals():
    interactive = False
if not 'savedir' in globals():
    savedir = ''

speclist = pyspeckit.wrappers.load_IRAF_multispec(
    'evega.0039.rs.ec.dispcor.fits')

for spec in speclist:
    spec.unit = "Counts"

SP = pyspeckit.Spectra(speclist)
SPa = pyspeckit.Spectra(speclist, xunit='angstrom', quiet=False)

SP.plotter(figure=figure(1))
SPa.plotter(figure=figure(2))

figure(3)
clf()
figure(4)
clf()

# One RGBA color per order, spanning the visible range 380-780 nm.
#clr = [list(clr) for clr in matplotlib.cm.brg(linspace(0,1,len(speclist)))]
clr = [wav2rgb.wav2RGB(c) + [1.0] for c in linspace(380, 780, len(speclist))]

# NOTE(review): this excerpt is cut off mid-call; the remaining plotter
# arguments continue beyond this chunk.
for ii, (color, spec) in enumerate(zip(clr[::-1], speclist)):
    spec.plotter(figure=figure(3), clear=False,