def __init__(self, cube, wcs=None, mask=None, sigma=None, empty_channel=0,
             keep_threshold_mask=True, distance=None, galaxy_props={}):
    super(BubbleFinder, self).__init__()

    if not isinstance(cube, SpectralCube):
        if wcs is None:
            raise TypeError("When cube is not a SpectralCube, wcs must be"
                            " given.")
        cube = SpectralCube(cube, wcs)

    if mask is not None:
        cube = cube.with_mask(mask)

    self.cube = cube
    self.empty_channel = empty_channel

    if sigma is None:
        self.estimate_sigma()
    else:
        self.sigma = sigma

    self.keep_threshold_mask = keep_threshold_mask
    self._mask = None
    self.distance = distance
    self.galaxy_props = galaxy_props
def line_flux2(catalog, line_name='13co10',
               asgn=datadir + 'COHRS_all_asgn.fits',
               cubefile=datadir + 'GRS_13CO_all.fits'):
    flux = Column(np.zeros(len(catalog)), name=line_name)
    asgn = SpectralCube.read(asgn)
    linefile = SpectralCube.read(cubefile)
    previous_file = ''
    fill_data = None
    previous_cube_file = ''
    for idx, obj in enumerate(catalog):
        if obj['orig_file'] != previous_cube_file:
            print("Pulling line subcube for {0}".format(obj['orig_file']))
            subx1 = obj['orig_file'].split('_')[2]
            subx2 = obj['orig_file'].split('_')[3]
            subcube = linefile[:, :, int(subx1):int(subx2)]
            fill_cube_data = (subcube.filled_data[:].value)
            previous_cube_file = obj['orig_file']
        outtuple = sparse_mask(obj, asgn,
                               previous_file=previous_file,
                               fill_data=fill_data)
        previous_file, fill_data, zcld, ycld, xcld = outtuple
        if len(xcld) > 0:
            flux[idx] = np.nansum(fill_cube_data[zcld, ycld, xcld])
    catalog.add_column(flux)
    return catalog
def load_and_reduce(filename, add_noise=False, rms_noise=0.001, nsig=3):
    '''
    Load the cube in and derive the property arrays.
    '''

    if add_noise:
        if rms_noise is None:
            raise TypeError("Must specify value of rms noise.")

        cube, hdr = getdata(filename, header=True)

        from scipy.stats import norm
        cube += norm.rvs(0.0, rms_noise, cube.shape)

        sc = SpectralCube(data=cube, wcs=WCS(hdr))

        mask = LazyMask(np.isfinite, sc)
        sc = sc.with_mask(mask)

    else:
        sc = filename

    reduc = Mask_and_Moments(sc, scale=rms_noise)
    reduc.make_mask(mask=reduc.cube > nsig * reduc.scale)

    reduc.make_moments()
    reduc.make_moment_errors()

    return reduc.to_dict()
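# A minimal usage sketch for load_and_reduce, assuming "example_cube.fits" is
# a hypothetical FITS cube and that getdata, WCS, LazyMask, SpectralCube, and
# Mask_and_Moments are importable in this namespace as above.
props = load_and_reduce("example_cube.fits", add_noise=True,
                        rms_noise=0.001, nsig=3)
print(sorted(props.keys()))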
def __init__(self, highres, lowres):
    super(MultiResObs, self).__init__()

    self.highres = SpectralCube.read(highres)
    self.lowres = SpectralCube.read(lowres)

    self.highres_convolved = None
    self.lowres_convolved = None

    self.lowbeam = self.lowres.beam
    self.highbeam = self.highres.beam

    self.combined_beam = self.lowbeam.convolve(self.highbeam)
def cubegen(ymin, ymax, xmin, xmax, deltaX=30):
    """Generates a subcube of the specified dimensions from the .fits files,
    for 12CO and 13CO. Returns the subcubes for 12CO and 13CO, respectively.

    Argument format: "(ymin,ymax, xmin,xmax)".
    ^ These are the parameters of the desired subcubes."""

    cube12 = SpectralCube.read("paws-30m-12co10-23as-cube.fits")
    cube13 = SpectralCube.read("paws-30m-13co10-23as-cube.fits")

    subcube12 = cube12[:, ymin:ymax, xmin:xmax]
    subcube13 = cube13[:, ymin:ymax, xmin:xmax]

    return subcube12, subcube13
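# Minimal usage sketch for cubegen, assuming the two PAWS cubes named above
# sit in the working directory; the pixel ranges are illustrative only.
sub12, sub13 = cubegen(350, 550, 500, 700)
print(sub12.shape, sub13.shape)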
def reduce_and_save(filename, add_noise=False, rms_noise=0.001, output_path="", cube_output=None, nsig=3, slicewise_noise=True): ''' Load the cube in and derive the property arrays. ''' if add_noise: if rms_noise is None: raise TypeError("Must specify value of rms noise.") cube, hdr = getdata(filename, header=True) # Optionally scale noise by 1/10th of the 98th percentile in the cube if rms_noise == 'scaled': rms_noise = 0.1*np.percentile(cube[np.isfinite(cube)], 98) from scipy.stats import norm if not slicewise_noise: cube += norm.rvs(0.0, rms_noise, cube.shape) else: spec_shape = cube.shape[0] slice_shape = cube.shape[1:] for i in range(spec_shape): cube[i, :, :] += norm.rvs(0.0, rms_noise, slice_shape) sc = SpectralCube(data=cube, wcs=WCS(hdr)) mask = LazyMask(np.isfinite, sc) sc = sc.with_mask(mask) else: sc = filename reduc = Mask_and_Moments(sc, scale=rms_noise) reduc.make_mask(mask=reduc.cube > nsig * reduc.scale) reduc.make_moments() reduc.make_moment_errors() # Remove .fits from filename save_name = filename.split("/")[-1][:-4] reduc.to_fits(output_path+save_name) # Save the noisy cube too if add_noise: if cube_output is None: reduc.cube.hdu.writeto(output_path+save_name) else: reduc.cube.hdu.writeto(cube_output+save_name)
def warp_ellipse_to_circle(cube, a, b, pa, stop_if_huge=True):
    '''
    Warp a SpectralCube such that the given ellipse is a circle in the
    warped frame.

    Since you should **NOT** be doing this with a large cube, we're going to
    assume that the given cube is a subcube centered in the middle of the
    cube.

    This requires a rotation, then scaling. The equivalent matrix is:
    [b cos PA   b sin PA]
    [-a sin PA  a cos PA].
    '''

    if cube._is_huge:
        if stop_if_huge:
            raise Warning("The cube has the huge flag enabled. Disable "
                          "'stop_if_huge' if you would like to continue "
                          "anyways with the warp.")
        else:
            warn("The cube has the huge flag enabled. This may use a lot "
                 "of memory!")

    # Let NaNs be 0
    data = cube.with_fill_value(0.0).filled_data[:].value

    warped_array = []

    for i in range(cube.shape[0]):
        warped_array.append(nd.zoom(nd.rotate(data[i], np.rad2deg(-pa)),
                                    (1, a / b)))

    warped_array = np.array(warped_array)

    # We want to mask outside of the original bounds
    mask = np.ones(data.shape[1:])

    warp_mask = \
        np.isclose(nd.zoom(nd.rotate(mask, np.rad2deg(-pa)), (1, a / b)), 1)

    # There's probably a clever way to transform the WCS, but all the
    # solutions appear to need pyast/starlink. The output of the warp should
    # give a radius of b and the spectral dimension is unaffected.
    # Also this is hidden and users won't be able to use this weird cube
    # directly
    warped_cube = SpectralCube(warped_array * cube.unit, cube.wcs)
    warped_cube = warped_cube.with_mask(warp_mask)

    return warped_cube
def test_qglue():
    from spectral_cube import SpectralCube
    cube = SpectralCube.read(os.path.join(DATA, 'cube_3d.fits'))
    data = parse_data(cube, 'x')[0]
    assert data.label == 'x'
    data['STOKES I']
    assert data.shape == (2, 3, 4)
def writeplanes(save_name='/mnt/work/erosolow/GRS_13CO_all.fits'):
    spatial_template = fits.open('INTEG/COHRS_RELEASE1_FULL_INTEG.fit')
    spectral_template = SpectralCube.read('reprojected.fits')

    # Smoosh astrometry components together
    spatial_header = spatial_template[0].header
    spectral_header = spectral_template.header
    new_header = spatial_header.copy()
    new_header["NAXIS"] = 3
    for keyword in ['NAXIS3', 'CRVAL3', 'CDELT3', 'CRPIX3', 'CUNIT3']:
        new_header[keyword] = spectral_header[keyword]
    new_header['BMAJ'] = 14. / 3600
    new_header['BMIN'] = 14. / 3600
    new_header['BPA'] = 0.00

    if os.path.exists(save_name):
        raise Exception("The file name {} already "
                        "exists".format(save_name))

    # Open a file and start filling this with planes.
    output_fits = fits.StreamingHDU(save_name, new_header)

    # Again, set up a common vel axis and spin out
    vel = np.linspace(-30, 160, 191)
    for v in vel:
        output_fits.write(fits.getdata(planesdir + 'GRSPLANE_{0}'.format(v)
                                       + '.fits'))
    output_fits.close()
def S2_drawM33(vmin=40,vmax=80, deltaX=40, deltaV=6, deltadeltaX=10, deltadeltaV=1): """Activates S2_draw with each of the .py file's subcube selections, with the same args as S2_arrayM33. Argument format: "(vmin=40,vmax=80, deltaX=40, deltaV=6, deltadeltaX=10, deltadeltaV=1). These MUST match the args/kwargs used in S2_arrayM33!""" galaxyname = 'M33' filename = 'm33.co21_iram_CLEANED' cube = SpectralCube.read(filename+".fits") pixelwidthDEG = cube.header['CDELT2'] # The width of each pixel, in degrees. distancePC = 840000.0 # The distance to the galaxy that M51's .fits file deals with, in parsecs. (???) Is this number accurate, though? pixelwidthPC = pixelwidthDEG*np.pi/180.0*distancePC # The width of each pixel, in pc. ymin = np.array([350,600,650,525,300,250]) # These are the minimum "y" values of the regions that we're dealing with. ymax = np.array([550,800,850,725,500,450]) # These are the corresponding maximum "y" values of these regions. xmin = np.array([500,100,400,288,200,550]) # These are the corresponding minimum "x" values of these regions. xmax = np.array([700,300,600,488,400,750]) # These are the corresponding maximum "x" values of these regions. (Example: The first region has ymin=350, ymax=550, xmin=500, xmax=700.) sets = np.ravel(ymin.shape)[0] # This is the number of regions that we're dealing with. for i in range(0,sets): S2_draw(vmin,vmax,ymin[i],ymax[i],xmin[i],xmax[i],deltaX,deltaV,deltadeltaX,deltadeltaV,filename,galaxyname)
def moments(cube_fits, line_values, line_names, moment, save_file=False):
    """
    cube_fits : str
        The datacube in fits format to open
    line_values : list of floats
        The wavelengths of the lines. !!! In general: if moment=0 the
        required wavelength should be air, if moment=1 it should be
        vacuum !!!
    line_names : list of str
        The identifier of the lines
    moment : 0 or 1
    save_file : bool, optional
        Set to True if the result is to be saved as a fits file.
        Default is False.

    example:
        moment = moments('cube.fits', [4861.33, 6562.8], ['Hb', 'Ha'],
                         moment=0)
    """
    print(line_values, line_names)
    cube = SpectralCube.read(cube_fits)
    for line, stri in zip(line_values, line_names):
        if moment == 0:
            mom = cube.spectral_slab((line - 3) * u.AA,
                                     (line + 3) * u.AA).sum(axis=0)
            if save_file:
                mom.hdu.writeto(str(stri) + '_moment0.fits', overwrite=True)
        if moment == 1:
            mom = cube.with_spectral_unit(
                u.km / u.s, rest_value=line * u.AA,
                velocity_convention='optical').spectral_slab(
                    -300 * u.km / u.s, 300 * u.km / u.s).moment1()
            if save_file:
                mom.hdu.writeto(str(stri) + '_moment1.fits', overwrite=True)
    return mom
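# A hedged moment=1 usage sketch to complement the moment=0 example in the
# docstring, assuming 'cube.fits' exists; 6564.61 A is the vacuum wavelength
# of Halpha.
mom1_ha = moments('cube.fits', [6564.61], ['Ha'], moment=1, save_file=False)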
def FirstLook_Cepheus(): print("Now NH3(1,1)") a_rms = [ 0, 135, 290, 405, 505, 665] b_rms = [ 70, 245, 350, 455, 625, 740] index_rms=first_look.create_index( a_rms, b_rms) index_peak=np.arange(350,410) file_in='Cepheus/Cepheus_NH3_11.fits' # 1st order polynomial file_out=file_in.replace('.fits','_base1.fits') file_new=first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1) first_look.peak_rms( file_new, index_rms=index_rms, index_peak=index_peak) print("Now NH3(2,2)") linelist = ['NH3_22','NH3_33','C2S','HC5N','HC7N_21_20','HC7N_22_21'] vsys = -3.8*u.km/u.s throw = 2.0*u.km/u.s for line in linelist: file_in = 'Cepheus/Cepheus_{0}.fits'.format(line) s = SpectralCube.read(file_in) s = s.with_spectral_unit(u.km/u.s,velocity_convention='radio') a_rms = [s.closest_spectral_channel(vsys+3*throw), s.closest_spectral_channel(vsys-throw)] b_rms = [s.closest_spectral_channel(vsys+throw), s.closest_spectral_channel(vsys-3*throw)] index_peak = np.arange(s.closest_spectral_channel(vsys+3*u.km/u.s), s.closest_spectral_channel(vsys-3*u.km/u.s)) index_rms=first_look.create_index( a_rms, b_rms) file_out=file_in.replace('.fits','_base1.fits') file_new=first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1) first_look.peak_rms( file_new, index_rms=index_rms, index_peak=index_peak)
def cube_w11(region='IC348'): if region == 'IC348': OneOneFile = 'IC348mm/IC348mm-11_cvel_clean_rob05.fits' TwoTwoFile = 'IC348mm/IC348mm-11_cvel_clean_rob05.fits' vmin=7.4 vmax=10.0 elif region == 'IRAS03282': OneOneFile = 'IRAS03282/IRAS03282-11_cvel_clean_rob05.fits' TwoTwoFile = 'IRAS03282/IRAS03282-11_cvel_clean_rob05.fits' vmin=6.0 vmax=8.5 elif region == 'L1451mm': OneOneFile = 'L1451mm/L1451MM-11_cvel_clean_rob05.fits' TwoTwoFile = 'L1451mm/L1451MM-11_cvel_clean_rob05.fits' vmin=3.2 vmax=4.9 cube = SpectralCube.read(OneOneFile) vcube = cube.with_spectral_unit(u.km/u.s, rest_value=freq11, velocity_convention='radio') slab = vcube.spectral_slab( vmax*u.km/u.s, vmin*u.km/u.s) w11=slab.moment( order=0, axis=0) #beam = Beam.from_fits_header(fits.getheader(OneOneFile)) # Next line is to solve bug in spectralcube: # it should be something like this in line 2234 of spectral_cube.py: # ``` # if axis == 0 and self._meta['beam'] is not None: # meta = { blabla, 'beam':self._meta['beam']} # else: # meta = { blabla} w11._meta['beam'] = slab.beam w11.write(OneOneFile.replace('.fits','_w11.fits'), overwrite=True)
def select_cloud(idxarray, cloudcat):
    for idx in idxarray:
        entry = cloudcat[idx]
        asgn = SpectralCube.read(cohrsdir + 'FINALASGNS/'
                                 + entry['orig_file'] + '_fasgn.fits')
        data = SpectralCube.read(cohrsdir + 'DATA/'
                                 + entry['orig_file'] + '.fits')
        mask = (asgn == entry['_idx'] * u.dimensionless_unscaled)
        cube = data.with_mask(mask)
        cube = cube.minimal_subcube()
        cube.write('cohrscld_{0}'.format(entry['_idx']) + '.fits',
                   overwrite=True)
def write_skycoord_table(data, cube_ref, **kwargs):
    """
    Writes out a text file with flattened coordinates of the cube
    stacked with input array data. Additional arguments are passed
    to astropy's text writing function.

    TODO: add a useful `names` keyword?

    See astropy.io.ascii.write docstring for more info.

    Parameters
    ----------
    data : array-like structure of the same xy-grid as cube_ref.

    cube_ref : a cube file to get the coordinate grid from.
    """
    from astropy.table import Table
    from astropy.io import ascii
    from spectral_cube import SpectralCube

    cube = SpectralCube.read(cube_ref)

    flat_coords = [cube.spatial_coordinate_map[i].flatten() for i in [1, 0]]
    # TODO: finish this up for multiple components
    # n_repeat = np.prod(np.array(data).shape) % np.prod(cube.shape[1:]) + 1

    table = Table(np.vstack(flat_coords
                            + [np.array(xy_slice).flatten()
                               for xy_slice in data]).T)
    ascii.write(table, **kwargs)
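# Minimal usage sketch, assuming 'reference_cube.fits' is a hypothetical cube
# and `moment_maps` is a list of 2D arrays on the same spatial grid; the
# extra keywords (`output`, `overwrite`) are forwarded to
# astropy.io.ascii.write.
write_skycoord_table(moment_maps, 'reference_cube.fits',
                     output='flattened_coords.txt', overwrite=True)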
def subcubes_from_ds9(cube, region_file='../nro_maps/SouthShells.reg',
                      pad_factor=1., shape='exact'):
    """
    Extracts subcubes using the ds9 region file.

    Parameters
    ----------
    cube : SpectralCube, str
        The cube to be chopped. Must be type spectral_cube.SpectralCube
        or str filename.
    region_file : str
        Path to a ds9 region file.
    pad_factor : float, optional
        Expand the subcube around the region by this factor.
    shape : {'square', 'exact'}
        The shape of the subcube returned. 'square' returns the smallest
        square subcube that contains the region. 'exact' returns only the
        pixels contained within the region.

    Returns
    -------
    subcubes : SpectralCube or list of SpectralCube
    """
    from spectral_cube import SpectralCube
    import pyregion

    try:
        # If cube is a str filename, read a SpectralCube.
        cube = SpectralCube.read(cube)
    except ValueError:
        pass

    if shape == 'square':
        import astropy.units as u
        subcube_list = []
        region_list = pyregion.open(region_file)
        for region in region_list:
            half_width = region.coord_list[2] * pad_factor * u.deg
            ra_center = region.coord_list[0] * u.deg
            dec_center = region.coord_list[1] * u.deg
            ra_range = [ra_center - half_width, ra_center + half_width]
            dec_range = [dec_center - half_width, dec_center + half_width]
            # print(ra_range, dec_range)
            subcube_list.append(cube.subcube(ra_range[1], ra_range[0],
                                             dec_range[0], dec_range[1]))

    if shape == 'exact':
        region_list = pyregion.open(region_file)
        subcube_list = []
        for region in region_list:
            if pad_factor != 1.:
                new_string = '{};{}({},{},{}")'.format(
                    region.coord_format, region.name,
                    region.coord_list[0], region.coord_list[1],
                    region.coord_list[2] * 3600. * pad_factor)
                region = pyregion.parse(new_string)[0]
            subcube_list.append(
                cube.subcube_from_ds9region(pyregion.ShapeList([region])))

    if len(subcube_list) == 1:
        return subcube_list[0]
    else:
        return subcube_list
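# Minimal usage sketch, assuming 'shells.reg' is a hypothetical ds9 region
# file of circular regions and 'cube.fits' is readable by SpectralCube.
shell_cubes = subcubes_from_ds9('cube.fits', region_file='shells.reg',
                                pad_factor=1.5, shape='exact')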
def summary_plot(filelist):
    for thisfile in filelist:
        s = SpectralCube.read(thisfile)
        outfile = thisfile.replace('.fits', '_summary.png')
        mom0 = s.moment0()
        f = aplpy.FITSFigure(mom0.hdu)
        f.show_colorscale()
        f.show_colorbar()
        f.save(outfile)
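# Minimal usage sketch: write one moment-0 summary PNG per FITS cube in the
# current directory (assumes every matched file is readable by SpectralCube).
import glob
summary_plot(sorted(glob.glob('*.fits')))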
def cubegen(vmin,vmax,ymin,ymax,xmin,xmax, filename = "paws_norot", drawmap = False, mapname="3Dcube"): """ Returns a subcube of the specified dimensions from the .fits file. Also displays the subcube as it appears on the galaxy map if drawmap=True. Parameters: ----------- vmin,...,xmax : int Parameters used in relevant xi map. WARNING: Selecting too large of a vmax-vmin will hugely increase processing time in later calculations. filename : str Name of the .paws data file. "paws_norot" for M51, "m33.co21_iram_CLEANED" for M33. drawmap : bool Enables or disables drawing the subcube Tmax map. galaxyname : str Name of the galaxy. 'M51' for M51, 'M33' for M33. mapname : str Name of the saved image of the subcube's Tmax map, if drawmap==True. Returns: ----------- subcube : spectral cube (?) The data inside the selected subcube. """ cube = SpectralCube.read(filename+".fits") data = cube.filled_data[:] # Pulls "cube"'s information (position, spectral info (?)) into a 3D Numpy array. yshape = data.shape[1]/2.0 xshape = data.shape[2]/2.0 pixelwidthDEG = cube.header['CDELT2'] # The width of each pixel, in degrees. if (filename =='m33.co21_iram_CLEANED') or (filename =='m33.co21_iram_CLEANED_smooth') or (filename =='m33.co21_iram_CLEANED_blank'): # Checks if the galaxy's Header file contains its distance. distancePC = 840000.0 # The distance to the galaxy that M33's .fits file deals with, in parsecs. ONLY works on the CLEANED file! else: distancePC = cube.header['DIST'] # The distance to the galaxy that M51's .fits file deals with, in parsecs. (???) Is this number accurate, though? pixelwidthPC = pixelwidthDEG*np.pi/180.0*distancePC # The width of each pixel, in pc. subcube = cube[vmin:vmax,ymin:ymax,xmin:xmax] if drawmap == True: plt.figure(1) plt.imshow(np.nanmax(data[vmin:vmax,ymin:ymax,xmin:xmax].value,axis=0), extent=[(xmin-xshape)*pixelwidthPC,(xmax-xshape)*pixelwidthPC, \ (ymin-yshape)*pixelwidthPC,(ymax-yshape)*pixelwidthPC], origin='lower') fig = matplotlib.pyplot.gcf() #fig.set_size_inches(5, 5) # Enlarges the image so as to prevent squishing. plt.xlabel('Distance from Centre in x-direction (pc)') plt.ylabel('Distance from Centre in y-direction (pc)') plt.savefig('galaxy_'+mapname+'.png') plt.clf() # Clears the image after saving. return subcube
def FirstLook_NGC1333(): print("Now NH3(1,1)") a_rms = [ 0, 158, 315, 428, 530, 693, 751] b_rms = [ 60, 230, 327, 438, 604, 735, 760] index_rms=first_look.create_index( a_rms, b_rms) index_peak=np.arange(326,430) file_in='NGC1333/NGC1333_NH3_11.fits' # 1st order polynomial file_out=file_in.replace('.fits','_base1.fits') file_new=first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1) first_look.peak_rms( file_new, index_rms=index_rms, index_peak=index_peak) print("Now NH3(2,2)") a_rms = [ 0, 190, 360, 600] b_rms = [70, 300, 470, 640] index_rms=first_look.create_index( a_rms, b_rms) index_peak=np.arange(380,520) # file_in='NGC1333/NGC1333_NH3_22.fits' # # 1st order polynomial # file_out=file_in.replace('.fits','_base1.fits') # file_new=first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1) # first_look.peak_rms( file_new, index_rms=index_rms, index_peak=index_peak) ## 2nd order polynomial #file_out=file_in.replace('.fits','_base2.fits') #file_new=first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=2) #first_look.peak_rms( file_new, index_rms=index_rms, index_peak=index_peak) # # print("Now NH3(3,3)") # a_rms = [ 10, 190, 420] # b_rms = [70, 360, 500] # index_rms=first_look.create_index( a_rms, b_rms) # index_peak=np.arange(410,540) # file_in='NGC1333/NGC1333_NH3_33.fits' # 1st order polynomial # file_out=file_in.replace('.fits','_base1.fits') # file_new=first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1) # first_look.peak_rms( file_new, index_rms=index_rms, index_peak=index_peak) linelist = ['NH3_22','NH3_33','C2S','HC5N','HC7N_21_20','HC7N_22_21'] vsys = 8.5*u.km/u.s throw = 8*u.km/u.s for line in linelist: file_in = 'NGC1333/NGC1333_{0}.fits'.format(line) s = SpectralCube.read(file_in) s = s.with_spectral_unit(u.km/u.s,velocity_convention='radio') a_rms = [s.closest_spectral_channel(vsys+2*throw), s.closest_spectral_channel(vsys-throw)] b_rms = [s.closest_spectral_channel(vsys+throw), s.closest_spectral_channel(vsys-2*throw)] index_peak = np.arange(s.closest_spectral_channel(vsys+3*u.km/u.s), s.closest_spectral_channel(vsys-3*u.km/u.s)) index_rms=first_look.create_index( a_rms, b_rms) file_out=file_in.replace('.fits','_base1.fits') file_new=first_look.baseline( file_in, file_out, index_clean=index_rms, polyorder=1) first_look.peak_rms( file_new, index_rms=index_rms, index_peak=index_peak)
def dendropix(fileprefix='SgrB2_b3_12M.HC3N'): cube = SpectralCube.read(dpath('{0}.image.pbcor.contsub.fits'.format(fileprefix))).minimal_subcube() noise = cube.spectral_slab(-200*u.km/u.s, -100*u.km/u.s).std(axis=0) keep_mask = cube.max(axis=0) > noise tblfile = tpath('{0}.dendrotable.ecsv'.format(fileprefix)) if os.path.exists(tblfile): table = Table.read(tblfile, format='ascii.ecsv') else: table = Table([Column(name='xpix'), Column(name='ypix'), Column(name='zpix'), Column(name='lon'), Column(name='lat'), Column(name='velo'), Column(name='peakval'),]) xpyp_done = set(zip(table['xpix'], table['ypix'])) all_keepers = zip(*np.where(keep_mask)) xpyp = [x for x in all_keepers if x not in xpyp_done] print(len(xpyp), len(all_keepers), len(xpyp_done)) if len(xpyp_done) > 0: assert len(xpyp) < len(all_keepers) for ii,(ypix,xpix) in enumerate(ProgressBar(xpyp)): data = cube[:,ypix,xpix].value error = noise[ypix,xpix].value # alternative: #error = stats.sigma_clipped_stats(data)[2] D = astrodendro.Dendrogram.compute(data, min_value=0, min_delta=2*error, min_npix=7, is_independent=astrodendro.pruning.min_peak(5*error)) if not D.leaves: table.add_row([xpix,ypix,]+[np.nan]*5) del D continue #peaks = [S.get_peak()[0][0] for S in D] #peak_vals = [S.get_peak()[1] for S in D] #peaks = [cube.spectral_axis[S.get_peak()[0][0]].to(u.km/u.s).value for S in D] for S in D: (peak_pix,),peak_val = S.get_peak() velo,lat,lon = cube.world[peak_pix, ypix, xpix] table.add_row([xpix,ypix,peak_pix,lon,lat,velo,peak_val]) if ii % 100 == 0: table.write(tblfile, format='ascii.ecsv') del D del S del data
def __init__(self, cube, scale=None, spatial_norm=None, spectral_norm=None, beam=None, method="MAD"): """ Construct a new Noise object. Parameters ---------- method : {'MAD','STD'} Chooses method for estimating noise variance either 'MAD' for median absolute deviation and 'STD' for standard deviation. Default: 'MAD' """ if isinstance(cube,SpectralCube): self.cube = cube elif isinstance(cube, str): self.cube = SpectralCube.read(cube) else: warnings.warn("Noise currently requires a SpectralCube instance.") self.spatial_footprint = np.any(self.cube.get_mask_array(), axis=0) if beam is None: try: self.beam = cube.beam except AttributeError: warnings.warn("cube object has no associated beam. All beam " "operations are disabled.") self.beam = None self.astropy_beam_flag = False else: if isinstance(beam, Beam): self.astropy_beam_flag = False elif isinstance(beam, Kernel2D): self.astropy_beam_flag = True else: warnings.warn("beam must be a radio_beam Beam object or an " "astropy Kernel2D object. All beam operations " "are disabled.") self.beam = beam # Default to a normal distribution self.distribution = ss.norm # SUGGESTION: calculate on initialization? # Fit the data if scale is None: self.calculate_scale(method=method) # [1] is the std. of a Gaussian self.spatial_norm = np.ones((self.cube.shape[1], self.cube.shape[2])) self.spectral_norm = np.ones((self.cube.shape[0])) # Compute the scale_cube self.get_scale_cube()
def plot_overview(cube='../nro_maps/12CO_20161002_FOREST-BEARS_spheroidal_xyb_grid7.5_0.099kms.fits', region_file='../nro_maps/SouthShells.reg', mode='peak', plotname='12co_peak_shells.png', interactive=False, show_shells=False): """ Show full image with all shells. Parameters ---------- cube : str, optional Description region_file : str, optional Description mode : str, optional Description plotname : str, optional Description interactive : bool, optional Description show_shells : bool, optional Description """ try: cube = SpectralCube.read(cube) except ValueError: pass if mode == "peak": image = cube.max(axis=0) fig = plt.figure() wcs = WCS(image.header) ax = WCSAxes(fig, [0.1,0.1,0.8,0.8], wcs=wcs) fig.add_axes(ax) imgplot = plt.imshow(image.data, cmap=cm.gray, origin='lower', interpolation='none', vmin=0., vmax=100) cb = plt.colorbar() cb.set_label(r'K [T$_{MB}$]') plt.title(r"$^{12}$CO Peak") if show_shells: r = pyregion.open(region_file).as_imagecoord(image.header) patch_list, artist_list = r.get_mpl_patches_texts() for p in patch_list: ax.add_patch(p) for t in artist_list: ax.add_artist(t) pass if interactive: plt.show() else: plt.savefig(plotname)
def line_flux(catalog, asgn=datadir + 'COHRS_all_asgn.fits'):
    thco10 = Column(np.zeros(len(catalog)), name='13co10')
    thco32 = Column(np.zeros(len(catalog)), name='13co32')
    c18o32 = Column(np.zeros(len(catalog)), name='c18o32')
    twco32 = Column(np.zeros(len(catalog)), name='12co32')
    asgn = SpectralCube.read(asgn)
    previous_file = ''
    fill_data = None
    for idx, obj in enumerate(ProgressBar(catalog)):
        outtuple = sparse_mask(obj, asgn,
                               previous_file=previous_file,
                               fill_data=fill_data)
        if obj['orig_file'] != previous_file:
            print("Pulling img tiles for {0}".format(obj['orig_file']))
            subx1 = obj['orig_file'].split('_')[2]
            subx2 = obj['orig_file'].split('_')[3]
            co32cube = (SpectralCube.read(
                './COHRS_tiles/COHRS_{0}_{1}.fits'.format(
                    subx1, subx2))).filled_data[:].value
            thco32cube = (SpectralCube.read(
                './CHIPS_13CO_tiles/CHIMPS_13CO_{0}_{1}.fits'.format(
                    subx1, subx2))).filled_data[:].value
            c18o32cube = (SpectralCube.read(
                './CHIPS_C18O_tiles/CHIMPS_C18O_{0}_{1}.fits'.format(
                    subx1, subx2))).filled_data[:].value
            grscube = (SpectralCube.read(
                './GRS_tiles/GRS_13CO_{0}_{1}.fits'.format(
                    subx1, subx2))).filled_data[:].value
        previous_file, fill_data, z, y, x = outtuple
        thco10[idx] = np.nansum(grscube[z, y, x])
        thco32[idx] = np.nansum(thco32cube[z, y, x])
        c18o32[idx] = np.nansum(c18o32cube[z, y, x])
        twco32[idx] = np.nansum(co32cube[z, y, x])
    catalog.add_columns([thco10, thco32, c18o32, twco32])
    return catalog
def to_spectral_cube(data, header):
    '''
    Convert the output from input_data into a SpectralCube.
    '''

    if not HAS_SC:
        raise ValueError("spectral-cube needs to be installed.")

    hdu = fits.PrimaryHDU(data, header)

    return SpectralCube.read(hdu)
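# Minimal usage sketch, assuming 'example_cube.fits' is a hypothetical FITS
# cube: reuse its data and header to round-trip through to_spectral_cube.
from astropy.io import fits
with fits.open('example_cube.fits') as hdulist:
    sc_cube = to_spectral_cube(hdulist[0].data, hdulist[0].header)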
def cubegen(ymin, ymax, xmin, xmax, deltaX=30):
    """Generates a subcube of the specified dimensions from the specified
    .fits file.

    Argument format: "(ymin,ymax, xmin,xmax)".
    ^ These are the parameters of the desired subcube."""

    cube = SpectralCube.read("paws-30m-12co10-23as-cube.fits")
    subcube = cube[:, ymin:ymax, xmin:xmax]

    return subcube
def load_and_reduce(filename, add_noise=False, rms_noise=0.001, nsig=3, slicewise_noise=True): ''' Load the cube in and derive the property arrays. ''' if add_noise: if rms_noise is None: raise TypeError("Must specify value of rms noise.") cube, hdr = getdata(filename, header=True) # Optionally scale noise by 1/10th of the 98th percentile in the cube if rms_noise == 'scaled': rms_noise = 0.1*np.percentile(cube[np.isfinite(cube)], 98) from scipy.stats import norm if not slicewise_noise: cube += norm.rvs(0.0, rms_noise, cube.shape) else: spec_shape = cube.shape[0] slice_shape = cube.shape[1:] for i in range(spec_shape): cube[i, :, :] += norm.rvs(0.0, rms_noise, slice_shape) sc = SpectralCube(data=cube, wcs=WCS(hdr)) mask = LazyMask(np.isfinite, sc) sc = sc.with_mask(mask) else: sc = filename reduc = Mask_and_Moments(sc, scale=rms_noise) reduc.make_mask(mask=reduc.cube > nsig * reduc.scale) reduc.make_moments() reduc.make_moment_errors() return reduc.to_dict()
def __init__(self, cube, beam=None, mask=None, method="MAD", compute=True):

    # Initialize cube object
    self.cube = SpectralCube.read(cube)

    if mask is not None:
        _check_mask(mask)
    self.mask = mask

    if beam is not None:
        _check_beam(beam)

    # Initialize noise object
    self.noise = Noise(self.cube, beam=beam, method=method)
def binning(f_nam, bin_width=500, thisbin=0):
    """A function creating brightness bins of pixels, and eventually a map,
    in the given spectral cube"""
    cube = SpectralCube.read(f_nam)
    cube = cube.with_spectral_unit(u.km / u.s, velocity_convention='radio')

    # array of the maximum values in the spectra of each pixel
    Tmax = cube.apply_numpy_function(np.nanmax, axis=0)
    baddata = nd.morphology.binary_dilation(np.isnan(Tmax), np.ones((25, 25)))
    Tmax[baddata] = 0.0

    bin_arr = np.sort(Tmax[np.isfinite(Tmax)])
    # this creates an array of the bin margins, in which every bin has a
    # width of "bin_width"
    bin_arr2 = bin_arr[::-bin_width]
    bins = np.digitize(Tmax, bin_arr2)
    y, x = np.where(bins == thisbin)
    return y, x
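# Minimal usage sketch, assuming 'example_cube.fits' is a hypothetical cube;
# returns the pixel coordinates that fall in the chosen brightness bin.
y_pix, x_pix = binning('example_cube.fits', bin_width=500, thisbin=2)
print(len(y_pix), "pixels in this bin")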
def SampleWithConvolution(file, positions, beam=defaultBeam,
                          order=1, **kwargs):
    s = SpectralCube.read(file)
    spaxis = s.spectral_axis.value
    spaxis.shape += (1,)
    spaxis_ones = np.ones_like(spaxis)
    s2 = s.convolve_to(beam)
    ravals = spaxis_ones * positions.ra.value
    decvals = spaxis_ones * positions.dec.value
    vvals = spaxis * np.ones_like(positions.ra.value)
    x, y, v = s.wcs.all_world2pix(ravals, decvals, vvals, 0)
    output = map_coordinates(s2.filled_data[:], [v, y, x],
                             order=order, **kwargs)
    # import pdb; pdb.set_trace()
    return output
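# Minimal usage sketch, assuming 'example_cube.fits' exists; the target beam
# and sky positions below are purely illustrative.
import astropy.units as u
from astropy.coordinates import SkyCoord
from radio_beam import Beam
positions = SkyCoord([83.8, 83.9], [-5.4, -5.5], unit='deg', frame='fk5')
spectra = SampleWithConvolution('example_cube.fits', positions,
                                beam=Beam(30 * u.arcsec))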
def deblend_cube(region='OrionA', vmin=0., vmax=20.):
    tex = fits.getdata('{0}/parameterMaps/{0}_Tex_DR1_rebase3_flag.fits'.format(region))
    mom0 = fits.getdata('{0}/{0}_NH3_11_DR1_rebase3_mom0_QA_trim.fits'.format(region))
    vlsr = fits.getdata('{0}/parameterMaps/{0}_Vlsr_DR1_rebase3_flag.fits'.format(region))
    sigv = fits.getdata('{0}/parameterMaps/{0}_Sigma_DR1_rebase3_flag.fits'.format(region))
    nnh3 = fits.getdata('{0}/parameterMaps/{0}_N_NH3_DR1_rebase3_flag.fits'.format(region))
    cube = SpectralCube.read('{0}/{0}_NH3_11_DR1_rebase3_trim.fits'.format(region))
    cube = cube.with_spectral_unit(u.km / u.s, velocity_convention='radio')
    tpeak = mom0 / (np.sqrt(2 * np.pi) * sigv)
    vlsr[vlsr == 0] = np.nan
    sigv[sigv == 0] = np.nan
    deblend = np.zeros(cube.shape)
    hdr = cube.wcs.to_header()
    spaxis = cube.spectral_axis.value
    for plane in np.arange(deblend.shape[0]):
        deblend[plane, :, :] = tpeak * np.exp(-(spaxis[plane] - vlsr)**2
                                              / (2 * sigv**2))
    newcube = SpectralCube(deblend, cube.wcs, header=hdr)
    slab = newcube.spectral_slab(vmin * u.km / u.s, vmax * u.km / u.s)
    slab.write('{0}/{0}_singlecomp.fits'.format(region), overwrite=True)
] cubes = {} mx = {} m1 = {} for region in ("N", "M"): for spw in (0, 1, 2, 3): for band in (3, 6): #for line in freqs: for line in maser_lines: fn = 'full_SgrB2{0}_spw{1}_lines_cutout{0}_medsub.fits'.format( region, spw) fn = 'sgr_b2m.{0}.spw{1}.B{2}.lines.clarkclean1000.robust0.5.image.pbcor.medsub.fits'.format( region, spw, band) cube = SpectralCube.read(paths.eFpath(fn)) freq = freqs[line] if cube.spectral_extrema[0] < freq < cube.spectral_extrema[1]: subcube = (cube.with_spectral_unit( u.km / u.s, velocity_convention='radio', rest_value=freq).spectral_slab(55 * u.km / u.s, 110 * u.km / u.s)) try: subcube = subcube.to(u.K) except Exception as ex: print("Failed: {0}".format(ex)) if 'OBJECT' in subcube.meta: subcube.meta['OBJECT'] = subcube.meta['OBJECT'] + line else: subcube.meta['OBJECT'] = "{0}_{1}".format(region, line)
# Save table of parameters co_param_df = DataFrame(co_fit_vals, index=parnames_hwhm) co_param_df.to_latex(alltables_path("co_gaussian_totalprof_fits_hwhm_38arcsec.tex")) co_param_df.to_csv(iram_co21_14B088_data_path("smooth_2beam/tables/co_gaussian_totalprof_fits_hwhm_38arcsec.csv", no_check=True)) hi_param_df = DataFrame(hi_fit_vals, index=parnames_hwhm) hi_param_df.to_latex(alltables_path("hi_gaussian_totalprof_fits_hwhm_38arcsec.tex")) hi_param_df.to_csv(fourteenB_HI_data_wGBT_path("smooth_2beam/tables/hi_gaussian_totalprof_fits_hwhm_38arcsec.csv", no_check=True)) # Same for the radial bins. # Load in radial profiles total_spectrum_hi_radial_cent = SpectralCube.read(hi_stackpath("centroid_stacked_radial_{}.fits".format(wstring))) total_spectrum_hi_radial_peakvel = SpectralCube.read(hi_stackpath("peakvel_stacked_radial_{}.fits".format(wstring))) total_spectrum_co_radial_cent = SpectralCube.read(co_stackpath("centroid_stacked_radial_{}.fits".format(wstring))) total_spectrum_co_radial_peakvel = SpectralCube.read(co_stackpath("peakvel_stacked_radial_{}.fits".format(wstring))) labels = ["centsub", "peaksub"] # Make the bin edges # Convert these into kpc inneredge = np.arange(0, max_radius.value, dr.value) / 1000. outeredge = (inneredge + dr.to(u.kpc).value) # How do the model parameters change with radius?
import glob import radio_beam import regions fname = '/ufrc/adamginsburg/d.jeff/imaging_results/SgrB2DS_field1_spw2_cube_medsub.image.fits' #glob.glob('/ufrc/adamginsburg/d.jeff/imaging_results/*.fits') z = 0.0002333587 #chem= input('Molecule?: ') #chem=(' '+chem+' ') linelist = input( 'Linelist? (Lovas, SLAIM, JPL, CDMS, ToyoMA, OSU, Recomb, Lisa, RFI): ') speciesdata = {} imgnames = ['spw2'] #'/ufrc/adamginsburg/d.jeff/imaging_results/SgrB2DS_field1_spw1_cube_medsub.image.fits'#'/ufrc/adamginsburg/d.jeff/imaging_results/SgrB2DS_field1_spw0_cube.image.fits' cube = sc.read(fname) header = fits.getheader(fname) freqs = cube.spectral_axis freq_max = freqs[0] * (1 + z) #215*u.GHz freq_min = freqs[(len(freqs) - 1)] * (1 + z) #235*u.GHz linewidth = 0.5 * 0.0097 * u.GHz '''Generate methanol table for contaminant search''' methanol_table = utils.minimize_table( Splatalogue.query_lines(freq_min, freq_max, chemical_name=' CH3OH ', energy_max=1840, energy_type='eu_k', line_lists=[linelist],
return wcs, data_masked def cut_2d(data, position, size, wcs): cut = Cutout2D(data=data, position=position, size=size, wcs=wcs) data_cut = cut.data wcs_cut = cut.wcs return wcs_cut, data_cut ############################################################ ### 12CO 1-0 cube processing. ### name = imageDir + '12CO10/NGC5258_12CO10_combine_contsub_uvrange_pbcor.fits' imagecube = SpectralCube.read(name) common_beam = imagecube.beams.common_beam(tolerance=1e-5) # Imcube = imagecube.convolve_to(common_beam) smooth_beam = radio_beam.Beam(major=2.186 * u.arcsec, minor=1.896 * u.arcsec, pa=-87.6 * u.deg) Imcube = imagecube.convolve_to(smooth_beam) ## create rms cube rmscube = cube.calc_noise_in_cube(Imcube, spatial_average_nbeam=1.0, spectral_average_nchan=2) ## find the signal of the cube. outcube = cube.find_signal_in_cube(Imcube, rmscube,
j += 1 xi = i xf = csize subcubes.append('mwisp_' + str(j).zfill(2) + '_' + str(xi) + '_' + str(xf)) np.save(path + 'SUBCUBES/subfiles.npy', subcubes) #%&%&%&%&%&%&%&%&%&%&%&%&%&% # Slicing #%&%&%&%&%&%&%&%&%&%&%&%&%&% if do_slicing: fcube = SpectralCube.read(mpath + full_cube_name + '.fits') subfiles = np.load(path + 'SUBCUBES/subfiles.npy') print("Slicing...") #for j,subfile in enumerate(subfiles): for j in range(len(subfiles)): subfile = subfiles[j] print(subfile) xi = int(subfile.split('_')[2]) xf = int(subfile.split('_')[3]) scube = fcube[:, :, xi:xf]
def load_casa_image(filename, skipdata=False, skipvalid=False, skipcs=False, **kwargs): """ Load a cube (into memory?) from a CASA image. By default it will transpose the cube into a 'python' order and drop degenerate axes. These options can be suppressed. The object holds the coordsys object from the image in memory. """ try: from taskinit import ia except ImportError: raise ImportError( "Could not import CASA (casac) and therefore cannot read CASA .image files" ) # use the ia tool to get the file contents ia.open(filename) # read in the data if not skipdata: data = ia.getchunk() # CASA stores validity of data as a mask if not skipvalid: valid = ia.getchunk(getmask=True) # transpose is dealt with within the cube object # read in coordinate system object casa_cs = ia.coordsys() wcs = wcs_casa2astropy(casa_cs) # don't need this yet # stokes = get_casa_axis(temp_cs, wanttype="Stokes", skipdeg=False,) # if stokes == None: # order = np.arange(self.data.ndim) # else: # order = [] # for ax in np.arange(self.data.ndim+1): # if ax == stokes: # continue # order.append(ax) # self.casa_cs = ia.coordsys(order) # This should work, but coordsys.reorder() has a bug # on the error checking. JIRA filed. Until then the # axes will be reversed from the original. # if transpose == True: # new_order = np.arange(self.data.ndim) # new_order = new_order[-1*np.arange(self.data.ndim)-1] # print new_order # self.casa_cs.reorder(new_order) # close the ia tool ia.close() meta = {'filename': filename} mask = BooleanArrayMask(wcs, np.logical_not(valid)) cube = SpectralCube(data, wcs, mask, meta=meta) return cube
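# Minimal usage sketch: this only runs inside a CASA session (where the
# `taskinit` ia tool is importable); 'example.image' is a hypothetical CASA
# image directory.
cube = load_casa_image('example.image')
print(cube.shape)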
from __future__ import print_function
# TODO: put this in a package
execfile("/Users/adam/repos/w51evlareductionscripts/singledish_combine/singledish_combine.py")
from spectral_cube import SpectralCube

pairs = [
    ('CH3CN', 'CH3CN54_hires_lmv.fits',
     'SgrB2_a_03_7M.CH3CN_5-4_3.image.pbcor.fits', 91.98705*1e9),  # 91971465000.0),
    ('H41a', 'H41a_lmv.fits',
     'SgrB2_a_03_7M.H41a.image.pbcor.fits', 92034434000.0),
]

for species, iram, aca, restfrq in pairs:
    print(species)
    iramcube = SpectralCube.read(iram)
    acacube = SpectralCube.read(aca)
    mc = iramcube.with_spectral_unit(u.km/u.s, rest_value=restfrq*u.Hz,
                                     velocity_convention='radio')
    acac = acacube.with_spectral_unit(u.km/u.s, rest_value=restfrq*u.Hz,
                                      velocity_convention='radio')
    h1 = acacube.header
    jyk = (1*u.Jy).to(u.K,
                      u.brightness_temperature(h1['BMAJ']*u.deg *
                                               h1['BMIN']*u.deg * 2 * np.pi,
                                               h1['RESTFRQ']*u.Hz))
    combcube, f2 = fourier_combine_cubes(acac, mc,
                                         highresscalefactor=jyk.value,
                                         lowresfwhm=0.30*u.arcmin,
                                         return_regridded_cube2=True)
    f2.writeto("Regridded_" + iram, clobber=True)
    f1 = fits.open(aca)
from astropy.io import fits from spectral_cube import SpectralCube from astropy import coordinates, units as u centerN = coordinates.SkyCoord('17:47:19.877', '-28:22:18.39', frame='fk5', unit=(u.hour, u.deg)) centerM = coordinates.SkyCoord('17:47:20.166', '-28:23:04.68', frame='fk5', unit=(u.hour, u.deg)) for spw in (0, 1, 3, 2): print("Extracting spw{0} M".format(spw)) cube = SpectralCube.read('full_SgrB2M_spw{0}_lines.fits'.format(spw)) center_M = centerM.transform_to(getattr(coordinates, cube.wcs.wcs.radesys)) cx, cy = map( int, cube.wcs.celestial.wcs_world2pix(centerM.ra.deg, centerM.dec.deg, 0)) cutout_M = cube[:, cy - 250:cy + 250, cx - 250:cx + 250] cutout_M.write("full_SgrB2M_spw{0}_lines_cutoutM.fits".format(spw), overwrite=True) print("Extracting spw{0} N".format(spw)) cube = SpectralCube.read('full_SgrB2N_spw{0}_lines.fits'.format(spw)) center_N = centerN.transform_to(getattr(coordinates, cube.wcs.wcs.radesys)) cx, cy = map( int, cube.wcs.celestial.wcs_world2pix(centerN.ra.deg, centerN.dec.deg, 0)) cutout_N = cube[:, cy - 300:cy + 300, cx - 300:cx + 300]
                         0.04,  # 0.05,
                         0.06,  # 0.08,
                         0.10,  # 0.15,
                         0.20,  # 0.30,
                         0.40,
                         ],
               colors=['w'] * 12, layer='alma_cont_hires')
F.save(paths.fpath("NACO_green_outflows_aplpy_CONTours_hires.png"))
F.save(paths.fpath("NACO_green_outflows_aplpy_CONTours_hires.pdf"))

h77a = SpectralCube.read(paths.vpath('data/W51north_H77_Outflow_cutout.fits'))
h77a_outflow = h77a.spectral_slab(-16 * u.km / u.s,
                                  -60 * u.km / u.s).sum(axis=0)
h77a_green = paths.dpath('W51_H77a_LacyJetOutflow_Sum.fits')
h77a_outflow.hdu.writeto(h77a_green, clobber=True)
F.show_contour(h77a_green,
               levels=[0.0075, 0.015, 0.030],
               colors=['b'] * 6,
               layer='h77a_outflow')
F.save(paths.fpath("NACO_green_outflows_aplpy_CONTours_hires_h77acontour.png"))
F.save(paths.fpath("NACO_green_outflows_aplpy_CONTours_hires_h77acontour.pdf"))
F.hide_layer('h77a_outflow')
F.hide_layer('alma_cont_hires')
from astropy import units as u import paths from constants import distance from astropy import convolution import radio_beam #p303 = paths.dpath('w51_H2CO_303_202_contsub.image.pbcor.fits') #p321 = paths.dpath('w51_H2CO_321_220_contsub.image.pbcor.fits') p303 = paths.dpath('merge/W51_b6_7M_12M.H2CO303_202.regrid_medsub.fits') p321 = paths.dpath('merge/W51_b6_7M_12M.H2CO321_220.regrid_medsub.fits') p322 = paths.dpath('merge/W51_b6_7M_12M.H2CO322_221.regrid_medsub.fits') beam_size_goal = 0.4*u.arcsec # change to 0.7 if using natural if os.path.exists(p303) and os.path.exists(p321) and os.path.exists(p322): cube303 = SpectralCube.read(p303) cube321 = SpectralCube.read(p321) cube322 = SpectralCube.read(p322) else: p303_ = paths.dpath('merge/W51_b6_7M_12M.H2CO303_202.image.pbcor.fits') p321_ = paths.dpath('merge/W51_b6_7M_12M.H2CO321_220.image.pbcor.fits') p322_ = paths.dpath('merge/W51_b6_7M_12M.H2CO322_221.image.pbcor.fits') cube303 = SpectralCube.read(p303_).with_spectral_unit(u.km/u.s, velocity_convention='radio').spectral_slab(25*u.km/u.s, 90*u.km/u.s) min_slices = cube303.subcube_slices_from_mask(cube303.mask, spatial_only=True) cube321 = SpectralCube.read(p321_).with_spectral_unit(u.km/u.s, velocity_convention='radio').spectral_slab(25*u.km/u.s, 90*u.km/u.s) cube322 = SpectralCube.read(p322_).with_spectral_unit(u.km/u.s, velocity_convention='radio').spectral_slab(25*u.km/u.s, 90*u.km/u.s)
): for field in "G353.41 G008.67 G337.92 W51-E W43-MM3 G328.25 G351.77 W43-MM1 G010.62 W51-IRS2 G012.80 G333.60 W43-MM2 G327.29 G338.93".split( ): for spw in (0, 1, 2, 3, 4, 5, 6, 7): filename = ftemplate.format( spw=spw, field=field, band=band, array=array, suffix=suffix, robust=robust, ) if os.path.exists(filename): cube = SpectralCube.read(filename, use_dask=True) else: log.exception("File {0} does not exist".format(filename)) if os.path.exists(filename[:-5]): log.exception("But {0} does!!!!".format(filename[:-5])) continue for operation in ('mean', 'max', 'median'): out_fn = f'spectra/{field}_{array}_{band}_spw{spw}_robust{robust}{suffix}.{operation}spec.fits' if overwrite or not os.path.exists(out_fn): spec = getattr(cube, operation)(axis=(1, 2)) #spec = cube.apply_numpy_function(getattr(np, 'nan'+operation), # axis=(1,2), # progressbar=True, # projection=True, # how='slice',
def _parse_hdu(app, hdulist, file_name=None):
    if file_name is None:
        if hasattr(hdulist, 'file_name'):
            file_name = hdulist.file_name

    file_name = file_name or "Unknown HDU object"

    # WCS may only exist in a single extension (in this case, just the flux
    # extension), so first find and store the wcs information.
    wcs = None
    for hdu in hdulist:
        if hdu.data is None or not hdu.is_image:
            continue
        try:
            sc = SpectralCube.read(hdu, format='fits')
        except (ValueError, FITSReadError):
            continue
        else:
            wcs = sc.wcs

    # Now loop through and attempt to parse the fits extensions as spectral
    # cube object. If the wcs fails to parse in any case, use the wcs
    # information we scraped above.
    for hdu in hdulist:
        data_label = f"{file_name}[{hdu.name}]"

        if hdu.data is None or not hdu.is_image:
            continue

        # This is supposed to fail on attempting to load anything that
        # isn't cube-shaped. But it's not terribly reliable
        try:
            sc = SpectralCube.read(hdu, format='fits')
        except (ValueError, OSError):
            # This will fail if the parsing of the wcs does not provide
            # proper celestial axes
            try:
                hdu.header.update(wcs.to_header())
                sc = SpectralCube.read(hdu)
            except (ValueError, AttributeError) as e:
                logging.warning(e)
                continue
        except FITSReadError as e:
            logging.warning(e)
            continue

        app.add_data(sc, data_label)

        # If the data type is some kind of integer, assume it's the mask/dq
        if hdu.data.dtype in (np.int_, np.uint, np.uint32) or \
                any(x in hdu.name.lower() for x in EXT_TYPES['mask']):
            app.add_data_to_viewer('mask-viewer', data_label)

        if 'errtype' in [x.lower() for x in hdu.header.keys()] or \
                any(x in hdu.name.lower() for x in EXT_TYPES['uncert']):
            app.add_data_to_viewer('uncert-viewer', data_label)

        if any(x in hdu.name.lower() for x in EXT_TYPES['flux']):
            app.add_data_to_viewer('flux-viewer', data_label)
            app.add_data_to_viewer('spectrum-viewer', data_label)
def flux_hist(finaliter_prefix_b3, finaliter_prefix_b6, basepath='/home/adam/work/alma-imf/reduction/', las=None): image_b3 = SpectralCube.read(f'{finaliter_prefix_b3}.image.tt0.fits', use_dask=False, format='fits').minimal_subcube() image_b6 = SpectralCube.read(f'{finaliter_prefix_b6}.image.tt0.fits', use_dask=False, format='fits').minimal_subcube() image_b3 = image_b3 * u.beam / image_b3.beam.sr image_b6 = image_b6 * u.beam / image_b6.beam.sr fieldname = os.path.basename(finaliter_prefix_b6).split("_")[0] if las: smb3 = image_b3[0].convolve_to(radio_beam.Beam(las), allow_huge=True) image_b3 = image_b3 - smb3 smb6 = image_b6[0].convolve_to(radio_beam.Beam(las), allow_huge=True) image_b6 = image_b6 - smb6 noise_region_b3 = regions.read_ds9( f"{basepath}/reduction/noise_estimation_regions/{fieldname}_B3_noise_sampling.reg" ) noise_region_b6 = regions.read_ds9( f"{basepath}/reduction/noise_estimation_regions/{fieldname}_B6_noise_sampling.reg" ) noiseim_b3 = image_b3.subcube_from_regions(noise_region_b3)[0] noiseim_b6 = image_b6.subcube_from_regions(noise_region_b6)[0] b3_std = stats.mad_std(noiseim_b3, ignore_nan=True) b6_std = stats.mad_std(noiseim_b6, ignore_nan=True) print(fieldname, b3_std, b6_std) fig = pl.figure(2, figsize=(12, 7)) fig.clf() ax = pl.subplot(1, 2, 1) b3data = image_b3[0].value bins_b3 = np.linspace(np.nanmin(b3data), np.nanmax(b3data), 100) bins_b3b = np.linspace(np.nanmin(b3data), np.nanmax(b3data), 10000) H, L, P = ax.hist(b3data[np.isfinite(b3data)], bins=bins_b3, density=False) #ax.hist(noiseim_b3.value.ravel(), bins=bins_b3) ax.set_yscale('log') ax.set_ylim(0.5, ax.get_ylim()[1]) ax.plot(bins_b3b, H.max() * np.exp(-bins_b3b**2 / (2 * b3_std.value**2)), 'k') ax.set_xlabel("$S_{3mm}$ [Jy/sr]") ax.set_ylabel("Number of Pixels") axin = fig.add_axes([0.25, 0.6, 0.20, 0.25]) bins = np.linspace(-5 * b3_std.value, 5 * b3_std.value, 100) H, L, P = axin.hist(b3data[(b3data < 5 * b3_std.value) & (b3data > -5 * b3_std.value)], bins=bins, density=False) #axin.hist(noiseim_b3.value.ravel(), bins=bins) gauss = H.max() * np.exp(-bins**2 / (2 * b3_std.value**2)) axin.plot(bins, gauss, 'k') axin.set_xticklabels([]) axin.set_yticks(axin.get_yticks()[1:]) axin2 = fig.add_axes([0.25, 0.5, 0.2, 0.1]) loc = (L[1:] + L[:-1]) / 2 axin2.plot(loc, H - H.max() * np.exp(-loc**2 / (2 * b3_std.value**2)), drawstyle='steps', color='k') axin2.set_xlim(axin.get_xlim()) ax = pl.subplot(1, 2, 2) b6data = image_b6[0].value bins_b6 = np.linspace(np.nanmin(b6data), np.nanmax(b6data), 100) bins_b6b = np.linspace(np.nanmin(b6data), np.nanmax(b6data), 10000) H, L, P = ax.hist(b6data[np.isfinite(b6data)], bins=bins_b6, density=False) ax.plot(bins_b6b, H.max() * np.exp(-bins_b6b**2 / (2 * b6_std.value**2)), 'k') #ax.hist(noiseim_b6.value.ravel(), bins=bins_b6) ax.set_ylim(0.5, ax.get_ylim()[1]) ax.set_yscale('log') ax.yaxis.set_label_position("right") ax.yaxis.tick_right() ax.set_xlabel("$S_{1mm}$ [Jy/sr]") ax.set_ylabel("Number of Pixels") axin = fig.add_axes([0.65, 0.6, 0.20, 0.25]) bins = np.linspace(-5 * b6_std.value, 5 * b6_std.value, 100) H, L, P = axin.hist(b6data[(b6data < 5 * b6_std.value) & (b6data > -5 * b6_std.value)], bins=bins, density=False) #axin.hist(noiseim_b6.value.ravel(), bins=bins) axin.plot(bins, H.max() * np.exp(-bins**2 / (2 * b6_std.value**2)), 'k') axin.set_xticklabels([]) axin.set_yticks(axin.get_yticks()[1:]) axin2 = fig.add_axes([0.65, 0.5, 0.2, 0.1]) loc = (L[1:] + L[:-1]) / 2 axin2.plot(loc, H - H.max() * np.exp(-loc**2 / (2 * b6_std.value**2)), drawstyle='steps', color='k') 
axin2.set_xlim(axin.get_xlim())
def subtract_components(cube_name, remove_params_name, output_name, chunk_size=20000): ''' Subtract Gaussian components given in remove_params_name ''' with fits.open(remove_params_name) as params_hdu: params_array = params_hdu[0].data # Create huge fits ncomp_array = np.isfinite(params_array).sum(0) // 3 cube = SpectralCube.read(cube_name) assert cube.shape[1:] == params_array.shape[1:] # yposn, xposn = np.where(ncomp_array > 0) yposn, xposn = np.indices(cube.shape[1:]) vels = cube.spectral_axis.to(u.m / u.s) vels_val = vels.value yshape, xshape = ncomp_array.shape basename = os.path.basename(cube_name) # Create the output cube. from cube_analysis.io_utils import create_huge_fits create_huge_fits(output_name, cube.header, fill_nan=True) del cube # evaluate and subtract all components hdu = fits.open(output_name, mode='update') cube_hdu = fits.open(cube_name, mode='denywrite') for i, (y, x) in tqdm(enumerate(zip(yposn.ravel(), xposn.ravel())), ascii=True, desc=f"Model eval. for: {basename[:15]}", total=yposn.size): # Reload cube to release memory if i % chunk_size == 0: hdu.flush() hdu.close() del hdu hdu = fits.open(output_name, mode='update') cube_hdu.close() del cube_hdu cube_hdu = fits.open(cube_name, mode='denywrite') # No components = no change if ncomp_array[y, x] == 0: hdu[0].data[:, y, x] = cube_hdu[0].data[:, y, x] continue pars = params_array[:, y, x][np.isfinite(params_array[:, y, x])] hdu[0].data[:, y, x] = cube_hdu[0].data[:, y, x] - \ multigaussian_nolmfit(vels_val, pars) hdu.flush() hdu.close() del hdu cube_hdu.close() del cube_hdu
fifteenA_HI_BCtaper_04kms_data_wEBHIS_path("braun09_subcubes") + "/*.fits") fifteenAtapercubes.sort() noise_val = 2.8 * u.K # K # Change to data directory os.chdir( fifteenA_HI_BCtaper_04kms_data_wEBHIS_path("braun09_subcubes", no_check=True)) for i, subcube_name in enumerate(fifteenAtapercubes[::-1]): subcube_filename = os.path.split(subcube_name)[-1][:-5] subcube = SpectralCube.read(subcube_name) # Smooth to 25" # new_beam = Beam(25 * u.arcsec) new_beam = subcube.beams.common_beam() subcube = subcube.convolve_to(new_beam) # Downsample the spectral resolution to 2.3 km/s chan_width = np.diff(subcube.spectral_axis)[0] # target = 2.3 * u.km / u.s # target = chan_width * 2 # spec_kern = Gaussian1DKernel(np.sqrt((target / 2.)**2 - chan_width**2).value) # subcube = subcube.spectral_smooth(spec_kern, verbose=True, # num_cores=4) # New spectral axis
m0fn = dpath("moments/{0}_medsub_moment0.fits".format(fname)) m1fn = dpath("moments/{0}_medsub_moment1.fits".format(fname)) m2fn = dpath("moments/{0}_medsub_moment2.fits".format(fname)) madstdfn = dpath("moments/{0}_medsub_madstd.fits".format(fname)) maxfn = dpath("moments/{0}_medsub_max.fits".format(fname)) argmaxfn = dpath("moments/{0}_medsub_argmax.fits".format(fname)) if os.path.exists(m0fn) and os.path.exists(madstdfn): m0 = load_projection(m0fn) m1 = load_projection(m1fn) m2 = load_projection(m2fn) pmax = load_projection(maxfn) madstd = load_projection(madstdfn) # argmaxfn = dpath("moments/{0}_medsub_argmax.fits".format(fname)) else: cube = SpectralCube.read(dpath(fn)).minimal_subcube() vcube = cube.with_spectral_unit(u.km / u.s, velocity_convention='radio') vcube.beam_threshold = 100 pct = 50 for key in cont_percentiles: if key in fname: pct = cont_percentiles[key] contmasked_cube = vcube.with_mask( ((vcube.spectral_axis < 35 * u.km / u.s) | (vcube.spectral_axis > 80 * u.km / u.s))[:, None, None]) med = contmasked_cube.percentile(pct, axis=0) vcube = vcube.spectral_slab(50 * u.km / u.s, 65 * u.km / u.s)
def make_comparison_image(filename1, filename2, title1='bsens', title2='cleanest', writediff=False, allow_reproj=False, nticks=12, asinh_scaling_factor=10, scalebarlength=15, diff_suffix='.preselfcal-diff'): #fh_pre = fits.open() #fh_post = fits.open() cube_pre = SpectralCube.read(filename1, format='fits' if 'fits' in filename1 else 'casa_image').with_spectral_unit(u.GHz) cube_post = SpectralCube.read(filename2, format='fits' if 'fits' in filename2 else 'casa_image').with_spectral_unit(u.GHz) if 'pbcor' in filename1: assert 'pbcor' in filename2 if 'pbcor' in filename2: assert 'pbcor' in filename1 if allow_reproj: if cube_pre.shape != cube_post.shape or (cube_post.wcs != cube_pre.wcs and cube_post.wcs.wcs != cube_pre.wcs.wcs): cube_post = cube_post.reproject(cube_pre.header) cube_pre = cube_pre.with_mask(cube_pre != 0*cube_pre.unit) cube_post = cube_post.with_mask(cube_post != 0*cube_post.unit) slices = cube_pre.subcube_slices_from_mask(cube_pre.mask & cube_post.mask, spatial_only=True)[1:] # make the cubes match the data; needed for later WCS cutouts cube_pre = cube_pre[:, slices[0], slices[1]] cube_post = cube_post[:, slices[0], slices[1]] #cube_pre = cube_pre.minimal_subcube() #cube_post = cube_post.minimal_subcube() data_pre = cube_pre[0].value * 1e3 data_post = cube_post[0].value * 1e3 #data_pre[np.abs(data_pre) < 1e-7] = np.nan #data_post[np.abs(data_post) < 1e-7] = np.nan try: diff = (data_post - data_pre) except Exception as ex: print(filename1, filename2, cube_pre.shape, cube_post.shape) raise ex ww = cube_post.wcs pixscale = wcs.utils.proj_plane_pixel_area(ww)*u.deg**2 try: beam = cube_post.beam ppbeam = (beam.sr / pixscale).decompose() assert ppbeam.unit.is_equivalent(u.dimensionless_unscaled) ppbeam = ppbeam.value except NoBeamError: beam = np.nan*u.sr ppbeam = np.nan if writediff: fits.PrimaryHDU(data=diff, header=cube_post.header).writeto(filename2.split(".fits")[0] + diff_suffix + ".fits", overwrite=True) fig = pl.figure(1, figsize=(14,6)) fig.clf() if fig.get_figheight() != 6: fig.set_figheight(6) if fig.get_figwidth() != 14: fig.set_figwidth(14) data_pre_display = np.arcsinh(data_pre*asinh_scaling_factor) data_post_display = np.arcsinh(data_post*asinh_scaling_factor) diff_display = np.arcsinh(diff*asinh_scaling_factor) minv = np.nanpercentile(data_pre_display, 0.05) maxv = np.nanpercentile(data_pre_display, 99.5) if maxv > np.arcsinh(1000): maxv = np.arcsinh(1000) if np.abs(minv) > maxv: minv = -maxv norm = visualization.simple_norm(data=diff_display.squeeze(), stretch='linear', #min_percent=0.05, max_percent=99.995,) min_cut=minv, max_cut=maxv) #cm = pl.matplotlib.cm.gray #cm.set_bad('white', 0) cm = pl.matplotlib.cm.viridis ax1 = pl.subplot(1,3,1) ax2 = pl.subplot(1,3,2) ax3 = pl.subplot(1,3,3) for ax in (ax1,ax2,ax3): ax.cla() ax1.imshow(data_pre_display, norm=norm, origin='lower', interpolation='nearest', cmap=cm) ax1.set_title(title1) # scalebar ww = cube_pre.wcs.celestial cd = (ww.pixel_scale_matrix[1,1] * 3600) blc = np.array(diff.shape)*0.1 ax1.add_patch(matplotlib.patches.Rectangle([blc[1]*0.8, blc[0]*0.9], width=scalebarlength/cd*1.4, height=blc[0]*0.6, edgecolor='k', facecolor='w', alpha=0.5)) ax1.plot([blc[1], blc[1]+scalebarlength/cd], [blc[0], blc[0]], color='k') tx = ax1.annotate(f'{scalebarlength}"', (blc[1]+scalebarlength/2/cd, blc[0]*1.1)) tx.set_horizontalalignment('center') ax2.imshow(data_post_display, norm=norm, origin='lower', interpolation='nearest', cmap=cm) ax2.set_title(title2) im = ax3.imshow(diff_display.squeeze(), norm=norm, origin='lower', 
interpolation='nearest', cmap=cm) ax3.set_title(f"{title2} - {title1}") for ax in (ax1,ax2,ax3): ax.set_xticks([]) ax.set_yticks([]) pl.subplots_adjust(wspace=0.0) cbax = fig.add_axes([0.91,0.18,0.03,0.64]) cb = fig.colorbar(cax=cbax, mappable=im) cb.set_label("S$_\\nu$ [mJy/beam]") mn,mx = cb.get_ticks().min(), cb.get_ticks().max() ticklocs = np.concatenate([np.linspace(-norm.vmax, 0, nticks//2)[:-1], np.linspace(0, norm.vmax, nticks//2)]) ticks = np.sinh(ticklocs) cb.update_normal(im) cb.set_ticks(ticks) ticklocs = cb.get_ticks() ticklabels = [f"{np.sinh(x/asinh_scaling_factor):0.2f}" for x in ticklocs] cb.set_ticklabels(ticklabels) meta = parse_fn(filename1) reg = get_noise_region(meta['region'], meta['band']) if reg is not None: reglist = regions.read_ds9(reg) composite_region = reduce(operator.or_, reglist) if hasattr(composite_region, 'to_mask'): msk = composite_region.to_mask() else: preg = composite_region.to_pixel(cube_pre.wcs.celestial) msk = preg.to_mask() cutout_pixels_pre = msk.cutout(data_pre, fill_value=np.nan)[msk.data.astype('bool')] mad_sample_pre = mad_std(cutout_pixels_pre, ignore_nan=True) std_sample_pre = np.nanstd(cutout_pixels_pre) if hasattr(composite_region, 'to_mask'): msk = composite_region.to_mask() else: preg = composite_region.to_pixel(cube_post.wcs.celestial) msk = preg.to_mask() cutout_pixels_post = msk.cutout(data_post, fill_value=np.nan)[msk.data.astype('bool')] mad_sample_post = mad_std(cutout_pixels_post, ignore_nan=True) std_sample_post = np.nanstd(cutout_pixels_post) if np.any(np.isnan(mad_sample_pre)): log.warning("mad_sample_pre contains some NaN values") if np.any(np.isnan(mad_sample_post)): log.warning("mad_sample_post contains some NaN values") if len(cutout_pixels_post) != len(cutout_pixels_pre): log.warning(f"cutout pixels are different size in pre vs post ({filename1} : {filename2})") if (cube_pre.wcs.celestial != cube_post.wcs.celestial) and (cube_pre.wcs.celestial.wcs != cube_post.wcs.celestial.wcs): # wcs comparisons stopped working sometime in 2019-2020 - wcs.wcs comparisons appear to work? 
log.warning(f"post and pre have different celestial WCSes ({filename1} : {filename2})") if not np.isfinite(mad_sample_pre): raise ValueError mad_pre = mad_std(data_pre, ignore_nan=True) mad_post = mad_std(data_post, ignore_nan=True) mad_diff = mad_std(diff, ignore_nan=True) diffmask = np.abs(diff) > 3*mad_diff history = cube_post.header['HISTORY'] hasamp = any("'calmode': 'ap'" in x for x in history) or any("'calmode': 'a'" in x for x in history) diffstats = {'mean': np.nanmean(diff), 'max': np.nanmax(diff), 'shape': diff.shape[0], 'ppbeam': ppbeam, 'sum': np.nansum(diff), 'masksum': diff[diffmask].sum(), 'min': np.nanmin(diff), 'median': np.nanmedian(diff), 'mad': mad_diff, 'dr_pre': np.nanmax(data_pre) / mad_std(data_pre, ignore_nan=True), 'dr_post': np.nanmax(data_post) / mad_std(data_post, ignore_nan=True), 'min_pre': np.nanmin(data_pre), 'min_post': np.nanmin(data_post), 'max_pre': np.nanmax(data_pre), 'max_post': np.nanmax(data_post), 'sum_pre': np.nansum(data_pre), 'sum_post': np.nansum(data_post), 'masksum_pre': (data_pre[data_pre > mad_pre*3]).sum(), 'masksum_post': (data_post[data_post > mad_post*3]).sum(), 'mad_pre': mad_pre, 'mad_post': mad_post, 'mad_sample_pre': np.nan, 'mad_sample_post': np.nan, 'std_sample_pre': np.nan, 'std_sample_post': np.nan, 'has_amp': hasamp, } if reg is not None: diffstats.update({ 'mad_sample_pre': mad_sample_pre, 'mad_sample_post': mad_sample_post, 'std_sample_pre': std_sample_pre, 'std_sample_post': std_sample_post, }) return ax1, ax2, ax3, fig, diffstats
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VELO']
mywcs.wcs.cunit = ['deg', 'deg', 'm/s']

# Create a synthetic X-dimension in km/s
xarr = np.linspace(50, 70, 41)  # km/s

# Define a line width, which will vary across our image
# It will increase from 1 km/s to 1.5 km/s over the X-direction (RA)
sigma = np.outer(np.linspace(1, 1.5, 2), np.ones(4)).T

# Define a line center, which will vary in the opposite direction,
# along increasing Y-direction (declination)
centroid = np.outer(np.ones(2), np.linspace(58, 62, 4)).T

data = np.exp(-(np.tile(xarr, (2, 4, 1)).T - centroid)**2 / (2. * sigma**2))

cube = SpectralCube(data=data, wcs=mywcs)

# Sanity checks: do the moments accurately recover the inputs?
assert np.abs(cube.moment1().to(u.km / u.s).value - centroid).max() < 1e-5
assert np.abs(cube.moment2().to(u.km**2 / u.s**2).value - sigma**2).max() < 1e-5

# Create a pyspeckit cube
pcube = pyspeckit.Cube(cube=cube)

# For convenience, convert the X-axis to km/s
# (WCSLIB automatically converts to m/s even if you give it km/s)
pcube.xarr.convert_to_unit(u.km / u.s)

# Set up the fitter by doing a preliminary fit
pcube.specfit(fittype='gaussian', guesses='moments')
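# A possible next step (a sketch, not part of the original script): fit every
# pixel of the synthetic cube with pyspeckit's fiteach, seeding it from the
# preliminary single-spectrum fit above.  The keyword choices here are
# illustrative and assume the usual pyspeckit Cube API.
pcube.fiteach(fittype='gaussian',
              guesses=pcube.specfit.parinfo.values,  # [amplitude, centroid, width]
              start_from_point=(0, 0),               # hypothetical starting pixel
              multicore=1)
# The fitted parameter planes are then available in pcube.parcube,
# e.g. the centroid map is pcube.parcube[1, :, :].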
import pylab as pl
pl.close('all')

# Angular sizes of 1-4 pc apertures at 8.4 kpc.  The original call was left
# incomplete ('.to' with no arguments); converting to arcsec is assumed here.
apertures = ((1, 2, 3, 4) * u.pc / (8.4 * u.kpc)).to(u.arcsec,
                                                     u.dimensionless_angles())
velo = 62.5 * u.km / u.s

species_names = list(restfreqs.keys())
frequencies = u.Quantity([restfreqs[key] for key in species_names], unit=u.GHz)

for fn in glob.glob(paths.Fpath('tp/tp_concat*fits')):
    basefn = os.path.basename(fn)
    outf = paths.Fpath('tp/avgspectra/avg_{0}'.format(basefn))
    if not os.path.exists(outf):
        cube = SpectralCube.read(fn)
        cube.allow_huge_operations = True
        mad = cube.mad_std(axis=0)
        # avoid shadowing the built-in sum()
        weighted_sum = (cube * mad).sum(axis=(1, 2))
        mean = weighted_sum / np.nansum(mad)
        mean.write(paths.Fpath(
            'tp/avgspectra/weighted_avg_{0}'.format(basefn)), overwrite=True)
        mn = cube.mean(axis=(1, 2))
        mn.write(outf, overwrite=True)

for fn in glob.glob(paths.Fpath('tp/tp_concat*fits')):
    basefn = os.path.basename(fn)
    #spw = pyspeckit.Spectrum('avspec/weighted_avg_{0}'.format(fn))
    sp = pyspeckit.Spectrum(paths.Fpath(
        'tp/avgspectra/avg_{0}'.format(basefn)))
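# Sketch (for clarity, not from the original script): per channel, the
# mad-weighted average above is just numpy.average over the spatial pixels
# with the per-pixel MAD as the weight.  Demonstrated on plain arrays:
import numpy as np
demo_cube = np.random.rand(10, 5, 5)   # hypothetical (nchan, ny, nx) data
demo_mad = np.random.rand(5, 5)        # hypothetical per-pixel noise map
demo_weighted = (demo_cube * demo_mad).sum(axis=(1, 2)) / np.nansum(demo_mad)
demo_check = np.array([np.average(plane, weights=demo_mad)
                       for plane in demo_cube])
assert np.allclose(demo_weighted, demo_check)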
def make_comparison_image(filename1, filename2, title1='bsens', title2='cleanest', writediff=False, allow_reproj=False): #fh_pre = fits.open() #fh_post = fits.open() cube_pre = SpectralCube.read(filename1, format='fits' if 'fits' in filename1 else 'casa_image').with_spectral_unit(u.GHz) cube_post = SpectralCube.read(filename2, format='fits' if 'fits' in filename2 else 'casa_image').with_spectral_unit(u.GHz) if 'pbcor' in filename1: assert 'pbcor' in filename2 if 'pbcor' in filename2: assert 'pbcor' in filename1 if allow_reproj: if cube_pre.shape != cube_post.shape or ( cube_post.wcs != cube_pre.wcs and cube_post.wcs.wcs != cube_pre.wcs.wcs): cube_post = cube_post.reproject(cube_pre.header) cube_pre = cube_pre.with_mask(cube_pre != 0 * cube_pre.unit) cube_post = cube_post.with_mask(cube_post != 0 * cube_post.unit) slices = cube_pre.subcube_slices_from_mask(cube_pre.mask & cube_post.mask, spatial_only=True)[1:] # make the cubes match the data; needed for later WCS cutouts cube_pre = cube_pre[:, slices[0], slices[1]] cube_post = cube_post[:, slices[0], slices[1]] #cube_pre = cube_pre.minimal_subcube() #cube_post = cube_post.minimal_subcube() data_pre = cube_pre[0].value data_post = cube_post[0].value data_pre[np.abs(data_pre) < 1e-7] = np.nan data_post[np.abs(data_post) < 1e-7] = np.nan try: diff = (data_post - data_pre) except Exception as ex: print(filename1, filename2, cube_pre.shape, cube_post.shape) raise ex ww = cube_post.wcs beam = cube_post.beam pixscale = wcs.utils.proj_plane_pixel_area(ww) * u.deg**2 ppbeam = (beam.sr / pixscale).decompose() assert ppbeam.unit.is_equivalent(u.dimensionless_unscaled) ppbeam = ppbeam.value if writediff: fits.PrimaryHDU(data=diff, header=cube_post.header).writeto( filename2.split(".fits")[0] + ".preselfcal-diff.fits", overwrite=True) fig = pl.figure(1, figsize=(14, 6)) fig.clf() if fig.get_figheight() != 6: fig.set_figheight(6) if fig.get_figwidth() != 14: fig.set_figwidth(14) minv = np.nanpercentile(data_pre, 0.05) maxv = np.nanpercentile(data_pre, 99.5) if np.abs(minv) > maxv: minv = -maxv norm = visualization.simple_norm( data=diff.squeeze(), stretch='asinh', #min_percent=0.05, max_percent=99.995,) min_cut=minv, max_cut=maxv) if norm.vmax < 0.001: norm.vmax = 0.001 cm = pl.matplotlib.cm.gray cm.set_bad('white', 0) ax1 = pl.subplot(1, 3, 1) ax2 = pl.subplot(1, 3, 2) ax3 = pl.subplot(1, 3, 3) for ax in (ax1, ax2, ax3): ax.cla() ax1.imshow(data_pre, norm=norm, origin='lower', interpolation='nearest', cmap=cm) ax1.set_title(title1) ax2.imshow(data_post, norm=norm, origin='lower', interpolation='nearest', cmap=cm) ax2.set_title(title2) im = ax3.imshow(diff.squeeze(), norm=norm, origin='lower', interpolation='nearest', cmap=cm) ax3.set_title(f"{title2} - {title1}") for ax in (ax1, ax2, ax3): ax.set_xticks([]) ax.set_yticks([]) pl.subplots_adjust(wspace=0.0) cbax = fig.add_axes([0.91, 0.18, 0.03, 0.64]) fig.colorbar(cax=cbax, mappable=im) meta = parse_fn(filename1) reg = get_noise_region(meta['region'], meta['band']) if reg is not None: reglist = regions.read_ds9(reg) composite_region = reduce(operator.or_, reglist) if hasattr(composite_region, 'to_mask'): msk = composite_region.to_mask() else: preg = composite_region.to_pixel(cube_pre.wcs.celestial) msk = preg.to_mask() cutout_pixels_pre = msk.cutout( data_pre, fill_value=np.nan)[msk.data.astype('bool')] mad_sample_pre = mad_std(cutout_pixels_pre, ignore_nan=True) std_sample_pre = np.nanstd(cutout_pixels_pre) if hasattr(composite_region, 'to_mask'): msk = composite_region.to_mask() else: preg = 
composite_region.to_pixel(cube_post.wcs.celestial) msk = preg.to_mask() cutout_pixels_post = msk.cutout( data_post, fill_value=np.nan)[msk.data.astype('bool')] mad_sample_post = mad_std(cutout_pixels_post, ignore_nan=True) std_sample_post = np.nanstd(cutout_pixels_post) if np.any(np.isnan(mad_sample_pre)): log.warning("mad_sample_pre contains some NaN values") if np.any(np.isnan(mad_sample_post)): log.warning("mad_sample_post contains some NaN values") if len(cutout_pixels_post) != len(cutout_pixels_pre): log.warning( f"cutout pixels are different size in pre vs post ({filename1} : {filename2})" ) if (cube_pre.wcs.celestial != cube_post.wcs.celestial) and ( cube_pre.wcs.celestial.wcs != cube_post.wcs.celestial.wcs): # wcs comparisons stopped working sometime in 2019-2020 - wcs.wcs comparisons appear to work? log.warning( f"post and pre have different celestial WCSes ({filename1} : {filename2})" ) if not np.isfinite(mad_sample_pre): raise ValueError mad_pre = mad_std(data_pre, ignore_nan=True) mad_post = mad_std(data_post, ignore_nan=True) mad_diff = mad_std(diff, ignore_nan=True) diffmask = np.abs(diff) > 3 * mad_diff diffstats = { 'mean': np.nanmean(diff), 'max': np.nanmax(diff), 'shape': diff.shape[0], 'ppbeam': ppbeam, 'sum': np.nansum(diff), 'masksum': diff[diffmask].sum(), 'min': np.nanmin(diff), 'median': np.nanmedian(diff), 'mad': mad_diff, 'dr_pre': np.nanmax(data_pre) / mad_std(data_pre, ignore_nan=True), 'dr_post': np.nanmax(data_post) / mad_std(data_post, ignore_nan=True), 'min_pre': np.nanmin(data_pre), 'min_post': np.nanmin(data_post), 'max_pre': np.nanmax(data_pre), 'max_post': np.nanmax(data_post), 'sum_pre': np.nansum(data_pre), 'sum_post': np.nansum(data_post), 'masksum_pre': (data_pre[data_pre > mad_pre * 3]).sum(), 'masksum_post': (data_post[data_post > mad_post * 3]).sum(), 'mad_pre': mad_pre, 'mad_post': mad_post, 'mad_sample_pre': np.nan, 'mad_sample_post': np.nan, 'std_sample_pre': np.nan, 'std_sample_post': np.nan, } if reg is not None: diffstats.update({ 'mad_sample_pre': mad_sample_pre, 'mad_sample_post': mad_sample_post, 'std_sample_pre': std_sample_pre, 'std_sample_post': std_sample_post, }) return ax1, ax2, ax3, fig, diffstats
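# Hypothetical usage of make_comparison_image (the filenames below are
# placeholders, not files from this project): compare a pre- and
# post-selfcal image, save the figure, and keep the statistics.
ax1, ax2, ax3, fig, diffstats = make_comparison_image(
    'example_preselfcal.image.fits', 'example_postselfcal.image.fits',
    title1='preselfcal', title2='postselfcal', allow_reproj=True)
fig.savefig('example_preselfcal_vs_postselfcal.png', bbox_inches='tight')
print(f"dynamic range: {diffstats['dr_pre']:.1f} -> {diffstats['dr_post']:.1f}")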
plot.plot(x, y.value, drawstyle='steps') plot.set_title(trans) ''' print(f'ymin: {y.min()}') print(f'yvaluemin: {y.value.min()}') print(f'tempymin/ymin: {tempymin/y.value.min()}') ''' def contamlines(plot, contamlinelist): return for i in range(len(files)): print('Getting ready - ' + imgnames[i]) cube = sc.read(files[i]) header = fits.getheader(files[i]) freqs = cube.spectral_axis freqflip = False if freqs[0] > freqs[1]: freqs = freqs[::-1] freqflip = True print('Corrected decreasing frequency axis') else: pass freq_min = freqs[0] * (1 + z) #215*u.GHz freq_max = freqs[(len(freqs) - 1)] * (1 + z) #235*u.GHz assert freq_max > freq_min, 'Decreasing frequency axis'
import paths files = [ 'OrionSourceI_Unknown_1_robust0.5maskedclarkclean10000_medsub_K.fits', 'OrionSourceI_Unknown_2_robust0.5maskedclarkclean10000_medsub_K.fits', 'OrionSourceI_Unknown_3_robust0.5maskedclarkclean10000_medsub_K.fits', 'OrionSourceI_Unknown_4_robust0.5maskedclarkclean10000_medsub_K.fits', 'OrionSourceI_Unknown_5_robust0.5maskedclarkclean10000_medsub_K.fits', 'OrionSourceI_Unknown_8_robust0.5maskedclarkclean10000_medsub_K.fits', 'OrionSourceI_Unknown_9_robust0.5maskedclarkclean10000_medsub_K.fits', 'OrionSourceI_Unknown_10_robust0.5maskedclarkclean10000_medsub_K.fits', 'OrionSourceI_U229.682_robust0.5maskedclarkclean10000_medsub_K.fits', ] allbeamlist = [ SpectralCube.read(paths.dpath('cubes/' + fn)).beams.common_beam() for fn in files ] allbeams = radio_beam.Beams(major=u.Quantity([x.major for x in allbeamlist]), minor=u.Quantity([x.minor for x in allbeamlist]), pa=u.Quantity([x.pa for x in allbeamlist])) refbeam = allbeams.common_beam() refcube = SpectralCube.read(paths.dpath('cubes/' + files[0])) refcube = refcube.convolve_to(refbeam) stackcube = [refcube] for cubefn in files[1:]: reproj_cube = (SpectralCube.read( paths.dpath('cubes/' +
import numpy as np
from paths import hpath, mpath, fpath
from astropy import log
from spectral_cube import SpectralCube, BooleanArrayMask
from masked_cubes import (cube303m, cube321m, cube303msm, cube321msm,
                          cube303, cube321, cube303sm, cube321sm,
                          sncube, sncubesm)

for suffix in ("", "_smooth"):
    outpath = 'TemperatureCube_DendrogramObjects{0}.fits'
    tcube = SpectralCube.read(hpath(outpath.format(suffix)))
    outpath_leaf = 'TemperatureCube_DendrogramObjects{0}_leaves.fits'
    tcubeleaf = SpectralCube.read(hpath(outpath_leaf.format(suffix)))

    integ = tcube.mean(axis=0)
    integ.hdu.writeto(hpath(outpath.format(suffix)).replace(
        ".fits", "_integ.fits"), overwrite=True)
    integleaf = tcubeleaf.mean(axis=0)
    integleaf.hdu.writeto(hpath(outpath_leaf.format(suffix)).replace(
        ".fits", "_integ.fits"), overwrite=True)

    hdu_template = integ.hdu

    log.info("Writing Weighted Integrated TemperatureCube")
    tcubed = tcube.filled_data[:].value
    weight_cube = cube303sm if 'smooth' in suffix else cube303
    weights = weight_cube.filled_data[:].value
    weights[weights < 0] = 0
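# A sketch of how the weights above could be used (an assumption, not
# necessarily the original continuation of this script): an intensity-weighted
# mean temperature along the spectral axis, written with the template header.
from astropy.io import fits
weighted_integ = np.nansum(tcubed * weights, axis=0) / np.nansum(weights, axis=0)
whdu = fits.PrimaryHDU(data=weighted_integ, header=hdu_template.header)
whdu.writeto(hpath(outpath.format(suffix)).replace(".fits",
                                                   "_weighted_integ.fits"),
             overwrite=True)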
def deblend(para, specCubeRef, vmin=4.0, vmax=11.0, f_spcsamp=None,
            tau_wgt=0.1, n_cpu=None):
    '''
    Deblend hyperfine structures in a cube based on fitted models, i.e.,
    reconstruct the fitted model with Gaussian lines with optical depths
    accounted for (e.g., similar to CO (J = 1-0)).

    :param para: <ndarray>
        The fitted parameters in the order of vel, width, tex, and tau for
        each velocity slab. (Note: the size of the z axis of para must thus
        be a multiple of 4.)
    :param specCubeRef: <SpectralCube.Cube>
        The reference cube from which the deblended cube is constructed.
    :param vmin: <float>
        The lower velocity limit of the deblended cube, in km/s.
    :param vmax: <float>
        The upper velocity limit of the deblended cube, in km/s.
    :param f_spcsamp: <int>
        The scaling factor for the spectral sampling relative to the
        reference cube (e.g., f_spcsamp = 2 gives twice the spectral
        resolution).
    :param tau_wgt:
        The scaling factor for the input tau (e.g., tau_wgt = 0.1 better
        represents the true optical depth of an NH3 (1,1) hyperfine group
        than the "fitted tau").
    :param n_cpu: <int>
        The number of cpus to use. If None, defaults to all the cpus
        available minus one.
    :return mcube: <SpectralCube.Cube>
        The deblended cube.
    '''
    # open the reference cube file
    cube = specCubeRef
    cube = cube.with_spectral_unit(u.km / u.s, velocity_convention='radio')

    # trim the cube to the specified velocity range
    cube = cube.spectral_slab(vmin * u.km / u.s, vmax * u.km / u.s)

    # generate an empty SpectralCube to house the deblended cube
    if f_spcsamp is None:
        deblend = np.zeros(cube.shape)
        hdr = cube.wcs.to_header()
        wcs_new = cube.wcs
    else:
        deblend = np.zeros((cube.shape[0] * int(f_spcsamp),
                            cube.shape[1], cube.shape[2]))
        wcs_new = cube.wcs.deepcopy()
        # adjust the spectral reference value
        wcs_new.wcs.crpix[2] = wcs_new.wcs.crpix[2] * int(f_spcsamp)
        # adjust the spectral channel width
        wcs_new.wcs.cdelt[2] = wcs_new.wcs.cdelt[2] / int(f_spcsamp)
        hdr = wcs_new.to_header()

    # retain the beam information
    hdr['BMAJ'] = cube.header['BMAJ']
    hdr['BMIN'] = cube.header['BMIN']
    hdr['BPA'] = cube.header['BPA']

    mcube = SpectralCube(deblend, wcs_new, header=hdr)

    # convert to a unit that the ammonia hf model can handle (i.e. Hz) without
    # having to create a pyspeckit.spectrum.units.SpectroscopicAxis object
    # (which runs rather slowly for model building in comparison)
    mcube = mcube.with_spectral_unit(u.Hz, velocity_convention='radio')
    xarr = mcube.spectral_axis

    yy, xx = np.indices(para.shape[1:])
    # a pixel is valid as long as it has a single finite value
    isvalid = np.any(np.isfinite(para), axis=0)
    valid_pixels = zip(xx[isvalid], yy[isvalid])

    def model_a_pixel(xy):
        x, y = int(xy[0]), int(xy[1])
        # nh3_vtau_singlemodel_deblended takes Hz as the spectral unit
        models = [nh3_deblended.nh3_vtau_singlemodel_deblended(
                      xarr, Tex=tex, tau=tau * tau_wgt, xoff_v=vel, width=width)
                  for vel, width, tex, tau in zip(para[::4, y, x],
                                                  para[1::4, y, x],
                                                  para[2::4, y, x],
                                                  para[3::4, y, x])]
        mcube._data[:, y, x] = np.nansum(np.array(models), axis=0)
        return ((x, y), mcube._data[:, y, x])

    if n_cpu is None:
        n_cpu = cpu_count() - 1
    else:
        # respect the requested number of cpus
        n_cpu = int(n_cpu)

    if n_cpu > 1:
        print("------------------ deblending cube -----------------")
        print("number of cpu used: {}".format(n_cpu))
        sequence = [(x, y) for x, y in valid_pixels]
        result = parallel_map(model_a_pixel, sequence, numcores=n_cpu)
        merged_result = [core_result for core_result in result
                         if core_result is not None]
        for mr in merged_result:
            ((x, y), model) = mr
            x = int(x)
            y = int(y)
            mcube._data[:, y, x] = model
    else:
        for xy in ProgressBar(list(valid_pixels)):
            model_a_pixel(xy)

    # convert back to km/s units before saving
    mcube = mcube.with_spectral_unit(u.km / u.s, velocity_convention='radio')
    gc.collect()
    print("--------------- deblending completed ---------------")
    return mcube
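# Hypothetical usage of deblend() (the file names and parameter-cube layout
# are placeholders): read a fitted-parameter cube and a reference NH3 (1,1)
# cube, deblend, and write the result.
from astropy.io import fits
from spectral_cube import SpectralCube

para_demo = fits.getdata('example_nh3_11_parameters.fits')   # placeholder
refcube_demo = SpectralCube.read('example_nh3_11.fits')      # placeholder
deblended = deblend(para_demo, refcube_demo, vmin=4.0, vmax=11.0,
                    f_spcsamp=2, tau_wgt=0.1, n_cpu=4)
deblended.write('example_nh3_11_deblended.fits', overwrite=True)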
#"SgrB2_b3_7M_12M_natural.H2CS322-221.image.pbcor.fits", #"SgrB2_b3_7M_12M_natural.H41a.image.pbcor.fits", #"SgrB2_b3_7M_12M_natural.HC3N.image.pbcor.fits", #"SgrB2_b3_7M_12M_natural.HCN.image.pbcor.fits", #"SgrB2_b3_7M_12M_natural.HCOp.image.pbcor.fits", #"SgrB2_b3_7M_12M_natural.HNC.image.pbcor.fits", ): suffix, species, robust = speciesre.search(interferometer_fn).groups() outfilename = ('{species}{suffix}{robust}_TP_7m_12m_feather.fits'.format( species=species, suffix=suffix, robust=robust)) medsubfn = ('{species}{suffix}{robust}_7m_12m_medsub.fits'.format( species=species, suffix=suffix, robust=robust)) if not os.path.exists(medsubfn): cube = (SpectralCube.read(dpath(interferometer_fn)).with_spectral_unit( u.km / u.s, velocity_convention='radio')) cube.beam_threshold = 100 # try to avoid contamination... won't work universally; need to examine # individual cubes and have this as a parameter #med = cube.spectral_slab(90*u.km/u.s, 160*u.km/u.s).median(axis=0).value med = cube.spectral_slab(velocity_ranges[species][0] * u.km / u.s, velocity_ranges[species][1] * u.km / u.s).median(axis=0).value cube.write(medsubfn, overwrite=True) fh = fits.open(medsubfn, mode='update') log.info("Median subtracting") pb = ProgressBar(len(fh[0].data)) for ii, imslice in enumerate(fh[0].data): fh[0].data[ii] = imslice - med fh.flush() pb.update()
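# Sketch (for clarity): the slice-by-slice loop above is a memory-light form
# of a single broadcast subtraction over the whole cube.  Demonstrated on a
# small dummy array:
import numpy as np
dummy = np.random.rand(4, 3, 3)
dummy_med = np.median(dummy, axis=0)
looped = dummy.copy()
for ii in range(looped.shape[0]):
    looped[ii] = looped[ii] - dummy_med
assert np.allclose(looped, dummy - dummy_med[np.newaxis, :, :])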
'12CO2-1': { 'blue': [30, 50], 'red': [70, 95], }, }, } cutout_dict = {'LacyJet': 'north'} for objname in vrange_dict: for species in vrange_dict[objname]: for shift in vrange_dict[objname][species]: fn = paths.dpath( '12m/cutouts/W51_b6_12M.{0}.image.pbcor_{1}cutout.fits'.format( species, cutout_dict[objname])) cube = SpectralCube.read(fn).with_spectral_unit( u.km / u.s, velocity_convention='radio') cube.beam_threshold = 1.0 med = cube.percentile(25, axis=0) medsub = cube - med vrange = vrange_dict[objname][species][shift] slab = medsub.spectral_slab(*(vrange * u.km / u.s)) m0 = slab.moment0() m0.write( paths.dpath('12m/moments/{objname}_{species}_{shift}' '{v1}_{v2}.fits'.format( objname=objname, species=species,
import pylab as pl

molecules = ['CFp', 'CH3CN_5-4_3', 'H2CO615-616', 'H2CS', 'H2CS303-202',
             'H2CS321-220', 'H41a', 'HC3N', 'HCN', 'HCOp', 'HNC', ]

cubes = {mol: SpectralCube.read(
             'SgrB2_a_03_7M.{0}.image.pbcor.fits'.format(mol)
         ).with_spectral_unit(u.km / u.s, velocity_convention='radio')
         for mol in molecules}

pcubes = {mol: pyspeckit.Cube(cube=cubes[mol]) for mol in molecules}

fig1 = pl.figure(1)
fig1.clf()

for pcube in pcubes.values():
    pcube.plot_spectrum(101, 90, clear=False, figure=fig1)
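# Optional follow-up (a sketch): label the overplotted spectra and save the
# comparison figure.  The legend relies on dict insertion order matching the
# plotting loop; the output filename is a placeholder.
ax = fig1.gca()
ax.legend(list(pcubes.keys()), fontsize='small')
fig1.savefig('SgrB2_a_03_7M_spectra_x101_y90.png', bbox_inches='tight')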
def neighbourhood_fit_comparison(cube_name, params_name, chunk_size=80000,
                                 diff_bic=10, err_map=None,
                                 use_ncomp_check=True,
                                 reverse_direction=False):
    '''
    Lazily account for spatial continuity by checking the fit of each pixel
    against its neighbours. If a neighbour's BIC is lower by at least
    `diff_bic`, attempt to refit the pixel starting from that neighbour's
    model. If the number of components differs from a neighbour's, also
    attempt a refit with that number of components, initialized from the
    neighbour's fit.

    This is done in serial, which is not ideal, but it ensures that each
    refit sees the updates already made to previously visited pixels.
    '''

    with fits.open(params_name, memmap=False, mode='denywrite') as params_hdu:
        params_array = params_hdu[0].data
        uncerts_array = params_hdu[1].data
        bic_array = params_hdu[2].data.squeeze()

    ncomp_array = np.isfinite(params_array).sum(0) // 3

    cube = SpectralCube.read(cube_name)

    assert cube.shape[1:] == bic_array.shape

    if err_map is not None:
        if hasattr(err_map, 'value'):
            err_map = err_map.value.copy()
        assert err_map.shape == bic_array.shape

    # Number of pixels with valid fits.
    yposn, xposn = np.where(np.isfinite(bic_array) & (ncomp_array > 0))

    if reverse_direction:
        yposn = yposn[::-1]
        xposn = xposn[::-1]

    yshape, xshape = bic_array.shape

    basename = os.path.basename(cube_name)

    for i, (y, x) in tqdm(enumerate(zip(yposn, xposn)),
                          ascii=True,
                          desc=f"Rev. fit for: {basename[:15]}",
                          total=yposn.size):

        err = None if err_map is None else err_map[y, x]

        # Reload cube to release memory
        if i % chunk_size == 0 and i != 0:
            del cube
            cube = SpectralCube.read(cube_name)

        # Slice out 3x3 neighbourhood of y, x
        ymin = max(0, y - 1)
        ymax = min(yshape, y + 2)
        xmin = max(0, x - 1)
        xmax = min(xshape, x + 2)

        bic_neighb = bic_array[ymin:ymax, xmin:xmax].copy()
        ncomp_neighb = ncomp_array[ymin:ymax, xmin:xmax]

        bic_neighb[ncomp_neighb == 0] = np.NaN

        orig_posn = np.where(bic_neighb == bic_array[y, x])
        orig_index = (orig_posn[0][0], orig_posn[1][0])

        # If no valid neighbours, skip:
        if np.isfinite(bic_neighb).sum() == 1:
            continue

        # Condition 1: delta BIC
        if np.nanmax(bic_array[y, x] - bic_neighb) >= diff_bic:

            # Use the window's own shape and offset so that windows clipped at
            # the map edges still index the correct neighbour.
            argmin = np.unravel_index(np.nanargmin(bic_neighb),
                                      bic_neighb.shape)
            yneighb = ymin + argmin[0]
            xneighb = xmin + argmin[1]

            # Refit
            spec = cube[:, y, x]

            init_params = params_array[:, yneighb, xneighb]
            init_params = init_params[np.isfinite(init_params)]
            # assert init_params.size > 0

            out_new = \
                refit_multigaussian(spec, init_params,
                                    vels=None,
                                    vcent=None,
                                    err=err,
                                    amp_const=None,
                                    cent_const=None,
                                    sigma_const=None,
                                    discrete_fitter=False)

            if bic_array[y, x] - out_new.bic >= diff_bic:
                # Update the parameter array
                params_array[:, y, x] = np.NaN
                params_array[:len(out_new.params), y, x] = \
                    [val.value for val in out_new.params.values()]

                uncerts_array[:, y, x] = np.NaN
                uncerts_array[:len(out_new.params), y, x] = \
                    [val.stderr if val.stderr is not None else np.NaN
                     for val in out_new.params.values()]

                bic_array[y, x] = out_new.bic

            continue

        # Condition 2: Change in # of components
        elif ((ncomp_array[y, x] - ncomp_neighb) != 0).any():

            if not use_ncomp_check:
                continue

            # We'll do this twice with the largest and smallest number of
            # components.
            # The lowest BIC fit will be kept.
spec = cube[:, y, x] max_comp = ncomp_neighb.max() min_bic = bic_neighb[ncomp_neighb == max_comp].min() posn = np.where(bic_neighb == min_bic) argmax = (posn[0][0], posn[1][0]) # Skip max if this is the original spectrum if argmax == orig_index: maxcomp_bic = bic_array[y, x] else: yneighb = y + (argmax[0] - 1) xneighb = x + (argmax[1] - 1) # Refit init_params_max = params_array[:, yneighb, xneighb] init_params_max = init_params_max[np.isfinite(init_params_max)] assert init_params_max.size > 0 out_new_max = \ refit_multigaussian(spec, init_params_max, vels=None, vcent=None, err=err, amp_const=None, cent_const=None, sigma_const=None, discrete_fitter=False) maxcomp_bic = out_new_max.bic min_comp = ncomp_neighb[ncomp_neighb > 0].min() min_bic = bic_neighb[ncomp_neighb == min_comp].min() posn = np.where(bic_neighb == min_bic) argmin = (posn[0][0], posn[1][0]) # Skip max if this is the original spectrum if argmin == orig_index: mincomp_bic = bic_array[y, x] else: yneighb = y + (argmin[0] - 1) xneighb = x + (argmin[1] - 1) # Refit init_params_min = params_array[:, yneighb, xneighb] init_params_min = init_params_min[np.isfinite(init_params_min)] assert init_params_min.size > 0 out_new_min = \ refit_multigaussian(spec, init_params_min, vels=None, vcent=None, err=err, amp_const=None, cent_const=None, sigma_const=None, discrete_fitter=False) mincomp_bic = out_new_min.bic diff_maxcomp = (bic_array[y, x] - maxcomp_bic) >= diff_bic diff_mincomp = (bic_array[y, x] - mincomp_bic) >= diff_bic # Original fit is good. if not diff_mincomp and not diff_maxcomp: continue # Both are better than original. Take best. elif diff_mincomp and diff_maxcomp: if maxcomp_bic < mincomp_bic: out_new = out_new_max else: out_new = out_new_min # Update to max component fit. elif diff_maxcomp: out_new = out_new_max # Update to min component fit. else: out_new = out_new_min # Update the parameter array params_array[:, y, x] = np.NaN params_array[:len(out_new.params), y, x] = \ [val.value for val in out_new.params.values()] uncerts_array[:, y, x] = np.NaN uncerts_array[:len(out_new.params), y, x] = \ [val.stderr if val.stderr is not None else np.NaN for val in out_new.params.values()] bic_array[y, x] = out_new.bic # Otherwise no refit is needed. else: continue del cube cube = SpectralCube.read(cube_name) # Grab the celestial header spat_header = cube[0].header del cube # Return a combined HDU that can be written out. params_hdu = fits.PrimaryHDU(params_array, spat_header.copy()) params_hdu.header['BUNIT'] = ("", "Gaussian fit parameters") uncerts_hdu = fits.ImageHDU(uncerts_array, spat_header.copy()) uncerts_hdu.header['BUNIT'] = ("", "Gaussian fit uncertainty") bics_hdu = fits.ImageHDU(bic_array, spat_header.copy()) bics_hdu.header['BUNIT'] = ("", "Gaussian fit BIC") hdu_all = fits.HDUList([params_hdu, uncerts_hdu, bics_hdu]) del params_array del uncerts_array del bic_array return hdu_all
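# Hypothetical usage of neighbourhood_fit_comparison (filenames are
# placeholders): run the neighbour-based refit pass and write the updated
# parameter, uncertainty, and BIC planes back out.
hdu_all = neighbourhood_fit_comparison('example_cube.fits',
                                       'example_cube_fit_params.fits',
                                       diff_bic=10,
                                       use_ncomp_check=True)
hdu_all.writeto('example_cube_fit_params_neighbcheck.fits', overwrite=True)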
cubemask=profileMask['mask'], numcores=4, sampling=1, xarr=vels) # Mask NaNs contFluxesMasked = np.ma.masked_array(contFluxCube, mask=trueMask['mask']) # Subtract the continuum from the spectra. # Line profile range is not masked. NaNs are masked as TRUE. contSubCube = trueMask['maskedData'] - contFluxCube # --------------------------------------------- # # Build the spectral cube using the contSubCube # # --------------------------------------------- # # NaNs and data to be ignored are FALSE. specCube = SpectralCube(contSubCube, pacsWcs, mask=cubeMask, fill_value=1.) # For convenience, convert the X-axis to km/s # (WCSLIB automatically converts to m/s even if you give it km/s) specCube = specCube.with_spectral_unit(u.km / u.s) # --------------------------------------------- # # Build the pySpecCube using the spectral cube. # # --------------------------------------------- # # NaN values are FALSE in the mask. pyCube = pyspeckit.Cube(cube=specCube, maskmap=falseMask['mask'][0, :, :]) # -------------------------------- # # Set up for line profile fitting. # # -------------------------------- # # Build the guesses array
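# One common way to build the guesses array for a per-pixel single-Gaussian
# fit (a sketch with assumed values, not necessarily what the original script
# did): [amplitude, centroid, width] per pixel, then fit with fiteach.
amp_guess = specCube.max(axis=0).value          # peak of each spectrum
cent_guess = np.zeros_like(amp_guess)           # assumed line centre [km/s]
width_guess = np.full_like(amp_guess, 50.0)     # assumed line width [km/s]
guesses = np.array([amp_guess, cent_guess, width_guess])
pyCube.fiteach(fittype='gaussian', guesses=guesses, multicore=4)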
import os ######## Input parameters here ######## image_filename = '/lustre/roberto/ALMA_IMF/lines/imaging_results/W43-MM2_B6_spw1_12M_sio.image' mask_filename = '/lustre/roberto/ALMA_IMF/lines/imaging_results/W43-MM2_B6_spw1_12M_sio_multi_3sigma' # Don't include .mask mask_threshold = 7.5 # In mJy erosion_dilation = True erosion_iter = 2 dilation_iter = 2 ####################################### print("Now masking the cube at sigma threshold") # Use the code below to mask the cube at a certain sigma threshold cube = SpectralCube.read(image_filename) mask = cube > mask_threshold*u.mJy/u.beam from casatools import image ia = image() print("Now computing boolmask") boolmask = mask.include().compute() print("Now getting coordinates from original image") # Use the code below to output a mask file ia.open(image_filename) cs = ia.coordsys() ia.close() print("Now outputting the mask file") ia.fromarray(outfile=mask_filename+'.mask', pixels=boolmask.astype('float')[:,None,:,:].T, csys=cs.torecord(), overwrite=True) ia.close()
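# The erosion/dilation settings above are not applied in this excerpt; a
# sketch of how they could be applied to boolmask with scipy.ndimage (an
# assumption, and it would need to run before the ia.fromarray call above):
from scipy import ndimage
if erosion_dilation:
    boolmask = ndimage.binary_erosion(boolmask, iterations=erosion_iter)
    boolmask = ndimage.binary_dilation(boolmask, iterations=dilation_iter)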