Example #1
def Snu(Te=default_te, nu=95*u.GHz, R=0.1*u.pc, Qlyc=1e45*u.s**-1, beam=4000*u.au,
        angular_beam=0.5*u.arcsec):
    tb = Tb(Te=Te, nu=nu, EM=EM(R=R, Qlyc=Qlyc))
    if beam < R:
        return tb.to(u.mJy,
                     u.brightness_temperature(radio_beam.Beam(angular_beam),
                                              nu))
    else:
        return (tb * (R/beam)**2).to(u.mJy,
                                     u.brightness_temperature(radio_beam.Beam(angular_beam),
                                                              nu))
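Example #1 converts a brightness temperature to a flux density through astropy's brightness-temperature equivalency tied to a radio_beam.Beam. A minimal, self-contained sketch of that K-to-Jy/beam step on its own, with placeholder beam size, frequency, and temperature (jtok_equiv is the Beam equivalency also used in Example #16 below):

import radio_beam
from astropy import units as u

beam = radio_beam.Beam(0.5 * u.arcsec)   # placeholder circular beam
nu = 95 * u.GHz                          # placeholder observing frequency
tb = 25 * u.K                            # placeholder brightness temperature

# K -> Jy/beam via the beam's Rayleigh-Jeans brightness-temperature equivalency
snu = tb.to(u.Jy, beam.jtok_equiv(nu))
print(snu.to(u.mJy))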
Example #2
def createmodelgal(path, skeletonfile, params):
    # Start model
    model = GM(skeletonfile)
    # Initialize model
    model.init(radii=params['radii'],
               xpos=params['xpos'],
               ypos=params['ypos'],
               vsys=params['vsys'],
               vrot=params['vrot'],
               vdisp=params['vdisp'],
               vrad=params['vrad'],
               z0=params['z0'],
               inc=params['inc'],
               phi=params['phi'],
               dens=np.array([0.8976, 0.983, 0.7064, 0.3329]))

    model.set_options(ltype=params['ltype'])

    # Compute
    mymodel = model.compute()

    temp_gal = SC.read(mymodel)
    beam = rb.Beam(
        major=params['radii'][-1] * params['beamfactor'][0] * u.arcsec,
        minor=params['radii'][-1] * params['beamfactor'][1] * u.arcsec,
        pa=params['phi'] * u.deg)  # fitted radius must be last one
    temp_gal = temp_gal.convolve_to(beam)
    temp_gal.write(path, overwrite=True)
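The helper above reads its model settings from a params dictionary. A hypothetical dictionary with purely illustrative placeholder values, listing only the keys the function actually uses (real values and units depend on the GM model being fitted):

import numpy as np

params = {
    'radii': np.array([10., 20., 30., 40.]),     # placeholder ring radii (arcsec); fitted radius last
    'xpos': 75, 'ypos': 75,                       # placeholder centre position
    'vsys': 130.0,                                # placeholder systemic velocity
    'vrot': np.array([100., 110., 115., 118.]),   # placeholder rotation curve
    'vdisp': np.array([10., 9., 8., 8.]),         # placeholder velocity dispersion
    'vrad': 0.0,
    'z0': 10.0,                                   # placeholder scale height
    'inc': 60.0, 'phi': 123.0,                    # placeholder inclination and position angle
    'ltype': 1,                                   # placeholder layer type passed to set_options
    'beamfactor': (0.4, 0.4),                     # beam major/minor as a fraction of the last radius
}
createmodelgal('model.fits', 'skeleton.fits', params)   # placeholder file names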
Example #3
def spatial_smooth(filename,
                   major=None,
                   minor=None,
                   pa=0,
                   path_to_output='.',
                   suffix=None):  # smooth image with 2D Gaussian
    import os
    import radio_beam
    from spectral_cube import SpectralCube
    from astropy import units as u

    cube = SpectralCube.read(filename)
    beam = radio_beam.Beam(major=major * u.arcsec,
                           minor=minor * u.arcsec,
                           pa=pa * u.deg)
    smoothcube = cube.convolve_to(beam)

    if suffix is not None:
        newname = filename.split('/')[-1].split('.fits')[0] + '_smooth' + str(
            major) + '_arcsec' + suffix + '.fits'
    else:
        newname = filename.split('/')[-1].split('.fits')[0] + '_smooth_' + str(
            major) + '_arcsec' + '.fits'
    pathname = os.path.join(path_to_output, newname)
    smoothcube.write(pathname, format='fits', overwrite=True)
    print("\n\033[92mSAVED FILE:\033[0m '{}' in '{}'".format(
        newname, path_to_output))
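A hypothetical call of the helper above; the file name, beam size, and suffix are placeholders:

# smooths 'mycube.fits' to a 10 arcsec circular beam and writes
# 'mycube_smooth10_arcsec_test.fits' (path_to_output defaults to '.')
spatial_smooth('mycube.fits', major=10, minor=10, pa=0, suffix='_test')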
Example #4
    def add_noise(self, signal_to_noise):
        print(
            'In order to add noise please give a region of the signal in the format: (row1,row2,column1,column2)'
        )
        beam = rb.Beam(major=self.params['radii'][-1] *
                       self.params['beamfactor'][0] * u.arcsec,
                       minor=self.params['radii'][-1] *
                       self.params['beamfactor'][1] * u.arcsec,
                       pa=self.params['phi'] * u.deg)
        pickregion(self.output + '/' + 'raw_' + self.name + '.fits', beam)
        region = np.array(input().split(',')).astype(int)
        self.region = region

        print('Adding gaussian noise to galaxy.....')
        self.signal_to_noise = signal_to_noise
        rawfile = self.output + '/' + 'raw_' + self.name + '.fits'
        noisefile = self.output + '/' + 'noise_' + self.name + '.fits'

        SNR = getSNR(region, self.noise_amplifier, self.signal_to_noise,
                     rawfile, beam, noisefile)

        # If the noise amplifier wasn't enough start from scratch
        if abs(round(SNR[0], 1) - self.signal_to_noise) > 0.1:
            N = 0
            # Find the ideal noise amplifier to realise the SNR of user input
            low = 1
            high = 18
            while abs(round(SNR[0], 1) -
                      self.signal_to_noise) > .1 and N < 100:
                self.noise_amplifier = np.random.uniform(low=low, high=high)
                SNR = getSNR(region, self.noise_amplifier,
                             self.signal_to_noise, rawfile, beam, noisefile)
                if (SNR[0] - self.signal_to_noise) > 3: low += 0.2
                if (SNR[0] - self.signal_to_noise) < 3: high -= 0.2
                print(SNR[0], SNR[1], self.noise_amplifier)
                N += 1
            if N == 100: sys.exit('No convergence for noise')
            SN = SNR[0]

        self.params['std'] = SNR[1]

        self.clean_mem()
        print('Memory cleaned')

        c_gal = SC.read(self.output + '/' + 'noise_' + self.name + '.fits')
        # Results
        print('Smoothed galaxy with beam size '
              '(major axis, minor axis) = '
              f"{self.params['beamfactor']} x {self.params['radii'][-1]} arcsec")
        print('FINAL GALAXY written to ',
              self.output + '/' + 'final_' + self.name + '.fits')
        c_gal.write(self.output + '/' + 'final_' + self.name + '.fits',
                    overwrite=True)

        # Store uncertainty
        #self.params['std'] = getσ(c_gal.hdu.data)

        # Final result!
        self.c_gal = c_gal
Example #5
def loglike(x, **params):

    model = GM(
        os.path.dirname(os.path.abspath(__file__)) + '/examples/ngc2403.fits')

    if len(x) == 8:
        model.init(radii=params['radii'],
                   xpos=params['xpos'],
                   ypos=params['ypos'],
                   vsys=params['vsys'],
                   vrot=x[0:4],
                   vrad=params['vrad'],
                   vdisp=x[4:8],
                   z0=params['z0'],
                   inc=params['inc'],
                   phi=params['phi'])
    if len(x) == 9:
        model.init(radii=params['radii'],
                   xpos=params['xpos'],
                   ypos=params['ypos'],
                   vsys=params['vsys'],
                   vrot=x[0:4],
                   vrad=params['vrad'],
                   vdisp=x[4:8],
                   z0=params['z0'],
                   inc=x[8],
                   phi=params['phi'])
    if len(x) == 10:
        model.init(radii=params['radii'],
                   xpos=params['xpos'],
                   ypos=params['ypos'],
                   vsys=params['vsys'],
                   vrot=x[0:4],
                   vrad=params['vrad'],
                   vdisp=x[4:8],
                   z0=params['z0'],
                   inc=x[8],
                   phi=x[9])

    model.set_options(ltype=params['ltype'])
    mymodel = model.compute()

    temp_gal = SC.read(mymodel)
    beam = rb.Beam(
        major=params['radii'][-1] * params['beamfactor'][0] * u.arcsec,
        minor=params['radii'][-1] * params['beamfactor'][1] * u.arcsec,
        pa=params['phi'] * u.deg)  # fitted radius must be last one
    temp_gal = temp_gal.convolve_to(beam)
    mdata = temp_gal.unmasked_data[:, :, :] / temp_gal.unit

    if np.round((time.time() - params['start']) / 3600,
                2) in np.arange(0, 10, 0.1):
        gc.collect()

    return -0.5 * np.sum([
        np.sum(np.abs(params['data'][ch] - mdata[ch])) /
        (params['std'] * params['Nsignal_pix'][ch]) for ch in params['nch']
    ])  # -len(data)*np.log(np.sqrt(2*np.pi*params['std']**2))-0.5*residual(data, mdata)/params['std']
Example #6
def snu_dust(density=1e4*u.cm**-3, Td=40*u.K, radius=4000*u.au,
             distance=8.4*u.kpc, cfreq=95*u.GHz):
    mass = (density * 2.8 * u.Da * 4/3. * radius**3).to(u.M_sun)
    print(mass)
    beam = radio_beam.Beam((radius/distance).to(u.arcsec,
                                                u.dimensionless_angles()))
    flux = dust.snuofmass(nu=cfreq, mass=mass, beamomega=beam, temperature=Td,
                          distance=distance)
    return flux
Example #7
def prep(f='/Users/jkeown/Desktop/DR21_13CO.fits',
         region='CygX_N',
         line='13CO'):
    if line == '13CO':
        freq = 330588. * u.MHz
    else:
        freq = 329331. * u.MHz
    header = fits.getheader(f)
    beam = getres(freq=freq, diameter=15. * u.m)
    header['BMIN'] = beam.to(u.deg).value  # beam size in degrees
    header['BMAJ'] = beam.to(u.deg).value  # beam size in degrees
    if region == 'W3(OH)' or region == 'W3Main' or region == 'M16':
        del header['CD1_2']
        del header['CD2_1']
    data = fits.getdata(f)
    data = data[1200:2200, :, :]
    header['NAXIS3'] = data.shape[0]
    fits.writeto(region + '_' + line + '_test.fits',
                 data=data,
                 header=header,
                 overwrite=True)

    # If desired, convolve map with larger beam
    # or load previously created convolved cube
    cube = SpectralCube.read(region + '_' + line + '_test.fits')
    cube_km_1 = cube.with_spectral_unit(u.km / u.s,
                                        velocity_convention='radio')
    beam = radio_beam.Beam(major=32 * u.arcsec,
                           minor=32 * u.arcsec,
                           pa=0 * u.deg)
    cube_km = cube_km_1.convolve_to(beam)
    cube_km.write(region + '_' + line + '_conv_test.fits',
                  format='fits',
                  overwrite=True)

    #cube_km = SpectralCube.read(region+'_conv_test.fits')

    # Then smooth spectrally
    res = cube_km.spectral_axis[1] - cube_km.spectral_axis[2]
    new_axis = np.arange(cube_km.spectral_axis[-1].value,
                         cube_km.spectral_axis[0].value,
                         res.value * 2) * u.km / u.s
    fwhm_factor = np.sqrt(8 * np.log(2))
    current_resolution = res
    target_resolution = res * 2
    pixel_scale = res
    gaussian_width = ((target_resolution**2 - current_resolution**2)**0.5 /
                      pixel_scale / fwhm_factor)
    kernel = Gaussian1DKernel(gaussian_width)
    new_cube = cube_km.spectral_smooth(kernel)
    interp_cube = new_cube.spectral_interpolate(new_axis,
                                                suppress_smooth_warning=True)
    interp_cube.write(region + '_' + line + '_conv_test_smooth.fits',
                      overwrite=True)
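The spectral-smoothing block above sets the kernel width so that the original and target resolutions add in quadrature. A minimal sketch of that relation with placeholder channel widths (the same formula as in the code, isolated for clarity):

import numpy as np
from astropy import units as u

fwhm_factor = np.sqrt(8 * np.log(2))      # FWHM = 2.355 * sigma for a Gaussian
current_resolution = 0.1 * u.km / u.s     # placeholder native channel resolution
target_resolution = 0.2 * u.km / u.s      # placeholder desired resolution
pixel_scale = 0.1 * u.km / u.s            # placeholder channel width

# kernel sigma (in channels) such that current and kernel add in quadrature to target
gaussian_width = ((target_resolution**2 - current_resolution**2)**0.5
                  / pixel_scale / fwhm_factor)
print(gaussian_width.decompose())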
Example #8
def ptsrcmodel(ptsrcx, ptsrcy, ptsrcamp, ptsrcwid, kernelpa=52):
    ptsrc_wid_bm = radio_beam.Beam(ptsrcwid * u.arcsec, 0.00001 * u.arcsec,
                                   (kernelpa + 90) * u.deg)
    convbm = observed_beam.convolve(ptsrc_wid_bm)
    assert convbm.pa.value != 0
    ptsrc_bm = convbm.as_kernel(pixscale,
                                x_size=data.shape[1],
                                y_size=data.shape[0])
    ptsrcmod = (shift.shift2d(ptsrc_bm, ptsrcx - data.shape[0] / 2,
                              ptsrcy - data.shape[1] / 2) / obsbm_max *
                ptsrcamp)
    return ptsrcmod
Example #9
def convolve_to_beam(fitsfilename,
                     beam=radio_beam.Beam(0.04 * u.arcsec),
                     distance=5400 * u.pc):
    hdr = fits.getheader(fitsfilename)
    pix_area = (hdr['CDELT1'] * u.cm)**2
    pix_area_arcsec = (pix_area / distance**2).to(u.arcsec**2,
                                                  u.dimensionless_angles())
    kernel = beam.as_kernel(pix_area_arcsec**0.5)

    data = fits.getdata(fitsfilename)
    smoothed = convolve_fft(data, kernel)
    return fits.PrimaryHDU(data=smoothed, header=hdr)
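A short usage sketch for the helper above; the file names are placeholders, and the input header is assumed to carry CDELT1 in cm, as the function itself expects:

from astropy import units as u
import radio_beam

hdu = convolve_to_beam('model_image.fits',                 # placeholder input
                       beam=radio_beam.Beam(0.1 * u.arcsec),
                       distance=5400 * u.pc)
hdu.writeto('model_image_smoothed.fits', overwrite=True)   # placeholder output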
Example #10
def snu_dust_of_density(density=1e4 * u.cm**-3,
                        Td=40 * u.K,
                        radius=4000 * u.au,
                        distance=distance,
                        cfreq=frequency):
    # from the HII region notebook; not used here...
    mass = (density * 2.8 * u.Da * 4 / 3. * radius**3).to(u.M_sun)
    beam = radio_beam.Beam((radius / distance).to(u.arcsec,
                                                  u.dimensionless_angles()))
    flux = dust.snuofmass(nu=cfreq,
                          mass=mass,
                          beamomega=beam,
                          temperature=Td,
                          distance=distance)
    return flux
Example #11
def non_deconvolved_sources(names, imgs):
    all_data = fits.getdata(
        '/lustre/aoc/students/jotter/dendro_catalogs/IR_matched_catalog_B7.fits'
    )

    beams = []
    good_inds = []  #indices for each dataset with good gaussian fits
    major_obs = []
    minor_obs = []
    pa = []

    for i, data in enumerate(imgs):
        fl = fits.open(data)
        header = fl[0].header
        beams.append(radio_beam.Beam.from_fits_header(header))

        good_inds.append(np.where(all_data['fit_goodness_' + names[i]] == 'y'))
        major_obs.append(all_data['FWHM_major_' + names[i]] * u.arcsecond)
        minor_obs.append(all_data['FWHM_minor_' + names[i]] * u.arcsecond)
        pa.append(all_data['position_angle_' + names[i]] * u.degree)

    ind = reduce(np.intersect1d, good_inds)

    maj_deconv = []
    min_deconv = []
    deconv_inds = []
    for n in range(len(names)):
        maj_deconv.append([])
        min_deconv.append([])
        deconv_inds.append([])
        for j in ind:
            obs_beam = radio_beam.Beam(major=major_obs[n][j],
                                       minor=minor_obs[n][j],
                                       pa=pa[n][j] - 90 * u.degree)
            try:
                src = obs_beam.deconvolve(beams[n])
                maj_deconv[n].append(src.major.value)
                min_deconv[n].append(src.minor.value)
                deconv_inds[n].append(j)
            except ValueError:
                maj_deconv[n].append(np.nan)
                min_deconv[n].append(np.nan)
    int_deconv_inds = reduce(np.intersect1d, deconv_inds)
    return int_deconv_inds
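The try/except pattern above relies on Beam.deconvolve raising a ValueError when the fitted source is not larger than the restoring beam (the same convention appears in Examples #15 and #19). A minimal sketch with placeholder sizes:

import radio_beam
from astropy import units as u

observed = radio_beam.Beam(major=1.2 * u.arcsec, minor=0.9 * u.arcsec, pa=30 * u.deg)
restoring = radio_beam.Beam(major=1.0 * u.arcsec, minor=0.8 * u.arcsec, pa=30 * u.deg)

try:
    intrinsic = observed.deconvolve(restoring)   # deconvolved source size
    print(intrinsic.major, intrinsic.minor, intrinsic.pa)
except ValueError:
    # the source is unresolved: smaller than the beam along at least one axis
    print('could not deconvolve')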
Example #12
def calc_moments(mol='NH3_11'):
    if mol == 'CS':
        rms = 320 * u.mK
    else:
        rms = 120 * u.mK
    v_lo = 83.0 * u.km / u.s
    v_hi = 90.0 * u.km / u.s
    # FIXME
    filep = PDir.map_name('G285_mosaic', 'NH3_11')
    cube = read_cube(filep)
    beam = radio_beam.Beam.from_fits_header(cube.header)
    cube = cube.spectral_slab(v_lo, v_hi)
    cube = cube.spectral_smooth(convolution.Gaussian1DKernel(4 / 2.355))
    spax = cube.spectral_axis
    # smooth in area and velocity, create mask
    bigger_beam = radio_beam.Beam(major=2 * beam.major,
                                  minor=2 * beam.minor,
                                  pa=beam.pa)
    cube_s = cube.convolve_to(bigger_beam)
    # calculate moments
    filen_fmt = 'data/test_imaging/test_gbt_moments/G285_mosaic_gbt_NH3_11_{0}.fits'
    mom0 = cube.with_mask(cube_s > 1 * rms).moment(order=0)
    write_cube(mom0, PDir.IMG / Path(filen_fmt.format('mom0')))
    mom1 = cube.with_mask(cube_s > 2 * rms).moment(order=1)
    write_cube(mom1, PDir.IMG / Path(filen_fmt.format('mom1')))
    mom2 = cube.with_mask(cube_s > 2 * rms).moment(order=2)
    write_cube(mom2, PDir.IMG / Path(filen_fmt.format('mom2')))
    mom3 = cube.with_mask(cube_s > 2 * rms).moment(order=3)
    write_cube(mom3, PDir.IMG / Path(filen_fmt.format('mom3')))
    momS = cube.with_mask(cube_s > 2 * rms).linewidth_sigma()
    write_cube(momS, PDir.IMG / Path(filen_fmt.format('sigma')))
    momM = cube.with_mask(cube_s > 1 * rms).max(axis=0)
    write_cube(momM, PDir.IMG / Path(filen_fmt.format('max')))
    momV = cube.with_mask(cube_s > 2 * rms).argmax(axis=0).astype(float)
    momV[momV == 0] = np.nan
    chans = np.unique(momV)
    chans = chans[~np.isnan(chans)]
    for ix in chans:
        momV[momV == ix] = spax[int(ix)].value
    momV = spectral_cube.lower_dimensional_structures.Projection(momV,
                                                                 wcs=mom0.wcs)
    write_cube(momV, PDir.IMG / Path(filen_fmt.format('vmax')))
Example #13
import os
import glob

import paths
from spectral_cube import SpectralCube
from astropy import units as u
import radio_beam

sourcename = 'e2e8'

out_beam = radio_beam.Beam(0.35*u.arcsec, 0.35*u.arcsec)

for fn in glob.glob(paths.dpath('12m/cutouts/*.CH3OH*{0}*fits'.format(sourcename))):

    outpath = fn.replace(".fits","_0.35arcsec.fits").replace("cutouts","cutouts/commonres")
    if not os.path.exists(outpath):

        vrcube = SpectralCube.read(fn)
        cube = vrcube.convolve_to(out_beam)
        cube.write(outpath)

cubes = [SpectralCube.read(fn) for fn in glob.glob(paths.dpath('12m/cutouts/commonres/W51_b6_12M.CH3OH*.image.pbcor_{0}cutout_0.35arcsec.fits'
                                                               .format(sourcename)))]
Example #14
colfilename = datapath + '/column_maps/scuba_col_herscheltem.fits'

colfile = fits.open(colfilename)[0]

sgrb2contfile = fits.open(
    paths.Fpath(
        'merge/continuum/SgrB2_selfcal_full_TCTE7m_selfcal5_ampphase_taylorterms_multiscale_deeper_mask2.5mJy.image.tt0.pbcor.fits'
    ))

tbl = table.Table.read(
    paths.tpath("continuum_photometry.ipac"),
    format='ascii.ipac',
)

# Set the beam to be approximately the measured beam size
beam = radio_beam.Beam(11 * u.arcsec)
beam_rad = beam.major.to(u.deg).value

observed_region, _ = reproject.reproject_interp(sgrb2contfile, colfile.header)

# make the WCS grid
yy, xx = np.indices(colfile.data.shape)
inds = np.transpose((xx.ravel(), yy.ravel()))
ra, dec = wcs.WCS(colfile.header).celestial.wcs_pix2world(inds, 0).T
ra = ra.reshape(xx.shape)
dec = dec.reshape(yy.shape)

nonstarforming_mask = np.isfinite(observed_region)

for row in ProgressBar(tbl):
    source_dist = ((ra - row['RA'])**2 + (dec - row['Dec'])**2)**0.5
Example #15
def fit_source(srcID,
               img,
               img_name,
               band,
               fit_bg=False,
               bg_stddev_x=30,
               bg_stddev_y=30,
               bg_mean_x=0,
               bg_mean_y=0,
               zoom=1,
               max_offset_in_beams=1,
               max_radius_in_beams=5,
               nonconv_img=None,
               mask_size=1.5):
    """Fit a given source with a 2D Gaussian, optionally together with a background Gaussian.

    srcID : int
        name of source to fit in catalogs
    img : fits file
        fits file with source to fit
    img_name : str
        name of image for the directory where the fit plots will go
    band : str
        band of image to fit ('B3', 'B6', or 'B7')
    fit_bg : bool
        if False, do not fit background gaussian
    bg_stddev_x : float
        eyeballed estimate of stddev of the background source in pixels
    bg_stddev_y : float
        same as above in y direction
    bg_mean_x/y : float
        pixels away from center (origin) in x/y direction for background gaussian mean guess
    zoom : float
        amount of zoom, values greater than 1 are zoom ins
    """

    #ref_data_name = '/home/jotter/nrao/summer_research_2018/tables/dendro_ref_catalog_edited.fits'
    ref_data_name = '/home/jotter/nrao/summer_research_2018/tables/ref_catalog_may21_b7.fits'
    #ref_data_name = '/lustre/cv/observers/cv-12578/orion_disks/summer_research_2018/tables/ref_catalog_may21.fits'
    ref_data = Table.read(ref_data_name)

    fl = fits.open(img)
    header = fl[0].header
    img_data = fl[0].data.squeeze()
    img_wcs = WCS(header).celestial

    beam = radio_beam.Beam.from_fits_header(header)
    pixel_scale = np.abs(
        img_wcs.pixel_scale_matrix.diagonal().prod())**0.5 * u.deg
    ppbeam = (beam.sr / (pixel_scale**2)).decompose().value

    if nonconv_img is not None:
        flnonconv = fits.open(nonconv_img)
        nonconv_header = flnonconv[0].header
        nonconv_beam = radio_beam.Beam.from_fits_header(nonconv_header)
        ppbeam = (nonconv_beam.sr / (pixel_scale**2)).decompose().value

    #now get ready to fit gaussians
    #start by setting up save directory for images
    gauss_save_dir = '/home/jotter/nrao/gauss_diags_may21/fitbg/' + img_name + '/'
    #gauss_save_dir = f'/lustre/cv/observers/cv-12578/orion_disks/gauss_diags_may21/{img_name}/'

    print('saving plots to ' + gauss_save_dir)
    if not os.path.exists(gauss_save_dir):
        os.makedirs(gauss_save_dir)
    #now make region
    rad = Angle(1, 'arcsecond')  #radius used in region list
    #src_ind = np.where(ref_data['D_ID']==srcID)[0]
    src_ind = np.where(ref_data['B3_Seq'] == srcID)[0]

    ra = ref_data['RA_B3'][src_ind].data[0]
    dec = ref_data['DEC_B3'][src_ind].data[0]
    center_reg = SkyCoord(ra, dec, unit='deg', frame='icrs')
    reg = regions.CircleSkyRegion(
        center=center_reg,
        radius=1 * u.arcsecond,
        meta={
            'text':
            str(ref_data['B3_Seq'][src_ind].data[0]) + '_xstddev_' +
            str(bg_stddev_x) + '_ystddev_' + str(bg_stddev_y)
        })

    region_list = []
    #valid_inds = np.where(np.isnan(ref_data[band+'_detect']) == False)[0]
    for ind in range(len(ref_data)):  #valid_inds:
        if ref_data['B3_Seq'][ind] == srcID:
            continue
        ra_i = ref_data['RA_B3'][ind]
        dec_i = ref_data['DEC_B3'][ind]
        region_i = regions.CircleSkyRegion(center=SkyCoord(ra_i,
                                                           dec_i,
                                                           unit='deg',
                                                           frame='icrs'),
                                           radius=1 * u.arcsecond)
        region_list.append(region_i)

    #print(region_list)
    #print(reg)

    cat_r = Angle(0.5, 'arcsecond') / zoom  #radius for gaussian fitting

    if fit_bg == True:
        gauss_cat, fitim_bg = bg_gaussfit(
            img,
            reg,
            region_list,
            cat_r,
            bg_stddev_x=bg_stddev_x,
            bg_stddev_y=bg_stddev_y,
            bg_mean_x=bg_mean_x,
            bg_mean_y=bg_mean_y,
            savepath=gauss_save_dir,
            max_offset_in_beams=max_offset_in_beams,
            max_offset_in_beams_bg=10,
            max_radius_in_beams=max_radius_in_beams,
            mask_size=mask_size)

        #print('gauss_cat length ',len(gauss_cat))
        #k = list(gauss_cat.keys())[0]
        #if gauss_cat[k]['success'] == False:
        #    gauss_cat = gaussfit_cutoutim(img, fitim_bg, reg, region_list, cat_r, savepath=gauss_save_dir, max_offset_in_beams = max_offset_in_beams, max_radius_in_beams = max_radius_in_beams)
        #    success = gauss_cat[k]['success']
        #    print(F'ALTERNATIVE FIT SUCCESS: {success}')

    else:
        gauss_cat = gaussfit_catalog(img, [reg],
                                     cat_r,
                                     savepath=gauss_save_dir,
                                     max_offset_in_beams=max_offset_in_beams,
                                     max_radius_in_beams=max_radius_in_beams)

    img_table = Table(names=('Seq', 'fwhm_maj_' + band, 'fwhm_maj_err_' + band,
                             'fwhm_min_' + band, 'fwhm_min_err_' + band,
                             'pa_' + band, 'pa_err_' + band,
                             'gauss_amp_' + band, 'gauss_amp_err_' + band,
                             'RA_' + band, 'RA_err_' + band, 'DEC_' + band,
                             'DEC_err_' + band),
                      dtype=('i4', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8',
                             'f8', 'f8', 'f8', 'f8', 'f8'))
    for key in gauss_cat:
        img_table.add_row(
            (srcID, gauss_cat[key]['fwhm_major'],
             gauss_cat[key]['e_fwhm_major'], gauss_cat[key]['fwhm_minor'],
             gauss_cat[key]['e_fwhm_minor'], gauss_cat[key]['pa'],
             gauss_cat[key]['e_pa'], gauss_cat[key]['amplitude'],
             gauss_cat[key]['e_amplitude'], gauss_cat[key]['center_x'],
             gauss_cat[key]['e_center_x'], gauss_cat[key]['center_y'],
             gauss_cat[key]['e_center_y']))

    #now measure deconvolved sizes and aperture flux measurements for each source
    ap_flux_arr = []
    ap_flux_err_arr = []
    fwhm_maj_deconv_arr = []
    fwhm_maj_deconv_err_arr = []
    fwhm_min_deconv_arr = []
    fwhm_min_deconv_err_arr = []
    pa_deconv_arr = []
    pa_deconv_err_arr = []
    snr_arr = []

    for row in range(
            len(img_table)
    ):  #now loop through sources in reference data and make measurements
        ref_ind = np.where(ref_data['B3_Seq'] == img_table['Seq'][row])[0]
        if True:  #len(ref_ind > 0):

            measured_source_size = radio_beam.Beam(
                major=img_table['fwhm_maj_' + band][row] * u.arcsec,
                minor=img_table['fwhm_min_' + band][row] * u.arcsec,
                pa=(img_table['pa_' + band][row] - 90) * u.deg)
            try:
                deconv_size = measured_source_size.deconvolve(beam)
                fwhm_maj_deconv_arr.append(deconv_size.major.value)
                fwhm_min_deconv_arr.append(deconv_size.minor.value)
                fwhm_maj_deconv_err_arr.append(img_table['fwhm_maj_err_' +
                                                         band][row])
                fwhm_min_deconv_err_arr.append(img_table['fwhm_min_err_' +
                                                         band][row])
                pa_deconv_arr.append(deconv_size.pa.value)
                pa_deconv_err_arr.append(img_table['pa_err_' + band][row])

            except ValueError:
                fwhm_maj_deconv_arr.append(np.nan)
                fwhm_min_deconv_arr.append(np.nan)
                fwhm_maj_deconv_err_arr.append(np.nan)
                fwhm_min_deconv_err_arr.append(np.nan)
                pa_deconv_arr.append(np.nan)
                pa_deconv_err_arr.append(np.nan)

            pix_major_fwhm = (
                (img_table['fwhm_maj_' + band][row] * u.arcsec).to(u.degree) /
                pixel_scale).decompose()
            pix_minor_fwhm = (
                (img_table['fwhm_min_' + band][row] * u.arcsec).to(u.degree) /
                pixel_scale).decompose()
            center_coord = SkyCoord(img_table['RA_' + band][row],
                                    img_table['DEC_' + band][row],
                                    frame='icrs',
                                    unit=(u.deg, u.deg))
            center_coord_pix = center_coord.to_pixel(img_wcs)
            center_coord_pix_reg = regions.PixCoord(center_coord_pix[0],
                                                    center_coord_pix[1])

            pos_ang = (img_table['pa_' + band][row] - 90) * u.deg

            ellipse_reg = regions.EllipsePixelRegion(center_coord_pix_reg,
                                                     pix_major_fwhm.value * 2,
                                                     pix_minor_fwhm.value * 2,
                                                     angle=pos_ang)
            size = pix_major_fwhm * 2.1
            ap_mask = ellipse_reg.to_mask()
            cutout_mask = ap_mask.cutout(img_data)

            aperture_flux = np.nansum(cutout_mask[ap_mask.data == 1]) / ppbeam
            npix = len(cutout_mask[ap_mask.data == 1])

            #now make annulus for measuring background and error
            annulus_width = 15  #pixels
            annulus_radius = img_table[
                'fwhm_maj_' + band][row] * u.arcsecond  #0.1*u.arcsecond
            annulus_radius_pix = (annulus_radius.to(u.degree) /
                                  pixel_scale).decompose()

            #cutout image
            cutout = Cutout2D(img_data,
                              center_coord_pix,
                              annulus_radius * 2.5,
                              img_wcs,
                              mode='partial')
            cutout_center = regions.PixCoord(cutout.center_cutout[0],
                                             cutout.center_cutout[1])

            #define aperture regions for SNR
            innerann_reg = regions.CirclePixelRegion(cutout_center,
                                                     annulus_radius_pix.value)
            outerann_reg = regions.CirclePixelRegion(
                cutout_center, annulus_radius_pix.value + annulus_width)

            #Make masks from aperture regions
            annulus_mask = mask(outerann_reg, cutout) - mask(
                innerann_reg, cutout)

            # Calculate the SNR and aperture flux sums
            pixels_in_annulus = cutout.data[annulus_mask.astype(
                'bool')]  #pixels within annulus
            bg_rms = median_abs_deviation(pixels_in_annulus)
            ap_bg_rms = bg_rms / np.sqrt(
                npix / ppbeam)  #rms/sqrt(npix/ppbeam) - rms error per beam
            bg_median = np.median(pixels_in_annulus)

            pix_bg = bg_median * npix / ppbeam

            #flux corrections
            ap_flux_bgcorrect = aperture_flux - pix_bg
            ap_flux_correct = ap_flux_bgcorrect + ap_flux_bgcorrect * (
                1 - special.erf(2 * np.sqrt(np.log(2)))
            )  #flux correction for summing within 2*fwhm

            print(
                f'Background: {pix_bg}, Gauss amp: {img_table["gauss_amp_"+band][row]}'
            )
            print(f'peak pixel: {np.nanmax(cutout_mask[ap_mask.data==1])}')

            ap_flux_err_arr.append(ap_bg_rms)
            ap_flux_arr.append(ap_flux_correct)
            snr_arr.append(img_table['gauss_amp_' + band][row] / bg_rms)

    cols = [
        'ap_flux_' + band, 'ap_flux_err_' + band, 'fwhm_maj_deconv_' + band,
        'fwhm_maj_deconv_err_' + band, 'fwhm_min_deconv_' + band,
        'fwhm_min_deconv_err_' + band, 'pa_deconv_' + band,
        'pa_deconv_err_' + band, 'SNR_' + band
    ]
    arrs = [
        ap_flux_arr, ap_flux_err_arr, fwhm_maj_deconv_arr,
        fwhm_maj_deconv_err_arr, fwhm_min_deconv_arr, fwhm_min_deconv_err_arr,
        pa_deconv_arr, pa_deconv_err_arr, snr_arr
    ]
    for c in range(len(cols)):
        img_table.add_column(Column(np.array(arrs[c])), name=cols[c])
    img_table.add_column(Column(img_table['fwhm_maj_deconv_' + band] /
                                img_table['fwhm_min_deconv_' + band]),
                         name='ar_deconv_' + band)

    return img_table
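A hypothetical call to the routine above; the source ID, file paths, and band are placeholders (the hard-coded catalog and plot-output paths inside the function would need to exist):

img_table = fit_source(srcID=42,                        # placeholder source number
                       img='B6_image.fits',             # placeholder FITS image
                       img_name='B6_conv',              # placeholder plot-directory name
                       band='B6',
                       fit_bg=True,
                       bg_stddev_x=30, bg_stddev_y=30)
img_table.write('source42_B6_fit.fits', overwrite=True)  # placeholder output table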
Example #16
threecore_total = data[threecore_mask & (np.isfinite(data))].sum() / ppbeam
print("Total flux in the three 'main cores': {0}".format(threecore_total))
print("Fraction of total flux in the three 'main cores': {0}".format(
    threecore_total / total_signal))
print(" Fraction of recovered flux from BGPS alpha=3.5: {0}".format(
    threecore_total / bgps_scaled_225))
print(" Fraction of recovered flux from BGPS alpha=3: {0}".format(
    threecore_total / bgps_scaled_225_3))
print(" Fraction of recovered flux from BGPS alpha=4: {0}".format(
    threecore_total / bgps_scaled_225_4))
print("Area fraction in 'main cores': {0}".format(threecore_mask.sum() /
                                                  mask.sum()))

planck_217 = fits.open('../../planckwmap/PLCKI_C290.925+14.509_217GHz.fits')
# https://wiki.cosmos.esa.int/planckpla/index.php/Effective_Beams
beam_planck = radio_beam.Beam(4.990 * u.arcmin)
planck_217_flux = (planck_217[0].data * u.K).to(
    u.Jy, beam_planck.jtok_equiv(217 * u.GHz))
pixel_area_planck = np.abs(
    planck_217[0].header['CDELT1'] * planck_217[0].header['CDELT2']) * u.deg**2
ppbeam_planck = (beam_planck.sr / pixel_area_planck).decompose()
planck_12mptg_mask = regions.get_mask(planck_217[0])
planck_12mptg_total = planck_217_flux[planck_12mptg_mask].sum() / ppbeam_planck
print("Total Planck flux in 12m ptg area: {0}".format(planck_12mptg_total))

whole_w51_reg = pyregion.open(paths.rpath('whole_w51_cloud.reg'))
whole_w51_mask_planck = whole_w51_reg.get_mask(planck_217[0])
planck_whole_total = planck_217_flux[whole_w51_mask_planck].sum(
) / ppbeam_planck
print("Total Planck flux in entire cloud: {0}".format(planck_whole_total))
Example #17
            row['x_cen'], row['y_cen']))

        #mask = rmask == name
        #dend_inds = np.where(mask)

        #view = (slice(None), # all spectral channels
        #        slice(dend_inds[0].min(), dend_inds[0].max()+1),
        #        slice(dend_inds[1].min(), dend_inds[1].max()+1),
        #       )
        #sc = cube[view].with_mask(mask[view[1:]])
        sc = cube.subcube_from_ds9region(SL)
        spec = sc.mean(axis=(1, 2))
        spec.meta['beam'] = radio_beam.Beam(
            major=np.nanmedian([bm.major.to(u.deg).value
                                for bm in spec.beams]),
            minor=np.nanmedian([bm.minor.to(u.deg).value
                                for bm in spec.beams]),
            pa=np.nanmedian([bm.pa.to(u.deg).value for bm in spec.beams]),
        )
        spec.hdu.writeto("spectra/dendro{0:03d}_spw{1}_mean{2}.fits".format(
            name, spw, suffix),
                         clobber=True)

        bgSL = pyregion.parse("fk5; circle({0},{1},1.0\")".format(
            row['x_cen'], row['y_cen']))
        bgsc = cube.subcube_from_ds9region(bgSL)
        npix = np.count_nonzero(np.isfinite(bgsc[0, :, :]))
        bgspec = (bgsc.sum(axis=(1, 2)) - sc.sum(axis=(1, 2))) / npix
        bgspec.meta['beam'] = radio_beam.Beam(
            major=np.nanmedian([bm.major.to(u.deg).value
                                for bm in spec.beams]),
Example #18
def alpha_hist(finaliter_prefix_b3, finaliter_prefix_b6, threshold=5,
               basepath='/home/adam/work/alma-imf/reduction/', las=None):
    image_b3 = SpectralCube.read(f'{finaliter_prefix_b3}.image.tt0.fits', use_dask=False, format='fits').minimal_subcube()
    image_b6 = SpectralCube.read(f'{finaliter_prefix_b6}.image.tt0.fits', use_dask=False, format='fits').minimal_subcube()
    image_b3 = image_b3 * u.beam / image_b3.beam.sr
    image_b6 = image_b6 * u.beam / image_b6.beam.sr

    fieldname = os.path.basename(finaliter_prefix_b6).split("_")[0]
    print()
    print(fieldname)
    print(image_b3)
    print(image_b6)

    if las:
        print(f"LAS {las} unsharp masking")
        t0 = time.time()
        smb3 = image_b3[0].convolve_to(radio_beam.Beam(las), allow_huge=True)
        print(f"Convolution of b3: {time.time() - t0} seconds")
        image_b3 = image_b3 - smb3
        print(f"Subtraction of convolved slice: {time.time()-t0}")
        smb6 = image_b6[0].convolve_to(radio_beam.Beam(las), allow_huge=True)
        image_b6 = image_b6 - smb6
        dt = time.time() - t0
        print(f"LAS subtraction took {dt} seconds")


    noise_region_b3 = regions.read_ds9(f"{basepath}/reduction/noise_estimation_regions/{fieldname}_B3_noise_sampling.reg")
    noise_region_b6 = regions.read_ds9(f"{basepath}/reduction/noise_estimation_regions/{fieldname}_B6_noise_sampling.reg")

    beams = radio_beam.Beams(major=u.Quantity([image_b3.beam.major, image_b6.beam.major]),
                             minor=u.Quantity([image_b3.beam.minor, image_b6.beam.minor]),
                             pa=u.Quantity([image_b3.beam.pa, image_b6.beam.pa]))
    commonbeam = radio_beam.commonbeam.commonbeam(beams)
    print(commonbeam)

    if image_b3.beam.sr < image_b6.beam.sr:
        header = image_b6[0].header
    else:
        header = image_b3[0].header

    print("Convolution and Reprojection")
    t0 = time.time()
    image_b3_repr = image_b3[0].convolve_to(commonbeam, allow_huge=True).reproject(header)
    print(f"B3 reprojection took {time.time()-t0} seconds")
    t0 = time.time()
    image_b6_repr = image_b6[0].convolve_to(commonbeam, allow_huge=True).reproject(header)
    print(f"B6 reprojection took {time.time()-t0} seconds")

    t0 = time.time()
    noiseim_b3 = image_b3.subcube_from_regions(noise_region_b3)[0].convolve_to(commonbeam, allow_huge=True)
    noiseim_b6 = image_b6.subcube_from_regions(noise_region_b6)[0].convolve_to(commonbeam, allow_huge=True)
    print(f"Shape of noiseims; b3={noiseim_b3.shape}, b6={noiseim_b6.shape}.  Subcubes took {time.time()-t0} seconds.")

    t0 = time.time()
    b3_std = stats.mad_std(noiseim_b3, ignore_nan=True)
    b6_std = stats.mad_std(noiseim_b6, ignore_nan=True)
    print(f"mad_std took {time.time()-t0} seconds")

    t0 = time.time()
    mask = (image_b3_repr > threshold*b3_std) & (image_b6_repr > threshold*b6_std)
    alpha_b3_b6 = (np.log(image_b3_repr / image_b6_repr) / np.log(image_b3.wcs.wcs.crval[2] / image_b6.wcs.wcs.crval[2])).value
    alpha_b3_b6[~mask] = np.nan
    print(f"mask & alpha calc took {time.time()-t0} seconds")

    pl.figure(2, figsize=(8,8)).clf()
    ax = pl.gca()
    ax.hist(alpha_b3_b6[mask], bins=np.linspace(-2,5), density=True)
    ax.set_xlabel("Spectral Index $\\alpha$")
    ax.set_ylabel("Fraction of Pixels")

    return mask, alpha_b3_b6, image_b3_repr, image_b6_repr
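Before computing the spectral index, the routine above puts both bands on a common beam using radio_beam's common-beam machinery. A minimal sketch of that step with placeholder beams (the same commonbeam call as in the function):

import radio_beam
from astropy import units as u

beams = radio_beam.Beams(major=u.Quantity([2.0, 1.2], u.arcsec),
                         minor=u.Quantity([1.5, 1.0], u.arcsec),
                         pa=u.Quantity([10, 80], u.deg))
commonbeam = radio_beam.commonbeam.commonbeam(beams)   # smallest beam both can be convolved to
print(commonbeam)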
Example #19
def b6b7_catalog(B6_img, B6_name, B7_img, B7_name, cat_name, nonconv_B6_img=None, nonconv_B7_img=None):
    #creates catalog from one image in each band
    #B3_names, B6_names, B7_names only used for gaussian diag directory names
    
    ref_data_name = '/home/jotter/nrao/summer_research_2018/tables/ref_catalog_may21.fits'
    ref_data = Table.read(ref_data_name)
    ref_arrs = [ref_data['B6_detect'], ref_data['B7_detect']]
  
    band_imgs = [B6_img, B7_img]
    band_names = ['B6', 'B7']
    band_img_names = [B6_name, B7_name]
    band_tables = []
    for b in range(len(band_imgs)): #first loop through different bands
        name = band_names[b]
        img_name = band_img_names[b]
        img = band_imgs[b]
        fl = fits.open(img)
        header = fl[0].header
        img_data = fl[0].data.squeeze()
        img_wcs = WCS(header).celestial

        beam = radio_beam.Beam.from_fits_header(header)
        pixel_scale = np.abs(img_wcs.pixel_scale_matrix.diagonal().prod())**0.5 * u.deg
        ppbeam = (beam.sr/(pixel_scale**2)).decompose().value

        if name == 'B6' and nonconv_B6_img is not None:
            fl = fits.open(nonconv_B6_img)
            header = fl[0].header
            nonconv_beam = radio_beam.Beam.from_fits_header(header)
            ppbeam = (nonconv_beam.sr/(pixel_scale**2)).decompose().value
        if name == 'B7' and nonconv_B7_img is not None:
            fl = fits.open(nonconv_B7_img)
            header = fl[0].header
            nonconv_beam = radio_beam.Beam.from_fits_header(header)
            ppbeam = (nonconv_beam.sr/(pixel_scale**2)).decompose().value
            
        #now get ready to fit gaussians
        #start by setting up save directory for images
        gauss_save_dir = '/home/jotter/nrao/gauss_diags_may21/'+img_name+'/'
        if not os.path.exists(gauss_save_dir):
            os.makedirs(gauss_save_dir)
        #now make region list

        rad = Angle(1, 'arcsecond') #radius used in region list
        regs = []

        src_inds = np.where(ref_arrs[b] == True)[0]
        print(len(src_inds))

        for ind in src_inds:
            reg = regions.CircleSkyRegion(center=SkyCoord(ref_data['RA_B3'][ind]*u.degree, ref_data['DEC_B3'][ind]*u.degree), radius=rad, meta={'text':str(ref_data['B3_Seq'][ind])})
            reg_pix = reg.to_pixel(img_wcs)
            if reg_pix.center.x > 0 and reg_pix.center.x < len(img_data[0]):
                if reg_pix.center.y > 0 and reg_pix.center.y < len(img_data):
                    if np.isnan(img_data[int(reg_pix.center.x), int(reg_pix.center.y)]) == False:
                        regs.append(reg)

        cat_r = Angle(0.5, 'arcsecond')/2 #radius for gaussian fitting
        print('ok')
        gauss_cat = gaussfit_catalog(img, regs, cat_r, savepath=gauss_save_dir, max_offset_in_beams = 1, max_radius_in_beams = 5)
        #table does not have all columns yet, add others later
        img_table = Table(names=('Seq_B3', 'fwhm_maj_'+name, 'fwhm_maj_err_'+name, 'fwhm_min_'+name, 'fwhm_min_err_'+name, 'pa_'+name, 'pa_err_'+name, 'gauss_amp_'+name, 'gauss_amp_err_'+name,'RA_'+name,'RA_err_'+name, 'DEC_'+name, 'DEC_err_'+name, ), dtype=('i4', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8'))
        for key in gauss_cat:
            img_table.add_row((key, gauss_cat[key]['fwhm_major'], gauss_cat[key]['e_fwhm_major'], gauss_cat[key]['fwhm_minor'], gauss_cat[key]['e_fwhm_minor'], gauss_cat[key]['pa'], gauss_cat[key]['e_pa'], gauss_cat[key]['amplitude'], gauss_cat[key]['e_amplitude'], gauss_cat[key]['center_x'], gauss_cat[key]['e_center_x'], gauss_cat[key]['center_y'], gauss_cat[key]['e_center_y']))
        #now measure deconvolved sizes and aperture flux measurements for each source
        ap_flux_arr = []
        ap_flux_err_arr = []
        fwhm_maj_deconv_arr = []
        fwhm_maj_deconv_err_arr = []
        fwhm_min_deconv_arr = []
        fwhm_min_deconv_err_arr = []
        pa_deconv_arr = []
        pa_deconv_err_arr = []
        snr_arr = []
        rms_arr = []
 
        for row in range(len(img_table)): #now loop through sources in reference data and make measurements
            ref_ind = np.where(ref_data['B3_Seq'] == img_table['Seq_B3'][row])[0]
            if len(ref_ind) > 0:
                #now measuring deconvolved sizes
                measured_source_size = radio_beam.Beam(major=img_table['fwhm_maj_'+name][row]*u.arcsec, minor=img_table['fwhm_min_'+name][row]*u.arcsec, pa=(img_table['pa_'+name][row]-90)*u.degree)
                try:
                    deconv_size = measured_source_size.deconvolve(beam)
                    fwhm_maj_deconv_arr.append(deconv_size.major.value)
                    fwhm_min_deconv_arr.append(deconv_size.minor.value)
                    fwhm_maj_deconv_err_arr.append(img_table['fwhm_maj_err_'+name][row]) #same error as non deconvolved
                    fwhm_min_deconv_err_arr.append(img_table['fwhm_min_err_'+name][row])
                    pa_deconv_arr.append(deconv_size.pa.to(u.deg).value)
                    pa_deconv_err_arr.append(img_table['pa_err_'+name][row])
                except ValueError:
                    fwhm_maj_deconv_arr.append(np.nan)
                    fwhm_min_deconv_arr.append(np.nan)
                    fwhm_maj_deconv_err_arr.append(np.nan)
                    fwhm_min_deconv_err_arr.append(np.nan)
                    pa_deconv_arr.append(np.nan)
                    pa_deconv_err_arr.append(np.nan)
 

                pix_major_fwhm = ((img_table['fwhm_maj_'+name][row]*u.arcsec).to(u.degree)/pixel_scale).decompose()
                pix_minor_fwhm = ((img_table['fwhm_min_'+name][row]*u.arcsec).to(u.degree)/pixel_scale).decompose()
                center_coord = SkyCoord(img_table['RA_'+name][row], img_table['DEC_'+name][row], frame='icrs', unit=(u.deg, u.deg))
                center_coord_pix = center_coord.to_pixel(img_wcs)
                center_coord_pix_reg = regions.PixCoord(center_coord_pix[0], center_coord_pix[1])
                pos_ang = (img_table['pa_'+name][row]-90)*u.deg #must subtract 90 to be consistent

                ellipse_reg = regions.EllipsePixelRegion(center_coord_pix_reg, pix_major_fwhm.value*2, pix_minor_fwhm.value*2, angle=pos_ang)
                ap_mask = ellipse_reg.to_mask()
                cutout_mask = ap_mask.cutout(img_data)
                
                aperture_flux = np.nansum(cutout_mask[ap_mask.data==1])/ppbeam
                npix = len(cutout_mask[ap_mask.data==1])

                #now make annulus for measuring background and error
                annulus_width = 15 #pixels
                annulus_radius = img_table['fwhm_maj_'+name][row]*u.arcsecond#+0.05*u.arcsecond
                annulus_radius_pix = (annulus_radius.to(u.degree)/pixel_scale).decompose()

                #cutout image
                cutout = Cutout2D(img_data, center_coord_pix, annulus_radius*2.5, img_wcs, mode='partial')
                cutout_center = regions.PixCoord(cutout.center_cutout[0], cutout.center_cutout[1])

                #define aperture regions for SNR
                innerann_reg = regions.CirclePixelRegion(cutout_center, annulus_radius_pix.value)
                outerann_reg = regions.CirclePixelRegion(cutout_center, annulus_radius_pix.value+annulus_width)

                #Make masks from aperture regions
                annulus_mask = mask(outerann_reg, cutout) - mask(innerann_reg, cutout)

                # Calculate the SNR and aperture flux sums
                pixels_in_annulus = cutout.data[annulus_mask.astype('bool')]
                bg_rms = median_abs_deviation(pixels_in_annulus)
                print(img_table['Seq_B3'][row])
                print('BG RMS: %f' % (bg_rms))
                ap_bg_rms = bg_rms/np.sqrt(npix/ppbeam) #rms/sqrt(npix/ppbeam) - rms error per beam
                bg_median = np.nanmedian(pixels_in_annulus)

                pix_bg = bg_median*npix/ppbeam

                ap_flux_bgcorrect = aperture_flux - pix_bg
                ap_flux_correct = ap_flux_bgcorrect + ap_flux_bgcorrect*(1 - special.erf(2*np.sqrt(np.log(2)))) #flux correction for summing within 2*fwhm
                
                ap_flux_err_arr.append(ap_bg_rms)
                ap_flux_arr.append(ap_flux_correct)
                snr_arr.append(img_table['gauss_amp_'+name][row]/bg_rms)
                rms_arr.append(bg_rms)
                
        cols = ['ap_flux_'+name, 'ap_flux_err_'+name, 'fwhm_maj_deconv_'+name, 'fwhm_maj_deconv_err_'+name, 'fwhm_min_deconv_'+name, 'fwhm_min_deconv_err_'+name, 'pa_deconv_'+name, 'pa_deconv_err_'+name, 'SNR_'+name, 'RMS_'+name]
        arrs = [ap_flux_arr, ap_flux_err_arr, fwhm_maj_deconv_arr, fwhm_maj_deconv_err_arr, fwhm_min_deconv_arr, fwhm_min_deconv_err_arr, pa_deconv_arr, pa_deconv_err_arr, snr_arr, rms_arr]
        for c in range(len(cols)):
            img_table.add_column(Column(np.array(arrs[c])), name=cols[c])
        img_table.add_column(Column(np.array(fwhm_maj_deconv_arr)/np.array(fwhm_min_deconv_arr)), name='ar_deconv_'+name)
        band_tables.append(img_table) #list of tables for each image
            
    
    B6B7 = join(band_tables[0], band_tables[1], keys='Seq_B3', join_type='outer')
    B6B7.write('/home/jotter/nrao/summer_research_2018/tables/'+cat_name+'.fits',  overwrite=True)
Example #20
def flux_hist(finaliter_prefix_b3,
              finaliter_prefix_b6,
              basepath='/home/adam/work/alma-imf/reduction/',
              las=None):
    image_b3 = SpectralCube.read(f'{finaliter_prefix_b3}.image.tt0.fits',
                                 use_dask=False,
                                 format='fits').minimal_subcube()
    image_b6 = SpectralCube.read(f'{finaliter_prefix_b6}.image.tt0.fits',
                                 use_dask=False,
                                 format='fits').minimal_subcube()
    image_b3 = image_b3 * u.beam / image_b3.beam.sr
    image_b6 = image_b6 * u.beam / image_b6.beam.sr

    fieldname = os.path.basename(finaliter_prefix_b6).split("_")[0]

    if las:
        smb3 = image_b3[0].convolve_to(radio_beam.Beam(las), allow_huge=True)
        image_b3 = image_b3 - smb3
        smb6 = image_b6[0].convolve_to(radio_beam.Beam(las), allow_huge=True)
        image_b6 = image_b6 - smb6

    noise_region_b3 = regions.read_ds9(
        f"{basepath}/reduction/noise_estimation_regions/{fieldname}_B3_noise_sampling.reg"
    )
    noise_region_b6 = regions.read_ds9(
        f"{basepath}/reduction/noise_estimation_regions/{fieldname}_B6_noise_sampling.reg"
    )

    noiseim_b3 = image_b3.subcube_from_regions(noise_region_b3)[0]
    noiseim_b6 = image_b6.subcube_from_regions(noise_region_b6)[0]

    b3_std = stats.mad_std(noiseim_b3, ignore_nan=True)
    b6_std = stats.mad_std(noiseim_b6, ignore_nan=True)
    print(fieldname, b3_std, b6_std)

    fig = pl.figure(2, figsize=(12, 7))
    fig.clf()
    ax = pl.subplot(1, 2, 1)
    b3data = image_b3[0].value
    bins_b3 = np.linspace(np.nanmin(b3data), np.nanmax(b3data), 100)
    bins_b3b = np.linspace(np.nanmin(b3data), np.nanmax(b3data), 10000)
    H, L, P = ax.hist(b3data[np.isfinite(b3data)], bins=bins_b3, density=False)
    #ax.hist(noiseim_b3.value.ravel(), bins=bins_b3)
    ax.set_yscale('log')
    ax.set_ylim(0.5, ax.get_ylim()[1])
    ax.plot(bins_b3b,
            H.max() * np.exp(-bins_b3b**2 / (2 * b3_std.value**2)), 'k')
    ax.set_xlabel("$S_{3mm}$ [Jy/sr]")
    ax.set_ylabel("Number of Pixels")

    axin = fig.add_axes([0.25, 0.6, 0.20, 0.25])
    bins = np.linspace(-5 * b3_std.value, 5 * b3_std.value, 100)
    H, L, P = axin.hist(b3data[(b3data < 5 * b3_std.value)
                               & (b3data > -5 * b3_std.value)],
                        bins=bins,
                        density=False)
    #axin.hist(noiseim_b3.value.ravel(), bins=bins)
    gauss = H.max() * np.exp(-bins**2 / (2 * b3_std.value**2))
    axin.plot(bins, gauss, 'k')
    axin.set_xticklabels([])
    axin.set_yticks(axin.get_yticks()[1:])
    axin2 = fig.add_axes([0.25, 0.5, 0.2, 0.1])
    loc = (L[1:] + L[:-1]) / 2
    axin2.plot(loc,
               H - H.max() * np.exp(-loc**2 / (2 * b3_std.value**2)),
               drawstyle='steps',
               color='k')
    axin2.set_xlim(axin.get_xlim())

    ax = pl.subplot(1, 2, 2)
    b6data = image_b6[0].value
    bins_b6 = np.linspace(np.nanmin(b6data), np.nanmax(b6data), 100)
    bins_b6b = np.linspace(np.nanmin(b6data), np.nanmax(b6data), 10000)
    H, L, P = ax.hist(b6data[np.isfinite(b6data)], bins=bins_b6, density=False)
    ax.plot(bins_b6b,
            H.max() * np.exp(-bins_b6b**2 / (2 * b6_std.value**2)), 'k')
    #ax.hist(noiseim_b6.value.ravel(), bins=bins_b6)
    ax.set_ylim(0.5, ax.get_ylim()[1])
    ax.set_yscale('log')
    ax.yaxis.set_label_position("right")
    ax.yaxis.tick_right()
    ax.set_xlabel("$S_{1mm}$ [Jy/sr]")
    ax.set_ylabel("Number of Pixels")

    axin = fig.add_axes([0.65, 0.6, 0.20, 0.25])
    bins = np.linspace(-5 * b6_std.value, 5 * b6_std.value, 100)
    H, L, P = axin.hist(b6data[(b6data < 5 * b6_std.value)
                               & (b6data > -5 * b6_std.value)],
                        bins=bins,
                        density=False)
    #axin.hist(noiseim_b6.value.ravel(), bins=bins)
    axin.plot(bins, H.max() * np.exp(-bins**2 / (2 * b6_std.value**2)), 'k')
    axin.set_xticklabels([])
    axin.set_yticks(axin.get_yticks()[1:])
    axin2 = fig.add_axes([0.65, 0.5, 0.2, 0.1])
    loc = (L[1:] + L[:-1]) / 2
    axin2.plot(loc,
               H - H.max() * np.exp(-loc**2 / (2 * b6_std.value**2)),
               drawstyle='steps',
               color='k')
    axin2.set_xlim(axin.get_xlim())
Example #21
#sharc_hdr['BUNIT'] = ('cm^-2', 'N(H2), column density of H2')

colmap_sharc_20 = dust_emissivity.dust.colofsnu(frequencies[0],
                                                imagecube[0,:,:],
                                                temperature=20*u.K,
                                                beta=1.750,
                                               )
colmap_sharc_50 = dust_emissivity.dust.colofsnu(frequencies[0],
                                                imagecube[0,:,:],
                                                temperature=50*u.K,
                                                beta=1.750,
                                               )
fits.writeto(filename=paths.cpath('column_maps/sharc_col_50K.fits'), data=colmap_sharc_50.value, header=scuba_hdr, overwrite=True)
fits.writeto(filename=paths.cpath('column_maps/sharc_col_20K.fits'), data=colmap_sharc_20.value, header=scuba_hdr, overwrite=True)

scuba_beam = radio_beam.Beam(8*u.arcsec)

colmap_scuba_20 = dust_emissivity.dust.colofsnu(frequencies[1],
                                                imagecube[1,:,:],
                                                beta=1.750,
                                                temperature=20*u.K)
colmap_scuba_50 = dust_emissivity.dust.colofsnu(frequencies[1],
                                                imagecube[1,:,:],
                                                beta=1.750,
                                                temperature=50*u.K)

fits.writeto(filename=paths.cpath('column_maps/scuba_col_50K.fits'), data=colmap_scuba_50.value, header=scuba_hdr, overwrite=True)
fits.writeto(filename=paths.cpath('column_maps/scuba_col_20K.fits'), data=colmap_scuba_20.value, header=scuba_hdr, overwrite=True)

pl.figure(1).clf()
pl.hist(colmap_sharc_50[np.isfinite(colmap_sharc_50)], bins=np.logspace(19,24),alpha=0.5, log=True)
Example #22
def gauss_fitter(region='Cepheus_L1251',
                 snr_min=3.0,
                 mol='C2S',
                 vmin=5.0,
                 vmax=10.0,
                 convolve=False,
                 use_old_conv=False,
                 multicore=1,
                 file_extension=None):
    """
    	Fit a Gaussian to non-NH3 emission lines from GAS.
    	It creates a cube for the best-fit Gaussian, a cube 
    	for the best-fit Gaussian with noise added back into 
    	the spectrum, and a parameter map of Tpeak, Vlsr, and FWHM
    
    	Parameters
    	----------
    	region : str
        	Name of region to reduce
    	snr_min : float
        	Lowest signal-to-noise pixels to include in the line-fitting
    	mol : str
        	name of molecule to fit
   	vmin : numpy.float
        	Minimum centroid velocity, in km/s.
    	vmax : numpy.float
        	Maximum centroid velocity, in km/s.
    	convolve : bool or float
        	If not False, specifies the beam-size to convolve the original map with
		Beam-size must be given in arcseconds
    	use_old_conv : bool
        	If True, use an already convolved map with name:
		region + '_' + mol + file_extension + '_conv.fits'
		This convolved map must be in units of km/s
    	multicore : int
		Maximum number of simultaneous processes desired
	file_extension: str
		filename extension 
    	"""
    if file_extension:
        root = file_extension
    else:
        # root = 'base{0}'.format(blorder)
        root = 'all'

    molecules = ['C2S', 'HC7N_22_21', 'HC7N_21_20', 'HC5N']

    MolFile = '{0}/{0}_{2}_{1}.fits'.format(region, root, mol)
    ConvFile = '{0}/{0}_{2}_{1}_conv.fits'.format(region, root, mol)
    GaussOut = '{0}/{0}_{2}_{1}_gauss_cube.fits'.format(region, root, mol)
    GaussNoiseOut = '{0}/{0}_{2}_{1}_gauss_cube_noise.fits'.format(
        region, root, mol)
    ParamOut = '{0}/{0}_{2}_{1}_param_cube.fits'.format(region, root, mol)

    # Load the spectral cube and convert to velocity units
    cube = SpectralCube.read(MolFile)
    cube_km = cube.with_spectral_unit(u.km / u.s, velocity_convention='radio')

    # If desired, convolve map with larger beam
    # or load previously created convolved cube
    if convolve:
        cube = SpectralCube.read(MolFile)
        cube_km_1 = cube.with_spectral_unit(u.km / u.s,
                                            velocity_convention='radio')
        beam = radio_beam.Beam(major=convolve * u.arcsec,
                               minor=convolve * u.arcsec,
                               pa=0 * u.deg)
        cube_km = cube_km_1.convolve_to(beam)
        cube_km.write(ConvFile, format='fits', overwrite=True)
    if use_old_conv:
        cube_km = SpectralCube.read(ConvFile)

    # Define the spectral axis in km/s
    spectra_x_axis_kms = np.array(cube_km.spectral_axis)

    # Find the channel range corresponding to vmin and vmax
    # -- This is a hold-over from when I originally set up the code to
    #    use a channel range rather than velocity range.
    #    Can change later, but this should work for now.
    low_channel = np.where(spectra_x_axis_kms <= vmax
                           )[0][0] + 1  # Add ones to change index to channel
    high_channel = np.where(spectra_x_axis_kms >= vmin
                            )[0][-1] + 1  # Again, hold-over from older setup
    peak_channels = [low_channel, high_channel]

    # Create cubes for storing the fitted Gaussian profiles
    # and the Gaussians with noise added back into the spectrum
    header = cube_km.header
    cube_gauss = np.array(cube_km.unmasked_data[:, :, :])
    cube_gauss_noise = np.array(cube_km.unmasked_data[:, :, :])
    shape = np.shape(cube_gauss)

    # Set up a cube for storing fitted parameters
    param_cube = np.zeros((6, shape[1], shape[2]))
    param_header = cube_km.header

    # Define the Gaussian profile
    def p_eval(x, a, x0, sigma):
        return a * np.exp(-(x - x0)**2 / (2 * sigma**2))

    # Create some arrays full of NaNs,
    # to be used in output cubes if fits fail
    nan_array = np.empty(shape[0])  # For gauss cubes
    nan_array[:] = np.NAN
    nan_array2 = np.empty(param_cube.shape[0])  # For param cubes
    nan_array2[:] = np.NAN

    # Loop through each pixel and find those
    # with SNR above snr_min
    x = []
    y = []
    pixels = 0
    for (i, j), value in np.ndenumerate(cube_gauss[0]):
        spectra = np.array(cube_km.unmasked_data[:, i, j])
        if (False in np.isnan(spectra)):
            rms = np.nanstd(
                np.append(spectra[0:(peak_channels[0] - 1)],
                          spectra[(peak_channels[1] + 1):len(spectra)]))
            if (max(spectra[peak_channels[0]:peak_channels[1]]) /
                    rms) > snr_min:
                pixels += 1
                x.append(i)
                y.append(j)
        else:
            cube_gauss[:, i, j] = nan_array
            param_cube[:, i, j] = nan_array2
            cube_gauss_noise[:, i, j] = nan_array
    print(str(pixels) + ' pixels above SNR=' + str(snr_min))

    # Define a Gaussian fitting function for each pixel
    # i, j are the x,y coordinates of the pixel being fit
    def pix_fit(i, j):
        spectra = np.array(cube_km.unmasked_data[:, i, j])
        # Use the peak brightness Temp within specified channel
        # range as the initial guess for Gaussian height
        max_ch = np.argmax(spectra[peak_channels[0]:peak_channels[1]])
        Tpeak = spectra[peak_channels[0]:peak_channels[1]][max_ch]
        # Use the velocity of the brightness Temp peak as
        # initial guess for Gaussian mean
        vpeak = spectra_x_axis_kms[peak_channels[0]:peak_channels[1]][max_ch]
        rms = np.std(
            np.append(spectra[0:(peak_channels[0] - 1)],
                      spectra[(peak_channels[1] + 1):len(spectra)]))
        err1 = np.zeros(shape[0]) + rms
        # Create a noise spectrum based on rms of off-line channels
        # This will be added to best-fit Gaussian to obtain a noisy Gaussian
        noise = np.random.normal(0., rms, len(spectra_x_axis_kms))
        # Define initial guesses for Gaussian fit
        guess = [Tpeak, vpeak, 0.3]  # [height, mean, sigma]
        try:
            coeffs, covar_mat = curve_fit(p_eval,
                                          xdata=spectra_x_axis_kms,
                                          ydata=spectra,
                                          p0=guess,
                                          sigma=err1,
                                          maxfev=500)
            gauss = np.array(
                p_eval(spectra_x_axis_kms, coeffs[0], coeffs[1], coeffs[2]))
            noisy_gauss = np.array(
                p_eval(spectra_x_axis_kms, coeffs[0], coeffs[1],
                       coeffs[2])) + noise
            params = np.append(coeffs, (covar_mat[0][0]**0.5,
                                        covar_mat[1][1]**0.5,
                                        covar_mat[2][2]**0.5))
            # params = ['Tpeak', 'VLSR','sigma','Tpeak_err','VLSR_err','sigma_err']

            # Don't accept fit if fitted parameters are non-physical or too uncertain
            if ((params[0] < 0.01) or (params[3] > 1.0) or (params[2] < 0.05)
                    or (params[5] > 0.5) or (params[4] > 0.75)):
                noisy_gauss = nan_array
                gauss = nan_array
                params = nan_array2

            # Don't accept fit if the SNR for fitted spectrum is less than SNR threshold
            #if max(gauss)/rms < snr_min:
            #	noisy_gauss = nan_array
            #	gauss = nan_array
            #	params = nan_array2

        except RuntimeError:
            noisy_gauss = nan_array
            gauss = nan_array
            params = nan_array2

        return i, j, gauss, params, noisy_gauss

    # Parallel computation:

    nproc = multicore  # maximum number of simultaneous processes desired
    queue = pprocess.Queue(limit=nproc)
    calc = queue.manage(pprocess.MakeParallel(pix_fit))
    tic = time.time()
    counter = 0

    # Uncomment to see some plots of the fitted spectra
    #for i,j in zip(x,y):
    #pix_fit(i,j)
    #plt.plot(spectra_x_axis_kms, spectra, color='blue', drawstyle='steps')
    #plt.plot(spectra_x_axis_kms, gauss, color='red')
    #plt.show()
    #plt.close()

    # Begin parallel computations
    # Store the best-fit Gaussians and parameters
    # in their correct positions in the previously created cubes
    for i, j in zip(x, y):
        calc(i, j)
    for i, j, gauss_spec, parameters, noisy_gauss_spec in queue:
        cube_gauss[:, i, j] = gauss_spec
        param_cube[:, i, j] = parameters
        cube_gauss_noise[:, i, j] = noisy_gauss_spec
        counter += 1
        print(str(counter) + ' of ' + str(pixels) + ' pixels completed', end='\r')
        sys.stdout.flush()
    print("\n %f s for parallel computation." % (time.time() - tic))

    # Save final cubes
    # These will be in km/s units.
    # Spectra will have larger values to the left, lower values to right
    cube_final_gauss = SpectralCube(data=cube_gauss,
                                    wcs=cube_km.wcs,
                                    header=cube_km.header)
    cube_final_gauss.write(GaussOut, format='fits', overwrite=True)
    cube_final_gauss_noise = SpectralCube(data=cube_gauss_noise,
                                          wcs=cube_km.wcs,
                                          header=cube_km.header)
    cube_final_gauss_noise.write(GaussNoiseOut, format='fits', overwrite=True)

    # Construct appropriate header for param_cube
    param_header['NAXIS3'] = len(nan_array2)
    param_header['WCSAXES'] = 3
    param_header['CRPIX3'] = 1
    param_header['CDELT3'] = 1
    param_header['CRVAL3'] = 0
    param_header['PLANE1'] = 'Tpeak'
    param_header['PLANE2'] = 'VLSR'
    param_header['PLANE3'] = 'sigma'
    param_header['PLANE4'] = 'Tpeak_err'
    param_header['PLANE5'] = 'VLSR_err'
    param_header['PLANE6'] = 'sigma_err'

    fits.writeto(ParamOut, param_cube, header=param_header, overwrite=True)
    ided_linefreqs.value
])
linefreqs = u.Quantity(linefreqs, u.GHz)

flist = [fn] if 'fn' in locals() else glob.glob(
    paths.dpath('stacked_spectra/OrionSourceI_*robust0.5.fits'))
for fn in flist:

    basefn = os.path.split(fn)[-1]

    if 'B7' in basefn and 'lb' not in basefn:
        continue

    print(fn)

    beam = (radio_beam.Beam(0.1 * u.arcsec, 0.08 * u.arcsec) if 'B3' in fn else
            radio_beam.Beam(0.043 * u.arcsec, 0.034 * u.arcsec) if 'B6' in fn
            else radio_beam.Beam(0.029 * u.arcsec, 0.022 * u.arcsec))

    sp_st = pyspeckit.Spectrum(fn)

    jytok = beam.jtok(sp_st.xarr.mean())
    sp_st.data *= jytok.value
    sp_st.unit = u.K

    pl.figure(0, figsize=(16, 6)).clf()
    sp_st.plotter(figure=pl.figure(0, figsize=(16, 6)),
                  clear=True,
                  ymin=-0.0025 * jytok.value,
                  ymax=0.01 * jytok.value)
Example #24
0
from constants import mustang_central_frequency, mustang_beam_fwhm

import paths

almafn = paths.root(
    'SgrB2/SgrB2_selfcal_full_TCTE_selfcal5_ampphase_taylorterms_multiscale.image.tt0.pbcor.fits'
)
almafh = fits.open(almafn)[0]
loresfn = paths.root('SgrB2/SgrB2_precon_2_arcsec_pass_9.fits')
loresfn = paths.root('SgrB2/SgrB2_5pass_1_.0.2_10mJy_10mJy_final_smooth4.fits')
loresfn = '/Volumes/external/mgps/Feb5_2019/SgrB2_5pass_1_.0.2_10mJy_10mJy_w_session5_final_smooth4_PlanckCombined.fits'
loresfh_ = fits.open(loresfn)[0]

loresfwhm = mustang_beam_fwhm
loresbm = radio_beam.Beam(loresfwhm)

loresfh_header = loresfh_.header
if 'precon_2_arcsec_pass_9' in loresfn:
    loresfh_header.update(
        wcs.WCS(loresfh_.header)[1415:1815, 958:1358].to_header())
    loresfh = fits.PrimaryHDU(data=loresfh_.data[1415:1815, 958:1358],
                              header=loresfh_header)
elif '5pass_1_.0.2_10mJy' in loresfn:
    xc, yc = 1470, 1990
    loresfh_header.update(
        wcs.WCS(loresfh_.header)[yc - 350:yc + 350,
                                 xc - 350:xc + 350].to_header())
    loresfh = fits.PrimaryHDU(data=loresfh_.data[yc - 350:yc + 350,
                                                 xc - 350:xc + 350],
                              header=loresfh_header)
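
# A hedged alternative for the cutout above (not part of the original snippet):
# astropy's Cutout2D performs the same crop-plus-WCS-update in one step, though it
# only carries over the WCS, so any other header keywords would need copying by hand.
# from astropy.nddata import Cutout2D
# cutout = Cutout2D(loresfh_.data, position=(xc, yc), size=(700, 700),
#                   wcs=wcs.WCS(loresfh_.header))
# loresfh = fits.PrimaryHDU(data=cutout.data, header=cutout.wcs.to_header())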
Example #25
0
distance    = (8100*u.pc).to(u.m)

# Some pixel & beam area stuff ...
bmaj            = (head['bmaj']*u.deg).to(u.arcsec)
bmin            = (head['bmin']*u.deg).to(u.arcsec)
bmaj_m          = bmaj.to(u.rad).value * distance
bmin_m          = bmin.to(u.rad).value * distance
pix_width       = (head['cdelt2']*u.deg).to(u.rad)
pix_width_pc    = pix_width.value * distance
pix_area        = pix_width_pc**2
fwhm_to_sigma   = 1./(8*np.log(2))**0.5
beam_area       = 2.*np.pi*(bmaj_m*bmin_m*fwhm_to_sigma**2)
pixpbeam        = beam_area/pix_area

# Convert to Brightness temperature (K)
beam    = radio_beam.Beam(bmaj, bmin)
data_K  = (data*(beam.jtok(v).value))
head.set('BUNIT', 'K')
fits.writeto('Brick_SiO_cube_K.fits', data_K, head)

# Convert to column density
Tex         = 30*u.K
Jvtex       = ((h*(v.to(u.Hz)))/k)/(np.e**(((h*(v.to(u.Hz))))/(k*Tex)) - 1 )
Jvtex       = Jvtex.to(u.K)
Jvtbg       = ((h*(v.to(u.Hz)))/k)/(np.e**(((h*(v.to(u.Hz))))/(k*Tbg)) - 1 )
Jvtbg       = Jvtbg.to(u.K)
N           = ((1.6e11)
               * (((Tex.value + 0.35)*(np.e**(31.26/Tex.value))) / (np.e**(10.4/Tex.value) - 1))
               * (1/(Jvtex.value - Jvtbg.value)))
data_Nsio   = N * data_K
head.set('BUNIT', 'Nsio')
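
# The snippet stops before writing the result; a hedged completion (output filename
# hypothetical) would mirror the earlier write-out of the brightness-temperature cube:
# fits.writeto('Brick_SiO_cube_Nsio.fits', data_Nsio, head, overwrite=True)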
Example #26
0
def smcube(snrcube,
           header=None,
           fwhm=None,
           vsm=None,
           vsm_type='gauss',
           edgech=None,
           huge=True):
    """
    Smooth an SNRcube to produce a higher signal-to-noise SNRcube.

    Parameters
    ----------
    snrcube : SpectralCube or `~numpy.ndarray`
        The image cube normalized by the estimated RMS noise.  If a numpy array
        is provided the header must also be provided.
    header : `astropy.io.fits.Header`
        The cube FITS header, required if a numpy array is given.
    fwhm : float or :class:`~astropy.units.Quantity`, optional
        Final spatial resolution to smooth to.  If not astropy quantity, assumed
        to be given in arcsec.
        Default: 10 arcsec
    vsm : float or :class:`~astropy.units.Quantity`, optional
        Full width of the spectral smoothing kernel (or FWHM if gaussian).  
        If given as astropy quantity, should be given in velocity units.  
        If not given as astropy quantity, interpreted as number of channels.
        Default: No spectral smoothing is applied.
    vsm_type : string, optional
        What type of spectral smoothing to employ.  Currently three options:
        (1) 'boxcar' - 1D boxcar smoothing, vsm rounded to integer # of chans.
        (2) 'gauss' - 1D gaussian smoothing, vsm is the convolving gaussian FWHM.
        (3) 'gaussfinal' - 1D gaussian smoothing, vsm is the gaussian FWHM
        after convolution, assuming FWHM before convolution is 1 channel.        
        Default: 'gauss'
    edgech : int, optional
        Number of channels at left and right edges of each spectrum to use 
        for rms estimation.
        Default is to use all channels.

    Returns
    -------
    sm_snrcube : SpectralCube
        A cube in SNR units after smoothing to the desired resolution.
    """
    if isinstance(snrcube, SpectralCube):
        hdr = snrcube.header
    elif header is None:
        raise NameError('A header must be provided to smcube procedure')
    else:
        snrcube = SpectralCube(data=snrcube,
                               header=header,
                               wcs=wcs.WCS(header))
        hdr = header
        print(snrcube)

    # -- Spatial smoothing
    if fwhm is not None:
        # Requested final resolution
        if not hasattr(fwhm, 'unit'):
            fwhm = fwhm * u.arcsec
        sm_beam = radio_beam.Beam(major=fwhm, minor=fwhm, pa=0 * u.deg)
        print('Convolving to', sm_beam)
        # From convolve_to method in spectral_cube
        pixscale = wcs.utils.proj_plane_pixel_area(
            snrcube.wcs.celestial)**0.5 * u.deg
        if hasattr(snrcube, 'beam'):
            print('Existing', snrcube.beam)
            convolution_kernel = sm_beam.deconvolve(
                snrcube.beam).as_kernel(pixscale)
        else:
            print('Warning: no existing beam found in input to smcube')
            convolution_kernel = sm_beam.as_kernel(pixscale)
        sm_snrcube = snrcube.spatial_smooth(convolution_kernel,
                                            convolve_fft,
                                            fill_value=0.0,
                                            nan_treatment='fill',
                                            preserve_nan=True,
                                            parallel=False)
    else:
        sm_snrcube = snrcube

    # -- Spectral smoothing
    if vsm is not None:
        fwhm_factor = np.sqrt(8 * np.log(2))
        if hasattr(vsm, 'unit'):
            delta_v = abs(hdr['CDELT3']) * u.m / u.s
            vsm_ch = (vsm / delta_v).decompose().value
        else:
            vsm_ch = vsm
        if vsm_type == 'gauss':
            gaussian_width = vsm_ch / fwhm_factor
            kernel = Gaussian1DKernel(gaussian_width)
            print('Gaussian smoothing with stddev:', gaussian_width,
                  'channels')
        elif vsm_type == 'boxcar':
            box_width = round(vsm_ch)
            kernel = Box1DKernel(box_width)
            print('Boxcar smoothing with width:', box_width, 'channels')
        elif vsm_type == 'gaussfinal':
            if vsm_ch > 1:
                gaussian_width = (vsm_ch**2 - 1)**0.5 / fwhm_factor
                kernel = Gaussian1DKernel(gaussian_width)
                print('Gaussian smoothing with stddev:', gaussian_width,
                      'channels')
            else:
                raise ValueError('Requested spectral resolution of {0} channels '
                                 'is less than 1'.format(vsm_ch))
        sm2_snrcube = sm_snrcube.spectral_smooth(kernel)
        sm_snrcube = sm2_snrcube

    # -- Renormalize by rms
    newrms = makenoise(sm_snrcube, edge=edgech)
    if huge:
        sm_snrcube.allow_huge_operations = True
        newrms.allow_huge_operations = True
    sm_snrcube = sm_snrcube / newrms
    return sm_snrcube
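
# A minimal usage sketch (not from the original source). The input filename and kernel
# widths are hypothetical, and makenoise() is assumed to be defined in the same module:
# snrcube = SpectralCube.read('mycube_snr.fits')
# sm_snrcube = smcube(snrcube, fwhm=10 * u.arcsec, vsm=2.5 * u.km / u.s,
#                     vsm_type='gauss', edgech=10)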
Example #27
0
        for spw in (0, 1, 2, 3):
            try:
                cube = SpectralCube.read(tmplt.format(spw, extra2, extra1))
            except IOError:
                print("didn't find {0}".format(
                    tmplt.format(spw, extra2, extra1)))
                continue
            print(cube)
            try:
                beam = radio_beam.Beam.from_fits_header(cube.header)
            except TypeError:
                if hasattr(cube, 'beams'):
                    # Attach explicit units; passing bare floats would rely on
                    # radio_beam's default unit handling rather than degrees.
                    beam = radio_beam.Beam(
                        major=np.nanmedian(
                            [bm.major.to(u.deg).value for bm in cube.beams]) * u.deg,
                        minor=np.nanmedian(
                            [bm.minor.to(u.deg).value for bm in cube.beams]) * u.deg,
                        pa=np.nanmedian(
                            [bm.pa.to(u.deg).value for bm in cube.beams]) * u.deg,
                    )
                else:
                    beam = None

            for row in tbl:
                name = row['name']

                print("Extracting {0} from {1}".format(name, spw))

                coord = coordinates.SkyCoord(row['PeakRA'],
                                             row['PeakDec'],
                                             frame='fk5',
                                             unit=(u.deg, u.deg))
Example #28
0
def main():
    """Main function"""

    #===========================
    #==   PARSE ARGS
    #===========================
    logger.info("Get script args ...")
    try:
        args = get_args()
    except Exception as ex:
        logger.error("Failed to get and parse options (err=%s)", str(ex))
        return 1

    filenames = [x.strip() for x in args.filenames.split(',')]
    userbeam = args.userbeam
    bmaj_user = args.bmaj
    bmin_user = args.bmin
    pa_user = args.pa

    print("=== ARGS ===")
    print("filenames")
    print(filenames)
    print("userbeam? %d" % userbeam)
    if userbeam:
        print("Beam (bmaj=%f arcsec,bmin=%f arcsec,pa=%f deg)" %
              (bmaj_user, bmin_user, pa_user))
    print("============")

    #===========================
    #==   READ IMAGES
    #===========================
    filenames_out = []
    data_list = []
    header_list = []
    pixsize_x = []
    pixsize_y = []
    beam_list = []

    for filename in filenames:
        # - Store output filename
        filename_out = Utils.getBaseFileNoExt(filename) + '_conv.fits'
        filenames_out.append(filename_out)

        # - Get image data & header
        #data_alldim, header= Utils.read_fits(filename)
        #nchan = len(data_alldim.shape)

        data, header = Utils.read_fits(filename)
        nchan = len(data.shape)

        logger.info("nchan=%d" % nchan)

        #if nchan==4:
        #	data= data_alldim[0, 0, :, :]
        #else:
        #	data= data_alldim
        data[np.isnan(data)] = 0.0  # replace all NAN pixels with 0
        data_list.append(data)
        header_list.append(header)

        # - Get beam and WCS info
        wcs = WCS(header)
        hasBeamInfo = Utils.hasBeamInfo(header)
        xc = header['CRPIX1']
        yc = header['CRPIX2']
        if nchan == 4:
            ra, dec = wcs.all_pix2world(xc, yc, 0, 0, 0, ra_dec_order=True)
        else:
            ra, dec = wcs.all_pix2world(xc, yc, 0, ra_dec_order=True)
        print("ra=%f, dec=%f" % (ra, dec))
        dx = abs(header['CDELT1'])  # in deg
        dy = abs(header['CDELT2'])  # in deg
        pixsize_x.append(dx)
        pixsize_y.append(dy)

        if hasBeamInfo:
            bmaj = header['BMAJ']  # in deg
            bmin = header['BMIN']  # in deg
            pa = header['BPA'] if 'BPA' in header else 0  # in deg
            beam = radio_beam.Beam(bmaj * u.deg, bmin * u.deg, pa * u.deg)
            beam_list.append(beam)
        else:
            logger.error("No BMAJ/BMIN keyword present in file " + filename +
                         "!")
            return -1

    #===========================
    #==   SET COMMON BEAM
    #===========================
    if userbeam:
        #common_beam_bmaj= bmaj_user.to(u.arcsec).value
        #common_beam_bmin= bmin_user.to(u.arcsec).value
        #common_beam_pa= pa_user.to(u.deg).value
        common_beam_bmaj = bmaj_user
        common_beam_bmin = bmin_user
        common_beam_pa = pa_user
        common_beam = radio_beam.Beam(bmaj_user * u.arcsec,
                                      bmin_user * u.arcsec, pa_user * u.deg)
    else:
        beams = radio_beam.Beams(beams=beam_list)
        common_beam = radio_beam.commonbeam.common_manybeams_mve(beams)
        common_beam_bmaj = common_beam.major.to(u.arcsec).value
        common_beam_bmin = common_beam.minor.to(u.arcsec).value
        common_beam_pa = common_beam.pa.to(u.deg).value

    logger.info(
        "Convolving images to common beam size (bmaj,bmin,pa)=(%s,%s,%s) ..." %
        (str(common_beam_bmaj), str(common_beam_bmin), str(common_beam_pa)))

    #===========================
    #==   CONVOLVE IMAGES
    #===========================
    for index in range(0, len(data_list)):

        # - Find convolving beam for this image
        bmaj, bmin, pa = radio_beam.utils.deconvolve(common_beam,
                                                     beam_list[index])
        bmaj_deg = bmaj.to(u.deg).value
        bmin_deg = bmin.to(u.deg).value
        pa_deg = pa.to(u.deg).value
        conv_beam = radio_beam.Beam(bmaj_deg * u.deg, bmin_deg * u.deg,
                                    pa_deg * u.deg)

        bmaj_arcsec = bmaj.to(u.arcsec).value
        bmin_arcsec = bmin.to(u.arcsec).value
        ny = data_list[index].shape[0]
        nx = data_list[index].shape[1]

        # - Create convolution kernel
        dx = pixsize_x[index]
        dy = pixsize_y[index]
        pixsize = max(dx, dy)
        conv_kernel = conv_beam.as_kernel(pixsize * u.deg)
        conv_kernel.normalize()
        kernel = conv_kernel.array
        logger.info("Convolution kernel size: %d x %d" %
                    (kernel.shape[0], kernel.shape[1]))

        # - Convolve image
        logger.info(
            "Convolving image %d (size=%d,%d) by beam (bmaj,bmin,pa)=(%s,%s,%s) ..."
            % (index + 1, nx, ny, str(bmaj_arcsec), str(bmin_arcsec),
               str(pa_deg)))
        data_conv = cv.filter2D(np.float64(data_list[index]),
                                -1,
                                kernel,
                                borderType=cv.BORDER_CONSTANT)

        # - Write output FITS
        logger.info("Saving convolved image to file %s ..." %
                    filenames_out[index])
        Utils.write_fits(data_conv, filenames_out[index], header_list[index])

    return 0
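
# A hedged entry point (not shown in the original snippet), assuming the script is run
# standalone, main() returns a shell-style exit status, and sys has been imported:
# if __name__ == '__main__':
#     sys.exit(main())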
distance_modulus = 5 * np.log10(5400) - 5
# 13.661968799114842

# http://www.pas.rochester.edu/~emamajek/EEM_dwarf_UBVIJHK_colors_Teff.txt
mkabso9v = -3.20
mko9v = distance_modulus + mkabso9v
# 10.461968799114842

# goldader 1994
ak = 2.6
mko9vext = mko9v + ak
# 13.061968799114842

# for e2e
snu_max = 0.6 * u.mJy
beam = radio_beam.Beam(0.34 * u.arcsec)
phys_radius = 0.34 * 5400 * u.au
tbmax = snu_max.to(u.K, u.brightness_temperature(beam, 14.5 * u.GHz))
qmax = qlyc_of_tb(tbmax, radius=phys_radius).decompose()
print("Upper limit continuum if thin = {0}".format(qmax))

# # plot of... not relevant
# pl.plot(np.logspace(40,50),
#         Snu(Te=8500*u.K, nu=14.5*u.GHz, R=110*u.au, Qlyc=np.logspace(40,50)*u.s**-1,
#             beam=phys_radius, angular_beam=beam.major))
# pl.plot(np.logspace(40,50), [0.6e-3]*50)

tbl = Table.read('pecaut_mamajek_table.txt', format='ascii')

wav = np.linspace(100 * u.AA, 10000 * u.AA, 100000)
lycfrac = []
Example #30
0
# average mass of an H2
mh2 = 2.8 * u.Da

# ALMA freq
almafreq = 96 * u.GHz

# Bolocam freq
bolofreq = 271.1 * u.GHz

# distance to sgrb2
d_sgrb2 = 8 * u.kpc

beta = 2
ntau1_beta2 = (1 / (kappa271 * (almafreq / bolofreq)**beta) / mh2).to(u.cm**-2)
beta = 1
ntau1_beta1 = (1 / (kappa271 * (almafreq / bolofreq)**beta) / mh2).to(u.cm**-2)

print(f"column density at tau=1 for beta=2: {ntau1_beta2}")
print(f"column density at tau=1 for beta=1: {ntau1_beta1}")

beam = radio_beam.Beam(0.05 * u.arcsec)

massperbeam_beta1 = (ntau1_beta1 * mh2 * (beam.sr * d_sgrb2**2)).to(
    u.M_sun, u.dimensionless_angles())
massperbeam_beta2 = (ntau1_beta2 * mh2 * (beam.sr * d_sgrb2**2)).to(
    u.M_sun, u.dimensionless_angles())

print(f"mass per beam at tau=1 for beta=2: {massperbeam_beta2}")
print(f"mass per beam at tau=1 for beta=1: {massperbeam_beta1}")